Dataset columns:
  query      string  lengths 9 to 9.05k
  document   string  lengths 10 to 222k
  negatives  list    lengths 19 to 20
  metadata   dict
constellation_calcdist(gr_complex_vector constellation, unsigned_int_vector pre_diff_code, unsigned int rotational_symmetry, unsigned int dimensionality) -> digital_constellation_calcdist_sptr Calculate Euclidean distance for any constellation. Constellation which calculates the distance to each point in the constellation for decision making. Inefficient for large constellations.
def constellation_calcdist(*args, **kwargs): return _digital_swig.constellation_calcdist(*args, **kwargs)
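The docstring above describes plain nearest-neighbour slicing. A minimal numpy sketch of that brute-force rule (my own illustration, not the GNU Radio C++ source behind this binding):

```python
import numpy as np

# Sketch of the decision rule constellation_calcdist describes: measure
# the Euclidean distance from the received sample to every constellation
# point and return the index of the nearest one.
def calcdist_decision(sample: complex, points: np.ndarray) -> int:
    # One distance per point -> O(M) work per symbol, which is why the
    # docstring calls this inefficient for large constellations.
    return int(np.argmin(np.abs(points - sample)))

# Example with BPSK points at +1 and -1:
bpsk = np.array([1 + 0j, -1 + 0j])
assert calcdist_decision(0.3 - 0.1j, bpsk) == 0
assert calcdist_decision(-0.8 + 0.2j, bpsk) == 1
```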
[ "def calculate_particle_critical_diameter(mean_molecular_speed,\n diffusion_constant,\n accommodation_coefficient):\n\n return 8 * diffusion_constant / (\n accommodation_coefficient * mean_molecular_speed * dfl[\n 'conversions']['nm2m'])", "def shift_der_C_l(bias, n_z, dn_dDz, Omega_m, sig_8):\n # Constants\n h = 0.682\n c = 2.99792e+5\n # =======================================================================\n # Selecting cosmology\n \n cosmo = cosmoselector(Omega_m, sig_8)\n \n #========================================================================\n #========================================================================\n #Redshift range for calculations and integration\n \n nz = 10000 #number of steps to use for the radial/redshift integration\n kmax=10.0 #kmax to use\n\n zarray = np.linspace(0,4.0,nz)\n dzarray = (zarray[2:]-zarray[:-2])/2.0\n zarray = zarray[1:-1]\n\n #=========================================================================\n #Calculate Hubble parameter and comoving distance\n \n Hubble = cosmo.H_z\n \n # Get comoving distance - in Mpc\n chis = cosmo.chi\n \n #========================================================================\n #========================================================================\n # Get the prefactor of the integral \n\n pref = ((bias/chis)**2.0)*(2.0*n_z*dn_dDz*Hubble)\n\n #===================================================================================\n #Do integral over z\n \n ls_lin = np.linspace(1.0, np.log10(2000.0), 55, dtype = np.float64)\n ls = 10.0**ls_lin\n \n der_C = np.zeros(ls.shape)\n w = np.ones(chis.shape) #this is just used to set to zero k values out of range of interpolation\n for i, l in enumerate(ls):\n k=(l+0.5)/chis\n w[:]=1\n w[k<1e-4]=0\n w[k>=kmax]=0\n der_C[i] = np.dot(dzarray, w*cosmo.PK.P(zarray, k, grid=False)*pref)\n \n #===================================================================================\n # Retrurn the array of C_ell\n \n return der_C", "def smooth_cf_distance(difference):\n num_samples, _ = np.shape(difference)\n sigma = np.cov(np.transpose(difference))\n\n mu = np.mean(difference, 0)\n\n stat = num_samples * mu.dot(np.linalg.solve(sigma, np.transpose(mu)))\n\n return stat", "def compute_gdpt(\n self, n_components: int = 10, key_added: str = \"gdpt_pseudotime\", **kwargs\n ):\n\n def _get_dpt_row(e_vals: np.ndarray, e_vecs: np.ndarray, i: int):\n row = sum(\n (\n np.abs(e_vals[eval_ix])\n / (1 - np.abs(e_vals[eval_ix]))\n * (e_vecs[i, eval_ix] - e_vecs[:, eval_ix])\n )\n ** 2\n # account for float32 precision\n for eval_ix in range(0, e_vals.size)\n if np.abs(e_vals[eval_ix]) < 0.9994\n )\n\n return np.sqrt(row)\n\n if \"iroot\" not in self.adata.uns.keys():\n raise KeyError(\"Key `'iroot'` not found in `adata.uns`.\")\n\n iroot = self.adata.uns[\"iroot\"]\n if isinstance(iroot, str):\n iroot = np.where(self.adata.obs_names == iroot)[0]\n if not len(iroot):\n raise ValueError(\n f\"Unable to find cell with name `{self.adata.uns['iroot']!r}` in `adata.obs_names`.\"\n )\n iroot = iroot[0]\n\n if n_components < 2:\n raise ValueError(\n f\"Expected number of components >= 2, found `{n_components}`.\"\n )\n\n if self._schur_vectors is None:\n logg.warning(\"No Schur decomposition found. Computing\")\n self.compute_schur(n_components, **kwargs)\n elif self._schur_matrix.shape[1] < n_components:\n logg.warning(\n f\"Requested `{n_components}` components, but only `{self._schur_matrix.shape[1]}` were found. 
\"\n f\"Recomputing using default values\"\n )\n self.compute_schur(n_components)\n else:\n logg.debug(\"DEBUG: Using cached Schur decomposition\")\n\n start = logg.info(\n f\"Computing Generalized Diffusion Pseudotime using n_components = {n_components}\"\n )\n\n Q, eigenvalues = (\n self._schur_vectors,\n self.eigendecomposition[\"D\"],\n )\n # may have to remove some values if too many converged\n Q, eigenvalues = Q[:, :n_components], eigenvalues[:n_components]\n\n D = _get_dpt_row(eigenvalues, Q, i=iroot)\n pseudotime = D / np.max(D[np.isfinite(D)])\n self.adata.obs[key_added] = pseudotime\n\n logg.info(f\"Adding `{key_added!r}` to `adata.obs`\\n Finish\", time=start)", "def calculatedifferencesEE(c_i,c_i_p):\n\n differencesp = [[0.0 for col in range(72)] for row in range (4) ]\n differencesm = [[0.0 for col in range(72)] for row in range (4) ]\n\n\n rmssp = [[0.0 for col in range(72)] for row in range (4) ]\n rmssm = [[0.0 for col in range(72)] for row in range (4) ] \n\n\n\n for ann in range(4):\n for sec in range(72):\n n=0 \n for hash in range(hashsizeEE): \n \n ix,iy,iz= unhashEEIndex(hash)\n\n if annulus(ix,iy,iz) == ann and sector(ix,iy,iz)==sec:\n n+=1 \n diff = c_i[hash] - c_i_p[hash]\n if iz>0 :differencesp[ann][sec]+=diff\n else :differencesm[ann][sec]+=diff\n \n differencesp[ann][sec]/=n\n differencesm[ann][sec]/=n\n\n\n for ann in range(4):\n for sec in range(72):\n n=0 \n for hash in range(hashsizeEE): \n \n ix,iy,iz= unhashEEIndex(hash)\n\n if annulus(ix,iy,iz) == ann and sector(ix,iy,iz)==sec:\n n+=1 \n diff = c_i[hash] - c_i_p[hash]\n if iz>0 :rmssp[ann][sec]+=pow(diff - differencesp[ann][sec],2)\n else :rmssm[ann][sec]+=pow(diff - differencesp[ann][sec],2)\n \n rmssp[ann][sec] = sqrt(rmssp[ann][sec]/n/(n-1))\n rmssm[ann][sec] = sqrt(rmssp[ann][sec]/n/(n-1))\n\n return differencesp,rmssp,differencesm,rmssm", "def get_cdist_grad_vmap_rule(prim, axis_size):\n if hasattr(prim, 'batch_rank'):\n batch_rank = prim.batch_rank + 1\n else:\n batch_rank = 1\n\n batch_prim = _vmap_clone_prim(prim)\n batch_prim.add_prim_attr(\"batch_rank\", batch_rank)\n\n def vmap_rule(grad_bdim, x_bdim, y_bdim, cdist_bdim):\n is_all_none, result = vmap_general_preprocess(prim,\n grad_bdim, x_bdim, y_bdim, cdist_bdim)\n if is_all_none:\n return result\n grad, grad_dim = grad_bdim\n x, x_dim = x_bdim\n y, y_dim = y_bdim\n cdist, cdist_dim = cdist_bdim\n\n grad = _bdim_at_front(grad, grad_dim, axis_size)\n x = _bdim_at_front(x, x_dim, axis_size)\n y = _bdim_at_front(y, y_dim, axis_size)\n cdist = _bdim_at_front(cdist, cdist_dim, axis_size)\n\n out = batch_prim(grad, x, y, cdist)\n return out, 0\n\n return vmap_rule", "def geodesic_distance(P, T, i, eps=0.0001):\n\n A = spdiag(barycentric_area(P, T))\n t = poly_edges_mean_length(P, T)**2\n k = kronecker_delta(P.size(0), i, device=P.device)\n G, D, L = differential_operator(T, P)\n u = heat_diffusion(A, t, L, k, eps=eps)\n du = normr(G(u))\n d = D(G)\n return poisson_equation(L, d, eps=eps)", "def _calculate_sd(self):\n cost = 0\n for k in range(self.k):\n cost += \\\n distance.cdist(np.array([self.centroids[k]]), np.array([self.previous_centroids[k]]),\n metric=self.metric)[\n 0][0]\n return cost", "def calculate_concentration_Cs_seed(concentrations_Cs_seed_p):\n return np.sum(concentrations_Cs_seed_p)", "def euclidean_multidim(*simulated, observed):\n pts_sim = np.column_stack(simulated)\n pts_obs = np.column_stack(observed)\n d_multidim = np.sum((pts_sim - pts_obs)**2., axis=1)\n d_squared = np.sum(d_multidim, axis=1)\n d = np.sqrt(d_squared)\n\n 
return d", "def calculate_distances(deltas: np.ndarray, sampling_freq_hz: float, c: float = 343) -> np.ndarray:\n conversion_factor = c / (2 * sampling_freq_hz)\n\n deltas_t = deltas.T\n\n k1 = deltas * np.eye(deltas.shape[0]) @ np.ones(deltas.shape)\n k2 = k1.T\n k = k1 + k2\n\n return conversion_factor * (np.abs(deltas - deltas_t) + k)", "def cdist(fX_trn, fX_tst, metric='euclidean', **kwargs):\n\n if metric == 'angular':\n cosine = scipy.spatial.distance.cdist(\n fX_trn, fX_tst, metric='cosine', **kwargs\n )\n return np.arccos(np.clip(1.0 - cosine, -1.0, 1.0))\n\n elif metric == 'equal':\n return _cdist_func_1D(\n fX_trn, fX_tst, lambda x_trn, X_tst: x_trn == X_tst\n )\n\n elif metric == 'minimum':\n return _cdist_func_1D(fX_trn, fX_tst, np.minimum)\n\n elif metric == 'maximum':\n return _cdist_func_1D(fX_trn, fX_tst, np.maximum)\n\n elif metric == 'average':\n return _cdist_func_1D(\n fX_trn, fX_tst, lambda x_trn, X_tst: 0.5 * (x_trn + X_tst)\n )\n\n else:\n return scipy.spatial.distance.cdist(\n fX_trn, fX_tst, metric=metric, **kwargs\n )", "def compute_distortion(cluster_list, data_table):\r\n distortion = 0\r\n \r\n for cluster in cluster_list:\r\n distortion += cluster.cluster_error(data_table)\r\n\r\n return distortion", "def calc_comoving_distance(self):\n if self.z is not None:\n z_sample = self.z\n elif self.z_host is not None:\n z_sample = self.z_host\n else:\n raise ValueError(\n \"\"\" Can not calculate comoving distance without a redshift.\n Use calc_redshift() to calculate the FRB redshift or provide\n a value for z_host.\n \"\"\")\n\n cosmo = cosmologies.cosmology_functions()[self.cosmology]\n return cosmo.comoving_distance(z_sample)", "def CalculateDeltaChiv3c4pc(mol):\r\n return abs(CalculateChiv3c(mol) - CalculateChiv4pc(mol))", "def compute_Dij(self):\n g_inv = self.g.inv\n dg_inv = self.dg_inv\n ddg_inv = np.asarray(self.metric.diff(self.point, diff=2, inverse=True))\n s = self.s\n ds = self.ds\n dds = self.compute_dds()\n return (\n ddg_inv.dot(s).dot(s)\n + 2 * dg_inv.dot(s).dot(ds)\n + 2 * dg_inv.dot(s).dot(ds).T\n + 2 * g_inv.dot(ds).T.dot(ds) + 2 * dds.dot(g_inv.dot(s))\n )", "def compute_distance(ix: int, c: int) -> float:\n if c == cell.FREE:\n nearest_occupied: Optional[\n Tuple[kdtree.Node, float]\n ] = occupied_tree.search_nn(to_pos(ix), dist=points_dist)\n\n # Contingency for a map with no occupied cells.\n if nearest_occupied is None:\n return DIST_UNKNOWN\n\n (_, distance) = nearest_occupied\n\n return distance\n\n return DIST_OCCUPIED if c == cell.OCCUPIED else DIST_UNKNOWN", "def comovingDistance(self, redshift=0.0):\n dd = self.activeCosmology.comoving_distance(redshift)\n\n if 'value' in dir(dd):\n if dd.unit == self.distanceUnits:\n return dd.value\n else:\n return dd.to(self.distanceUnits).value\n else:\n return dd", "def getDistance(self):\n self.listOfDistance.append(self.distanceCal(self.listOfatom2cal[0], self.listOfNi[0]))\n i=1\n while i < len(self.listOfNi):\n distance = self.distanceCal(self.listOfatom2cal[i*self.atomRepeat], self.listOfNi[i])\n self.listOfDistance.append(distance)\n i += 1", "def distortion(dist_embedd, dist_original, eps=1.0e-20):\n c1 = np.max([(dist_original / (dist_embedd + eps)).max(), 1.0])\n c2 = np.max([(dist_embedd / (dist_original + eps)).max(), 1.0])\n return c1 * c2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_constellation_rect_sptr
__init__(self, p) -> digital_constellation_rect_sptr
def __init__(self, *args):
    # Wrap the underlying C++ smart pointer produced by the SWIG module.
    this = _digital_swig.new_digital_constellation_rect_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, ptOrigin, connection):\n self.ptOrigin = ptOrigin\n self.connection = connection\n self.leftBound = 0\n self.leftTop = None\n self.leftBottom = None\n self.rightBound = connection.rightBound\n self.rightTop = None\n self.rightBottom = None", "def __init__(self):\n _snap.TCliqueOverlap_swiginit(self,_snap.new_TCliqueOverlap())", "def create_rect():\n pass", "def __init__(self, ptOrigin, connection):\n self.ptOrigin = ptOrigin\n self.connection = connection\n self.topBound = 0\n self.topLeft = None\n self.topRight = None\n self.bottomBound = connection.bottomBound\n self.bottomLeft = None\n self.bottomRight = None", "def __init__(self):\n this = _coin.new_SoTextureCoordinateCylinder()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoTextureCoordinate2()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoTextureCoordinatePlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, left, top, bottom, right):\n self.left = left # Value of the left boudnary in the rectangle\n self.top = top # Value of the top boudnary in the rectangle\n self.bottom = bottom # Value of the bottom boudnary in the rectangle\n self.right = right # Value of the right boudnary in the rectangle", "def __init__(self):\n this = _coin.new_SoClipPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoClipPlaneManip()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoPolygonOffset()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, position, spectrum, brightness):\n pass", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, row, column, board_coordinate, piece):\n self.row = row\n self.column = column\n self.board_coordinate = board_coordinate\n self.piece = piece\n self.rect = None", "def __init__(self):\n this = _coin.new_SoTextureCoordinate3()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n self.move_params = list()\n\n for x in (2, -2):\n for y in (1, -1):\n self.move_params.append((x, y))\n self.move_params.append((y, x))", "def __init__(self, x, y, w, h, r=0):\n self.x = x\n self.y = y\n self.w = w\n self.h = h\n self.r = r", "def __init__(self):\n this = _coin.new_SoTextureCoordinateObject()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n self.port=Config.PortPrinter # Assign the name of the port written in Config.py to self.port\n self.FirstMove=0 # Variable wich allow us to know if this is the first movement of the 3d-mill\n self.Coord={} # Create a dictionnary\n self.cnc=CNC(self.port) # Call the class CNC\n self.cnc.OpenConnection() # Open the Connection with the device\n self.NbWells=0 # Count the number of wells \n Wells.Wells_1(self)", "def __init__(self):\n this = _coin.new_SoCoordinate3()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
constellation_rect(gr_complex_vector constellation, unsigned_int_vector pre_diff_code, unsigned int rotational_symmetry, unsigned int real_sectors, unsigned int imag_sectors, float width_real_sectors, float width_imag_sectors) -> digital_constellation_rect_sptr
def constellation_rect(*args, **kwargs): return _digital_swig.constellation_rect(*args, **kwargs)
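The rectangular variant trades the per-point search for a grid lookup: the plane is divided into real_sectors x imag_sectors rectangles of the given widths, and deciding a symbol amounts to two quantizations plus a precomputed sector-to-point table. A hedged sketch of the sector indexing (my own illustration; the grid is assumed centered on the origin, and the class's exact sector layout may differ):

```python
import numpy as np

# Sketch of rectangular-sector slicing for a square QAM-like grid. A real
# implementation would also precompute a sector -> nearest-point table.
def rect_sector(sample: complex, real_sectors: int, imag_sectors: int,
                width_real: float, width_imag: float) -> tuple:
    i = int(np.clip(sample.real / width_real + real_sectors / 2.0,
                    0, real_sectors - 1))
    q = int(np.clip(sample.imag / width_imag + imag_sectors / 2.0,
                    0, imag_sectors - 1))
    return (i, q)

# Example: a 2x2 grid with unit-wide sectors (QPSK-like layout).
print(rect_sector(0.7 - 0.4j, 2, 2, 1.0, 1.0))  # -> (1, 0)
```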
[ "def rc2rect(self, r, c):\n return [(c - 1) * self._block_width, (r - 1) * self._block_height,\n c * self._block_width, r * self._block_height]", "def concrete_rectangular_section(\n b: float,\n d: float,\n dia_top: float,\n n_top: int,\n dia_bot: float,\n n_bot: int,\n n_circle: int,\n cover: float,\n dia_side: Optional[float] = None,\n n_side: int = 0,\n area_top: Optional[float] = None,\n area_bot: Optional[float] = None,\n area_side: Optional[float] = None,\n conc_mat: pre.Material = pre.DEFAULT_MATERIAL,\n steel_mat: pre.Material = pre.DEFAULT_MATERIAL,\n) -> geometry.CompoundGeometry:\n\n if n_top == 1 or n_bot == 1:\n raise ValueError(\"If adding a reinforcing layer, provide 2 or more bars.\")\n\n # create rectangular concrete geometry\n geom = primitive_sections.rectangular_section(b=b, d=d, material=conc_mat)\n\n # calculate reinforcing bar dimensions for top and bottom layers\n x_i_top = cover + dia_top / 2\n x_i_bot = cover + dia_bot / 2\n spacing_top = (b - 2 * cover - dia_top) / (n_top - 1)\n spacing_bot = (b - 2 * cover - dia_bot) / (n_bot - 1)\n\n # calculate reinforcing bar dimensions for side layers if specified\n if n_side != 0:\n x_i_side_left = cover + dia_side / 2\n x_i_side_right = b - x_i_side_left\n spacing_side = (d - 2 * cover - dia_top / 2 - dia_bot / 2) / (n_side + 1)\n\n if area_top is None:\n area_top = np.pi * dia_top**2 / 4\n if area_bot is None:\n area_bot = np.pi * dia_bot**2 / 4\n if area_side is None and dia_side is not None:\n area_side = np.pi * dia_side**2 / 4\n\n # add top bars\n for i in range(n_top):\n bar = primitive_sections.circular_section_by_area(\n area=area_top, n=n_circle, material=steel_mat\n )\n bar = bar.shift_section(\n x_offset=x_i_top + spacing_top * i, y_offset=d - cover - dia_top / 2\n )\n geom = (geom - bar) + bar\n\n # add bot bars\n for i in range(n_bot):\n bar = primitive_sections.circular_section_by_area(\n area=area_bot, n=n_circle, material=steel_mat\n )\n bar = bar.shift_section(\n x_offset=x_i_bot + spacing_bot * i, y_offset=cover + dia_bot / 2\n )\n geom = (geom - bar) + bar\n\n # add side bars if specified\n if n_side != 0:\n for i in range(n_side):\n bar_left = primitive_sections.circular_section_by_area(\n area=area_side, n=n_circle, material=steel_mat\n )\n bar_right = bar_left\n\n bar_left = bar_left.shift_section(\n x_offset=x_i_side_left,\n y_offset=cover + dia_bot / 2 + spacing_side * (i + 1),\n )\n bar_right = bar_right.shift_section(\n x_offset=x_i_side_right,\n y_offset=cover + dia_bot / 2 + spacing_side * (i + 1),\n )\n\n geom = (geom - bar_left - bar_right) + bar_left + bar_right\n\n return geom", "def rectdisc(m,xspan,n,yspan):\n\t# Initialize grid and finite differences.\n\tx,Dx,Dxx = diffmat2(m,xspan)\n\ty,Dy,Dyy = diffmat2(n,yspan)\n\tX,Y = meshgrid(x,y)\n\n\t# Locate boundary points.\n\tisbndy = tile(True,(n+1,m+1))\n\tisbndy[1:-1,1:-1] = False\n\n # Get the diff. matrices recognized as sparse. 
Also include reshaping functions.\n\tdisc = {\n\t\t\"Dx\":sp.lil_matrix(Dx), \"Dxx\":sp.lil_matrix(Dxx),\n\t\t\"Dy\":sp.lil_matrix(Dy), \"Dyy\":sp.lil_matrix(Dyy),\n\t\t\"Ix\":sp.eye(m+1,format=\"lil\"), \"Iy\":sp.eye(n+1,format=\"lil\"),\n\t\t\"isbndy\":isbndy,\n\t\t\"vec\": lambda U: U.flatten(),\n\t\t\"unvec\": lambda u: reshape(u,(n+1,m+1))\n\t}\n\treturn X,Y,disc", "def create_rect():\n pass", "def create_rectangular_prism(origin, size, ret_unique_vertices_and_faces=False):\n from .arity import quads_to_tris\n\n lower_base_plane = np.array(\n [\n # Lower base plane\n origin,\n origin + np.array([size[0], 0, 0]),\n origin + np.array([size[0], 0, size[2]]),\n origin + np.array([0, 0, size[2]]),\n ]\n )\n upper_base_plane = lower_base_plane + np.array([0, size[1], 0])\n\n vertices = np.vstack([lower_base_plane, upper_base_plane])\n\n faces = np.array(\n quads_to_tris(\n np.array(\n [\n [0, 1, 2, 3], # lower base (-y)\n [7, 6, 5, 4], # upper base (+y)\n [4, 5, 1, 0], # +z face\n [5, 6, 2, 1], # +x face\n [6, 7, 3, 2], # -z face\n [3, 7, 4, 0], # -x face\n ],\n dtype=np.uint64,\n )\n ),\n dtype=np.uint64,\n )\n\n return _maybe_flatten(vertices, faces, ret_unique_vertices_and_faces)", "def draw_cruved_rect(x1, x2, h1, h2, y1, y2, ax, fc='lightgray', ec='gray', lw=1, alpha=0.3):\n if h1 != 0 or h2 != 0:\n x05 = (x2+x1)/2\n v = np.array([[x1, y1],\n [x05, y1],\n [x05, y2],\n [x2, y2],\n [x2, y2 + h2],\n [x05, y2 + h2],\n [x05, y1 + h1],\n [x1, y1 + h1]])\n\n p = matplotlib.path.Path(v, codes = [1,4,4,4,2,4,4,4], closed=True)\n ax.add_patch(matplotlib.patches.PathPatch(p, lw=lw, ec=ec, fc=fc, alpha=alpha, zorder=-1))", "def calculate_chromaticity(im):\n\tshape = im.shape\n\tx, y = shape[0] // 2, shape[1] // 2\n\tcenter_patch = im[x - 4:x + 4, y - 4:y + 4, :]\n\tmean = center_patch.mean(axis=(0, 1))\n\tunit_vec = np.array([1, 1, 1])\n\tporjection_vec = unit_vec * np.dot(mean, unit_vec) / np.dot(unit_vec, unit_vec)\n\tchromaticity = mean / porjection_vec\n\tprint(\"calculated chromaticity: {}\".format(chromaticity))\n\treturn chromaticity", "def window_rectangular(\n opening_th=300,\n opening_height=1400,\n opening_width=1200,\n frame_width=50,\n frame_th=50,\n glass_th=21,\n n_pan=1,\n):\n import Part\n\n # permit to differentiate from the top-bottom and left right\n frame_height = frame_width\n\n # congruency check:\n frame_ov_wid = n_pan * (frame_width * 2) + frame_width * 2\n\n light_fact = (opening_width - frame_ov_wid) / opening_width\n # print(\"FW LF >\",frame_ov_wid, light_fact)\n\n # frame reduction\n ef_w = frame_width * 2\n ef_h = frame_height * 2\n\n res_w = opening_width - ef_w\n res_h = opening_height - ef_h\n\n # glass margin into the frame\n v_a = 0\n\n # TODO Adapt the warning to FreeCAD warning standard\n if light_fact < 0.40:\n print(\"Too Many panes in the window resulting in < 40% of the opening\")\n return\n\n # CREATE COMPONENTS\n components = []\n\n # CREATE FIXED FRAME\n components.append(\n frame_rectangular(\n opening_width, opening_height, frame_width, frame_height, frame_th\n )\n )\n\n # CREATE OPENING PANELS\n if n_pan == 0:\n # TODO: If n_pan == 0 create a fixed window\n glass_s = glass(\n opening_width,\n opening_height,\n frame_width,\n frame_height,\n v_a,\n frame_th,\n glass_th,\n )\n glass_s.Placement.Base.y = (frame_th - glass_th) * 0.5\n\n components.append(glass_s)\n\n elif n_pan == 1:\n # Create a single pane window\n ea_w = res_w\n ea_h = res_h\n\n open_frame = frame_rectangular(ea_w, ea_h, frame_width, frame_height, frame_th)\n 
open_frame.Placement.Base.z = frame_height\n components.append(open_frame)\n\n glass_s = glass(ea_w, ea_h, ef_w, ef_h, v_a, frame_th, glass_th)\n glass_s.Placement.Base.y = (frame_th - glass_th) * 0.5\n\n components.append(glass_s)\n\n elif n_pan > 1 and n_pan < 10:\n # Create a multi pane window\n fact_w = res_w / n_pan\n\n loop = True\n cnt = 1\n while loop is True:\n if cnt > n_pan:\n break\n ea_w = fact_w\n adv_x = (cnt - 1) * fact_w\n ofx = (res_w * -0.5) + fact_w * 0.5 + adv_x\n ea_h = res_h\n\n open_frame = frame_rectangular(\n ea_w, ea_h, frame_width, frame_height, frame_th\n )\n open_frame.Placement.Base.x = ofx\n open_frame.Placement.Base.z = frame_height\n\n components.append(open_frame)\n\n glass_s = glass(ea_w, ea_h, ef_w, ef_h, v_a, frame_th, glass_th)\n glass_s.Placement.Base.x = ofx\n glass_s.Placement.Base.y = (frame_th - glass_th) * 0.5\n\n components.append(glass_s)\n\n cnt += 1\n\n window = Part.makeCompound(components)\n\n return window", "def rect(self):\n return Rect(self.x, 0, PipePair.WIDTH, PipePair.PIECE_HEIGHT)", "def _build_wcs(self):\n crval1 = self.band_index * 360. / self.band['NBAND']\n crval2 = self.band['DEC']\n cd = np.array([[-self.scale / 3600., 0], [0, self.scale / 3600.]], dtype=np.float64)\n\n self.wcs = astropy.wcs.WCS(naxis=2)\n self.wcs.wcs.crval = [crval1, crval2]\n self.wcs.wcs.cd = cd\n self.wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n\n # Read in overall size of cell in pixels from grid definitions file\n naxis1 = self.band['XCELL']\n naxis2 = self.band['YCELL']\n\n # apply new definition to cell WCS\n self.wcs.wcs.crpix = [naxis1 / 2. + 0.5, naxis2 / 2. + 0.5]\n self.wcs.pixel_shape = (naxis1, naxis2)\n self.wcs.pscale = self.scale\n self.wcs.orientat = 0.0\n self.footprint = self.wcs.calc_footprint()\n # close the polygon\n self.corners = np.append(self.footprint, [self.footprint[0]], axis=0)", "def get_rect(self):\n return self.square_rect", "def rect2scad(rect, height, z_start = 0.0, mirrored = False):\n scad_cube = sc.translate([rect.left(), rect.bot(), z_start])(\n sc.cube([rect.width, rect.height, height])\n )\n if mirrored:\n return sc.scale([1,1,-1])(scad_cube)\n else:\n return scad_cube", "def __conserve_circulation(self,xBlobInsideList,yBlobInsideList,gBlobInsideList):\n \n #----------------------------------------------------------------------\n # Determine parameters\n\n # convert the hardware flag into an int to use in _base_convection\n if self.lagrangian.blobs.velocityComputationParams['hardware'] == 'gpu': \n blobs_hardware = blobOptions.GPU_HARDWARE\n else: \n blobs_hardware = blobOptions.CPU_HARDWARE\n\n # convert the method flag into an int to use in _base_convection\n if self.lagrangian.blobs.velocityComputationParams['method'] == 'fmm': \n blobs_method = blobOptions.FMM_METHOD\n else: \n blobs_method = blobOptions.DIRECT_METHOD\n \n #----------------------------------------------------------------------\n\n #----------------------------------------------------------------------\n # Make references to all the blobs\n\n # Make references to vortex-blobs\n xBlobOutside, yBlobOutside, gBlobOutside = self.lagrangian.blobs.x, self.lagrangian.blobs.y, self.lagrangian.blobs.g \n \n # Concatenate all the blobs inside\n xBlobInside = _numpy.concatenate(xBlobInsideList)\n yBlobInside = _numpy.concatenate(yBlobInsideList)\n gBlobInside = _numpy.concatenate(gBlobInsideList)\n \n # Full set of blobs\n xBlobAll = _numpy.concatenate((xBlobOutside,xBlobInside)).copy()\n yBlobAll = _numpy.concatenate((yBlobOutside,yBlobInside)).copy()\n 
gBlobAll = _numpy.concatenate((gBlobOutside,gBlobInside)).copy()\n \n # Determine the total circulations\n gBlobAllTotal = gBlobAll.sum()\n \n # Determine the total circulation of globs inside each eulerian domain\n gBlobInsideTotalList = _numpy.array([listItem.sum() for listItem in gBlobInsideList])\n\n # Make references to panel collocation points (where no-slip b.c. is enforced.)\n xCP, yCP = self.lagrangian.panels.xyCPGlobalCat\n \n # Determine total eulerian circulation\n gTotalEulerianList = self.multiEulerian.gTotalInside() # of N eulerian bodies\n \n # Determine the total disregarded circulation from the eulerian domain\n gTotalDisregardedList = gTotalEulerianList - gBlobInsideTotalList\n \n # Testing: print info\n # print 'gTotalEulerianList : %s' % str(gTotalEulerianList)\n # print 'gBlobInsideTotalList : %s' % str(gBlobInsideTotalList)\n # print 'gBlobOutside : %g' % gBlobOutside.sum()\n # print 'gTotalDisregardedList : %s' % str(gTotalDisregardedList)\n #----------------------------------------------------------------------\n \n #----------------------------------------------------------------------\n # Solve for panel strenths\n \n # Determine the slip velocity on panel collocation points\n vxSlip, vySlip = _blobs_velocity(xBlobAll,yBlobAll,gBlobAll,self.lagrangian.blobs.sigma,\n xEval=xCP,yEval=yCP,hardware=blobs_hardware, \n method=blobs_method) \\\n + self.lagrangian.vInf.reshape(2,-1)\n \n # Solve for no-slip panel strengths, gPanelTotal should be negative of gTotalIgnored\n self.lagrangian.panels.solve(vxSlip, vySlip, gTotal=gTotalDisregardedList)\n \n #----------------------------------------------------------------------\n\n #----------------------------------------------------------------------\n # Conserve circulation\n\n # Determine total panel circulation (of all bodies)\n gPanelTotal = _numpy.sum(self.lagrangian.panels.gTotal)\n \n # Determine the total lagrangian circulation\n gLagrangianTotal = gBlobAllTotal + gPanelTotal\n \n if _numpy.abs(gLagrangianTotal) > self.lagrangian.blobs.gThresholdGlobal:\n # Standard-uniform correction\n # Circulation to be given to particles inside.\n gExtraPerBlob = gLagrangianTotal / xBlobInside.shape[0]\n \n # Add circulation to each blobs\n gBlobInsideCorrected = gBlobInside - gExtraPerBlob \n \n # Testing: print info\n # print 'gExtraPerBlob: %g' % gExtraPerBlob\n else:\n # If the error is less that gThresholdGlobal, no need for correction.\n gBlobInsideCorrected = gBlobInside\n \n # Testing: print info\n # print 'gPanelTotal: %g' % gPanelTotal\n # print 'gLagrangianTotal: %g' % gLagrangianTotal\n # print 'final total lagrangian circulation : %g' % (gBlobInsideCorrected.sum()+gBlobOutside.sum()+gPanelTotal)\n #---------------------------------------------------------------------- \n\n # return the new blob circulation \n return xBlobInside, yBlobInside, gBlobInsideCorrected", "def drawcirc(r,w,du,dv,N):\n\n # check value of w to avoid dividing by zero\n w = np.maximum(w,1)\n\n #x plane\n x = np.ones([N,1]) * ((np.arange(0,N,1, dtype='float') - (N+1) / 2 - dv) / r)\n\n # y vector\n y = (((np.arange(0,N,1, dtype='float') - (N+1) / 2 - du) / r) * np.ones([1,N])).T\n\n # Final circle image plane\n p = 0.5 + 0.5 * np.sin(np.minimum(np.maximum((np.exp(np.array([-0.5]) * (x**2 + y**2)).T - np.exp((-0.5))) * (r * 3 / w), np.pi/(-2)), np.pi/2))\n return p", "def _compute_surface_disc(self):\n d = np.sqrt(self.window_.shape[0])\n num_radial = int((d - 1.0) / 2.0) + 1\n self.surface_disc_ = np.zeros([num_radial, self.num_angles_])\n for i 
in range(self.num_angles_):\n theta = 2.0 * np.pi * float(i) / self.num_angles_\n for r in range(num_radial):\n x = r * np.cos(theta) + num_radial - 1\n y = r * np.sin(theta) + num_radial - 1\n self.surface_disc_[r, i] = self._interpolate(x, y)", "def concrete_circular_section(\n d: float,\n n: int,\n dia: float,\n n_bar: int,\n n_circle: int,\n cover: float,\n area_conc: Optional[float] = None,\n area_bar: Optional[float] = None,\n conc_mat: pre.Material = pre.DEFAULT_MATERIAL,\n steel_mat: pre.Material = pre.DEFAULT_MATERIAL,\n) -> geometry.CompoundGeometry:\n\n if n_bar < 2:\n raise ValueError(\"Please provide 2 or more steel reinforcing bars.\")\n\n # create circular geometry\n if area_conc:\n geom = primitive_sections.circular_section_by_area(\n area=area_conc, n=n, material=conc_mat\n )\n else:\n geom = primitive_sections.circular_section(d=d, n=n, material=conc_mat)\n\n # calculate bar geometry\n r = d / 2 - cover - dia / 2\n d_theta = 2 * np.pi / n_bar\n\n if area_bar is None:\n area_bar = np.pi * dia**2 / 4\n for i in range(n_bar):\n bar = primitive_sections.circular_section_by_area(\n area=area_bar, n=n_circle, material=steel_mat\n )\n bar = bar.shift_section(\n x_offset=r * np.cos(i * d_theta), y_offset=r * np.sin(i * d_theta)\n )\n geom = (geom - bar) + bar\n\n return geom", "def _rpetro_circ_generic(self, center):\n # Find appropriate range for root finder\n npoints = 100\n r_inner = self._annulus_width\n r_outer = self._diagonal_distance\n assert r_inner < r_outer\n dr = (r_outer - r_inner) / float(npoints-1)\n r_min, r_max = None, None\n r = r_inner # initial value\n while True:\n if r >= r_outer:\n print('[rpetro_circ] rpetro larger than cutout.')\n self.flag = 1\n curval = self._petrosian_function_circ(r, center)\n if curval >= 0:\n r_min = r\n elif curval < 0:\n if r_min is None:\n print('[rpetro_circ] r_min is not defined yet.')\n self.flag = 1\n if r >= r_outer:\n # If r_min is still undefined at this point, then\n # rpetro must be smaller than the annulus width.\n print('rpetro_circ < annulus_width! 
' +\n 'Setting rpetro_circ = annulus_width.')\n return r_inner\n else:\n r_max = r\n break\n r += dr\n\n rpetro_circ = opt.brentq(self._petrosian_function_circ,\n r_min, r_max, args=(center,), xtol=1e-6)\n\n return rpetro_circ", "def get_rect(*args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def OmegaCurvature(self, redshift=0.0):\n\n return self.activeCosmology.Ok(redshift)", "def Region_Proposal(image, RegionProposalMask, smallest_size, biggest_size, lowest_region_intensity, Roundness_thres, DeadPixelPercentageThreshold,\r\n contour_thres, contour_dilationparameter, cell_region_opening_factor, cell_region_closing_factor):\r\n cleared = RegionProposalMask.copy()\r\n clear_border(cleared)\r\n # label image regions, prepare for regionprops\r\n label_image = label(cleared) \r\n dtype = [('BoundingBox', 'U32'), ('Mean intensity', float), ('Mean intensity in contour', float), ('Contour soma ratio', float), ('Roundness', float)]\r\n CellSequenceInRegion = 0\r\n dirforcellprp = {}\r\n show_img = False\r\n if show_img == True:\r\n plt.figure()\r\n fig_showlabel, ax_showlabel = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))\r\n ax_showlabel.imshow(image)#Show the first image\r\n for region in regionprops(label_image,intensity_image = image): \r\n \r\n # skip small images\r\n if region.area > smallest_size and region.mean_intensity > lowest_region_intensity and region.area < biggest_size:\r\n\r\n # draw rectangle around segmented coins\r\n minr, minc, maxr, maxc = region.bbox\r\n boundingbox_info = 'minr{}_minc{}_maxr{}_maxc{}'.format(minr, minc, maxr, maxc)\r\n bbox_area = (maxr-minr)*(maxc-minc)\r\n # Based on the boundingbox for each cell from first image in the stack, raw image of slightly larger region is extracted from each round.\r\n RawRegionImg = image[max(minr-4,0):min(maxr+4, image[0].shape[0]), max(minc-4,0):min(maxc+4, image[0].shape[0])] # Raw region image \r\n \r\n RawRegionImg_for_contour = RawRegionImg.copy()\r\n \r\n #---------Get the cell filled mask-------------\r\n filled_mask_bef, MeanIntensity_Background = imageanalysistoolbox.get_cell_filled_mask(RawRegionImg = RawRegionImg, region_area = bbox_area*0.2, \r\n cell_region_opening_factor = cell_region_opening_factor, \r\n cell_region_closing_factor = cell_region_closing_factor)\r\n\r\n filled_mask_convolve2d = imageanalysistoolbox.smoothing_filled_mask(RawRegionImg, filled_mask_bef = filled_mask_bef, region_area = bbox_area*0.2, threshold_factor = 1.1)\r\n\r\n # Find contour along filled image\r\n contour_mask_thin_line = imageanalysistoolbox.contour(filled_mask_convolve2d, RawRegionImg_for_contour.copy(), contour_thres) \r\n\r\n # after here intensityimage_intensity is changed from contour labeled with number 5 to binary image\r\n contour_mask_of_cell = imageanalysistoolbox.inwarddilationmask(contour_mask_thin_line.copy() ,filled_mask_convolve2d, contour_dilationparameter)\r\n \r\n # Calculate Roundness\r\n #--------------------------------------------------------------\r\n filled_mask_area = len(np.where(filled_mask_convolve2d == 1)[0])\r\n contour_mask_perimeter = len(np.where(contour_mask_thin_line == 1)[0])\r\n Roundness = 4*3.1415*filled_mask_area/contour_mask_perimeter**2\r\n# print('Roundness: {}'.format(4*3.1415*filled_mask_area/contour_mask_perimeter**2))\r\n \r\n # Calculate central moments\r\n #--------------------------------------------------------------\r\n# M = moments(filled_mask_convolve2d)\r\n# centroid = (M[1, 0] / M[0, 0], M[0, 1] / M[0, 0])\r\n# Img_moments_central = 
moments_central(filled_mask_convolve2d, centroid, order=4)\r\n## print(Img_moments_central)\r\n# Img_moments_hu = moments_hu(Img_moments_central/np.amax(Img_moments_central))\r\n# \r\n# # Log scale hu moments\r\n# for EachMoment in range(len(Img_moments_hu)):\r\n# Img_moments_hu[EachMoment] = -1* np.copysign(1.0, Img_moments_hu[EachMoment]) * np.log10(abs(Img_moments_hu[EachMoment]))\r\n \r\n# print(sum(Img_moments_hu[0:4]))\r\n# print('Img_moments_hu is {}'.format(Img_moments_hu))\r\n \r\n #--------------------------------------------------------------\r\n # Roundness Threshold\r\n if Roundness < Roundness_thres:\r\n MeanIntensity_FilledArea = np.mean(RawRegionImg[np.where(filled_mask_bef == 1)]) - MeanIntensity_Background # Mean pixel value of filled raw cell area\r\n \r\n MeanIntensity_Contour = np.mean(RawRegionImg[np.where(contour_mask_of_cell == 1)]) - MeanIntensity_Background\r\n \r\n soma_mask_of_cell = filled_mask_convolve2d - contour_mask_of_cell\r\n MeanIntensity_Soma = np.mean(RawRegionImg[np.where(soma_mask_of_cell == 1)]) - MeanIntensity_Background#Mean pixel value of soma area \r\n contour_soma_ratio = MeanIntensity_Contour/MeanIntensity_Soma\r\n \r\n Cell_Area_Img = filled_mask_convolve2d * RawRegionImg\r\n # Calculate the entrophy of the image.\r\n # entr_img = entropy(Cell_Area_Img/np.amax(Cell_Area_Img), disk(5))\r\n # print(np.mean(entr_img))\r\n \r\n #---------------------Calculate dead pixels----------------\r\n DeadPixelNum = len(np.where(Cell_Area_Img >= 3.86)[0])\r\n filled_mask_convolve2d_area = len(np.where(filled_mask_convolve2d >= 0)[0])\r\n DeadPixelPercentage = round(DeadPixelNum / filled_mask_convolve2d_area, 3)\r\n# print('Dead Pixel percentage: {}'.format(DeadPixelPercentage)) # b[np.where(aa==16)]=2\r\n \r\n if str(MeanIntensity_FilledArea) == 'nan':\r\n MeanIntensity_FilledArea = 0\r\n if str(MeanIntensity_Contour) == 'nan':\r\n MeanIntensity_Contour = 0\r\n if str(contour_soma_ratio) == 'nan':\r\n contour_soma_ratio = 0\r\n \r\n if DeadPixelPercentage <= DeadPixelPercentageThreshold:\r\n \r\n dirforcellprp[CellSequenceInRegion] = (boundingbox_info, MeanIntensity_FilledArea, MeanIntensity_Contour, contour_soma_ratio, Roundness) \r\n \r\n # plt.figure()\r\n # plt.imshow(RawRegionImg)\r\n # plt.show()\r\n # # # \r\n # plt.figure()\r\n # plt.imshow(filled_mask_convolve2d)\r\n # plt.show()\r\n \r\n #--------------------------------------------------Add red boundingbox to axis----------------------------------------------\r\n rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr, fill=False, edgecolor='red', linewidth=2)\r\n contour_mean_bef_rounded = str(round(MeanIntensity_Contour, 3))[0:5]\r\n \r\n if show_img == True:\r\n ax_showlabel.add_patch(rect)\r\n ax_showlabel.text((maxc + minc)/2, (maxr + minr)/2, 'Cell-{}, {}: {}'.format(CellSequenceInRegion, 'c_m', contour_mean_bef_rounded),\r\n fontsize=8, color='yellow', style='italic')#,bbox={'facecolor':'red', 'alpha':0.3, 'pad':8})\r\n \r\n CellSequenceInRegion += 1\r\n if show_img == True:\r\n ax_showlabel.set_axis_off()\r\n plt.show()\r\n \r\n TagFluorescenceLookupBook = np.zeros(CellSequenceInRegion, dtype = dtype)\r\n for p in range(CellSequenceInRegion):\r\n TagFluorescenceLookupBook[p] = dirforcellprp[p]\r\n \r\n return TagFluorescenceLookupBook" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_constellation_psk_sptr
__init__(self, p) -> digital_constellation_psk_sptr
def __init__(self, *args):
    # Wrap the underlying C++ smart pointer produced by the SWIG module.
    this = _digital_swig.new_digital_constellation_psk_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
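The try/except around self.this is standard boilerplate emitted by SWIG: roughly, when the proxy was pre-created (e.g. from the C++ side), self.this already exists and append hands it the new pointer, while a plain Python construction has no such attribute yet and simply assigns it. A self-contained toy (my own mock, not GNU Radio code) showing that control flow:

```python
class _FakeSptr:
    """Stand-in for the C++ smart pointer normally returned by
    _digital_swig.new_digital_constellation_psk_sptr(*args)."""
    pass

class ProxyExample:
    def __init__(self):
        this = _FakeSptr()
        try:
            # Pre-existing holder: hand it the new pointer.
            self.this.append(this)
        except AttributeError:
            # Fresh proxy: no holder yet, assign directly.
            self.this = this

p = ProxyExample()
print(type(p.this).__name__)  # -> _FakeSptr
```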
[ "def __init__(self, *args):\n this = _wali.new_KeyPair(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__( self, public_key, secret_multiplier ):\n\n self.public_key = public_key\n self.secret_multiplier = secret_multiplier", "def construct_sk(public_key):\n point = public_key.pointQ\n x = int(point.x).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n y = int(point.y).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n sk = x + y\n return H(sk)", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoMFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoSFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoShaderParameter2i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoShaderParameter4i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_doublep()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self,\n active: bool,\n primary_cak: 'GatewayMacsecConfigPrimaryCak',\n status: str,\n *,\n active_cak: 'GatewayMacsecConfigActiveCak' = None,\n cipher_suite: str = None,\n confidentiality_offset: int = None,\n cryptographic_algorithm: str = None,\n fallback_cak: 'GatewayMacsecConfigFallbackCak' = None,\n key_server_priority: int = None,\n sak_expiry_time: int = None,\n security_policy: str = None,\n window_size: int = None) -> None:\n self.active = active\n self.active_cak = active_cak\n self.cipher_suite = cipher_suite\n self.confidentiality_offset = confidentiality_offset\n self.cryptographic_algorithm = cryptographic_algorithm\n self.fallback_cak = fallback_cak\n self.key_server_priority = key_server_priority\n self.primary_cak = primary_cak\n self.sak_expiry_time = sak_expiry_time\n self.security_policy = security_policy\n self.status = status\n self.window_size = window_size", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoClipPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoTextureCoordinateCylinder()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", 
"def __init__(self):\n this = _coin.new_SoWrapperKit()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
constellation_psk(gr_complex_vector constellation, unsigned_int_vector pre_diff_code, unsigned int n_sectors) -> digital_constellation_psk_sptr digital_constellation_psk: Constellation space is divided into pie-slice sectors. Each slice is associated with the nearest constellation point. Works well for PSK but nothing else. Assumes that there is a constellation point at 1.
def constellation_psk(*args, **kwargs): return _digital_swig.constellation_psk(*args, **kwargs)
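A sketch of the pie-slice rule described above (my own illustration, leaning on the stated assumption of a constellation point at 1): with n_sectors equal angular slices, deciding a symbol reduces to quantizing the phase of the received sample.

```python
import numpy as np

# Sketch of pie-slice decision making for PSK (not the GNU Radio source).
def psk_sector(sample: complex, n_sectors: int) -> int:
    width = 2 * np.pi / n_sectors
    # Offset by half a slice so sector boundaries fall between points.
    angle = (np.angle(sample) + width / 2) % (2 * np.pi)
    return int(angle // width)

# Example: QPSK (4 sectors); a sample near 1+0j falls in sector 0 and a
# sample near 0+1j in sector 1.
assert psk_sector(0.9 + 0.1j, 4) == 0
assert psk_sector(-0.1 + 1.0j, 4) == 1
```

In GNU Radio 3.x Python this factory is typically reached as digital.constellation_psk(points, pre_diff_code, n_sectors) after from gnuradio import digital, though the exact re-export path depends on the installed version.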
[ "def pks_peptide(peptide_chain):\r\n\r\n\tlargo_cadena = len(peptide_chain)\r\n\tpk = []\r\n\tion = []\r\n\tionizado = []\r\n\r\n\r\n\tfor aa in range(largo_cadena):\r\n\t\tif aa == 0:\r\n\t\t\tamino_terminal = peptide_chain[aa]\r\n\t\t\tpk.append(aminoacidos[amino_terminal][2])\r\n\t\t\tion.append(\"basic\")\r\n\t\t\tionizado.append(aminoacidos[amino_terminal][0] + \"_at\")\r\n\t\t\t\r\n\t\t\t# Agregar cadena lateral del primer aa en caso de presentar\r\n\t\t\tif len(aminoacidos[amino_terminal]) == 5:\r\n\t\t\t\tpk.append(aminoacidos[amino_terminal][3])\r\n\t\t\t\tion.append(aminoacidos[amino_terminal][4])\r\n\t\t\t\tionizado.append(aminoacidos[amino_terminal][0] + \"_r\")\r\n\t\t\t\r\n\t\t\t# Para los casos con un solo aminoácido se toman solo los\r\n\t\t\t# valores de ese aminoácido\r\n\t\t\tif largo_cadena == 1:\r\n\t\t\t\tcarboxi_terminal = peptide_chain[aa]\r\n\t\t\t\tpk.append(aminoacidos[carboxi_terminal][1])\r\n\t\t\t\tion.append(\"acid\")\r\n\t\t\t\tionizado.append(aminoacidos[carboxi_terminal][0] + \"_ct\")\r\n\t\t\t\tbreak\r\n\r\n\t\telif aa == largo_cadena - 1:\r\n\t\t\tcarboxi_terminal = peptide_chain[aa]\r\n\r\n\t\t\t# Agregar cadena lateral del último aa en caso de presentar\r\n\t\t\tif len(aminoacidos[carboxi_terminal]) == 5:\r\n\t\t\t\tpk.append(aminoacidos[carboxi_terminal][3])\r\n\t\t\t\tion.append(aminoacidos[carboxi_terminal][4])\r\n\t\t\t\tionizado.append(aminoacidos[carboxi_terminal][0] + \"_r\")\r\n\r\n\t\t\tpk.append(aminoacidos[carboxi_terminal][1])\r\n\t\t\tion.append(\"acid\")\r\n\t\t\tionizado.append(aminoacidos[carboxi_terminal][0] + \"_ct\")\r\n\r\n\t\telif aa > 0 and aa < largo_cadena:\r\n\t\t\tno_terminales = peptide_chain[aa]\r\n\t\t\tif len(aminoacidos[no_terminales]) == 5:\r\n\t\t\t\tpk.append(aminoacidos[no_terminales][3])\r\n\t\t\t\tion.append(aminoacidos[no_terminales][4])\r\n\t\t\t\tionizado.append(aminoacidos[no_terminales][0] + \"_r\")\r\n\r\n\r\n\tprint(\"\\n***************** INFORMACION DE LA SECUENCIA ******************\\n\")\r\n\tprint(f\"Secuencia de aminoácidos --> {peptide_chain}\\n\")\r\n\tc = 1\r\n\tprint(\"Aminoácidos con cargas...\\n\")\r\n\tfor residuos in ionizado:\r\n\t\tprint(f\"{c} --> {residuos}\")\r\n\t\tc += 1\r\n\t#print(ion)\r\n\t#print(pk)\r\n\tprint(\"\\n****************************************************************\")\r\n\r\n\r\n\tpk_ordenado = sorted(pk)\r\n\r\n\tintervalos_pk, cantidad_intervalos = intervalos(pk_ordenado)\r\n\r\n\t# creación de tabla que indica la ionización, es decir,\r\n\t# si es positivo, neutro o negativo en un intervalo\r\n\tpks = tabla_intervalos(cantidad_intervalos, intervalos_pk, ion, pk, ionizado)\r\n\r\n\r\n\t# Detecta el punto del zwitterion\r\n\tpk_intervalo = zwitterion(pks, cantidad_intervalos)\r\n\t\r\n\r\n\t# calcular punto isoelectrico\r\n\tpunto_iso = punto_isoelectrico(pk_intervalo)\r\n\treturn(round(punto_iso, 2))", "def _kdp_vulpiani_profile(psidp_in, dr, windsize=10, band=\"X\", n_iter=10, interp=False):\n mask = np.ma.getmaskarray(psidp_in)\n size = windsize\n l2 = int(size / 2)\n drm = dr / 1000.0\n\n if mask.all() is True:\n # Check if all elements are masked\n return psidp_in, psidp_in, psidp_in # Return the NaNs...\n\n # Thresholds in kdp calculation\n if band == \"X\":\n th1 = -2.0\n th2 = 40.0\n std_th = 5.0\n elif band == \"C\":\n th1 = -2.0\n th2 = 20.0\n std_th = 5.0\n elif band == \"S\":\n th1 = -2.0\n th2 = 14.0\n std_th = 5.0\n else:\n print(\"Unexpected value set for the band keyword \")\n print(band)\n return None\n\n psidp = psidp_in\n nn = len(psidp_in)\n\n # Get 
information of valid and non valid points in psidp the new psidp\n valid = np.logical_not(mask)\n if interp:\n ranged = np.arange(0, nn)\n psidp_interp = psidp\n # interpolate\n if np.ma.is_masked(psidp):\n interp = interpolate.interp1d(\n ranged[valid],\n psidp[valid],\n kind=\"zero\",\n bounds_error=False,\n fill_value=np.nan,\n )\n psidp_interp[mask] = interp(ranged[mask])\n\n psidp = psidp_interp\n\n psidp = np.ma.filled(psidp, np.nan)\n kdp_calc = np.zeros([nn])\n\n # first guess\n # In the core of the profile\n kdp_calc[l2 : nn - l2] = (psidp[size:nn] - psidp[0 : nn - size]) / (\n 2.0 * size * drm\n )\n\n # set ray extremes to 0\n kdp_calc[0:l2] = 0.0\n kdp_calc[nn - l2 :] = 0.0\n\n # apply thresholds\n kdp_calc[kdp_calc <= th1] = 0.0\n kdp_calc[kdp_calc >= th2] = 0.0\n\n # set all non-valid data to 0\n kdp_calc[np.isnan(kdp_calc)] = 0.0\n\n # Remove bins with texture higher than treshold\n tex = np.ma.zeros(kdp_calc.shape)\n # compute the local standard deviation\n # (make sure that it is and odd window)\n tex_aux = np.ma.std(rolling_window(kdp_calc, l2 * 2 + 1), -1)\n tex[l2:-l2] = tex_aux\n kdp_calc[tex > std_th] = 0.0\n\n # Loop over iterations\n for i in range(0, n_iter):\n phidp_rec = np.ma.cumsum(kdp_calc) * 2.0 * drm\n\n # In the core of the profile\n kdp_calc[l2 : nn - l2] = (phidp_rec[size:nn] - phidp_rec[0 : nn - size]) / (\n 2.0 * size * drm\n )\n\n # set ray extremes to 0\n kdp_calc[0:l2] = 0.0\n kdp_calc[nn - l2 :] = 0.0\n\n # apply thresholds\n kdp_calc[kdp_calc <= th1] = 0.0\n kdp_calc[kdp_calc >= th2] = 0.0\n\n # Censor Kdp where Psidp was not defined\n kdp_calc = np.ma.masked_where(mask, kdp_calc)\n\n # final reconstructed PhiDP from KDP\n phidp_rec = np.ma.cumsum(kdp_calc) * 2.0 * drm\n\n return kdp_calc, phidp_rec", "def chordSlicesWithKey():\r\n totalChordsMajor = 0\r\n totalChordsMinor = 0\r\n chordCountMajor = {}\r\n chordCountMinor = {}\r\n theSlicesMajor = list()\r\n theSlicesMinor = list()\r\n problemFiles = []\r\n for f in listing:\r\n address = path + f\r\n print 'current file:',address\r\n try:\r\n oneSolo = converter.parse(address)\r\n except:\r\n problemFiles.append(f)\r\n print 'Problem with',f\r\n pass\r\n else:\r\n theKey = analysis.discrete.analyzeStream(oneSolo, 'bellman')\r\n theTonic = str(theKey).split(' ')[0]\r\n theMode = str(theKey).split(' ')[1]\r\n theKeyPC = pitch.Pitch(theTonic).pitchClass\r\n #print 'current key:', theKey, theKeyPC, theMode\r\n theSoloChords = oneSolo.chordify().flat.getElementsByClass('Chord')\r\n if theMode == 'major':\r\n theSlices = theSlicesMajor\r\n chordCount = chordCountMajor\r\n totalChords = totalChordsMajor\r\n elif theMode == 'minor':\r\n theSlices = theSlicesMinor\r\n chordCount = chordCountMinor\r\n totalChords = totalChordsMinor\r\n else:\r\n print 'WHAT FUCKING MODE??', theKey, address\r\n continue\r\n startToken = ['start']\r\n theSlices.append(startToken)\r\n for someChord in theSoloChords:\r\n midiListing = [p.midi for p in someChord.pitches]\r\n #print 'midi listing:', midiListing\r\n bassNoteMidi = someChord.bass().midi\r\n bassNotePC = someChord.bass().pitchClass\r\n bassNoteSD = (bassNotePC - theKeyPC)%12\r\n #print 'bass note:', bassNoteMidi\r\n distAboveBass = [n - bassNoteMidi for n in midiListing]\r\n #print \"intervals above bass:\", distAboveBass\r\n sorted_distAboveBass = sorted(distAboveBass)\r\n thisSlice = {}\r\n thisSlice['voicing_type'] = sorted_distAboveBass\r\n thisSlice['bassMIDI'] = bassNoteMidi\r\n thisSlice['bassSD'] = bassNoteSD\r\n thisSlice['key'] = str(theKey)\r\n 
thisSlice['normalform'] = someChord.normalForm\r\n thisSlice['solo'] = f\r\n thisSlice['pcset'] = someChord.orderedPitchClasses\r\n transPCs = chord.Chord([n - theKeyPC for n in someChord.orderedPitchClasses])\r\n thisSlice['transpc'] = transPCs.pitchNames\r\n theSlices.append(thisSlice)\r\n try:\r\n chordCount[str((someChord.orderedPitchClasses,bassNoteSD))] += 1\r\n except KeyError:\r\n chordCount[str((someChord.orderedPitchClasses,bassNoteSD))] = 1\r\n totalChords += 1\r\n endToken = ['end']\r\n theSlices.append(endToken)\r\n if theMode == 'major':\r\n theSlicesMajor = theSlices\r\n chordCountMajor = chordCount\r\n totalChordsMajor = totalChords\r\n elif theMode == 'minor':\r\n theSlicesMinor = theSlices\r\n chordCountMinor = chordCount\r\n totalChordsMinor = totalChords\r\n #pickle the slices\r\n fpPickleMaj = '1122MajModeSliceDictwSDB.pkl'\r\n #fpPickleMaj = 'combinedMajSliceDictwSDB.pkl'\r\n fpPickleMin = '1122MinModeSliceDictwSDB.pkl'\r\n # fpPickleMin = 'combinedMinSliceDictwSDB.pkl'\r\n pickle.dump(theSlicesMajor, open(fpPickleMaj, \"wb\"))\r\n pickle.dump(theSlicesMinor, open(fpPickleMin, \"wb\"))\r\n #tally up the frequencies for each chord\r\n sorted_chordCountMaj = sorted(chordCountMajor.iteritems(), key=operator.itemgetter(1), reverse=True)\r\n sorted_chordCountMin = sorted(chordCountMinor.iteritems(), key=operator.itemgetter(1), reverse=True)\r\n print 'Total number of Major slices:', totalChordsMajor\r\n print 'Total number of Minor slices:', totalChordsMinor\r\n #export the tally as a csv file\r\n csvNameMaj = '1122MajModepcSTallywSDB.csv'\r\n #csvNameMaj = 'combinedMajorChordTallywSDB.csv'\r\n csvNameMin = '1122MinModepcSTallywSDB.csv'\r\n #csvNameMin = 'combinedMinorChordTallywSDB.csv'\r\n xmaj = csv.writer(open(csvNameMaj, 'wb'))\r\n for pair in sorted_chordCountMaj:\r\n xmaj.writerow([pair[0],pair[1]])\r\n x = csv.writer(open(csvNameMin, 'wb'))\r\n for pair in sorted_chordCountMin:\r\n x.writerow([pair[0], pair[1]])\r\n print 'problem files are:', problemFiles", "def CPA_K(self, correctKey=0, leakage='HW'):\n Nk = self.NkDES\n numPT = 2 ** 6\n sboxNum = self.sboxNum\n cv = self.CPA_CV(correctKey = correctKey) # Diagonal of the confusion matrix\n K = np.zeros((Nk, Nk))\n Ks = np.zeros((Nk, Nk)) # K*\n Kss = np.zeros((Nk, Nk)) # K**\n keys = np.arange(64) # List of wrong keys\n keys = np.delete(keys, correctKey)\n\n evkc = [] # E [V | kc]\n for ptBlock in range(numPT):\n sboxOutc = des_block.sbox(sboxNum, ptBlock ^ correctKey)\n evkc.append(self.hw(sboxOutc))\n evkc = np.mean(evkc)\n #print evkc\n\n for i in keys:\n for j in keys:\n # Calculate kcij = E[(V|kc - V|ki) * (V|kc - V|ki)]\n # Calculate kcijss = E[4 * (V|kc - E[V|kc])^2 * (V|kc - V|ki) * (V|kc - V | kj)]\n kcij = []\n kcijs = []\n kcijss = []\n for ptBlock in range(numPT):\n sboxOutc = des_block.sbox(sboxNum, ptBlock ^ correctKey)\n sboxOuti = des_block.sbox(sboxNum, ptBlock ^ i)\n sboxOutj = des_block.sbox(sboxNum, ptBlock ^ j)\n if self.leakage =='HW':\n vkc = self.hw(sboxOutc) # V | kc\n vki = self.hw(sboxOuti) # V | ki\n vkj = self.hw(sboxOutj) # V | kj\n elif self.leakage =='HD':\n vkc = HD(sboxOutc, correctKey^ptBlock) # V | kc\n vki = HD(sboxOuti, i^ptBlock) # V | ki\n vkj = HD(sboxOutj, j^ptBlock) # V | kj\n\n\n kcij.append((vkc - vki) * (vkc -vkj))\n kcijs.append(((vkc-vki)**2) * ((vkc-vkj)**2))\n kcijss.append( 4 * ((vkc - evkc)**2) * (vkc - vki) * (vkc -vkj))\n kcij = np.mean(kcij)\n kcijss = np.mean(kcijss)\n kcijs = np.mean(kcijs)\n\n K[i][j] = kcij\n Ks[i][j] = kcijs\n Kss[i][j] = kcijs\n\n K = 
np.delete(K, correctKey,0)\n K = np.delete(K, correctKey,1)\n Ks = np.delete(Ks, correctKey,0)\n Ks = np.delete(Ks, correctKey,1)\n Kss = np.delete(Kss, correctKey,0)\n Kss = np.delete(Kss, correctKey,1)\n\n return K, Ks, Kss", "def make_holeplate_deg3(R, L):\n p = 3\n q = 3\n\n ctrlpts = np.zeros((6, 4, 3))\n ctrlpts[:, 0, :] = [[-R, 0, 1],\n [-0.9024, 0.2357, 0.9024],\n [-0.7702, 0.4369, 0.8536],\n [-0.4369, 0.7702, 0.8536],\n [-0.2357, 0.9024, 0.9024],\n [0., 1., 1.]]\n\n ctrlpts[:, 1, :] = [[-2., 0, 1],\n [-1.9675, 0.4119, 0.9675],\n [-1.7290, 0.8401, 0.9512],\n [-0.8401, 1.7290, 0.9512],\n [-0.4119, 1.967, 0.9675],\n [0., 2., 1.]]\n\n ctrlpts[:, 2, :] = [[-3., 0, 1],\n [-3., 1.2222, 1.],\n [-2.8056, 2.0278, 1.],\n [-2.0278, 2.8056, 1.],\n [-1.2222, 3., 1.],\n [0., 3., 1.]]\n\n ctrlpts[:, 3, :] = [[-4, 0, 1],\n [-4, 2.6667, 1.],\n [-4, 4., 1.],\n [-4, 4., 1.],\n [-2.6667, 4., 1.],\n [0., 4., 1.]]\n # Set knot vectors\n knotvector_u = [0, 0, 0, 0, 0.5, 0.5, 1, 1, 1, 1]\n knotvector_v = [0, 0, 0, 0, 1, 1, 1, 1]\n\n return p, q, ctrlpts, knotvector_u, knotvector_v", "def generate_k_pp(dataset, k):\n random_centers: list = generate_k(dataset, k)\n random_assignments: list = assign_points(dataset, random_centers)\n\n distances: list = [distance(random_centers[random_assignments[i]], dataset[i]) for i in range(len(dataset))]\n \n # Generate indices for each distance then sort in ascending order of distance\n indices: list = [i for i in range(len(distances))]\n indices = [j for i, j in sorted(zip(distances, indices))]\n\n weighted_indices: list = []\n for i in range(len(indices)):\n n: int = int(distances[indices[i]])\n \n for j in range(n):\n weighted_indices.append(indices[i])\n\n N: int = len(weighted_indices) - 1\n\n pp_centers: list = []\n random_numbers: list = []\n choices: list = []\n for i in range(k):\n random_choice: int = random.randint(0, N)\n index = weighted_indices[random_choice]\n\n if random_choice in random_numbers or index in choices:\n while random_choice in choices or index in choices:\n random_choice = random.randint(0, N)\n index = weighted_indices[random_choice]\n\n random_numbers.append(random_choice)\n choices.append(index)\n pp_centers.append(dataset[index])\n \n return pp_centers", "def makeKPT(params):\n import math as m\n import numpy as np\n\n recipcell,kpts = makeAtoms(params).get_reciprocal_cell(),[]\n for i in range(3):\n k = 2 * 3.14159 * m.sqrt((recipcell[i]**2).sum()) * params['kptden'] \n kpts.append(2 * int(np.ceil(k / 2)))\n\n kind = params['kind']\n if kind=='surface': return np.array(kpts[:2]+[1])\n elif kind=='molecule': return np.array([1,1,1])\n else: return np.array(kpts)", "def get_short_chord_lengths(particles, contour_colors, long_pairs, nm_per_pixel):\n\n # store short chord length pair of points once found\n short_pixels = []\n # store scores (observed perpendicular slope vs. 
actual perpendicular slope)\n scores = []\n # keep track of each particle's centerpoints\n centerpoints = []\n # keep track of max x,y coordinates\n max_x = 0\n max_y = 0\n\n # loop through long pairs to get midpoint for each pair and then perpendicular short pairs\n for pair in long_pairs:\n # get all pixels of a color\n current_pixels = contour_colors[pair[0]]\n # get start & end x,y coordinates\n x1 = pair[1][0]\n y1 = pair[1][1]\n x2 = pair[2][0]\n y2 = pair[2][1]\n \n #deal with edge case where x1 and x2 are the same\n if x2-x1 == 0:\n denominator = 0.01\n else:\n denominator = x2-x1\n\n # calculate slope as rise over run\n slope = (y2-y1) / denominator\n # calculate orthogonal slope\n orthogonal_slope = -1 / slope\n # check which pixel start, which is end and calculate midpoint accordingly \n if x1 > x2:\n mid_x = x2 + int((x1-x2) / 2)\n else:\n mid_x = x1 + int((x2-x1) / 2)\n if y1 > y2:\n mid_y = y2 + int((y1-y2) / 2)\n # calculate rotation in X-Y plane with long length's slope\n if x1 > x2:\n theta = 360 - np.arctan((y1-y2)/denominator)*(180/np.pi)\n else:\n theta = 180 + np.arctan((y1-y2)/denominator)*(180/np.pi)\n else:\n mid_y = y1 + int((y2-y1) / 2)\n # calculate rotation in X-Y plane with long length's slope\n if slope > 0:\n theta = -1*(180 - (np.arctan(slope)*(180/np.pi)))\n else:\n theta = np.arctan(slope)*(180/np.pi)\n \n # add X-Y rotation angle to particles dictionary\n particles[pair[0]] += [(\"theta\", theta)]\n \n short_pixel = []\n current_score = []\n \n # loop through all pixels for a color\n for pixel in current_pixels:\n \n # calculate change in x and y with respect to midpoint for pixel\n dx = pixel[0] - mid_x\n dy = pixel[1] - mid_y\n \n # change in x cannot be 0\n if dx == 0:\n dx = 0.01\n \n # compute slope between pixel and midpoint\n comp_slope = dy/dx\n # score ~= ratio of pixel slope / orthogonal slope from long chord length\n score = np.abs(1 - comp_slope / orthogonal_slope)\n\n # set threshold that pixel slope / orthogonal slope must be < 0.5\n if (score < 0.5):\n\n # if pixel isn't already in short_pixel\n if pixel not in short_pixel:\n short_pixel += [pixel]\n current_score += [score]\n \n # if pixel is right next to midpoint, add it to short_pixel regardless of score\n elif abs(mid_x - pixel[0]) < 2 and abs(mid_y - pixel[1]) < 2:\n short_pixel += [pixel]\n current_score += [score]\n \n # record pair of points of short chord length\n short_pixels += [short_pixel]\n # record each pair of points' scores\n scores += [current_score]\n \n # store min pixel pairs to visualize lines\n short_pairs = []\n \n # create list of indices to delete from long_chords\n to_be_deleted = []\n\n # process pairs to keep ones with min score\n for i in range(len(short_pixels)):\n if len(short_pixels[i]) >= 2:\n\n # first find pixel with overall minimum score\n min_index = np.argmin(scores[i])\n min_pixel = short_pixels[i][min_index]\n \n # calculate distances from pixel with minimum score\n distances_1 = []\n for pixel in short_pixels[i]:\n distances_1 += [pixel_distance(min_pixel, pixel)]\n \n # find the furthest pixel from the pixel with the minimum score\n max_dist_index = np.argmax(distances_1)\n max_dist_pixel = short_pixels[i][max_dist_index]\n \n # calculate distances from furthest pixel\n distances_2 = []\n for pixel in short_pixels[i]:\n distances_2 += [pixel_distance(max_dist_pixel, pixel)]\n \n pixels_1 = []\n pixels_2 = []\n scores_1 = []\n scores_2 = []\n # loop through the pixels and group the pixels into 2 groups\n for k in range(len(short_pixels[i])):\n \n # 
group pixels closest to the min pixel\n if distances_1[k] < distances_2[k]:\n pixels_1 += [short_pixels[i][k]]\n scores_1 += [scores[i][k]]\n \n # group pixels closest to the furthest pixel\n else:\n pixels_2 += [short_pixels[i][k]]\n scores_2 += [scores[i][k]]\n \n # find the two pixels by finding the minimum score for each group\n min_index_1 = np.argmin(scores_1)\n min_index_2 = np.argmin(scores_2)\n min_pixel_1 = pixels_1[min_index_1]\n min_pixel_2 = pixels_2[min_index_2]\n \n # find b radius, accounting for nm per pixel\n b_radius = (pixel_distance(min_pixel_1, min_pixel_2)/2)*nm_per_pixel\n \n # if b_radius is bigger than 15% of the expected radius\n if b_radius > 0.15 * expected_radius:\n # add b radius to particles dictionary\n particles[long_pairs[i][0]] += [(\"b\", b_radius)]\n \n # store pixels for cv.line() later\n short_pairs += [(min_pixel_1, min_pixel_2)]\n\n # delete corresponding long chord and contour\n else:\n # list the indices of particles to be deleted from largest to smallest so deletions do not affect subsequent indices\n to_be_deleted = [i] + to_be_deleted\n del contour_colors[long_pairs[i][0]]\n\n # delete long chords for particles that are too small\n for index in to_be_deleted:\n del long_pairs[index]\n\n return short_pairs, particles", "def kgen(filename='POSCAR', directory=None, make_folders=False, symprec=0.01,\n kpts_per_split=None, ibzkpt=None, spg=None, density=60,\n mode='bradcrack', cart_coords=False, kpt_list=None, labels=None):\n poscar = Poscar.from_file(filename)\n kpath, kpoints, labels = get_path_data(poscar.structure, mode=mode,\n symprec=symprec, kpt_list=kpt_list,\n labels=labels, spg=spg,\n line_density=density)\n\n logging.info('\\nk-point label indices:')\n for i, label in enumerate(labels):\n if label:\n logging.info('\\t{}: {}'.format(label, i+1))\n\n if not kpt_list and not np.allclose(poscar.structure.lattice.matrix,\n kpath.prim.lattice.matrix):\n prim_filename = '{}_prim'.format(os.path.basename(filename))\n kpath.prim.to(filename=prim_filename)\n\n logging.error(\"\\nWARNING: The input structure does not match the \"\n \"expected standard\\nprimitive symmetry, the path may be \"\n \"incorrect! Use at your own risk.\\n\\nThe correct \"\n \"symmetry primitive structure has been saved as {}.\".\n format(prim_filename))\n\n ibz = _parse_ibzkpt(ibzkpt)\n\n if make_folders and ibz and kpts_per_split is None:\n logging.info(\"\\nFound {} total kpoints in path, do you want to \"\n \"split them up? 
(y/n)\".format(len(kpoints)))\n if input()[0].lower() == 'y':\n logging.info(\"How many kpoints per file?\")\n kpts_per_split = int(input())\n\n write_kpoint_files(filename, kpoints, labels, make_folders=make_folders,\n ibzkpt=ibz, kpts_per_split=kpts_per_split,\n directory=directory, cart_coords=cart_coords)", "def LamC2pKK ( self ) : \n from GaudiConfUtils.ConfigurableGenerators import DaVinci__N3BodyDecays\n #\n return self.make_selection (\n 'LambdaCpKK' ,\n DaVinci__N3BodyDecays ,\n ## inputs \n [ self.protons() , self.kaons() ] ,\n ##\n DecayDescriptor = \" [ Lambda_c+ -> p+ K- K+ ]cc\" ,\n ##\n Combination12Cut = \"\"\"\n ( AM < 2.5 * GeV ) &\n ( ACHI2DOCA(1,2) < 16 ) \n \"\"\" ,\n ## \n CombinationCut = \"\"\"\n ( ( ADAMASS ( 'Lambda_c+' ) < 65 * MeV ) \n | ( ADAMASS ( 'Xi_c+' ) < 65 * MeV ) ) &\n ( APT > %s ) & \n ( ACHI2DOCA(1,3) < 16 ) &\n ( ACHI2DOCA(2,2) < 16 ) \n \"\"\" % ( 0.95 * self[ 'pT(Lc+)' ] ) ,\n ##\n MotherCut = \"\"\"\n ( chi2vx < 25 ) &\n ( PT > %s ) &\n ( ( ADMASS ( 'Lambda_c+' ) < 55 * MeV ) \n | ( ADMASS ( 'Xi_c+' ) < 55 * MeV ) ) &\n ( ctau > 100 * micrometer ) \n \"\"\" % self [ 'pT(Lc+)']\n )", "def construct_single_ply_piece(self, fraction=1.0):\n th_nom = np.radians(self.fiber_angle)\n # Step 1: Define origin line L0\n origin_point = Point2D(self.starting_position, 0.0)\n L0 = Line2D.from_point_angle(origin_point, th_nom)\n # Step 2: Define line L2, perpendicular to L0, tangent to circle s4\n tangent_point = Point2D.from_polar(self.cg.s4, th_nom)\n L2 = Line2D.from_point_angle(tangent_point, th_nom + np.pi/2)\n P0 = L0.intersection_line(L2)\n\n # Step 3: Position P2 and P3 based on max_width and eccentricity\n P2_dist = self.max_width * self.eccentricity\n P3_dist = self.max_width * (1 - self.eccentricity)\n P2 = P0 + Point2D.from_polar(P2_dist, L2.angle())\n P3 = P0 + Point2D.from_polar(P3_dist, L2.angle() + np.pi)\n\n # Step 4: Calculate the spanned angle (both deltas should be >= 0)\n T2 = L0.intersection_circle_near(P2.norm(), P0)\n T3 = L0.intersection_circle_near(P3.norm(), P0)\n delta_phi_1 = fraction*(P2.angle() - T2.angle())\n delta_phi_2 = fraction*(T3.angle() - P3.angle())\n\n # Step 5: Calculate the side lines L1 and L3\n L1 = L0.rotate(delta_phi_1)\n L3 = L0.rotate(-delta_phi_2)\n near_pt = Point2D(self.cg.s1, 0)\n P1a = L1.intersection_circle_near(self.cg.s1, near_pt)\n P4a = L3.intersection_circle_near(self.cg.s1, near_pt)\n # Redefine P2 and P3 if needed (for rest pieces)\n if fraction != 1.0:\n P2 = L2.intersection_line(L1)\n P3 = L2.intersection_line(L3)\n\n # Step 6: Construct L4, parallel to L2, through either P1a or P4a,\n # whichever is furthest from L2\n if L2.distance_point(P1a) > L2.distance_point(P4a):\n L4_through_point = P1a\n else:\n L4_through_point = P4a\n L4 = Line2D.from_point_angle(L4_through_point, L2.angle())\n # now redefine P1 and P4 as the intersection points:\n P1b = L4.intersection_line(L1)\n P4b = L4.intersection_line(L3)\n\n ip_L1_L3 = L1.intersection_line(L3)\n if L2.distance_point(ip_L1_L3) < L2.distance_point(P4b):\n # Line segments L1 and L3 intersect within the polygon, so we have\n # a 'hourglass' shape. Move P1 and P4 to the intersection point,\n # effectively forming a triangle. 
We could just drop P4, if not\n # for some other code expeccting 4-point polygons.\n P1, P4 = ip_L1_L3, ip_L1_L3\n else:\n P1, P4 = P1b, P4b\n\n # Step 7: Return the final ply piece\n return PlyPiece(Polygon2D((P1, P2, P3, P4)), 0.0, -delta_phi_2, delta_phi_1)", "def construct_sk(public_key):\n point = public_key.pointQ\n x = int(point.x).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n y = int(point.y).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n sk = x + y\n return H(sk)", "def canvas_partition ( canvas , \n nx ,\n ny ,\n left_margin = margin_left , \n right_margin = margin_right , \n bottom_margin = margin_bottom , \n top_margin = margin_right ,\n hSpacing = 0.0 ,\n vSpacing = 0.0 ) :\n\n if not isinstance ( nx , int ) or nx<= 0 :\n raise AttributeError('partition: invalid nx=%s' % nx )\n if not isinstance ( ny , int ) or ny<= 0 :\n raise AttributeError('partition: invalid ny=%s' % ny )\n\n ## get the window size\n wsx = abs ( canvas.GetWindowWidth () ) \n wsy = abs ( canvas.GetWindowHeight () ) \n\n #\n ## if parameters given in the absolute units, convert them into relative coordinates\n #\n \n if not 0 < left_margin < 1 : left_margin = abs ( left_margin ) / wsx\n if not 0 < right_margin < 1 : right_margin = abs ( right_margin ) / wsx\n if not 0 < bottom_margin < 1 : bottom_margin = abs ( bottom_margin ) / wsy\n if not 0 < top_margin < 1 : top_margin = abs ( top_margin ) / wsy \n if not 0 < vSpacing < 1 : vSpacing = abs ( vSpacing ) / wsy\n if not 0 < hSpacing < 1 : hSpacing = abs ( hSpacing ) / wsx\n\n #\n ## check consistency \n # \n if 1 <= left_margin :\n raise AttributeError('partition: invalid left margin=%f' % left_margin )\n if 1 <= right_margin :\n raise AttributeError('partition: invalid right margin=%f' % right_margin )\n if 1 <= bottom_margin :\n raise AttributeError('partition: invalid bottom margin=%f' % bottom_margin )\n if 1 <= top_margin :\n raise AttributeError('partition: invalid top margin=%f' % top_margin )\n\n ## delete the pad dictionary \n del canvas.pads \n \n ## make new empty dictionary \n pads = {} \n \n vStep = ( 1.0 - bottom_margin - top_margin - (ny-1) * vSpacing ) / ny\n if 0 > vStep : raise AttributeError('partition: v-step=%f' % vStep )\n \n hStep = ( 1.0 - left_margin - right_margin - (nx-1) * hSpacing ) / nx \n if 0 > hStep : raise AttributeError('partition: h-step=%f' % hStep )\n\n hposr, hposl, hmarr, hmarl, hfactor = 0.,0.,0.,0.,0.\n vposr, vposd, vmard, vmaru, vfactor = 0.,0.,0.,0.,0.\n \n for ix in range ( nx ) :\n \n if 0 == ix : \n hposl = 0\n hposr = left_margin + hStep\n hfactor = hposr - hposl\n hmarl = left_margin / hfactor\n hmarr = 0.0 \n elif nx == ix + 1 :\n hposl = hposr + hSpacing \n hposr = hposl + hStep + right_margin\n hfactor = hposr - hposl \n hmarl = 0.0\n hmarr = right_margin / hfactor \n else : \n hposl = hposr + hSpacing\n hposr = hposl + hStep\n hfactor = hposr - hposl\n hmarl = 0.0\n hmarr = 0.0\n\n for iy in range(ny) :\n if 0 == iy : \n vposd = 0.0\n vposu = bottom_margin + vStep\n vfactor = vposu - vposd\n vmard = bottom_margin / vfactor\n vmaru = 0.0 \n elif ny == iy + 1 : \n vposd = vposu + vSpacing\n vposu = vposd + vStep + top_margin\n vfactor = vposu - vposd;\n vmard = 0.0\n vmaru = top_margin / vfactor \n else :\n vposd = vposu + vSpacing\n vposu = vposd + vStep\n vfactor = vposu - vposd\n vmard = 0.0\n vmaru = 0.0\n\n canvas.cd(0)\n pname = 'glPad_%s_%d_%d' % ( canvas.GetName() , ix , iy )\n groot = ROOT.ROOT.GetROOT()\n pad = groot.FindObject ( pname )\n if pad : del pad\n pad = ROOT.TPad ( pname , '' , hposl , vposd , hposr 
, vposu )\n\n logger.verbose ( ' Create pad[%d,%d]=(%f,%f,%f,%f),[%f,%f,%f,%f] %s ' % (\n ix , iy ,\n hposl , vposd , hposr , vposu , \n hmarl , hmarr , vmard , vmaru , pad.GetName() ) ) \n \n pad.SetLeftMargin ( hmarl )\n pad.SetRightMargin ( hmarr )\n pad.SetBottomMargin ( vmard )\n pad.SetTopMargin ( vmaru )\n \n pad.SetFrameBorderMode ( 0 )\n pad.SetBorderMode ( 0 )\n pad.SetBorderSize ( 0 )\n\n ROOT.SetOwnership ( pad , True )\n \n if not hasattr ( canvas , 'pads' ) : canvas.pads = {}\n pads[ ( ix , iy ) ] = pad\n\n ## fill pads structure \n for iy in reversed ( range ( ny ) ) : \n for ix in range ( nx ) :\n key = ix , iy \n canvas.pads [ key ] = pads[ key ]\n \n return canvas.pads", "def SpherePlaneWave2(k,a,r,theta,just_scattered=False):\n ka=k*a\n x = r*np.cos(theta)\n \n N=0\n badcells=False,False\n while np.any(badcells)==False:\n \n N += 100\n n = np.arange(N+1)\n\n djnka = sph_jn(N,ka)[1]\n dynka = sph_yn(N,ka)[1]\n dhnka = djnka + 1j*dynka\n \n djnka = np.repeat([djnka],r.size,axis=0).reshape(r.size,N+1)\n dhnka = np.repeat([dhnka],r.size,axis=0).reshape(r.size,N+1)\n \n jnkr = np.vstack([sph_jn(N,kr)[0] for kr in k*r])\n ynkr = np.vstack([sph_yn(N,kr)[0] for kr in k*r])\n hnkr = jnkr + 1j*ynkr\n\n simplefilter(\"ignore\")\n pscat= - (1j**n) * (2*n+1) * djnka * hnkr / dhnka\n simplefilter(\"default\")\n\n badcells = np.isnan(pscat)+np.isinf(pscat)\n \n pscat *= Pn(N,np.cos(theta)) \n \n pscat = pscat.compress(np.all(np.logical_not(badcells)==True,axis=0),axis=1)\n \n if just_scattered: return np.sum(pscat,axis=1)\n else: return np.sum(pscat,axis=1) + np.exp(1j*k*x)", "def pDpk(self, x, k):\n k = np.array(k)\n return 2*c*c*k/(self._omega*self._omega)", "def _get_clipping_slices(cost_fpath, sc_point_idx, radius=None):\n with ExclusionLayers(cost_fpath) as f:\n shape = f.shape\n\n if radius is not None:\n row, col = sc_point_idx\n row_min = max(row - radius, 0)\n row_max = min(row + radius, shape[0])\n col_min = max(col - radius, 0)\n col_max = min(col + radius, shape[1])\n\n start_indices = (row - row_min, col - col_min)\n else:\n start_indices = sc_point_idx\n row_min, row_max = None, None\n col_min, col_max = None, None\n\n row_slice = slice(row_min, row_max)\n col_slice = slice(col_min, col_max)\n\n return start_indices, row_slice, col_slice", "def makeKst2Kpi(name,\n KaonPT,\n KaonIPCHI2,\n PionPT,\n PionIPCHI2,\n PionPIDK,\n KstarPT,\n KaonPIDK,\n KstarVCHI2,\n KstarMassWin):\n\n _stdKaons = DataOnDemand(Location=\"Phys/StdLooseKaons/Particles\")\n _stdPions = DataOnDemand(Location=\"Phys/StdLoosePions/Particles\")\n\n _Kstar2Kpi = CombineParticles()\n\n _Kstar2Kpi.DecayDescriptor = \"[K*(892)0 -> K+ pi-]cc\"\n _Kstar2Kpi.DaughtersCuts = {\"K+\" : \"(PT > %(KaonPT)s *MeV) & (PIDK > %(KaonPIDK)s) & (MIPCHI2DV(PRIMARY)> %(KaonIPCHI2)s)\" % locals()\n ,\"pi-\" : \"(PT > %(PionPT)s *MeV) & (PIDK < %(PionPIDK)s) & (MIPCHI2DV(PRIMARY)> %(PionIPCHI2)s)\"% locals()}\n\n _Kstar2Kpi.CombinationCut = \"(ADAMASS('K*(892)0') < %(KstarMassWin)s *MeV)\"% locals()\n _Kstar2Kpi.MotherCut = \"(VFASPF(VCHI2/VDOF)< %(KstarVCHI2)s) & (PT > %(KstarPT)s *MeV)\"% locals()\n\n\n return Selection (name,\n Algorithm = _Kstar2Kpi,\n RequiredSelections = [_stdKaons,_stdPions])", "def composable_key_rate(n_bks, N, n, p, q, r_code, r_m_star, h_m, p_ec, e_sm, e_hash, e_cor, e_pe, h_k):\r\n\r\n Delta_AEP = 4 * np.log2(2 ** (1 + p / 2) + 1) * np.sqrt(np.log2(18 / ((p_ec ** 2) * (e_sm ** 4))))\r\n Theta = np.log2(p_ec * (1 - ((e_sm ** 2) / 3))) + 2 * np.log2(np.sqrt(2) * e_hash)\r\n\r\n # The practical 
secret key rate can be compared with a corresponding theoretical rate\r\n r_tilde_star = r_m_star - (Delta_AEP / np.sqrt(n)) + (Theta / n)\r\n r_theo = ((n * p_ec) / N) * r_tilde_star\r\n\r\n # Practical composable key rate\r\n r_m = h_k + r_code * q - p - h_m\r\n r_tilde = r_m - (Delta_AEP / np.sqrt(n)) + (Theta / n) # The composable key rate without finite-size effects\r\n r_final = ((n * p_ec) / N) * r_tilde # The composable key rate under finite-size effects\r\n\r\n # Privacy amplification input\r\n n_pa = int(p_ec * n_bks * p * n) # The bit length of the concatenated decoded sequences\r\n r = int(np.ceil(p_ec * n_bks * n * r_tilde)) # The length of the final key\r\n\r\n # The ε-security of the protocol\r\n e = e_cor + e_sm + e_hash + p_ec * e_pe\r\n\r\n # Ensure the composable key rate is positive. There may be a case of the practical key rate being positive, while\r\n # the theoretical key rate is negative. This is communicated to the user for future simulation reference.\r\n if r_final < 0:\r\n warnings.warn(\"The composable key rate is negative. Privacy amplification cannot be performed.\")\r\n elif r_theo < 0:\r\n warnings.warn(\"The theoretical composable key rate is negative. This means that future simulations under the\"\r\n \"specified parameters will highly likely return a negative composable key rate.\")\r\n\r\n return r_final, r_theo, n_pa, r, e", "def construct_single_ply_piece(self, fraction=1.0):\n th_nom = np.radians(self.fiber_angle)\n # Define origin line L0\n origin_point = Point2D(self.starting_position, 0.0)\n L0 = Line2D.from_point_angle(origin_point, th_nom)\n # Define line L4\n L4 = Line2D.from_point_angle(Point2D(0.0, 0.0), th_nom + np.pi/2)\n ip_L0_L4 = L0.intersection_line(L4)\n\n # Construct P1 and P4\n P1_dist = self.max_width * self.eccentricity\n P4_dist = self.max_width * (1 - self.eccentricity)\n P1 = ip_L0_L4 + Point2D.from_polar(P1_dist, L4.angle())\n P4 = ip_L0_L4 + Point2D.from_polar(P4_dist, L4.angle() + np.pi)\n\n # Construct side lines L1, L3, parallel to L0\n L1 = Line2D.from_point_angle(P1, L0.angle())\n L3 = Line2D.from_point_angle(P4, L0.angle())\n # Intersection points L0, L1, L3 with circle\n P0 = L0.intersection_circle_near(self.cg.s4, origin_point)\n P2 = L1.intersection_circle_near(self.cg.s4, P0)\n P3 = L3.intersection_circle_near(self.cg.s4, P0)\n\n # Handle creation of rest pieces, with fraction < 1.0\n delta_phi_1 = (P2.angle() - P0.angle())\n delta_phi_2 = (P0.angle() - P3.angle())\n if fraction != 1.0:\n P2 = P2.rotate((fraction - 1.0) * delta_phi_1)\n P3 = P3.rotate((1.0 - fraction) * delta_phi_2)\n L1 = Line2D.from_point_angle(P2, L1.angle())\n L3 = Line2D.from_point_angle(P3, L3.angle())\n P1 = L4.intersection_line(L1)\n P4 = L4.intersection_line(L3)\n delta_phi_1 *= fraction\n delta_phi_2 *= fraction\n\n # Construct L2 through P2 or P3, whichever is furthest from L4\n # Adjust the other point\n if L4.distance_point(P2) > L4.distance_point(P3):\n L2 = Line2D.from_point_angle(P2, L4.angle())\n P3 = L2.intersection_line(L3)\n else:\n L2 = Line2D.from_point_angle(P3, L4.angle())\n P2 = L2.intersection_line(L1)\n\n # Return the final ply piece\n return PlyPiece(Polygon2D((P1, P2, P3, P4)), 0.0, -delta_phi_2, delta_phi_1)", "def smart_ss_construct(cpdag, k):\n\n\n n = cpdag.shape[0]\n\n G = cpdag.copy()\n\n #convert the cpdag to a chordal graph\n #remove the directed edges\n for i in range(n):\n for j in range(i, n):\n if G[i][j] != G[j][i]:\n G[i][j] = 0\n G[j][i] = 0\n\n #now construct an approximately minimal vertex cover\n Gx = 
nx.DiGraph(G)\n S = vertex_cover.min_weighted_vertex_cover(Gx)\n #construct a coloring of the graph induced by the vertex cover\n Gs = nx.subgraph(Gx, S)\n\n coloring = nx.greedy_color(Gs, strategy='largest_first')\n\n #from each color, select intervention of size at most k\n def chunks(l, k):\n #chunks up a list l into pieces of size at most k\n assert k > 0\n return [l[i:i+k] for i in range(0, len(l), k)]\n \n interventions = []\n all_colors = set(coloring.values()) #set removes repeats\n for color in all_colors:\n l = [k for k,v in coloring.items() if v == color]\n interventions = interventions + chunks(l, k)\n\n return interventions" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_constellation_bpsk_sptr __init__(self, p) > digital_constellation_bpsk_sptr
def __init__(self, *args): this = _digital_swig.new_digital_constellation_bpsk_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoSFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoMFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoClipPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoShaderParameter2i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _coin.new_SbPlaneProjector(*args)\n try: self.this.append(this)\n except: self.this = this", "def init(self, state: 'SoState') -> \"void\":\n return _coin.SoClipPlaneElement_init(self, state)", "def __init__(self, *args):\n this = _wali.new_KeyPair(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def construct_sk(public_key):\n point = public_key.pointQ\n x = int(point.x).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n y = int(point.y).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n sk = x + y\n return H(sk)", "def __init__(self, *args):\n this = _coin.new_SbCylinderPlaneProjector(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoShapeHints()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoClipPlaneManip()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, p=0.5, name='dropout_layer'):\n assert 0. <= p < 1.\n self.p = p\n self.name = name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
decision_maker(self, gr_complex sample) > unsigned int Returns the constellation point that matches best.
def decision_maker(self, *args, **kwargs): return _digital_swig.digital_constellation_bpsk_sptr_decision_maker(self, *args, **kwargs)
[ "def Cuffme(rpoint, cuff_sites, strand):\n for cuff in cuff_sites:\n if int(cuff[1])-50 < rpoint < int(cuff[2])+50:\n return 1\n\n return 0", "def _calc_matching_prob(self):\n if not self.professional:\n return 1", "def selectXClassifierT(self):\r\n \r\n actionSetSize = len(self.clSet) \r\n tSize = int(actionSetSize*cons.theta_Select) # sets the number of items in the action set to be included in the tournament selection\r\n posList = []\r\n for i in range(tSize): #hold onto a list of random positions, then select the position with the highest fitness\r\n pos = randrange(actionSetSize)\r\n if pos in posList: # make sure that pos is a pos that has not yet been selected.\r\n pos = randrange(actionSetSize)\r\n else:\r\n posList.append(pos)\r\n \r\n bestF = 0\r\n bestC = 0\r\n for j in posList:\r\n if self.clSet[j].getFitness() > bestF:\r\n bestF = self.clSet[j].getFitness()\r\n bestC = j\r\n\r\n return self.clSet[bestC]", "def getDecisionThreshold(self) -> retval:\n ...", "def switch_strategy():\n doors, prize_pos = init_doors()\n\n choice = first_participant_choice()\n pos_shown = monty_shows(doors, prize_pos, choice)\n\n # The doors chosen by the participant and Monty are distinct and the\n # indices of the doors sum up to 0 + 1 + 2 = 3. So, the participant\n # chooses switches to the position (3 - choice - pos_shown)\n new_choice = 3 - choice - pos_shown\n\n if doors[new_choice] == 'c':\n return 1\n return 0", "def decide(self, observation, prev_decision, internal_state, learn=False):\r\n raise NotImplementedError", "def get_winner(self, logS=None, logS_MAX=None):\n\t\tN=self.N\n\t\tif N<1: return -1\n\t\tassert(len(self.P_w_a.shape) == 1)\n\t\tP_w_a = self.extern(self.P_w_a[:N]).copy() # make a copy because some entries will be reset\n\t\tif (logS_MAX is not None) and (logS_MAX < self.plus_inf):\n\t\t\tlogS_MAX = self.extern(logS_MAX)\n\t\t\tif logS is None: logS=self.logS\n\t\t\tlogS = self.extern(logS)[:N]\n\t\t\tP_w_a[logS > logS_MAX] = self.log0 # reset probabs where hypervolume > S_MAX\n\t\t\t\n\t\tj = argmax(P_w_a)\n\t\t\n\t\t# in degenerate cases when all p_a_w fields are zero then argmax returns 0\n\t\t# which would falsely yield to the conclusion that category j=0 is the winner\n\t\t# when in fact there is no winner, thus a new category needs to be created\n#\t\tprint 'P_w_a=',P_w_a\n\t\tassert(j<N)\n\t\tif self.logp[j] <= self.log0 or P_w_a[j] <= self.log0:\n\t\t\tj = -1\n\n\t\treturn j", "def my_candidate(self):\n if self.turn != 0:\n #print(\"CCCC\")\n new_result = [self.prev_candidate['score']] + self.prev_candidate['candidate'] \n self.data = np.vstack((self.data, new_result))\n X, y= self.data[:,1:], self.data[:,0]\n\n #print(\"CCCC222\")\n\n test_weights = minimize(fun=loss, x0=np.zeros(self.n), args=(X,y), constraints=self.con, bounds=self.bounds).x\n\n ga = SAT(test_weights, 50, 100, 0.95, 0.1)\n ga.evolve()\n\n #print(\"CCC3333\")\n tmp = ga.best_chrm.tolist()\n #print(\"1111111\", tmp)\n\n return ga.best_chrm.tolist()\n else:\n # print(\"CCC444\")\n X, y= self.data[:,1:], self.data[:,0]\n #print(\"CCC5555\")\n test_weights = minimize(fun=loss, x0=np.zeros(self.n), args=(X,y), constraints=self.con, bounds=self.bounds).x\n\n ga = SAT(test_weights, 50, 100, 0.95, 0.1)\n ga.evolve()\n\n #print(\"CCCC666\")\n\n tmp = ga.best_chrm.tolist()\n #print(\"222222222\", tmp)\n\n return tmp", "def _choose_initial_point(self) -> np.ndarray:\n if self._warm_start and self._fit_result is not None:\n self._initial_point = self._fit_result.x\n elif self._initial_point is None:\n 
self._initial_point = algorithm_globals.random.random(self._neural_network.num_weights)\n return self._initial_point", "def strategy(self):\n # first move \"scissors\" (2)\n if len(self.get_past_moves()) == 0:\n counter_play = 2\n #print(counter_play)\n return counter_play\n else: # predict the next move and counter it\n counter_play = self.counter(self.predict(self.get_opp_moves()))\n #print(counter_play)\n return counter_play", "def custom_heuristic(gameState):\r\n center_weight = 0.5\r\n lib_weight = 1.5\r\n own_loc = gameState.locs[self.player_id]\r\n opp_loc = gameState.locs[1- self.player_id]\r\n own_liberties = gameState.liberties(own_loc)\r\n opp_liberties = gameState.liberties(opp_loc)\r\n # Custom 1: distanceToCenter(own_loc)\r\n # Custom 2: len(own_liberties) - ( center_weight * distanceToCenter(own_loc) )\r\n # Custom 3: len(own_liberties) - ( len(opp_liberties) ) - ( center_weight * distanceToCenter(own_loc) ) \r\n # Custom 4: len(own_liberties) - ( lib_weight * len(opp_liberties) ) - ( center_weight * distanceToCenter(own_loc) )\r\n # Custom 5: ( lib_weight * (len(own_liberties) / len(opp_liberties)) - ( center_weight * distanceToCenter(own_loc)) )\r\n return ( lib_weight * (len(own_liberties) / len(opp_liberties)) - (center_weight * distanceToCenter(own_loc)) )", "def getBestSolutionValue(self) -> float:", "def select_pts(self):\n lst=[]\n for r in self.rewards:\n if r.earning_category_id==15:\n lst.append(r.reward_rate.points)\n if lst:\n return lst[0]\n else:\n return 0", "def choose_distractor(self, model, dict, threshold_func, params, banned):\n for surprisal in self.surprisals: # calculate desired surprisal thresholds\n self.surprisal_targets.append(max(params[\"min_abs\"], surprisal + params[\"min_delta\"]))\n # get us some distractor candidates\n min_length, max_length, min_freq, max_freq = threshold_func(self.words)\n distractor_opts = dict.get_potential_distractors(min_length, max_length, min_freq, max_freq, params)\n avoid=[]\n for word in self.words: #it's real awkward if the distractor is the same as the real word, so let's not do that\n avoid.append(strip_punct(word).lower())\n # initialize\n best_word = \"x-x-x\"\n best_min_surp = 0\n for dist in distractor_opts:\n if dist not in banned and dist not in avoid: # if we've already used it in this sentence set, don't bother\n good = True\n min_surp = 100\n for i in range(len(self.probs)): # check distractor candidate against each sentence's probs\n dist_surp = model.get_surprisal(self.probs[i], dist)\n if dist_surp < self.surprisal_targets[i]:\n good = False # it doesn't meet the target\n min_surp = min(min_surp, dist_surp) # but we should keep track of the lowest anyway\n if good: # stayed above all surprisal thresholds\n self.distractor = dist # we're done, yay!\n return self.distractor\n if min_surp > best_min_surp: # best so far\n best_min_surp = min_surp\n best_word = dist\n logging.warning(\"Could not find a word to meet threshold for item %s, label %s, returning %s with %d min surp instead\",\n self.id, self.lab, best_word, best_min_surp)\n self.distractor = best_word\n return self.distractor", "def recommend_sensing(self):\n # Enter your code and remove the statement below\n # should select from unobserved location\n # unobserved = open: with largest probability\n best_unobserved = max(self.open, key=lambda position: self.tprob[position])\n # if max is 0, then unobserved ones all have zero probabilities\n if self.tprob[best_unobserved] != 0:\n return best_unobserved\n # directly using max, will return the 
max key in a dictionary\n # should find observed locations instead of all positions\n # all locations - unobserved locations = observed locations\n # tprob.keys() is views, which can be used as set\n best_observed = max(self.tprob.keys() - self.open, key=lambda position: self.tprob[position])\n return utils.closest_point(best_observed, self.open)", "def covers(self, proposal, min_match, nhood=None):\n max_match = 0\n template = None\n\n # find templates in the bank \"near\" this tmplt\n prop_nhd = getattr(proposal, self.nhood_param)\n if not nhood:\n low, high = _find_neighborhood(self._nhoods, prop_nhd, self.nhood_size)\n tmpbank = self._templates[low:high]\n else:\n tmpbank = nhood\n if not tmpbank: return (max_match, template)\n\n # sort the bank by its nearness to tmplt in mchirp\n # NB: This sort comes up as a dominating cost if you profile,\n # but it cuts the number of match evaluations by 80%, so turns out\n # to be worth it even for metric match, where matches are cheap.\n tmpbank.sort(key=lambda b: abs( getattr(b, self.nhood_param) - prop_nhd))\n\n # set parameters of match calculation that are optimized for this block\n df_end, f_max = get_neighborhood_df_fmax(tmpbank + [proposal], self.flow)\n if self.fhigh_max:\n f_max = min(f_max, self.fhigh_max)\n df_start = max(df_end, self.iterative_match_df_max)\n\n # find and test matches\n for tmplt in tmpbank:\n\n self._nmatch += 1\n df = df_start\n match_last = 0\n\n if self.coarse_match_df:\n # Perform a match at high df to see if point can be quickly\n # ruled out as already covering the proposal\n PSD = get_PSD(self.coarse_match_df, self.flow, f_max, self.noise_model)\n match = self.compute_match(tmplt, proposal, self.coarse_match_df,\n PSD=PSD)\n if match == 0:\n err_msg = \"Match is 0. This might indicate that you have \"\n err_msg += \"the df value too high. Please try setting the \"\n err_msg += \"coarse-value-df value lower.\"\n # FIXME: This could be dealt with dynamically??\n raise ValueError(err_msg)\n\n if (1 - match) > 0.05 + (1 - min_match):\n continue\n\n while df >= df_end:\n\n PSD = get_PSD(df, self.flow, f_max, self.noise_model)\n match = self.compute_match(tmplt, proposal, df, PSD=PSD)\n if match == 0:\n err_msg = \"Match is 0. This might indicate that you have \"\n err_msg += \"the df value too high. 
Please try setting the \"\n err_msg += \"iterative-match-df-max value lower.\"\n # FIXME: This could be dealt with dynamically??\n raise ValueError(err_msg)\n\n # if the result is a really bad match, trust it isn't\n # misrepresenting a good match\n if (1 - match) > 0.05 + (1 - min_match):\n break\n\n # calculation converged\n if match_last > 0 and abs(match_last - match) < 0.001:\n break\n\n # otherwise, refine calculation\n match_last = match\n df /= 2.0\n\n if match > min_match:\n return (match, tmplt)\n\n # record match and template params for highest match\n if match > max_match:\n max_match = match\n template = tmplt\n\n return (max_match, template)", "def pick_threshold(self):", "def knn_predict(new_point, points, point_classes, k=5):\n k_nearest = find_nearest_neighbors(new_point, points, k)\n return majority_vote(point_classes[k_nearest])", "def draw_candidate(self, arr_ind, do_propMH):\n self.aCorr = 1\n if self.stepType == 'Uniform':\n cand = self.parent.val[arr_ind] + self.stepParam[arr_ind] * np.random.uniform(-0.5, 0.5)\n elif self.stepType == 'BetaRho':\n cand = np.exp(-0.25 * self.parent.val[arr_ind]) + self.stepParam[arr_ind] * np.random.uniform(-0.5, 0.5)\n if cand <= 0:\n cand = np.inf\n else:\n cand = -4 * np.log(cand)\n elif self.stepType == 'PropMH':\n if do_propMH:\n cval = self.parent.val[arr_ind]\n w = np.max([1, cval/3])\n dval = cval + w * np.random.uniform(-1, 1)\n w1 = np.max([1, dval/3])\n if cval > (dval+w1):\n aCorr = False # never will accept in this case\n else:\n aCorr = w/w1\n cand = dval\n self.aCorr = aCorr\n else:\n cand = self.parent.val[arr_ind] + self.stepParam[arr_ind] * np.random.uniform(-0.5, 0.5)\n else:\n raise Exception('Unknown stepType')\n return cand", "def get_best(self, dataset, sample_size): # always according to accuracy\n # create dev-set\n data_idx = range(len(dataset))\n np.random.shuffle(data_idx)\n dataset_sample = [dataset[i] for i in np.random.choice(data_idx, sample_size)]\n\n # evaluate elitism\n ops = [x[1] for x in self.elitism]\n ls = [(mlp.check_on_dataset(dataset_sample)[0], mlp) for mlp in ops]\n ls.sort(key=lambda a: a[0])\n return ls[-1][1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_pre_diff_code(self, bool a) Whether to apply an encoding before doing differential encoding. (e.g. gray coding)
def set_pre_diff_code(self, *args, **kwargs): return _digital_swig.digital_constellation_bpsk_sptr_set_pre_diff_code(self, *args, **kwargs)
[ "def recode(self, new_encoding: dict):\n self.edges = set(map(lambda edge: edge.recode(self.states_encoding, new_encoding), self.edges))", "def pre_encode(fxn):\n unclaimed[fxn] = 'pre_encode'\n return fxn", "def set_precommit(c):\n c.run(\n 'cp githooks/pre-commit .git/hooks/pre-commit '\n '&& chmod +x .git/hooks/pre-commit'\n '&& git config --bool flake8.strict true',\n pty=True\n )", "def preprocessing():", "def set_pre_tokenizer(self, custom_pre_tokenizer: CPT):\n self.pre_tokenizer = PreTokenizer.custom(custom_pre_tokenizer)", "def hook_pre_trained(self, x):\n self.pre_trained = x", "def pre_arranged(self, pre_arranged):\n\n self._pre_arranged = pre_arranged", "def setPrediction(self,new_predition):\r\n \r\n \tself.prediction=new_predition", "def set_codes(self, codes, reject=False):\n\n self.codes = set(codes)\n self.reject = reject", "def _pre_compile(self, content=None):\r\n pass", "def update_code(self, new_code):\n self.code = new_code # code from __inti ___\n\n # Fill in the rest", "def test_preds_before_and_after_convert_equal():\n init_alpha = 12.1\n pipeline = pipeline_with_custom_parameters(init_alpha)\n\n # Generate data\n input_data = get_synthetic_regression_data(n_samples=10, n_features=2,\n random_state=2021)\n # Init fit\n pipeline.fit(input_data)\n init_preds = pipeline.predict(input_data)\n\n # Convert into OptGraph object\n adapter = PipelineAdapter()\n opt_graph = adapter.adapt(pipeline)\n restored_pipeline = adapter.restore(opt_graph)\n\n # Restored pipeline fit\n restored_pipeline.fit(input_data)\n restored_preds = restored_pipeline.predict(input_data)\n\n assert np.array_equal(init_preds.predict, restored_preds.predict)", "def preconstrain_flag_page(self):\n\n if not self._preconstrain_flag:\n return\n\n if self._magic_content is None:\n e_msg = \"Trying to preconstrain flag page without CGC magic content. \"\n e_msg += \"You should have set record_magic flag for Runner dynamic tracing. 
\"\n e_msg += \"For now, nothing will happen.\"\n l.warning(e_msg)\n return\n\n for b in range(0x1000):\n self._preconstrain(self._magic_content[b], self.state.cgc.flag_bytes[b])", "def setCode(self, c):\n\t\t\n\t\tself.code = c", "def pre(self, neoart=None):\n pass", "def update_code(self, new_code):\n\n # Fill in the rest\n self.code = new_code\n # print(self.code) #for checking\n return self.code", "def setup_label_coding(self, verbose=True, debug=False):\n all_labels = set()\n for _key in [*self.__class__.PUBLIC_SUBSETS, *self.__class__.PRIVATE_SUBSETS]:\n _df = self.dfs[_key]\n _found_labels = set(_df[\"label\"].tolist())\n all_labels = all_labels.union(_found_labels)\n\n # exclude ABSTAIN from self.classes, but include it in the encoding\n all_labels.discard(module_config.ABSTAIN_DECODED)\n self.classes = sorted(all_labels)\n self.label_encoder = {\n **{_label: _i for _i, _label in enumerate(self.classes)},\n module_config.ABSTAIN_DECODED: module_config.ABSTAIN_ENCODED,\n }\n self.label_decoder = {_v: _k for _k, _v in self.label_encoder.items()}\n\n if verbose:\n self._good(\n f\"Set up label encoder/decoder with {len(self.classes)} classes.\"\n )\n if debug:\n self.validate_labels()", "def setDoPreCompile(self, *args):\r\n return _osgDB.DatabasePager_setDoPreCompile(self, *args)", "def apply_coder(text, coder):\n ### TODO.\n codedText = ''\n for char in text:\n if char in coder:\n codedText += coder[char]\n else:\n codedText += char\n return codedText", "def run_pre_hooks(self, dataframe: DataFrame) -> DataFrame:\n for hook in self.pre_hooks:\n dataframe = hook.run(dataframe)\n return dataframe" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
constellation_bpsk() > digital_constellation_bpsk_sptr Digital constellation for BPSK.
def constellation_bpsk(): return _digital_swig.constellation_bpsk()
[ "def bpsk(input_bits, noise):\n modulator = Modulator()\n demodulator = Demodulator()\n channel = Channel()\n signal = modulator.make_bpsk_mod(input_bits)\n\n signal = channel.send_signal(signal, noise)\n\n result_bits = demodulator.make_bpsk_demod(signal, channel)\n return result_bits", "def construct_sk(public_key):\n point = public_key.pointQ\n x = int(point.x).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n y = int(point.y).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n sk = x + y\n return H(sk)", "def sc_and_bcc_kps(kmax):\n\n kpds = []\n for k in range(kmax):\n for m in range(1,2,4):\n p = m*(k**3)\n if p <= kmax and p not in kpds:\n kpds.append(p)\n\n return kpds", "def getInstance(state: 'SoState') -> \"SoClipPlaneElement const *\":\n return _coin.SoClipPlaneElement_getInstance(state)", "def SoClipPlaneElement_getInstance(state: 'SoState') -> \"SoClipPlaneElement const *\":\n return _coin.SoClipPlaneElement_getInstance(state)", "def getBCProjWF_simple(self, discretization='CC'):\n\n if discretization is not 'CC':\n raise NotImplementedError('Boundary conditions only implemented'\n 'for CC discretization.')\n\n def projBC(n):\n ij = ([0, n], [0, 1])\n vals = [0, 0]\n vals[0] = 1\n vals[1] = 1\n return sp.csr_matrix((vals, ij), shape=(n+1, 2))\n\n def projDirichlet(n, bc):\n bc = checkBC(bc)\n ij = ([0, n], [0, 1])\n vals = [0, 0]\n if(bc[0] == 'dirichlet'):\n vals[0] = -1\n if(bc[1] == 'dirichlet'):\n vals[1] = 1\n return sp.csr_matrix((vals, ij), shape=(n+1, 2))\n\n BC = [['dirichlet', 'dirichlet'], ['dirichlet', 'dirichlet'],\n ['dirichlet', 'dirichlet']]\n n = self.vnC\n indF = self.faceBoundaryInd\n\n if self.dim == 1:\n Pbc = projDirichlet(n[0], BC[0])\n B = projBC(n[0])\n indF = indF[0] | indF[1]\n Pbc = Pbc*sdiag(self.area[indF])\n\n elif self.dim == 2:\n Pbc1 = sp.kron(speye(n[1]), projDirichlet(n[0], BC[0]))\n Pbc2 = sp.kron(projDirichlet(n[1], BC[1]), speye(n[0]))\n Pbc = sp.block_diag((Pbc1, Pbc2), format=\"csr\")\n B1 = sp.kron(speye(n[1]), projBC(n[0]))\n B2 = sp.kron(projBC(n[1]), speye(n[0]))\n B = sp.block_diag((B1, B2), format=\"csr\")\n indF = np.r_[(indF[0] | indF[1]), (indF[2] | indF[3])]\n Pbc = Pbc*sdiag(self.area[indF])\n\n elif self.dim == 3:\n Pbc1 = kron3(speye(n[2]), speye(n[1]), projDirichlet(n[0], BC[0]))\n Pbc2 = kron3(speye(n[2]), projDirichlet(n[1], BC[1]), speye(n[0]))\n Pbc3 = kron3(projDirichlet(n[2], BC[2]), speye(n[1]), speye(n[0]))\n Pbc = sp.block_diag((Pbc1, Pbc2, Pbc3), format=\"csr\")\n B1 = kron3(speye(n[2]), speye(n[1]), projBC(n[0]))\n B2 = kron3(speye(n[2]), projBC(n[1]), speye(n[0]))\n B3 = kron3(projBC(n[2]), speye(n[1]), speye(n[0]))\n B = sp.block_diag((B1, B2, B3), format=\"csr\")\n indF = np.r_[\n (indF[0] | indF[1]),\n (indF[2] | indF[3]),\n (indF[4] | indF[5])\n ]\n Pbc = Pbc*sdiag(self.area[indF])\n\n return Pbc, B.T", "def pc_nproduced(self):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_sptr_pc_nproduced(self)", "def bell():\n qc = QuantumCircuit(2)\n qc.h(0)\n qc.cx(0, 1)\n qc.measure_all()\n return CircuitProbsPair(qc, {0: 0.5, 3: 0.5, 1: 0, 2: 0})", "def associado_spc_brasil(self):\n return self._associado_spc_brasil", "def gc_sandstone(Vp, B=0.80416, C=-0.85588):\r\n\r\n Vs = B*Vp + C\r\n\r\n return Vs", "def bond_B(k):\n return (4-k) * 300000", "def _CkdPub(self, index: Bip32KeyIndex) -> Bip32Base:\n\n # Not supported by Ed25519 SLIP-0010\n pass", "def pc_nproduced(self):\n return _frame_detection_swig.deinterleaver_bb_sptr_pc_nproduced(self)", "def pc_nproduced(self):\n return _wavelet_swig.squash_ff_sptr_pc_nproduced(self)", 
"def get_pbc(self):\n return self.atoms.get_pbc()", "def smart_ss_construct(cpdag, k):\n\n\n n = cpdag.shape[0]\n\n G = cpdag.copy()\n\n #convert the cpdag to a chordal graph\n #remove the directed edges\n for i in range(n):\n for j in range(i, n):\n if G[i][j] != G[j][i]:\n G[i][j] = 0\n G[j][i] = 0\n\n #now construct an approximately minimal vertex cover\n Gx = nx.DiGraph(G)\n S = vertex_cover.min_weighted_vertex_cover(Gx)\n #construct a coloring of the graph induced by the vertex cover\n Gs = nx.subgraph(Gx, S)\n\n coloring = nx.greedy_color(Gs, strategy='largest_first')\n\n #from each color, select intervention of size at most k\n def chunks(l, k):\n #chunks up a list l into pieces of size at most k\n assert k > 0\n return [l[i:i+k] for i in range(0, len(l), k)]\n \n interventions = []\n all_colors = set(coloring.values()) #set removes repeats\n for color in all_colors:\n l = [k for k,v in coloring.items() if v == color]\n interventions = interventions + chunks(l, k)\n\n return interventions", "def LamC2pKK ( self ) : \n from GaudiConfUtils.ConfigurableGenerators import DaVinci__N3BodyDecays\n #\n return self.make_selection (\n 'LambdaCpKK' ,\n DaVinci__N3BodyDecays ,\n ## inputs \n [ self.protons() , self.kaons() ] ,\n ##\n DecayDescriptor = \" [ Lambda_c+ -> p+ K- K+ ]cc\" ,\n ##\n Combination12Cut = \"\"\"\n ( AM < 2.5 * GeV ) &\n ( ACHI2DOCA(1,2) < 16 ) \n \"\"\" ,\n ## \n CombinationCut = \"\"\"\n ( ( ADAMASS ( 'Lambda_c+' ) < 65 * MeV ) \n | ( ADAMASS ( 'Xi_c+' ) < 65 * MeV ) ) &\n ( APT > %s ) & \n ( ACHI2DOCA(1,3) < 16 ) &\n ( ACHI2DOCA(2,2) < 16 ) \n \"\"\" % ( 0.95 * self[ 'pT(Lc+)' ] ) ,\n ##\n MotherCut = \"\"\"\n ( chi2vx < 25 ) &\n ( PT > %s ) &\n ( ( ADMASS ( 'Lambda_c+' ) < 55 * MeV ) \n | ( ADMASS ( 'Xi_c+' ) < 55 * MeV ) ) &\n ( ctau > 100 * micrometer ) \n \"\"\" % self [ 'pT(Lc+)']\n )", "def __repr__(self):\n return \"The Blum-Goldwasser public-key encryption scheme.\"", "def bishop():\n game_bishop = Bishop(Color.BLACK)\n game_bishop.coords = Coords(x=7, y=4)\n return game_bishop", "def pc_nproduced(self):\n return _radio_astro_swig.dedispersion_sptr_pc_nproduced(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_constellation_qpsk_sptr __init__(self, p) > digital_constellation_qpsk_sptr
def __init__(self, *args): this = _digital_swig.new_digital_constellation_qpsk_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def construct_sk(public_key):\n point = public_key.pointQ\n x = int(point.x).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n y = int(point.y).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n sk = x + y\n return H(sk)", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n super(CorrelogramPooling3D, self).__init__()", "def __init__(self):\n this = _coin.new_SoSFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args, **kwargs):\n # If a PSCU has been passed in keyword arguments use that, otherwise create a new one\n if 'pscu' in kwargs:\n self.pscu = kwargs['pscu']\n else:\n self.pscu = PSCU(*args, **kwargs)\n\n # Get the QuadData containers associated with the PSCU\n self.quad_data = [QuadData(quad=q) for q in self.pscu.quad]\n\n # Get the temperature and humidity containers associated with the PSCU\n self.temperature_data = [\n TempData(self.pscu, i) for i in range(self.pscu.num_temperatures)\n ]\n self.humidity_data = [\n HumidityData(self.pscu, i) for i in range(self.pscu.num_humidities)\n ]\n\n # Build the parameter tree of the PSCU\n self.param_tree = ParameterTree({\n \"quad\": {\n \"quads\": [q.param_tree for q in self.quad_data],\n 'trace': (self.get_quad_traces, None),\n },\n \"temperature\": {\n \"sensors\": [t.param_tree for t in self.temperature_data],\n \"overall\": (self.pscu.get_temperature_state, None),\n \"latched\": (self.pscu.get_temperature_latched, None),\n },\n \"humidity\": {\n \"sensors\": [h.param_tree for h in self.humidity_data],\n \"overall\": (self.pscu.get_humidity_state, None),\n \"latched\": (self.pscu.get_humidity_latched, None),\n },\n \"fan\": {\n \"target\": (self.pscu.get_fan_target, self.pscu.set_fan_target),\n \"currentspeed_volts\": (self.pscu.get_fan_speed_volts, None),\n \"currentspeed\": (self.pscu.get_fan_speed, None),\n \"setpoint\": (self.pscu.get_fan_set_point, None),\n \"setpoint_volts\": (self.pscu.get_fan_set_point_volts, None),\n \"tripped\": (self.pscu.get_fan_tripped, None),\n \"overall\": (self.pscu.get_fan_state, None),\n \"latched\": (self.pscu.get_fan_latched, None),\n \"mode\": (self.pscu.get_fan_mode, None),\n },\n \"pump\": {\n \"flow\": (self.pscu.get_pump_flow, None),\n \"flow_volts\": (self.pscu.get_pump_flow_volts, None),\n \"setpoint\": (self.pscu.get_pump_set_point, None),\n \"setpoint_volts\": (self.pscu.get_pump_set_point_volts, None),\n \"tripped\": (self.pscu.get_pump_tripped, None),\n \"overall\": (self.pscu.get_pump_state, None),\n \"latched\": (self.pscu.get_pump_latched, None),\n \"mode\": (self.pscu.get_pump_mode, None),\n },\n \"trace\": {\n \"overall\": (self.pscu.get_trace_state, None),\n \"latched\": (self.pscu.get_trace_latched, None),\n },\n \"position\": (self.pscu.get_position, None),\n \"position_volts\": (self.pscu.get_position_volts, None),\n \"overall\": (self.pscu.get_health, None),\n \"latched\": (self.get_all_latched, None),\n \"armed\": (self.pscu.get_armed, self.pscu.set_armed),\n \"allEnabled\": (self.pscu.get_all_enabled, self.pscu.enable_all),\n \"enableInterval\": (self.pscu.get_enable_interval, None),\n \"displayError\": (self.pscu.get_display_error, None),\n })", "def __init__(self, spec_data, lvol, dp=0.0, innout=0, plusminus=+1):\n super().__init__(spec_data, lvol)\n\n self.fortran_module.specpjh.init_pjh(dp, innout, plusminus)\n self.dp = dp\n self.innout = innout\n self.plusminus = plusminus\n self.initialized = True\n\n ## 
the size of the problem, 2 for 1.5 or 2D system\n self.problem_size = 2\n ## choose the variable for Poincare plot\n self.poincare_plot_type = \"yx\"\n ## the x label of Poincare plot\n self.poincare_plot_xlabel = \"theta\"\n ## the y label of Poincare plot\n self.poincare_plot_ylabel = \"p_theta\"", "def __init__(self, pscu, sensor_idx):\n self.pscu = pscu\n self.sensor_idx = sensor_idx\n\n self.param_tree = ParameterTree({\n \"leak_impedance\": (self.get_leak_impedance, None),\n \"leak_volts\": (self.get_leak_volts, None),\n \"setpoint\": (self.get_set_point, None),\n \"setpoint_volts\": (self.get_set_point_volts, None),\n \"tripped\": (self.get_tripped, None),\n \"trace\": (self.get_trace, None),\n \"disabled\": (self.get_disabled, None),\n \"sensor_name\": (self.get_name, None),\n \"mode\": (self.get_mode, None),\n })", "def __init__(self):\n self.port=Config.PortPrinter # Assign the name of the port written in Config.py to self.port\n self.FirstMove=0 # Variable wich allow us to know if this is the first movement of the 3d-mill\n self.Coord={} # Create a dictionnary\n self.cnc=CNC(self.port) # Call the class CNC\n self.cnc.OpenConnection() # Open the Connection with the device\n self.NbWells=0 # Count the number of wells \n Wells.Wells_1(self)", "def init_p(cls, p):\n return cls(p, np.zeros_like(p), np.zeros_like(p))", "def __init__(self):\n this = _coin.new_SoShaderParameter2i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(HCompositeState2ProcDef, self).__init__(name='HCompositeState2ProcDef', num_nodes=152, edges=[])\n \n # Add the edges\n self.add_edges([(8, 39), (39, 1), (1, 40), (40, 6), (6, 41), (41, 9), (9, 42), (42, 11), (8, 43), (43, 13), (6, 44), (44, 14), (6, 45), (45, 15), (6, 46), (46, 16), (11, 47), (47, 17), (11, 48), (48, 18), (11, 49), (49, 19), (11, 50), (50, 20), (9, 51), (51, 12), (12, 52), (52, 21), (12, 53), (53, 22), (12, 54), (54, 23), (71, 55), (55, 119), (72, 56), (56, 120), (73, 57), (57, 121), (74, 58), (58, 122), (75, 59), (59, 123), (76, 60), (60, 124), (77, 61), (61, 125), (78, 62), (62, 126), (79, 63), (63, 127), (80, 64), (64, 128), (81, 65), (65, 129), (82, 66), (66, 130), (83, 67), (67, 131), (84, 68), (68, 132), (85, 69), (69, 133), (86, 70), (70, 134), (13, 24), (24, 88), (14, 25), (25, 89), (15, 26), (26, 90), (16, 27), (27, 91), (11, 28), (28, 92), (17, 29), (29, 93), (18, 30), (30, 94), (19, 31), (31, 95), (20, 32), (32, 96), (12, 33), (33, 97), (21, 34), (34, 98), (22, 35), (35, 99), (23, 36), (36, 100), (8, 37), (37, 101), (1, 38), (38, 102), (5, 0), (0, 135), (0, 136), (0, 137), (0, 138), (0, 139), (0, 140), (0, 141), (0, 142), (0, 143), (0, 144), (0, 145), (0, 146), (0, 147), (0, 148), (0, 149), (0, 150), (0, 151), (136, 1), (7, 2), (2, 4), (4, 3), (3, 87), (10, 4), (7, 5), (137, 6), (71, 103), (72, 104), (73, 105), (74, 106), (75, 107), (76, 108), (77, 109), (78, 110), (79, 111), (80, 112), (81, 113), (82, 114), (83, 115), (84, 116), (85, 117), (86, 118), (135, 8), (138, 9), (139, 13), (140, 14), (141, 15), (142, 16), (143, 11), (144, 12), (145, 17), (146, 18), (147, 19), (148, 20), (149, 21), (150, 22), (151, 23), (8, 10), (103, 87), (104, 88), (105, 89), (106, 90), (107, 91), (108, 92), (109, 93), (110, 94), (111, 95), (112, 96), (113, 97), (114, 98), (115, 99), (116, 100), (117, 101), (118, 102)])\n # Set the graph attributes\n self[\"mm__\"] = 
pickle.loads(\"\"\"(lp1\nS'UMLRT2Kiltera_MM'\np2\na.\"\"\")\n self[\"name\"] = \"\"\"CompositeState2ProcDef\"\"\"\n self[\"GUID__\"] = UUID('d5e9d5a2-c202-49ef-a74d-abc96e53b4fe')\n \n # Set the node attributes\n self.vs[0][\"mm__\"] = \"\"\"ApplyModel\"\"\"\n self.vs[0][\"GUID__\"] = UUID('4f03b792-e84e-4c84-bbae-3072cf6a293c')\n self.vs[1][\"name\"] = \"\"\"localdef1\"\"\"\n self.vs[1][\"classtype\"] = \"\"\"LocalDef\"\"\"\n self.vs[1][\"mm__\"] = \"\"\"LocalDef\"\"\"\n self.vs[1][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[1][\"GUID__\"] = UUID('00ff12a2-181f-4200-81a2-75850a58d99f')\n self.vs[2][\"mm__\"] = \"\"\"match_contains\"\"\"\n self.vs[2][\"GUID__\"] = UUID('938cefd8-a8a4-4aaf-be3a-e728f6d4b308')\n self.vs[3][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[3][\"GUID__\"] = UUID('a1001fa8-fbfb-4491-a555-e688afae9a35')\n self.vs[4][\"name\"] = \"\"\"state1\"\"\"\n self.vs[4][\"classtype\"] = \"\"\"State\"\"\"\n self.vs[4][\"mm__\"] = \"\"\"State\"\"\"\n self.vs[4][\"cardinality\"] = \"\"\"+\"\"\"\n self.vs[4][\"GUID__\"] = UUID('2de4b186-4d1b-49c5-a24d-837430de86c3')\n self.vs[5][\"mm__\"] = \"\"\"paired_with\"\"\"\n self.vs[5][\"GUID__\"] = UUID('6864a62e-0c16-41ec-85cb-5304c66b2167')\n self.vs[6][\"name\"] = \"\"\"new1\"\"\"\n self.vs[6][\"classtype\"] = \"\"\"New\"\"\"\n self.vs[6][\"mm__\"] = \"\"\"New\"\"\"\n self.vs[6][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[6][\"GUID__\"] = UUID('6e918d39-761f-4145-980d-e035e8956e4c')\n self.vs[7][\"mm__\"] = \"\"\"MatchModel\"\"\"\n self.vs[7][\"GUID__\"] = UUID('9d3c9ff3-d943-45c5-9a68-4b94f8ae4f55')\n self.vs[8][\"name\"] = \"\"\"procdef1\"\"\"\n self.vs[8][\"classtype\"] = \"\"\"ProcDef\"\"\"\n self.vs[8][\"mm__\"] = \"\"\"ProcDef\"\"\"\n self.vs[8][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[8][\"GUID__\"] = UUID('b36423c7-5f8e-4565-9124-9dedad23d1e1')\n self.vs[9][\"name\"] = \"\"\"par1\"\"\"\n self.vs[9][\"classtype\"] = \"\"\"Par\"\"\"\n self.vs[9][\"mm__\"] = \"\"\"Par\"\"\"\n self.vs[9][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[9][\"GUID__\"] = UUID('64a7af82-a641-4084-b5c3-db88c40c7b99')\n self.vs[10][\"type\"] = \"\"\"ruleDef\"\"\"\n self.vs[10][\"mm__\"] = \"\"\"backward_link\"\"\"\n self.vs[10][\"GUID__\"] = UUID('869d5d52-235c-4240-af78-31e36a1f47d7')\n self.vs[11][\"name\"] = \"\"\"inst1\"\"\"\n self.vs[11][\"classtype\"] = \"\"\"Inst\"\"\"\n self.vs[11][\"mm__\"] = \"\"\"Inst\"\"\"\n self.vs[11][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[11][\"GUID__\"] = UUID('a4079b80-e123-4015-96c9-8e664b15e053')\n self.vs[12][\"name\"] = \"\"\"inst2\"\"\"\n self.vs[12][\"classtype\"] = \"\"\"Inst\"\"\"\n self.vs[12][\"mm__\"] = \"\"\"Inst\"\"\"\n self.vs[12][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[12][\"GUID__\"] = UUID('a3eef854-3648-462d-be65-3eca75bdebf7')\n self.vs[13][\"name\"] = \"\"\"name1\"\"\"\n self.vs[13][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[13][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[13][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[13][\"GUID__\"] = UUID('9b94a56a-dd11-415e-8663-6f429c2c0753')\n self.vs[14][\"name\"] = \"\"\"name2\"\"\"\n self.vs[14][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[14][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[14][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[14][\"GUID__\"] = UUID('d90c8a9c-eee1-48af-9308-abbb6052af8f')\n self.vs[15][\"name\"] = \"\"\"name3\"\"\"\n self.vs[15][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[15][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[15][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[15][\"GUID__\"] = UUID('8e53fe34-6fcc-4059-8042-db911db6e812')\n 
self.vs[16][\"name\"] = \"\"\"name4\"\"\"\n self.vs[16][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[16][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[16][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[16][\"GUID__\"] = UUID('4f23669c-d236-4a8d-b52b-1f37ba406f94')\n self.vs[17][\"name\"] = \"\"\"name5\"\"\"\n self.vs[17][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[17][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[17][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[17][\"GUID__\"] = UUID('91bc841f-2211-4638-a340-584da8347c98')\n self.vs[18][\"name\"] = \"\"\"name6\"\"\"\n self.vs[18][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[18][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[18][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[18][\"GUID__\"] = UUID('8a109a2d-2d70-4318-8a72-46c784206075')\n self.vs[19][\"name\"] = \"\"\"name7\"\"\"\n self.vs[19][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[19][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[19][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[19][\"GUID__\"] = UUID('5a95e461-d2f8-435b-9e77-af581d91ee29')\n self.vs[20][\"name\"] = \"\"\"name8\"\"\"\n self.vs[20][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[20][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[20][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[20][\"GUID__\"] = UUID('c600b1fb-8c9c-4ef2-b597-8137d9bdfb08')\n self.vs[21][\"name\"] = \"\"\"name9\"\"\"\n self.vs[21][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[21][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[21][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[21][\"GUID__\"] = UUID('708cd8f1-6e3d-4dfa-af00-18e9d43a01a4')\n self.vs[22][\"name\"] = \"\"\"name10\"\"\"\n self.vs[22][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[22][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[22][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[22][\"GUID__\"] = UUID('132e8292-4471-498d-a202-3d2abc7ab5ca')\n self.vs[23][\"name\"] = \"\"\"name11\"\"\"\n self.vs[23][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[23][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[23][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[23][\"GUID__\"] = UUID('fdb484f0-a8b5-4b9e-86a6-b679b1012005')\n self.vs[24][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[24][\"GUID__\"] = UUID('2a8418a3-cb80-496b-a1e0-7419de2ae33f')\n self.vs[25][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[25][\"GUID__\"] = UUID('4f37af75-2b77-45c1-93d1-8aae7cf14cc8')\n self.vs[26][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[26][\"GUID__\"] = UUID('54ef6fcc-cb9a-494e-aa36-f44525e4a0b0')\n self.vs[27][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[27][\"GUID__\"] = UUID('22858e97-7bbe-460d-b44b-14652852a592')\n self.vs[28][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[28][\"GUID__\"] = UUID('c3fcdb66-34da-4c82-b163-e5ab5f04e5c0')\n self.vs[29][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[29][\"GUID__\"] = UUID('88c90884-ae83-49af-96da-74f03c7f80ce')\n self.vs[30][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[30][\"GUID__\"] = UUID('1e3c412d-8372-4ba5-8a56-9d82407b79d0')\n self.vs[31][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[31][\"GUID__\"] = UUID('a500f0c7-1535-40ed-802e-a883517bbc64')\n self.vs[32][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[32][\"GUID__\"] = UUID('ed658c5a-81c3-4938-920e-98953de205ba')\n self.vs[33][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[33][\"GUID__\"] = UUID('49be0f69-494e-4f45-8923-582778c6828a')\n self.vs[34][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[34][\"GUID__\"] = UUID('e3709cc9-ed04-44f9-b8a7-a8f9f5939f3b')\n self.vs[35][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[35][\"GUID__\"] = UUID('8a657ede-e29d-4a28-9c1c-4c95a3ecd3b6')\n 
self.vs[36][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[36][\"GUID__\"] = UUID('b3cd8a7c-7deb-4b8c-9ed2-4a22bd6b5a39')\n self.vs[37][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[37][\"GUID__\"] = UUID('2287628a-d22b-427b-bdfd-d24d04bd46ad')\n self.vs[38][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[38][\"GUID__\"] = UUID('65083504-7423-4b8f-8b3e-7dc369fa08db')\n self.vs[39][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[39][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[39][\"GUID__\"] = UUID('dd5a6c0f-e438-4f23-ad0f-acd02dd4afe8')\n self.vs[40][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[40][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[40][\"GUID__\"] = UUID('d4bcb4b5-37a3-4d04-895f-d689ea89c825')\n self.vs[41][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[41][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[41][\"GUID__\"] = UUID('b860cc3c-a70a-4c66-9bb9-c1fd1395b23c')\n self.vs[42][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[42][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[42][\"GUID__\"] = UUID('97c4f558-4e1a-4a85-82e4-e0500374d80f')\n self.vs[43][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[43][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[43][\"GUID__\"] = UUID('58acb66a-2008-4ef3-975f-1db1219bd830')\n self.vs[44][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[44][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[44][\"GUID__\"] = UUID('5e14b29f-f5e6-4d6d-bfac-8616df51ab56')\n self.vs[45][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[45][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[45][\"GUID__\"] = UUID('57ac3f37-c63f-4a74-bc90-a846fb38e370')\n self.vs[46][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[46][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[46][\"GUID__\"] = UUID('9fc39a10-40e0-47f4-93c6-eccc9fdbd594')\n self.vs[47][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[47][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[47][\"GUID__\"] = UUID('00e09455-e8b5-414e-8eee-abbe55b7a65d')\n self.vs[48][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[48][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[48][\"GUID__\"] = UUID('17170197-069c-44fa-9239-dec8622935ee')\n self.vs[49][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[49][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[49][\"GUID__\"] = UUID('a4654b49-ee9c-4f69-a4e2-b8101c7086d2')\n self.vs[50][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[50][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[50][\"GUID__\"] = UUID('f9e0515c-b37c-4c22-8fe9-49c98acd152d')\n self.vs[51][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[51][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[51][\"GUID__\"] = UUID('2c60fd52-acfa-4cba-8c04-53c9affdc4db')\n self.vs[52][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[52][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[52][\"GUID__\"] = UUID('f8f3ccd7-1cd5-4a57-b6a8-d35ba5bef6e4')\n self.vs[53][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[53][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[53][\"GUID__\"] = UUID('7c94a074-10cb-4087-acd1-09f74b36fee5')\n self.vs[54][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[54][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[54][\"GUID__\"] = UUID('857117de-5cb0-4717-8c19-a916f3913d44')\n self.vs[55][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[55][\"GUID__\"] = UUID('be66b7a4-a420-4307-9c3e-15a25480f612')\n self.vs[56][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[56][\"GUID__\"] = UUID('8b06f23c-dc76-480c-a91b-2a89628187bb')\n 
self.vs[57][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[57][\"GUID__\"] = UUID('a30e8284-77ae-44b5-83fe-950b7a7cf134')\n self.vs[58][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[58][\"GUID__\"] = UUID('d79efc53-0195-4578-9e6e-f325fa1b9347')\n self.vs[59][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[59][\"GUID__\"] = UUID('4c20c97d-c715-4ddc-ba86-f4b8f93342f2')\n self.vs[60][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[60][\"GUID__\"] = UUID('b6badd99-bce6-4ecb-95f2-2a56eb8e31ec')\n self.vs[61][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[61][\"GUID__\"] = UUID('784aca61-7263-4894-ada3-514b7dc1263c')\n self.vs[62][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[62][\"GUID__\"] = UUID('b751aba0-9035-400e-81b0-a05af5ff13f8')\n self.vs[63][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[63][\"GUID__\"] = UUID('f5e9aa39-f124-44ff-bf9e-835d8231fa1c')\n self.vs[64][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[64][\"GUID__\"] = UUID('adb9f451-c62d-4218-aebc-d7065b89a497')\n self.vs[65][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[65][\"GUID__\"] = UUID('71250a4b-2989-43ad-8a29-d2c8f7011af6')\n self.vs[66][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[66][\"GUID__\"] = UUID('ef32cf77-f92d-4364-b997-484a66740660')\n self.vs[67][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[67][\"GUID__\"] = UUID('c3c01696-8c64-45f7-a598-6e443991711f')\n self.vs[68][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[68][\"GUID__\"] = UUID('0481036c-254e-4f46-a7c3-6f4a865fe7bd')\n self.vs[69][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[69][\"GUID__\"] = UUID('f98b92f3-81c2-403a-ba4b-29cb117d561a')\n self.vs[70][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[70][\"GUID__\"] = UUID('c32d7a5a-e311-48d5-b3fc-2a284673c4aa')\n self.vs[71][\"name\"] = \"\"\"eq1\"\"\"\n self.vs[71][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[71][\"GUID__\"] = UUID('0abf26da-d349-4bad-be96-014c8959a4cd')\n self.vs[72][\"name\"] = \"\"\"eq2\"\"\"\n self.vs[72][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[72][\"GUID__\"] = UUID('af92b37e-0c63-4fe5-a906-7cd312cad172')\n self.vs[73][\"name\"] = \"\"\"eq3\"\"\"\n self.vs[73][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[73][\"GUID__\"] = UUID('108e8752-a98c-44df-b24a-3b958c450846')\n self.vs[74][\"name\"] = \"\"\"eq4\"\"\"\n self.vs[74][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[74][\"GUID__\"] = UUID('340c5b78-fbbc-4734-ac7d-8a1f953679e3')\n self.vs[75][\"name\"] = \"\"\"eq5\"\"\"\n self.vs[75][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[75][\"GUID__\"] = UUID('63513c17-c285-47ce-9b5c-e658df31b8bf')\n self.vs[76][\"name\"] = \"\"\"eq6\"\"\"\n self.vs[76][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[76][\"GUID__\"] = UUID('dfd958e8-0fd4-4975-b28f-dab1df8a6858')\n self.vs[77][\"name\"] = \"\"\"eq7\"\"\"\n self.vs[77][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[77][\"GUID__\"] = UUID('1cd0e4a3-2b1a-42c8-bdf3-f98e156d8265')\n self.vs[78][\"name\"] = \"\"\"eq8\"\"\"\n self.vs[78][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[78][\"GUID__\"] = UUID('d7c1a1c4-4b83-4e3c-9e1f-2212a30343b1')\n self.vs[79][\"name\"] = \"\"\"eq9\"\"\"\n self.vs[79][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[79][\"GUID__\"] = UUID('aea37644-aa22-4e82-92a7-17d85ad5acf3')\n self.vs[80][\"name\"] = \"\"\"eq10\"\"\"\n self.vs[80][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[80][\"GUID__\"] = UUID('f7db1558-e110-4984-b825-62e4ce6f1324')\n self.vs[81][\"name\"] = \"\"\"eq11\"\"\"\n self.vs[81][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[81][\"GUID__\"] = UUID('a0722a1f-aaa4-4ac3-99d3-5bea37c15e79')\n self.vs[82][\"name\"] = \"\"\"eq12\"\"\"\n self.vs[82][\"mm__\"] = 
\"\"\"Equation\"\"\"\n self.vs[82][\"GUID__\"] = UUID('ddbd74ac-21f7-4724-a2a8-b78c7389a8f4')\n self.vs[83][\"name\"] = \"\"\"eq13\"\"\"\n self.vs[83][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[83][\"GUID__\"] = UUID('a8fe40b1-4985-43d2-a874-0741d09ba4ae')\n self.vs[84][\"name\"] = \"\"\"eq14\"\"\"\n self.vs[84][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[84][\"GUID__\"] = UUID('281fd930-5f47-4b53-949b-e274ec95fdef')\n self.vs[85][\"name\"] = \"\"\"eq15\"\"\"\n self.vs[85][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[85][\"GUID__\"] = UUID('2e2199ae-3f44-4d76-b322-4b617a8c58db')\n self.vs[86][\"name\"] = \"\"\"eq16\"\"\"\n self.vs[86][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[86][\"GUID__\"] = UUID('25ad532f-5f8d-433a-bb65-507c97469275')\n self.vs[87][\"name\"] = \"\"\"isComposite\"\"\"\n self.vs[87][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[87][\"Type\"] = \"\"\"'Bool'\"\"\"\n self.vs[87][\"GUID__\"] = UUID('75b3e3d3-2cfc-4444-b65e-2fc5a8b7ae5d')\n self.vs[88][\"name\"] = \"\"\"literal\"\"\"\n self.vs[88][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[88][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[88][\"GUID__\"] = UUID('426aea1c-8a9f-4651-b297-9ec3c1c1352e')\n self.vs[89][\"name\"] = \"\"\"literal\"\"\"\n self.vs[89][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[89][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[89][\"GUID__\"] = UUID('284a3a1d-8a2d-4cef-9551-98d424afe038')\n self.vs[90][\"name\"] = \"\"\"literal\"\"\"\n self.vs[90][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[90][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[90][\"GUID__\"] = UUID('3b7a1cdc-9ffb-48db-994f-497c06449458')\n self.vs[91][\"name\"] = \"\"\"literal\"\"\"\n self.vs[91][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[91][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[91][\"GUID__\"] = UUID('40cff5ab-cab2-4fab-bbc1-c8039fe486ac')\n self.vs[92][\"name\"] = \"\"\"name\"\"\"\n self.vs[92][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[92][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[92][\"GUID__\"] = UUID('b9e0ab51-1690-44de-875b-773826f9e420')\n self.vs[93][\"name\"] = \"\"\"literal\"\"\"\n self.vs[93][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[93][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[93][\"GUID__\"] = UUID('708e489d-456a-4974-9198-73334eb3d1d8')\n self.vs[94][\"name\"] = \"\"\"literal\"\"\"\n self.vs[94][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[94][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[94][\"GUID__\"] = UUID('bdabcea3-c164-4f6b-a54f-be957abedb49')\n self.vs[95][\"name\"] = \"\"\"literal\"\"\"\n self.vs[95][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[95][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[95][\"GUID__\"] = UUID('22f79d9e-a9bf-41b5-9559-4560af4afc10')\n self.vs[96][\"name\"] = \"\"\"literal\"\"\"\n self.vs[96][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[96][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[96][\"GUID__\"] = UUID('56b242b2-5ebd-4a02-a1bb-829ecc6822a7')\n self.vs[97][\"name\"] = \"\"\"name\"\"\"\n self.vs[97][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[97][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[97][\"GUID__\"] = UUID('46680774-a892-41cb-8005-809b5eea2003')\n self.vs[98][\"name\"] = \"\"\"literal\"\"\"\n self.vs[98][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[98][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[98][\"GUID__\"] = UUID('c8c58f99-e94c-442b-a747-c873a43b903b')\n self.vs[99][\"name\"] = \"\"\"literal\"\"\"\n self.vs[99][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[99][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[99][\"GUID__\"] = UUID('18aa7445-341a-40e8-b09c-70904b3f9994')\n self.vs[100][\"name\"] = 
\"\"\"literal\"\"\"\n self.vs[100][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[100][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[100][\"GUID__\"] = UUID('9f63580a-288f-4d14-b275-b96062163c5a')\n self.vs[101][\"name\"] = \"\"\"pivot\"\"\"\n self.vs[101][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[101][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[101][\"GUID__\"] = UUID('c8777ba9-8c6e-4582-a082-81f2f34e6016')\n self.vs[102][\"name\"] = \"\"\"pivot\"\"\"\n self.vs[102][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[102][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[102][\"GUID__\"] = UUID('ce2a6aa7-c8ce-4cee-807c-cd4de96a08bf')\n self.vs[103][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[103][\"GUID__\"] = UUID('8119a747-1d59-4f48-83a6-16869a919672')\n self.vs[104][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[104][\"GUID__\"] = UUID('b7c5aeaf-7e59-4a81-9616-bb2474f2660f')\n self.vs[105][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[105][\"GUID__\"] = UUID('ced29f38-6ce7-449c-823f-34aaab43899b')\n self.vs[106][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[106][\"GUID__\"] = UUID('e29dc6da-439d-4a9d-9d40-e87aa9fbebd3')\n self.vs[107][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[107][\"GUID__\"] = UUID('af49357e-a46d-4ee5-ab4a-b6d6ef261df0')\n self.vs[108][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[108][\"GUID__\"] = UUID('ff49109b-ccc0-4635-9a33-d88c1d675bc6')\n self.vs[109][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[109][\"GUID__\"] = UUID('423ad2a2-0a19-4192-902d-706965800fef')\n self.vs[110][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[110][\"GUID__\"] = UUID('5864c11a-7792-4549-999f-bc86a4246314')\n self.vs[111][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[111][\"GUID__\"] = UUID('7182946d-d5f6-4a7c-acaa-d4eeb97133db')\n self.vs[112][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[112][\"GUID__\"] = UUID('d965f0b2-048d-490c-81f5-2b18446941de')\n self.vs[113][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[113][\"GUID__\"] = UUID('6e4c8ba9-6ab0-44d3-9cc6-c181772a1e3b')\n self.vs[114][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[114][\"GUID__\"] = UUID('5633c48b-1add-43eb-9789-1bece00f8079')\n self.vs[115][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[115][\"GUID__\"] = UUID('d2c598e2-09b1-4c12-acff-871f6662238a')\n self.vs[116][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[116][\"GUID__\"] = UUID('33a09bc8-cfa9-4367-834e-41bfae2fa7b6')\n self.vs[117][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[117][\"GUID__\"] = UUID('858b7fe0-edf0-4eda-ae81-6477f6499fb7')\n self.vs[118][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[118][\"GUID__\"] = UUID('a19a8472-6b86-4aee-b2bb-d66b4d26aeea')\n self.vs[119][\"name\"] = \"\"\"true\"\"\"\n self.vs[119][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[119][\"Type\"] = \"\"\"'Bool'\"\"\"\n self.vs[119][\"GUID__\"] = UUID('ba19f7ae-c0e3-43f5-9c87-2e08b3ff7d4e')\n self.vs[120][\"name\"] = \"\"\"sh\"\"\"\n self.vs[120][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[120][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[120][\"GUID__\"] = UUID('b78c45bc-2ecd-438a-a905-dbd90a4edeed')\n self.vs[121][\"name\"] = \"\"\"exit_in\"\"\"\n self.vs[121][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[121][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[121][\"GUID__\"] = UUID('0bbc3f31-d9e3-49a7-b213-d874f9d6e0ac')\n self.vs[122][\"name\"] = \"\"\"exack_in\"\"\"\n self.vs[122][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[122][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[122][\"GUID__\"] = UUID('e58ce45d-49e1-44b9-8a0a-78c1f1305afd')\n self.vs[123][\"name\"] = \"\"\"sh_in\"\"\"\n self.vs[123][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[123][\"Type\"] = 
\"\"\"'String'\"\"\"\n self.vs[123][\"GUID__\"] = UUID('34144527-9a72-44f3-8afe-f49bbe5fac47')\n self.vs[124][\"name\"] = \"\"\"C\"\"\"\n self.vs[124][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[124][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[124][\"GUID__\"] = UUID('61ed1583-c983-4369-b0de-0c3ca82aba52')\n self.vs[125][\"name\"] = \"\"\"enp\"\"\"\n self.vs[125][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[125][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[125][\"GUID__\"] = UUID('a4bfdfad-6e17-46b1-9939-685bd4cbfb62')\n self.vs[126][\"name\"] = \"\"\"exit_in\"\"\"\n self.vs[126][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[126][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[126][\"GUID__\"] = UUID('92007092-a080-4cb3-ba90-cbc8e6637732')\n self.vs[127][\"name\"] = \"\"\"exack_in\"\"\"\n self.vs[127][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[127][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[127][\"GUID__\"] = UUID('1a61b1e5-e926-45cd-bf6a-60adeef0d338')\n self.vs[128][\"name\"] = \"\"\"sh_in\"\"\"\n self.vs[128][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[128][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[128][\"GUID__\"] = UUID('95c52a1f-42ae-4384-bcfc-0cab537ee1cf')\n self.vs[129][\"name\"] = \"\"\"H\"\"\"\n self.vs[129][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[129][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[129][\"GUID__\"] = UUID('146f9ec3-3f2d-48a1-92ac-a5546268e069')\n self.vs[130][\"name\"] = \"\"\"exit_in\"\"\"\n self.vs[130][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[130][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[130][\"GUID__\"] = UUID('c52aec39-171b-4710-8150-b343a557bebf')\n self.vs[131][\"name\"] = \"\"\"exack_in\"\"\"\n self.vs[131][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[131][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[131][\"GUID__\"] = UUID('e2ab70c6-01a2-420a-9e96-bb238fe29689')\n self.vs[132][\"name\"] = \"\"\"sh_in\"\"\"\n self.vs[132][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[132][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[132][\"GUID__\"] = UUID('f476d190-6014-4c6a-a27f-c3f45b9d10ba')\n self.vs[133][\"name\"] = \"\"\"procdef\"\"\"\n self.vs[133][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[133][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[133][\"GUID__\"] = UUID('5a678e2c-8444-4e53-a430-5f0b1a603c07')\n self.vs[134][\"name\"] = \"\"\"localdefcompstate\"\"\"\n self.vs[134][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[134][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[134][\"GUID__\"] = UUID('dfac50b9-4956-45c0-b8a5-14f609e078e5')\n self.vs[135][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[135][\"GUID__\"] = UUID('632f235b-d18d-4939-b4b8-9d38a7505cc8')\n self.vs[136][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[136][\"GUID__\"] = UUID('b5724e21-522d-415c-8538-59b279583ff4')\n self.vs[137][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[137][\"GUID__\"] = UUID('a05b3ebc-4b86-43f0-adcd-d46d8c4d773e')\n self.vs[138][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[138][\"GUID__\"] = UUID('b43788ff-9ab6-4bec-b8ae-c76b10985fc3')\n self.vs[139][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[139][\"GUID__\"] = UUID('16c28ca0-6429-4540-9505-e8057aad958a')\n self.vs[140][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[140][\"GUID__\"] = UUID('e97eb3e2-8fca-41a6-9599-a173acee4c22')\n self.vs[141][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[141][\"GUID__\"] = UUID('eda0c12e-26c0-4296-9d34-62cbe764e151')\n self.vs[142][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[142][\"GUID__\"] = UUID('336e11b9-cbc3-41b4-9c07-041ed4ba1453')\n self.vs[143][\"mm__\"] = \"\"\"apply_contains\"\"\"\n 
self.vs[143][\"GUID__\"] = UUID('31297722-e0a1-4e03-8c28-44abe1930256')\n self.vs[144][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[144][\"GUID__\"] = UUID('51fcd9e5-c817-4710-9a24-d080b3f8fa71')\n self.vs[145][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[145][\"GUID__\"] = UUID('7acc9f40-a78c-47ac-8e38-fc7f4647c2f1')\n self.vs[146][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[146][\"GUID__\"] = UUID('c94eef8e-f552-4b53-ba99-f21c13dfca4a')\n self.vs[147][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[147][\"GUID__\"] = UUID('09d8138f-8be9-4a30-af93-cc714a2570db')\n self.vs[148][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[148][\"GUID__\"] = UUID('792865ce-75f2-41cb-9c42-74f831e96a76')\n self.vs[149][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[149][\"GUID__\"] = UUID('fb0f0ebe-59c0-4ffc-8370-2ead7eb40f18')\n self.vs[150][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[150][\"GUID__\"] = UUID('23a9a8da-507e-4d11-a8e0-f5b721f01f96')\n self.vs[151][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[151][\"GUID__\"] = UUID('38fd864c-df5e-4e85-9838-2e665d75637c')", "def __init__(self):\n this = _coin.new_SoMFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoShaderParameter4i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoClipPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\r\n\r\n super(Panel, self).__init__()\r\n\r\n # Define private dictionary attributes.\r\n\r\n # Define private list attributes.\r\n self._lambdab_count = []\r\n\r\n # Define private scalar attributes.\r\n\r\n # Define public dictionary attributes.\r\n\r\n # Define public list attributes.\r\n\r\n # Define public scalar attributes.\r\n self.quality = 0\r\n self.q_override = 0.0\r\n self.function = 0\r\n self.piA = 0.0\r\n self.piF = 0.0\r\n self.piQ = 0.0", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n _itkQuadEdgeMeshPointPython.itkQuadEdgeMeshPointF2GQEULLULLBBT_swiginit(self, _itkQuadEdgeMeshPointPython.new_itkQuadEdgeMeshPointF2GQEULLULLBBT(*args))", "def __init__(self, *args):\n _itkOptimizerParametersPython.itkOptimizerParametersD_swiginit(self, _itkOptimizerParametersPython.new_itkOptimizerParametersD(*args))", "def __init__(self, params: parameters_lib.SwirlLMParameters):\n super(ConstantDensity, self).__init__(params)\n\n self.rho = params.rho", "def init(self, state: 'SoState') -> \"void\":\n return _coin.SoClipPlaneElement_init(self, state)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
bits_per_symbol(self) -> unsigned int
def bits_per_symbol(self):
    return _digital_swig.digital_constellation_qpsk_sptr_bits_per_symbol(self)
[ "def bitness():\n pass", "def __int__(self):\n return self.bits", "def __len__(self):\n return self._bits", "def bits_per_register(cls) -> int:\n return cls._bits_per_register", "def max_symbols (self):\n \n raise NotImplementedError", "def symbol_type(self):\n return bool(self.current_token in JackTokenizer.symbols)", "def has_names(self):\n return self & (0b0000_0100 | 0b0000_1000)", "def symbol_count (self):\n \n raise NotImplementedError", "def is_special_symbol(self, symbol):\r\n i = len(symbol)\r\n return i in SPECIAL_SYMBOLS and symbol in SPECIAL_SYMBOLS[i]", "def atom_bits_extra(self):\n if self.pop_type == 16:\n atom_bits = self.atom_bits()\n assert atom_bits <= 9, \"Too many atom bits\"\n return max(atom_bits - 5, 0)\n else:\n return 0 # meaningless if pop_type != 16", "def check_symbols(self, symbol_map):\n\n raise NotImplementedError()", "def num_symbols(self):\r\n return self['sh_size'] // self['sh_entsize']", "def hook_IsSymbolicUInt(state, arg):\n return DeepManticore(state).api_is_symbolic_uint(arg)", "def num_bits(self):\n raise NotImplementedError", "def bitsize(x):\n return len(bin(x)) - 2", "def decodeBits(packets):\n raise SnmplibNotImplemented, \"SNMP BITS data type not implemented yet.\"", "def __repr__(self):\n if self.bit:\n return '1'\n else:\n return '0'", "def isSymbol(x):\n return type(x) == type(__empty_symbol)", "def bit_size(self):\n return type_get_bit_size(self)", "def is_int8(self) -> bool:\n return self.has_layer_of_type('FakeQuantize')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
constellation_qpsk() -> digital_constellation_qpsk_sptr

Digital constellation for QPSK.
def constellation_qpsk():
    return _digital_swig.constellation_qpsk()
[ "def pc_nproduced(self):\n return _radio_astro_swig.dedispersion_sptr_pc_nproduced(self)", "def pc_nproduced(self):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_sptr_pc_nproduced(self)", "def qpsk(input_bits, noise):\n modulator = Modulator()\n demodulator = Demodulator()\n channel = Channel()\n signal = modulator.make_qpsk_mod(input_bits)\n\n signal = channel.send_signal(signal, noise)\n\n result_bits = demodulator.make_qpsk_demod(signal, channel)\n return result_bits", "def pc_nproduced(self):\n return _wavelet_swig.squash_ff_sptr_pc_nproduced(self)", "def pc_nproduced_var(self):\n return _radio_astro_swig.dedispersion_sptr_pc_nproduced_var(self)", "def pc_nproduced(self):\n return _wavelet_swig.wvps_ff_sptr_pc_nproduced(self)", "def pc_nproduced(self):\n return _wavelet_swig.wavelet_ff_sptr_pc_nproduced(self)", "def chisq(self, star, logger=None):\n\n # Start by getting all interpolation coefficients for all observed points\n data, weight, u, v = star.data.getDataVector()\n if not star.data.values_are_sb:\n # If the images are flux instead of surface brightness, convert\n # them into SB\n star_pix_area = star.data.pixel_area\n data /= star_pix_area\n weight *= star_pix_area*star_pix_area\n\n # Subtract star.fit.center from u, v:\n u -= star.fit.center[0]\n v -= star.fit.center[1]\n\n if self._force_model_center:\n coeffs, dcdu, dcdv, psfx, psfy = self.interp.derivatives(u/self.du, v/self.du)\n dcdu /= self.du\n dcdv /= self.du\n else:\n coeffs, psfx, psfy = self.interp(u/self.du, v/self.du)\n\n # Turn the (psfy,psfx) coordinates into an index into 1d parameter vector.\n index1d = self._indexFromPsfxy(psfx, psfy)\n # All invalid pixel references now have negative index; record and set to zero\n nopsf = index1d < 0\n index1d = np.where(nopsf, 0, index1d)\n # And null the coefficients for such pixels\n coeffs = np.where(nopsf, 0., coeffs)\n if self._force_model_center:\n dcdu = np.where(nopsf, 0., dcdu)\n dcdv = np.where(nopsf, 0., dcdv)\n\n # Multiply kernel (and derivs) by current PSF element values\n # to get current estimates\n pvals = self._fullPsf1d(star)[index1d]\n mod = np.sum(coeffs*pvals, axis=1)\n if self._force_model_center:\n dmdu = star.fit.flux * np.sum(dcdu*pvals, axis=1)\n dmdv = star.fit.flux * np.sum(dcdv*pvals, axis=1)\n resid = data - mod*star.fit.flux\n\n # Now begin construction of alpha/beta/chisq that give\n # chisq vs linearized model.\n rw = resid * weight\n chisq = np.sum(resid * rw)\n\n # To begin with, we build alpha and beta over all PSF points\n # within mask, *and* the flux (and center) shifts. Then\n # will eliminate the constrained PSF points, and then\n # marginalize over the flux (and center).\n\n # Augment the coeffs and index1d vectors with extra column(s)\n # for the shift in flux (and center), so it will be\n # the derivative of model w.r.t. augmented parameter set\n derivs = np.zeros( (coeffs.shape[0], coeffs.shape[1]+self._constraints),\n dtype=float)\n indices = np.zeros( (index1d.shape[0], index1d.shape[1]+self._constraints),\n dtype=int)\n derivs[:, :coeffs.shape[1]] = star.fit.flux * coeffs #derivs wrt PSF elements\n indices[:,:index1d.shape[1]] = index1d\n\n # Add derivs wrt flux\n derivs[:,coeffs.shape[1]] = mod\n dflux_index = self._nparams + self._constraints\n indices[:,coeffs.shape[1]] = dflux_index\n if self._force_model_center:\n # Derivs w.r.t. 
center shift:\n derivs[:,coeffs.shape[1]+1] = dmdu\n derivs[:,coeffs.shape[1]+2] = dmdv\n indices[:,coeffs.shape[1]+1] = dflux_index+1\n indices[:,coeffs.shape[1]+2] = dflux_index+2\n\n # Accumulate alpha and beta point by point. I don't\n # know how to do it purely with numpy calls instead of a loop over data points\n nderivs = self._nparams + 2*self._constraints\n beta = np.zeros(nderivs, dtype=float)\n alpha = np.zeros( (nderivs,nderivs), dtype=float)\n for i in range(len(data)):\n ii = indices[i,:]\n cc = derivs[i,:]\n # beta_j += resid_i * weight_i * coeff_{ij}\n beta[ii] += rw[i] * cc\n # alpha_jk += weight_i * coeff_ij * coeff_ik\n dalpha = cc[np.newaxis,:]*cc[:,np.newaxis] * weight[i]\n iouter = np.broadcast_to(ii, (len(ii),len(ii)))\n alpha[iouter.flatten(), iouter.T.flatten()] += dalpha.flatten()\n\n # Next we eliminate the first _constraints PSF values from the parameters\n # using the linear constraints that dp0 = - _a * dp1\n s0 = slice(None, self._constraints) # parameters to eliminate\n s1 = slice(self._constraints, None) # parameters to keep\n beta = beta[s1] - np.dot(beta[s0], self._a).T\n alpha = alpha[s1,s1] \\\n - np.dot( self._a.T, alpha[s0,s1]) \\\n - np.dot( alpha[s1,s0], self._a) \\\n + np.dot( self._a.T, np.dot(alpha[s0,s0],self._a))\n\n # Now we marginalize over the flux (and center). These shifts are at\n # the back end of the parameter array.\n # But first we need to apply a prior to the shift of flux (and center)\n # to avoid numerical instabilities when these are degenerate because of\n # missing pixel data or otherwise unspecified PSF\n # ??? make these properties of the Model???\n fractional_flux_prior = 0.5 # prior of 50% on pre-existing flux ???\n center_shift_prior = 0.5*self.du #prior of 0.5 uv-plane pixels ???\n alpha[self._nparams, self._nparams] += (fractional_flux_prior*star.fit.flux)**(-2.)\n if self._force_model_center:\n alpha[self._nparams+1, self._nparams+1] += (center_shift_prior)**(-2.)\n alpha[self._nparams+2, self._nparams+2] += (center_shift_prior)**(-2.)\n\n s0 = slice(None, self._nparams) # parameters to keep\n s1 = slice(self._nparams, None) # parameters to marginalize\n a11inv = np.linalg.inv(alpha[s1,s1])\n # Calculate shift in flux - ??? 
Note that this is the solution for shift\n # when PSF parameters do *not* move; so if we subsequently update\n # the PSF params, we miss shifts due to covariances between flux and PSF.\n\n df = np.dot(a11inv, beta[s1])\n outflux = star.fit.flux + df[0]\n if self._force_model_center:\n outcenter = (star.fit.center[0] + df[1],\n star.fit.center[1] + df[2])\n else:\n outcenter = star.fit.center\n\n # Now get the final alpha, beta, chisq for the remaining PSF params\n outchisq = chisq - np.dot(beta[s1].T,np.dot(a11inv, beta[s1]))\n tmp = np.dot(a11inv, alpha[s1,s0])\n outbeta = beta[s0] - np.dot(beta[s1].T,tmp)\n outalpha = alpha[s0,s0] - np.dot(alpha[s0,s1],tmp)\n\n outfit = StarFit(star.fit.params,\n flux = outflux,\n center = outcenter,\n chisq = outchisq,\n dof = np.count_nonzero(weight) - self._nparams,\n alpha = outalpha,\n beta = outbeta)\n\n return Star(star.data, outfit)", "def construct_sk(public_key):\n point = public_key.pointQ\n x = int(point.x).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n y = int(point.y).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n sk = x + y\n return H(sk)", "def pc_nproduced(self):\n return _bs_swig.ec_descrambler_sync_sptr_pc_nproduced(self)", "def pc_nproduced_var(self):\n return _wavelet_swig.squash_ff_sptr_pc_nproduced_var(self)", "def pc_nproduced(self):\n return _radio_astro_swig.vmedian_sptr_pc_nproduced(self)", "def pc_nproduced(self):\n return _mediatools_swig.mediatools_audiosource_s_sptr_pc_nproduced(self)", "def play_cartpole_q_learning():\n Q = defaultdict(float)\n gamma = 0.99 # Discounting factor\n alpha = 0.5 # Soft update param\n\n env = gym.make('CartPole-v0')\n actions = env.action_space\n\n def update_Q(s, r, a, s_next, done):\n \"\"\"Updates the current q value.\n\n This learns the action value (Q-value) and estimates the next action\n using the Bellman equation, estimating the next action by adopting the\n best Q value instead of following the current policy.\n\n TODO: Document parameters.\n \"\"\"\n max_q_next = max([Q[s_next, action] for action in actions])\n # Do not include the next state's value if currently at the terminal state.\n Q[s, a] += alpha * (r + gamma * max_q_next * (1.0 - done) - Q[s, a])", "def pc_nproduced(self):\n return _wmbus_swig.wmbus_preamble_sptr_pc_nproduced(self)", "def initqp(self):\n\n self.qp = get_spherical_quad_points()\n sp = cartesian2spherical(self.qp.points)\n self.sqp = sp", "def pc_nproduced(self):\n return _raw_util.raw_regenerate_peak3_sptr_pc_nproduced(self)", "def pc_nproduced(self):\n return _wmbus_swig.wmbus_packet_sink_sptr_pc_nproduced(self)", "def pc_nproduced(self):\n return _raw_util.raw_divide_ff_sptr_pc_nproduced(self)", "def pc_nproduced(self):\n return _bs_swig.ec_invert_sync_sptr_pc_nproduced(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_constellation_dqpsk_sptr
__init__(self, p) -> digital_constellation_dqpsk_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_constellation_dqpsk_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n _itkOptimizerParametersPython.itkOptimizerParametersD_swiginit(self, _itkOptimizerParametersPython.new_itkOptimizerParametersD(*args))", "def __init__(self, *args):\n _itkOptimizerParametersPython.itkOptimizerParametersHelperD_swiginit(self, _itkOptimizerParametersPython.new_itkOptimizerParametersHelperD(*args))", "def construct_sk(public_key):\n point = public_key.pointQ\n x = int(point.x).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n y = int(point.y).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n sk = x + y\n return H(sk)", "def __init__(self, spec_data, lvol, dp=0.0, innout=0, plusminus=+1):\n super().__init__(spec_data, lvol)\n\n self.fortran_module.specpjh.init_pjh(dp, innout, plusminus)\n self.dp = dp\n self.innout = innout\n self.plusminus = plusminus\n self.initialized = True\n\n ## the size of the problem, 2 for 1.5 or 2D system\n self.problem_size = 2\n ## choose the variable for Poincare plot\n self.poincare_plot_type = \"yx\"\n ## the x label of Poincare plot\n self.poincare_plot_xlabel = \"theta\"\n ## the y label of Poincare plot\n self.poincare_plot_ylabel = \"p_theta\"", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\r\n\r\n super(Panel, self).__init__()\r\n\r\n # Define private dictionary attributes.\r\n\r\n # Define private list attributes.\r\n self._lambdab_count = []\r\n\r\n # Define private scalar attributes.\r\n\r\n # Define public dictionary attributes.\r\n\r\n # Define public list attributes.\r\n\r\n # Define public scalar attributes.\r\n self.quality = 0\r\n self.q_override = 0.0\r\n self.function = 0\r\n self.piA = 0.0\r\n self.piF = 0.0\r\n self.piQ = 0.0", "def __init__(self):\n self.port=Config.PortPrinter # Assign the name of the port written in Config.py to self.port\n self.FirstMove=0 # Variable wich allow us to know if this is the first movement of the 3d-mill\n self.Coord={} # Create a dictionnary\n self.cnc=CNC(self.port) # Call the class CNC\n self.cnc.OpenConnection() # Open the Connection with the device\n self.NbWells=0 # Count the number of wells \n Wells.Wells_1(self)", "def __init__(self, params: parameters_lib.SwirlLMParameters):\n super(ConstantDensity, self).__init__(params)\n\n self.rho = params.rho", "def __init__(self, pscu, sensor_idx):\n self.pscu = pscu\n self.sensor_idx = sensor_idx\n\n self.param_tree = ParameterTree({\n \"leak_impedance\": (self.get_leak_impedance, None),\n \"leak_volts\": (self.get_leak_volts, None),\n \"setpoint\": (self.get_set_point, None),\n \"setpoint_volts\": (self.get_set_point_volts, None),\n \"tripped\": (self.get_tripped, None),\n \"trace\": (self.get_trace, None),\n \"disabled\": (self.get_disabled, None),\n \"sensor_name\": (self.get_name, None),\n \"mode\": (self.get_mode, None),\n })", "def __init__(self):\n this = _coin.new_SoSFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, p, i, d, get_current_time, get_feedback_value):\r\n # p, i, and d constants\r\n self.p, self.i, self.d = p, i, d\r\n\r\n # saves the functions that return the time and the feedback\r\n self.get_current_time = get_current_time\r\n 
self.get_feedback_value = get_feedback_value", "def __init__(self):\n super(CorrelogramPooling3D, self).__init__()", "def __init__(self, *args):\n _itkQuadEdgeMeshPointPython.itkQuadEdgeMeshPointF2GQEULLULLBBT_swiginit(self, _itkQuadEdgeMeshPointPython.new_itkQuadEdgeMeshPointF2GQEULLULLBBT(*args))", "def __init__(self):\n this = _coin.new_SoShaderParameter4i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args, **kwargs):\n # If a PSCU has been passed in keyword arguments use that, otherwise create a new one\n if 'pscu' in kwargs:\n self.pscu = kwargs['pscu']\n else:\n self.pscu = PSCU(*args, **kwargs)\n\n # Get the QuadData containers associated with the PSCU\n self.quad_data = [QuadData(quad=q) for q in self.pscu.quad]\n\n # Get the temperature and humidity containers associated with the PSCU\n self.temperature_data = [\n TempData(self.pscu, i) for i in range(self.pscu.num_temperatures)\n ]\n self.humidity_data = [\n HumidityData(self.pscu, i) for i in range(self.pscu.num_humidities)\n ]\n\n # Build the parameter tree of the PSCU\n self.param_tree = ParameterTree({\n \"quad\": {\n \"quads\": [q.param_tree for q in self.quad_data],\n 'trace': (self.get_quad_traces, None),\n },\n \"temperature\": {\n \"sensors\": [t.param_tree for t in self.temperature_data],\n \"overall\": (self.pscu.get_temperature_state, None),\n \"latched\": (self.pscu.get_temperature_latched, None),\n },\n \"humidity\": {\n \"sensors\": [h.param_tree for h in self.humidity_data],\n \"overall\": (self.pscu.get_humidity_state, None),\n \"latched\": (self.pscu.get_humidity_latched, None),\n },\n \"fan\": {\n \"target\": (self.pscu.get_fan_target, self.pscu.set_fan_target),\n \"currentspeed_volts\": (self.pscu.get_fan_speed_volts, None),\n \"currentspeed\": (self.pscu.get_fan_speed, None),\n \"setpoint\": (self.pscu.get_fan_set_point, None),\n \"setpoint_volts\": (self.pscu.get_fan_set_point_volts, None),\n \"tripped\": (self.pscu.get_fan_tripped, None),\n \"overall\": (self.pscu.get_fan_state, None),\n \"latched\": (self.pscu.get_fan_latched, None),\n \"mode\": (self.pscu.get_fan_mode, None),\n },\n \"pump\": {\n \"flow\": (self.pscu.get_pump_flow, None),\n \"flow_volts\": (self.pscu.get_pump_flow_volts, None),\n \"setpoint\": (self.pscu.get_pump_set_point, None),\n \"setpoint_volts\": (self.pscu.get_pump_set_point_volts, None),\n \"tripped\": (self.pscu.get_pump_tripped, None),\n \"overall\": (self.pscu.get_pump_state, None),\n \"latched\": (self.pscu.get_pump_latched, None),\n \"mode\": (self.pscu.get_pump_mode, None),\n },\n \"trace\": {\n \"overall\": (self.pscu.get_trace_state, None),\n \"latched\": (self.pscu.get_trace_latched, None),\n },\n \"position\": (self.pscu.get_position, None),\n \"position_volts\": (self.pscu.get_position_volts, None),\n \"overall\": (self.pscu.get_health, None),\n \"latched\": (self.get_all_latched, None),\n \"armed\": (self.pscu.get_armed, self.pscu.set_armed),\n \"allEnabled\": (self.pscu.get_all_enabled, self.pscu.enable_all),\n \"enableInterval\": (self.pscu.get_enable_interval, None),\n \"displayError\": (self.pscu.get_display_error, None),\n })", "def __init__(self):\n this = _coin.new_doublep()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n _ida_pro.sel_pointer_swiginit(self, _ida_pro.new_sel_pointer(*args))", "def initialize_PSD(self):\n self.W_noise_NnFK = self.xp.random.rand(self.n_noise, self.n_freq, self.n_basis_noise).astype(self.TYPE_FLOAT)\n 
self.H_noise_NnKT = self.xp.random.rand(self.n_noise, self.n_basis_noise, self.n_time).astype(self.TYPE_FLOAT)\n\n self.U_F = self.xp.ones(self.n_freq, dtype=self.TYPE_FLOAT) / self.n_freq\n self.V_T = self.xp.ones(self.n_time, dtype=self.TYPE_FLOAT)\n\n power_observation_FT = (self.xp.abs(self.X_FTM) ** 2).mean(axis=2)\n if self.normalize_encoder_input:\n power_observation_FT = power_observation_FT / power_observation_FT.sum(axis=0).mean()\n\n self.Z_speech_DT = self.speech_VAE.encode_cupy(power_observation_FT.astype(self.xp.float32))\n self.z_link_speech = Z_link(self.Z_speech_DT.T)\n self.z_optimizer_speech = chainer.optimizers.Adam().setup(self.z_link_speech)\n self.power_speech_FT = self.speech_VAE.decode_cupy(self.Z_speech_DT)\n\n self.lambda_NFT = self.xp.zeros([self.n_source, self.n_freq, self.n_time], dtype=self.TYPE_FLOAT)\n self.lambda_NFT[0] = self.U_F[:, None] * self.V_T[None] * self.power_speech_FT\n self.lambda_NFT[1:] = self.W_noise_NnFK @ self.H_noise_NnKT" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
constellation_dqpsk() -> digital_constellation_dqpsk_sptr

Digital constellation for DQPSK.
def constellation_dqpsk():
    return _digital_swig.constellation_dqpsk()
[ "def pc_nproduced(self):\n return _radio_astro_swig.dedispersion_sptr_pc_nproduced(self)", "def pDpk(self, x, k):\n k = np.array(k)\n return 2*c*c*k/(self._omega*self._omega)", "def GradSimplex3DP(a, b, c, id, jd, kd):\n\n fa = JacobiP(a, 0, 0, id).reshape(len(a),1)\n dfa = GradJacobiP(a, 0, 0, id)\n gb = JacobiP(b, 2*id+1,0, jd).reshape(len(b),1)\n dgb = GradJacobiP(b, 2*id+1,0, jd)\n hc = JacobiP(c, 2*(id+jd)+2,0, kd).reshape(len(c),1)\n dhc = GradJacobiP(c, 2*(id+jd)+2,0, kd)\n\n # r-derivative\n # d/dr = da/dr d/da + db/dr d/db + dc/dr d/dx\n dmodedr = dfa*gb*hc\n if(id>0):\n dmodedr = dmodedr*((0.5*(1-b))**(id-1))\n if(id+jd>0):\n dmodedr = dmodedr*((0.5*(1-c))**(id+jd-1))\n\n # s-derivative\n dmodeds = 0.5*(1+a)*dmodedr\n tmp = dgb*((0.5*(1-b))**id)\n if(id>0):\n tmp = tmp+(-0.5*id)*(gb*(0.5*(1-b))**(id-1))\n\n if(id+jd>0):\n tmp = tmp*((0.5*(1-c))**(id+jd-1))\n\n tmp = fa*tmp*hc\n dmodeds = dmodeds + tmp\n\n # t-derivative\n dmodedt = 0.5*(1+a)*dmodedr+0.5*(1+b)*tmp\n tmp = dhc*((0.5*(1-c))**(id+jd))\n if(id+jd>0):\n tmp = tmp-0.5*(id+jd)*(hc*((0.5*(1-c))**(id+jd-1)));\n\n tmp = fa*(gb*tmp)\n tmp = tmp*((0.5*(1-b))**id);\n dmodedt = dmodedt+tmp;\n\n # Normalize\n dmodedr = 2**(2*id+jd+1.5)*dmodedr\n dmodeds = 2**(2*id+jd+1.5)*dmodeds\n dmodedt = 2**(2*id+jd+1.5)*dmodedt\n\n return dmodedr[:,0], dmodeds[:,0], dmodedt[:,0]", "def pc_nproduced_var(self):\n return _radio_astro_swig.dedispersion_sptr_pc_nproduced_var(self)", "def chisq(self, star, logger=None):\n\n # Start by getting all interpolation coefficients for all observed points\n data, weight, u, v = star.data.getDataVector()\n if not star.data.values_are_sb:\n # If the images are flux instead of surface brightness, convert\n # them into SB\n star_pix_area = star.data.pixel_area\n data /= star_pix_area\n weight *= star_pix_area*star_pix_area\n\n # Subtract star.fit.center from u, v:\n u -= star.fit.center[0]\n v -= star.fit.center[1]\n\n if self._force_model_center:\n coeffs, dcdu, dcdv, psfx, psfy = self.interp.derivatives(u/self.du, v/self.du)\n dcdu /= self.du\n dcdv /= self.du\n else:\n coeffs, psfx, psfy = self.interp(u/self.du, v/self.du)\n\n # Turn the (psfy,psfx) coordinates into an index into 1d parameter vector.\n index1d = self._indexFromPsfxy(psfx, psfy)\n # All invalid pixel references now have negative index; record and set to zero\n nopsf = index1d < 0\n index1d = np.where(nopsf, 0, index1d)\n # And null the coefficients for such pixels\n coeffs = np.where(nopsf, 0., coeffs)\n if self._force_model_center:\n dcdu = np.where(nopsf, 0., dcdu)\n dcdv = np.where(nopsf, 0., dcdv)\n\n # Multiply kernel (and derivs) by current PSF element values\n # to get current estimates\n pvals = self._fullPsf1d(star)[index1d]\n mod = np.sum(coeffs*pvals, axis=1)\n if self._force_model_center:\n dmdu = star.fit.flux * np.sum(dcdu*pvals, axis=1)\n dmdv = star.fit.flux * np.sum(dcdv*pvals, axis=1)\n resid = data - mod*star.fit.flux\n\n # Now begin construction of alpha/beta/chisq that give\n # chisq vs linearized model.\n rw = resid * weight\n chisq = np.sum(resid * rw)\n\n # To begin with, we build alpha and beta over all PSF points\n # within mask, *and* the flux (and center) shifts. Then\n # will eliminate the constrained PSF points, and then\n # marginalize over the flux (and center).\n\n # Augment the coeffs and index1d vectors with extra column(s)\n # for the shift in flux (and center), so it will be\n # the derivative of model w.r.t. 
augmented parameter set\n derivs = np.zeros( (coeffs.shape[0], coeffs.shape[1]+self._constraints),\n dtype=float)\n indices = np.zeros( (index1d.shape[0], index1d.shape[1]+self._constraints),\n dtype=int)\n derivs[:, :coeffs.shape[1]] = star.fit.flux * coeffs #derivs wrt PSF elements\n indices[:,:index1d.shape[1]] = index1d\n\n # Add derivs wrt flux\n derivs[:,coeffs.shape[1]] = mod\n dflux_index = self._nparams + self._constraints\n indices[:,coeffs.shape[1]] = dflux_index\n if self._force_model_center:\n # Derivs w.r.t. center shift:\n derivs[:,coeffs.shape[1]+1] = dmdu\n derivs[:,coeffs.shape[1]+2] = dmdv\n indices[:,coeffs.shape[1]+1] = dflux_index+1\n indices[:,coeffs.shape[1]+2] = dflux_index+2\n\n # Accumulate alpha and beta point by point. I don't\n # know how to do it purely with numpy calls instead of a loop over data points\n nderivs = self._nparams + 2*self._constraints\n beta = np.zeros(nderivs, dtype=float)\n alpha = np.zeros( (nderivs,nderivs), dtype=float)\n for i in range(len(data)):\n ii = indices[i,:]\n cc = derivs[i,:]\n # beta_j += resid_i * weight_i * coeff_{ij}\n beta[ii] += rw[i] * cc\n # alpha_jk += weight_i * coeff_ij * coeff_ik\n dalpha = cc[np.newaxis,:]*cc[:,np.newaxis] * weight[i]\n iouter = np.broadcast_to(ii, (len(ii),len(ii)))\n alpha[iouter.flatten(), iouter.T.flatten()] += dalpha.flatten()\n\n # Next we eliminate the first _constraints PSF values from the parameters\n # using the linear constraints that dp0 = - _a * dp1\n s0 = slice(None, self._constraints) # parameters to eliminate\n s1 = slice(self._constraints, None) # parameters to keep\n beta = beta[s1] - np.dot(beta[s0], self._a).T\n alpha = alpha[s1,s1] \\\n - np.dot( self._a.T, alpha[s0,s1]) \\\n - np.dot( alpha[s1,s0], self._a) \\\n + np.dot( self._a.T, np.dot(alpha[s0,s0],self._a))\n\n # Now we marginalize over the flux (and center). These shifts are at\n # the back end of the parameter array.\n # But first we need to apply a prior to the shift of flux (and center)\n # to avoid numerical instabilities when these are degenerate because of\n # missing pixel data or otherwise unspecified PSF\n # ??? make these properties of the Model???\n fractional_flux_prior = 0.5 # prior of 50% on pre-existing flux ???\n center_shift_prior = 0.5*self.du #prior of 0.5 uv-plane pixels ???\n alpha[self._nparams, self._nparams] += (fractional_flux_prior*star.fit.flux)**(-2.)\n if self._force_model_center:\n alpha[self._nparams+1, self._nparams+1] += (center_shift_prior)**(-2.)\n alpha[self._nparams+2, self._nparams+2] += (center_shift_prior)**(-2.)\n\n s0 = slice(None, self._nparams) # parameters to keep\n s1 = slice(self._nparams, None) # parameters to marginalize\n a11inv = np.linalg.inv(alpha[s1,s1])\n # Calculate shift in flux - ??? 
Note that this is the solution for shift\n # when PSF parameters do *not* move; so if we subsequently update\n # the PSF params, we miss shifts due to covariances between flux and PSF.\n\n df = np.dot(a11inv, beta[s1])\n outflux = star.fit.flux + df[0]\n if self._force_model_center:\n outcenter = (star.fit.center[0] + df[1],\n star.fit.center[1] + df[2])\n else:\n outcenter = star.fit.center\n\n # Now get the final alpha, beta, chisq for the remaining PSF params\n outchisq = chisq - np.dot(beta[s1].T,np.dot(a11inv, beta[s1]))\n tmp = np.dot(a11inv, alpha[s1,s0])\n outbeta = beta[s0] - np.dot(beta[s1].T,tmp)\n outalpha = alpha[s0,s0] - np.dot(alpha[s0,s1],tmp)\n\n outfit = StarFit(star.fit.params,\n flux = outflux,\n center = outcenter,\n chisq = outchisq,\n dof = np.count_nonzero(weight) - self._nparams,\n alpha = outalpha,\n beta = outbeta)\n\n return Star(star.data, outfit)", "def _calc_psi_deriv(self):\n try:\n self.bkg['psi'].mean()\n except:\n self.build_bkg()\n \n # psi = self.eqdsk.psi\n # self.dpsidR = np.zeros((self.eqdsk.nzbox, self.eqdsk.nrbox))\n # self.dpsidZ = np.zeros((self.eqdsk.nzbox, self.eqdsk.nrbox))\n psi = self.bkg['psi']\n self.dpsidR = np.zeros((self.nz, self.nR))\n self.dpsidZ = np.zeros((self.nz, self.nR)) \n deriv = np.gradient(psi)\n # Note np.gradient gives y\n # derivative first, then x derivative\n ddR = deriv[1]\n ddZ = deriv[0]\n # dRdi = np.asarray(1.0)/np.gradient(self.R_eqd)\n # dRdi = np.tile(dRdi, [self.eqdsk.nzbox,1])\n # dZdi = np.asarray(1.0)/np.gradient(self.Z_eqd)\n # dZdi = np.tile(dZdi, [self.eqdsk.nrbox,1])\n # dZdi = np.transpose(dZdi)\n dRdi = np.asarray(1.0)/np.gradient(self.bkg['R'])\n dRdi = np.tile(dRdi, [self.nz,1])\n dZdi = np.asarray(1.0)/np.gradient(self.bkg['z'])\n dZdi = np.tile(dZdi, [self.nR,1])\n dZdi = np.transpose(dZdi)\n #print(\"shape ddR:\",np.shape(ddR),'shape dRdi:', np.shape(dRdi))\n #print('shape ddZ:',np.shape(ddZ),'shape dZdi:', np.shape(dZdi))\n \n self.dpsidR[:, :] = ddR*dRdi\n self.dpsidZ[:, :] = ddZ*dZdi", "def dw2000q_graph(**kwargs):\n target_graph = dnx.generators.chimera_graph(16, 16, 4, **kwargs)\n target_graph.graph['chip_id'] = 'DW_2000Q'\n return target_graph", "def qpsk(input_bits, noise):\n modulator = Modulator()\n demodulator = Demodulator()\n channel = Channel()\n signal = modulator.make_qpsk_mod(input_bits)\n\n signal = channel.send_signal(signal, noise)\n\n result_bits = demodulator.make_qpsk_demod(signal, channel)\n return result_bits", "def _ddGf_pH7_sym(self, compound_id, pH, IS, T, metal_conc_dict):\n pH7_sid = self.pH7species_id_dict[compound_id]\n # energy difference dG_0(pH7 species) - dG_0(least protonated species)\n pH7_sid_binding_constant = self.get_binding_constant(pH7_sid, write_pK_as_variable = True)\n ddGf_pH7_and_least_H = - R * 298.15 * np.log(10) * pH7_sid_binding_constant\n #energy difference dG'(compound) - dG_0(least protonated state)\n ddGf_prime_least_H = self._ddGf_least_H_state_sym(compound_id, pH, IS, T, metal_conc_dict)\n #energy difference dG'(compound) - dG_0(pH7 species)\n ddGf_prime_pH7 = ddGf_prime_least_H + '- (' + str(ddGf_pH7_and_least_H) + ')'\n return ddGf_prime_pH7", "def _CkdPub(self, index: Bip32KeyIndex) -> Bip32Base:\n\n # Not supported by Ed25519 SLIP-0010\n pass", "def visc_diss(self):\n # NY95b Eq 2.5\n qplus = 3*self._eps_prime * self.dens * np.fabs(self.vel_rad)\n qplus *= np.square(self.vel_snd) / (2 * self.rads)\n return qplus", "def pc_nproduced(self):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_sptr_pc_nproduced(self)", "def 
make_delwaqg_dataset(scen):\n layers = scen.bottom_layers\n grid = scen.hydro.grid()\n\n ds=grid.write_xarray() # create_restart expects face and layer dimensions\n\n ds['layer_thickness']=('layer',), layers\n ds['layer_thickness'].attrs['long_name']=\"Thickness of sediment layers in DelwaqG\"\n ds['layer_thickness'].attrs['units']='m'\n\n for sub in delwaqg_substances:\n # create_restart expects \"native\" dwaq order of 'layer','face'\n ds[sub] = ('layer','face'), np.zeros( (ds.dims['layer'], ds.dims['face']), np.float32)\n if '-pore' in sub:\n ds[sub].attrs['units']='g/m3 of porewater'\n else:\n ds[sub].attrs['units']='g/m3 bulk'\n \n # DelwaqG does not use the names.\n # Must get the order correct\n \n # read title\n # read nosysini, nosegini (substance count, segment count)\n # read substances names into synameinit array, but they are never used.\n # read data into sedconc array ~ [layer, substance, segment]\n # dissolved substances (the first 12) are scaled by porosity.\n # re units: the code that reads in WC parameters appears to assume\n # g/m2 input, which is then divided by total bed thickness to\n # get g/m3.\n # dissolved substances are assumed to come in as a porewater concentration,\n # and scaling by porisity then gives a bulk concentration.\n\n # Order:\n # Names don't matter. For simplicity, use the same names as in the map and restart\n # files:\n\n # create_restart() expects this.\n ds['header']=\"Initial conditions file for DelwaqG\"\n\n return ds", "def get_cpsd(self, params=None):\n if params is None:\n params = self.params\n\n if self.cpsd_model is None:\n return np.array([self.params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n return np.log(self.cpsd_model(self.params, self.fbins.bin_cent))", "def construct_sk(public_key):\n point = public_key.pointQ\n x = int(point.x).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n y = int(point.y).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n sk = x + y\n return H(sk)", "def _shock_dissipation_phase(self):\n # update artificial viscosity coefficients for Jameson scheme\n self.jameson.update_artificial_viscosity(\n self.R * self.g * self.Ch * self.h,\n self.R * self.g * self.Ch_link * self.h_link,\n )\n\n # apply Jameson's filter\n for i in range(self.number_gclass):\n self.jameson.run(\n self.Ch_i[i, :],\n self.wet_pwet_nodes,\n at=\"node\",\n out=self.Ch_i_temp[i, :],\n )\n self.jameson.run(self.h, self.wet_pwet_nodes,\n at=\"node\", out=self.h_temp)\n self.jameson.run(\n self.u, self.wet_pwet_horizontal_links, at=\"hlink\", out=self.u_temp\n )\n self.jameson.run(\n self.v, self.wet_pwet_vertical_links, at=\"vlink\", out=self.v_temp\n )\n\n # if self.model == \"4eq\":\n # self.jameson.run(\n # self.Kh, self.wet_pwet_vertical_links, at=\"vlink\", out=self.Kh_temp\n # )\n # self.jameson.run(\n # self.Kh, self.wet_pwet_horizontal_links, at=\"hlink\", out=self.Kh_temp\n # )\n\n # update gradient terms\n self.update_gradients()\n\n # Reset our field values with the newest flow depth and\n # discharge.\n self.update_values()\n map_values(\n self,\n h=self.h,\n dhdx=self.dhdx,\n dhdy=self.dhdy,\n Ch=self.Ch,\n Ch_i=self.Ch_i,\n dChdx_i=self.dChdx_i,\n dChdy_i=self.dChdy_i,\n u=self.u,\n dudx=self.dudx,\n dudy=self.dudy,\n u_node=self.u_node,\n v=self.v,\n dvdx=self.dvdx,\n dvdy=self.dvdy,\n v_node=self.v_node,\n U=self.U,\n U_node=self.U_node,\n eta=self.eta,\n h_link=self.h_link,\n Ch_link=self.Ch_link,\n )", "def d_cost_sh(x_0, z_0, gamma, sig_c, nu, r_0, fn, gn, P, d):\n\n eps = 1e-9\n\n P_p = np.copy(P) # 
placeholder\n P_n = np.copy(P) # placeholder\n P_p[d] += eps\n P_n[d] -= eps\n\n # derivative of parameter P[d]\n dJ = (cost_sh(x_0, z_0, gamma, sig_c, nu, r_0, fn, gn, *P_p) -\n cost_sh(x_0, z_0, gamma, sig_c, nu, r_0, fn, gn, *P_n)) / (2 * eps)\n\n return dJ", "def _prepare_ligand_CD(self):\n if self.data['CD'].protocol == []:\n params_o = self.system.paramsFromAlpha(1.0, 'CD')\n self.system.setParams(params_o)\n\n if (self.args.params['CD']['pose'] == -1):\n seeds = self._get_confs_to_rescore(site=True, minimize=True)[0]\n self.data['CD'].confs['starting_poses'] = seeds\n else:\n # For pose BPMF, starting_poses is defined in _set_universe_evaluator\n seeds = self.data['CD'].confs['starting_poses']\n\n if seeds == []:\n seeds = None\n else:\n # initializes smart darting for CD and sets the universe\n # to the lowest energy configuration\n self.iterator.initializeSmartDartingConfigurations(\n seeds, 'CD', self.log, self.data)\n if len(seeds) > 0:\n self.top.universe.setConfiguration(\\\n Configuration(self.top.universe,np.copy(seeds[-1])))\n\n # Ramp up the temperature using HMC\n self._ramp_T(self.args.params['BC']['T_TARGET'], normalize=False)\n\n seeds = [np.copy(self.top.universe.configuration().array) \\\n for n in range(self.args.params['CD']['seeds_per_state'])]\n return seeds", "def getDispersivePhaseDelay(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getDispersivePhaseDelay_result = _quant_dc(self._swigobj.getDispersivePhaseDelay(_pc.document['nc'], _pc.document['spwid']))\n return _getDispersivePhaseDelay_result", "def dipolePotential(x,y,q,d):\n V1 = pointPotential(x,y,q,-d/2,0.)\n V2 = pointPotential(x,y,q,d/2,0.)\n Vdp = V1 - V2\n return Vdp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_constellation_8psk_sptr
__init__(self, p) -> digital_constellation_8psk_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_constellation_8psk_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _wali.new_KeyPair(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoSFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def construct_sk(public_key):\n point = public_key.pointQ\n x = int(point.x).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n y = int(point.y).to_bytes(PUBLIC_KEY_SIZE // 2, 'big')\n sk = x + y\n return H(sk)", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoShaderParameter2i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoMFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoShaderParameter4i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__( self, public_key, secret_multiplier ):\n\n self.public_key = public_key\n self.secret_multiplier = secret_multiplier", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoShaderParameter1i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoShaderParameter3i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoTextureCoordinatePlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoClipPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n _snap.TPairHashImpl2_swiginit(self,_snap.new_TPairHashImpl2())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
constellation_8psk() -> digital_constellation_8psk_sptr

Digital constellation for 8PSK.
def constellation_8psk():
    return _digital_swig.constellation_8psk()
[ "def psk8(input_bits, noise):\n modulator = Modulator()\n demodulator = Demodulator()\n channel = Channel()\n signal = modulator.make_8psk_mod(input_bits)\n\n signal = channel.send_signal(signal, noise)\n\n result_bits = demodulator.make_8psk_demod(signal, channel)\n return result_bits", "def preset_8(self, packet):\n return self.preset(packet)", "def genWEP(iv, dst, bssid, src):\n \n data_pckt = dot11.Dot11(type=\"Data\", addr1=dst, \\\n addr2=bssid, addr3=src,\\\n FCfield='from-DS')\n\n data_pckt.FCfield |= 0x40\n data_pckt /= dot11.Dot11WEP(iv = iv, keyid=4)/dot11.LLC(ctrl=3) / dot11.SNAP()\\\n / dot11.ARP(\n op = \"is-at\",\n hwsrc = src,\n psrc = \"192.168.1.1\",\n hwdst = dst,\n pdst = \"192.168.1.2\")\n\n\n #data_resp = Dot11(addr1=mac2, addr2=mac1,FCfield='to-DS')\n # data_resp.FCfield |= 0x40\n #data_resp /= Dot11WEP(iv=str(iv+1),keyid=0)/LLC()/SNAP()/scapy.packet.Padding('a'*100)\n return data_pckt", "def encode_pkcs8_private(self):\n\n if not self._key.private_value:\n raise KeyExportError('Key is not private')\n\n return OMIT, der_encode(self._key.private_value)", "def decode_pkcs8_public(cls, alg_params, key_data):\n\n # pylint: disable=unused-argument\n\n return (key_data,)", "def getC8YConnection(self):\n\t\treturn self._c8yConn", "def Crc8(data):\n crc = 0\n for byte in six.iterbytes(data):\n crc ^= (byte << 8)\n for _ in range(8):\n if crc & 0x8000:\n crc ^= (0x1070 << 3)\n crc <<= 1\n return crc >> 8", "def _draw_8point(_canvas, _cx, _cy, _i, _j, _colour):\n # Square symmetry\n local_coord = [(_i * (-1) ** (k % 2), _j * (-1) ** (k // 2)) for k in range(4)]\n # Diagonal symmetry\n local_coord += [(j_, i_) for i_, j_ in local_coord]\n for i_, j_ in local_coord:\n # print(\"_draw_8point\", _cy + j_, _cx + i_)\n _canvas[_cy + j_, _cx + i_] = _colour", "def i8(self):\n return self.input_get('I8')", "def test_legacy_vpn_ipsec_xauth_psk_strongswan(self):\n vpn = VPN_TYPE.IPSEC_XAUTH_PSK\n vpn_profile = self.generate_legacy_vpn_profile(\n vpn, self.vpn_server_addresses[vpn.name][0],\n self.ipsec_server_type[0])\n self.legacy_vpn_connection_test_logic(vpn_profile)", "def encode_kiss(self):\n encoded_ssid = (int(self.ssid) << 1) | 0x60\n _callsign = self.callsign\n\n if self.digi:\n # _callsign = ''.join([_callsign, '*'])\n encoded_ssid |= 0x80\n\n # Pad the callsign to at least 6 characters.\n while len(_callsign) < 6:\n _callsign = ''.join([_callsign, ' '])\n\n encoded_callsign = ''.join([chr(ord(p) << 1) for p in _callsign])\n\n return ''.join([encoded_callsign, chr(encoded_ssid)])", "def ssl168bit3desciphersrate(self) :\n try :\n return self._ssl168bit3desciphersrate\n except Exception as e:\n raise e", "def test_legacy_vpn_l2tp_ipsec_psk_strongswan(self):\n vpn = VPN_TYPE.L2TP_IPSEC_PSK\n vpn_profile = self.generate_legacy_vpn_profile(\n vpn, self.vpn_server_addresses[vpn.name][0],\n self.ipsec_server_type[0])\n self.legacy_vpn_connection_test_logic(vpn_profile)", "def generate_keypair(self) -> str:\n # create an NaClDSEncoder object\n nacl_enc = NaClDSEncoder()\n # generate new keys\n nacl_enc.generate()\n\n self.keypair = nacl_enc.keypair\n self.public_key = nacl_enc.public_key\n self.private_key = nacl_enc.private_key\n return self.keypair", "def psk16(input_bits, noise):\n modulator = Modulator()\n demodulator = Demodulator()\n channel = Channel()\n signal = modulator.make_16psk_mod(input_bits)\n\n signal = channel.send_signal(signal, noise)\n\n result_bits = demodulator.make_16psk_demod(signal, channel)\n return result_bits", "def sslbe168bit3desciphersrate(self) :\n 
try :\n return self._sslbe168bit3desciphersrate\n except Exception as e:\n raise e", "def decapsulate( self , packet , key ):\n\t\tassert( packet.haslayer( Dot11WEP ) ), \\\n\t\t\t'The given packet does not contain a Dot11WEP message (decapsulating WEP).'\n\t\tdot11wep = packet.getlayer( Dot11WEP )\n\t\n\t\t# Check if the Frame Check Sequence (FCS) flag is set in the Radiotap header.\n\t\t# If true assert the correctness of the FCS, and remove the FCS by shifting\n\t\t# the packet ICV and wepdata accordingly to keep consistency with non-FCS\n\t\t# implementations.\n\t\tradiotapFCSFlag\t= hasFCS( packet )\n\t\tif radiotapFCSFlag is True:\n\t\t\tassertDot11FCS( packet , expectedFCS=dot11wep.icv )\n\t\t\tdot11wep.icv \t\t= int( dot11wep.wepdata[-4:].encode('hex') , 16 ) # Integer for consistency.\n\t\t\tdot11wep.wepdata \t= dot11wep.wepdata[:-4]\n\t\n\t\t# Generate the key and decrypt the ciphertext.\n\t\tkey\t\t= dot11wep.iv + key\n\t\tarc4 \t\t= ARC4.new( key )\n\t\tplaintext\t= arc4.decrypt( dot11wep.wepdata )\n\t\n\t\t# Decrypt the dot11wep ICV, and calculate the ICV over the plaintext.\n\t\ticv \t\t= '{0:0{1}x}'.format( dot11wep.icv , 8 ).decode('hex')\n\t\ticvReceived\t= arc4.decrypt( icv )\n\t\ticvCalculated \t= struct.pack( '<L' , crc32( plaintext ) % (1<<32) )\n\t\n\t\t# Assert that the ICV's match.\n\t\tassert( icvReceived == icvCalculated ), \\\n\t\t\t'The received ICV \"0x%s\" does not match the calculated ICV \"0x%s\".' \\\n\t\t\t% ( icvReceived.encode('hex') , icvCalculated.encode('hex') )\n\t\n\t\t# Return the plaintext.\n\t\treturn plaintext", "def Bip38Encrypt(password,serializedprivkey):\n\n if ('str' not in str(type(serializedprivkey)) and 'unicode' not in str(type(serializedprivkey))) or \\\n (serializedprivkey[:4] != \"xprv\" and serializedprivkey[:4] != \"tprv\"):\n raise TypeError(\"xprv/tprv key str required for first input.\")\n if len(serializedprivkey) != 111:\n raise TypeError(\"xprv/tprv key str entered is not correct length, please check for errors.\")\n try:\n keyhex, isValid = base58_decode(serializedprivkey,True,False)\n except:\n raise Exception(\"Error with Base58 decode attempt.\")\n if not isValid:\n raise Exception(\"Base58 checksum mis-match.\")\n privkeyhex = str(keyhex)[-64:]\n from pyBIP0038 import encrypt_privkey_from_password\n try:\n enc6Pkey = encrypt_privkey_from_password(password,privkeyhex,True)\n except Exception as e:\n raise Exception(\"Error attempting to encrypt prviate key. Possible bad password, but not 100% sure that was the problem. Exception thrown was: \" + str(e))\n try:\n enchex, isValid = base58_decode(enc6Pkey,True,False)\n except:\n raise Exception(\"Error (1) encrypting key.\")\n if not isValid:\n raise Exception(\"Error (2) encrypting key.\")\n hashcheck = enchex[6:14]\n enckeyhex = enchex[14:]\n assert len(hashcheck) == 8\n assert len(enckeyhex) == 64\n if keyhex[:8] == \"0488ade4\":\n newkeyprefix = \"282d214d\"\n elif keyhex[:8] == \"04358394\":\n newkeyprefix = \"254bfd62\"\n else:\n raise Exception(\"Previously checked key for xprv/tprv but later check failed.\")\n newkey = str(newkeyprefix + keyhex[8:-64] + hashcheck + enckeyhex)\n return base58_check_and_encode(newkey)", "def AsymmetricPause(self):\r\n\t\treturn self._get_attribute('asymmetricPause')", "def DESKeySpec(self, key) -> bytes:\r\n if not isinstance(key, bytes):\r\n key = bytes(key)\r\n self.keyBytes = key[:8] # DES keys are only 8 bytes in length\r\n return self.keyBytes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_constellation_decoder_cb_sptr
__init__(self, p) > digital_constellation_decoder_cb_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_constellation_decoder_cb_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self.interface = \\\n {'initialization variables': None,\n 'input variables': None,\n 'input events': None,\n 'output events': None}", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, params, model,\n name=\"fully_connected_decoder\", mode='train'):\n super(FullyConnectedSCDecoder, self).__init__(params, model, name, mode)", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, args, phase):\n self.args = args\n self.phase = phase", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(HCompositeState2ProcDef, self).__init__(name='HCompositeState2ProcDef', num_nodes=152, edges=[])\n \n # Add the edges\n self.add_edges([(8, 39), (39, 1), (1, 40), (40, 6), (6, 41), (41, 9), (9, 42), (42, 11), (8, 43), (43, 13), (6, 44), (44, 14), (6, 45), (45, 15), (6, 46), (46, 16), (11, 47), (47, 17), (11, 48), (48, 18), (11, 49), (49, 19), (11, 50), (50, 20), (9, 51), (51, 12), (12, 52), (52, 21), (12, 53), (53, 22), (12, 54), (54, 23), (71, 55), (55, 119), (72, 56), (56, 120), (73, 57), (57, 121), (74, 58), (58, 122), (75, 59), (59, 123), (76, 60), (60, 124), (77, 61), (61, 125), (78, 62), (62, 126), (79, 63), (63, 127), (80, 64), (64, 128), (81, 65), (65, 129), (82, 66), (66, 130), (83, 67), (67, 131), (84, 68), (68, 132), (85, 69), (69, 133), (86, 70), (70, 134), (13, 24), (24, 88), (14, 25), (25, 89), (15, 26), (26, 90), (16, 27), (27, 91), (11, 28), (28, 92), (17, 29), (29, 93), (18, 30), (30, 94), (19, 31), (31, 95), 
(20, 32), (32, 96), (12, 33), (33, 97), (21, 34), (34, 98), (22, 35), (35, 99), (23, 36), (36, 100), (8, 37), (37, 101), (1, 38), (38, 102), (5, 0), (0, 135), (0, 136), (0, 137), (0, 138), (0, 139), (0, 140), (0, 141), (0, 142), (0, 143), (0, 144), (0, 145), (0, 146), (0, 147), (0, 148), (0, 149), (0, 150), (0, 151), (136, 1), (7, 2), (2, 4), (4, 3), (3, 87), (10, 4), (7, 5), (137, 6), (71, 103), (72, 104), (73, 105), (74, 106), (75, 107), (76, 108), (77, 109), (78, 110), (79, 111), (80, 112), (81, 113), (82, 114), (83, 115), (84, 116), (85, 117), (86, 118), (135, 8), (138, 9), (139, 13), (140, 14), (141, 15), (142, 16), (143, 11), (144, 12), (145, 17), (146, 18), (147, 19), (148, 20), (149, 21), (150, 22), (151, 23), (8, 10), (103, 87), (104, 88), (105, 89), (106, 90), (107, 91), (108, 92), (109, 93), (110, 94), (111, 95), (112, 96), (113, 97), (114, 98), (115, 99), (116, 100), (117, 101), (118, 102)])\n # Set the graph attributes\n self[\"mm__\"] = pickle.loads(\"\"\"(lp1\nS'UMLRT2Kiltera_MM'\np2\na.\"\"\")\n self[\"name\"] = \"\"\"CompositeState2ProcDef\"\"\"\n self[\"GUID__\"] = UUID('d5e9d5a2-c202-49ef-a74d-abc96e53b4fe')\n \n # Set the node attributes\n self.vs[0][\"mm__\"] = \"\"\"ApplyModel\"\"\"\n self.vs[0][\"GUID__\"] = UUID('4f03b792-e84e-4c84-bbae-3072cf6a293c')\n self.vs[1][\"name\"] = \"\"\"localdef1\"\"\"\n self.vs[1][\"classtype\"] = \"\"\"LocalDef\"\"\"\n self.vs[1][\"mm__\"] = \"\"\"LocalDef\"\"\"\n self.vs[1][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[1][\"GUID__\"] = UUID('00ff12a2-181f-4200-81a2-75850a58d99f')\n self.vs[2][\"mm__\"] = \"\"\"match_contains\"\"\"\n self.vs[2][\"GUID__\"] = UUID('938cefd8-a8a4-4aaf-be3a-e728f6d4b308')\n self.vs[3][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[3][\"GUID__\"] = UUID('a1001fa8-fbfb-4491-a555-e688afae9a35')\n self.vs[4][\"name\"] = \"\"\"state1\"\"\"\n self.vs[4][\"classtype\"] = \"\"\"State\"\"\"\n self.vs[4][\"mm__\"] = \"\"\"State\"\"\"\n self.vs[4][\"cardinality\"] = \"\"\"+\"\"\"\n self.vs[4][\"GUID__\"] = UUID('2de4b186-4d1b-49c5-a24d-837430de86c3')\n self.vs[5][\"mm__\"] = \"\"\"paired_with\"\"\"\n self.vs[5][\"GUID__\"] = UUID('6864a62e-0c16-41ec-85cb-5304c66b2167')\n self.vs[6][\"name\"] = \"\"\"new1\"\"\"\n self.vs[6][\"classtype\"] = \"\"\"New\"\"\"\n self.vs[6][\"mm__\"] = \"\"\"New\"\"\"\n self.vs[6][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[6][\"GUID__\"] = UUID('6e918d39-761f-4145-980d-e035e8956e4c')\n self.vs[7][\"mm__\"] = \"\"\"MatchModel\"\"\"\n self.vs[7][\"GUID__\"] = UUID('9d3c9ff3-d943-45c5-9a68-4b94f8ae4f55')\n self.vs[8][\"name\"] = \"\"\"procdef1\"\"\"\n self.vs[8][\"classtype\"] = \"\"\"ProcDef\"\"\"\n self.vs[8][\"mm__\"] = \"\"\"ProcDef\"\"\"\n self.vs[8][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[8][\"GUID__\"] = UUID('b36423c7-5f8e-4565-9124-9dedad23d1e1')\n self.vs[9][\"name\"] = \"\"\"par1\"\"\"\n self.vs[9][\"classtype\"] = \"\"\"Par\"\"\"\n self.vs[9][\"mm__\"] = \"\"\"Par\"\"\"\n self.vs[9][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[9][\"GUID__\"] = UUID('64a7af82-a641-4084-b5c3-db88c40c7b99')\n self.vs[10][\"type\"] = \"\"\"ruleDef\"\"\"\n self.vs[10][\"mm__\"] = \"\"\"backward_link\"\"\"\n self.vs[10][\"GUID__\"] = UUID('869d5d52-235c-4240-af78-31e36a1f47d7')\n self.vs[11][\"name\"] = \"\"\"inst1\"\"\"\n self.vs[11][\"classtype\"] = \"\"\"Inst\"\"\"\n self.vs[11][\"mm__\"] = \"\"\"Inst\"\"\"\n self.vs[11][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[11][\"GUID__\"] = UUID('a4079b80-e123-4015-96c9-8e664b15e053')\n self.vs[12][\"name\"] = \"\"\"inst2\"\"\"\n self.vs[12][\"classtype\"] = 
\"\"\"Inst\"\"\"\n self.vs[12][\"mm__\"] = \"\"\"Inst\"\"\"\n self.vs[12][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[12][\"GUID__\"] = UUID('a3eef854-3648-462d-be65-3eca75bdebf7')\n self.vs[13][\"name\"] = \"\"\"name1\"\"\"\n self.vs[13][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[13][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[13][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[13][\"GUID__\"] = UUID('9b94a56a-dd11-415e-8663-6f429c2c0753')\n self.vs[14][\"name\"] = \"\"\"name2\"\"\"\n self.vs[14][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[14][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[14][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[14][\"GUID__\"] = UUID('d90c8a9c-eee1-48af-9308-abbb6052af8f')\n self.vs[15][\"name\"] = \"\"\"name3\"\"\"\n self.vs[15][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[15][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[15][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[15][\"GUID__\"] = UUID('8e53fe34-6fcc-4059-8042-db911db6e812')\n self.vs[16][\"name\"] = \"\"\"name4\"\"\"\n self.vs[16][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[16][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[16][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[16][\"GUID__\"] = UUID('4f23669c-d236-4a8d-b52b-1f37ba406f94')\n self.vs[17][\"name\"] = \"\"\"name5\"\"\"\n self.vs[17][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[17][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[17][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[17][\"GUID__\"] = UUID('91bc841f-2211-4638-a340-584da8347c98')\n self.vs[18][\"name\"] = \"\"\"name6\"\"\"\n self.vs[18][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[18][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[18][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[18][\"GUID__\"] = UUID('8a109a2d-2d70-4318-8a72-46c784206075')\n self.vs[19][\"name\"] = \"\"\"name7\"\"\"\n self.vs[19][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[19][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[19][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[19][\"GUID__\"] = UUID('5a95e461-d2f8-435b-9e77-af581d91ee29')\n self.vs[20][\"name\"] = \"\"\"name8\"\"\"\n self.vs[20][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[20][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[20][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[20][\"GUID__\"] = UUID('c600b1fb-8c9c-4ef2-b597-8137d9bdfb08')\n self.vs[21][\"name\"] = \"\"\"name9\"\"\"\n self.vs[21][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[21][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[21][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[21][\"GUID__\"] = UUID('708cd8f1-6e3d-4dfa-af00-18e9d43a01a4')\n self.vs[22][\"name\"] = \"\"\"name10\"\"\"\n self.vs[22][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[22][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[22][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[22][\"GUID__\"] = UUID('132e8292-4471-498d-a202-3d2abc7ab5ca')\n self.vs[23][\"name\"] = \"\"\"name11\"\"\"\n self.vs[23][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[23][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[23][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[23][\"GUID__\"] = UUID('fdb484f0-a8b5-4b9e-86a6-b679b1012005')\n self.vs[24][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[24][\"GUID__\"] = UUID('2a8418a3-cb80-496b-a1e0-7419de2ae33f')\n self.vs[25][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[25][\"GUID__\"] = UUID('4f37af75-2b77-45c1-93d1-8aae7cf14cc8')\n self.vs[26][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[26][\"GUID__\"] = UUID('54ef6fcc-cb9a-494e-aa36-f44525e4a0b0')\n self.vs[27][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[27][\"GUID__\"] = UUID('22858e97-7bbe-460d-b44b-14652852a592')\n self.vs[28][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n 
self.vs[28][\"GUID__\"] = UUID('c3fcdb66-34da-4c82-b163-e5ab5f04e5c0')\n self.vs[29][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[29][\"GUID__\"] = UUID('88c90884-ae83-49af-96da-74f03c7f80ce')\n self.vs[30][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[30][\"GUID__\"] = UUID('1e3c412d-8372-4ba5-8a56-9d82407b79d0')\n self.vs[31][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[31][\"GUID__\"] = UUID('a500f0c7-1535-40ed-802e-a883517bbc64')\n self.vs[32][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[32][\"GUID__\"] = UUID('ed658c5a-81c3-4938-920e-98953de205ba')\n self.vs[33][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[33][\"GUID__\"] = UUID('49be0f69-494e-4f45-8923-582778c6828a')\n self.vs[34][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[34][\"GUID__\"] = UUID('e3709cc9-ed04-44f9-b8a7-a8f9f5939f3b')\n self.vs[35][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[35][\"GUID__\"] = UUID('8a657ede-e29d-4a28-9c1c-4c95a3ecd3b6')\n self.vs[36][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[36][\"GUID__\"] = UUID('b3cd8a7c-7deb-4b8c-9ed2-4a22bd6b5a39')\n self.vs[37][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[37][\"GUID__\"] = UUID('2287628a-d22b-427b-bdfd-d24d04bd46ad')\n self.vs[38][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[38][\"GUID__\"] = UUID('65083504-7423-4b8f-8b3e-7dc369fa08db')\n self.vs[39][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[39][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[39][\"GUID__\"] = UUID('dd5a6c0f-e438-4f23-ad0f-acd02dd4afe8')\n self.vs[40][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[40][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[40][\"GUID__\"] = UUID('d4bcb4b5-37a3-4d04-895f-d689ea89c825')\n self.vs[41][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[41][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[41][\"GUID__\"] = UUID('b860cc3c-a70a-4c66-9bb9-c1fd1395b23c')\n self.vs[42][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[42][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[42][\"GUID__\"] = UUID('97c4f558-4e1a-4a85-82e4-e0500374d80f')\n self.vs[43][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[43][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[43][\"GUID__\"] = UUID('58acb66a-2008-4ef3-975f-1db1219bd830')\n self.vs[44][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[44][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[44][\"GUID__\"] = UUID('5e14b29f-f5e6-4d6d-bfac-8616df51ab56')\n self.vs[45][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[45][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[45][\"GUID__\"] = UUID('57ac3f37-c63f-4a74-bc90-a846fb38e370')\n self.vs[46][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[46][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[46][\"GUID__\"] = UUID('9fc39a10-40e0-47f4-93c6-eccc9fdbd594')\n self.vs[47][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[47][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[47][\"GUID__\"] = UUID('00e09455-e8b5-414e-8eee-abbe55b7a65d')\n self.vs[48][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[48][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[48][\"GUID__\"] = UUID('17170197-069c-44fa-9239-dec8622935ee')\n self.vs[49][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[49][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[49][\"GUID__\"] = UUID('a4654b49-ee9c-4f69-a4e2-b8101c7086d2')\n self.vs[50][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[50][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[50][\"GUID__\"] = UUID('f9e0515c-b37c-4c22-8fe9-49c98acd152d')\n self.vs[51][\"associationType\"] 
= \"\"\"p\"\"\"\n self.vs[51][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[51][\"GUID__\"] = UUID('2c60fd52-acfa-4cba-8c04-53c9affdc4db')\n self.vs[52][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[52][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[52][\"GUID__\"] = UUID('f8f3ccd7-1cd5-4a57-b6a8-d35ba5bef6e4')\n self.vs[53][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[53][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[53][\"GUID__\"] = UUID('7c94a074-10cb-4087-acd1-09f74b36fee5')\n self.vs[54][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[54][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[54][\"GUID__\"] = UUID('857117de-5cb0-4717-8c19-a916f3913d44')\n self.vs[55][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[55][\"GUID__\"] = UUID('be66b7a4-a420-4307-9c3e-15a25480f612')\n self.vs[56][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[56][\"GUID__\"] = UUID('8b06f23c-dc76-480c-a91b-2a89628187bb')\n self.vs[57][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[57][\"GUID__\"] = UUID('a30e8284-77ae-44b5-83fe-950b7a7cf134')\n self.vs[58][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[58][\"GUID__\"] = UUID('d79efc53-0195-4578-9e6e-f325fa1b9347')\n self.vs[59][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[59][\"GUID__\"] = UUID('4c20c97d-c715-4ddc-ba86-f4b8f93342f2')\n self.vs[60][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[60][\"GUID__\"] = UUID('b6badd99-bce6-4ecb-95f2-2a56eb8e31ec')\n self.vs[61][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[61][\"GUID__\"] = UUID('784aca61-7263-4894-ada3-514b7dc1263c')\n self.vs[62][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[62][\"GUID__\"] = UUID('b751aba0-9035-400e-81b0-a05af5ff13f8')\n self.vs[63][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[63][\"GUID__\"] = UUID('f5e9aa39-f124-44ff-bf9e-835d8231fa1c')\n self.vs[64][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[64][\"GUID__\"] = UUID('adb9f451-c62d-4218-aebc-d7065b89a497')\n self.vs[65][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[65][\"GUID__\"] = UUID('71250a4b-2989-43ad-8a29-d2c8f7011af6')\n self.vs[66][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[66][\"GUID__\"] = UUID('ef32cf77-f92d-4364-b997-484a66740660')\n self.vs[67][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[67][\"GUID__\"] = UUID('c3c01696-8c64-45f7-a598-6e443991711f')\n self.vs[68][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[68][\"GUID__\"] = UUID('0481036c-254e-4f46-a7c3-6f4a865fe7bd')\n self.vs[69][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[69][\"GUID__\"] = UUID('f98b92f3-81c2-403a-ba4b-29cb117d561a')\n self.vs[70][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[70][\"GUID__\"] = UUID('c32d7a5a-e311-48d5-b3fc-2a284673c4aa')\n self.vs[71][\"name\"] = \"\"\"eq1\"\"\"\n self.vs[71][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[71][\"GUID__\"] = UUID('0abf26da-d349-4bad-be96-014c8959a4cd')\n self.vs[72][\"name\"] = \"\"\"eq2\"\"\"\n self.vs[72][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[72][\"GUID__\"] = UUID('af92b37e-0c63-4fe5-a906-7cd312cad172')\n self.vs[73][\"name\"] = \"\"\"eq3\"\"\"\n self.vs[73][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[73][\"GUID__\"] = UUID('108e8752-a98c-44df-b24a-3b958c450846')\n self.vs[74][\"name\"] = \"\"\"eq4\"\"\"\n self.vs[74][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[74][\"GUID__\"] = UUID('340c5b78-fbbc-4734-ac7d-8a1f953679e3')\n self.vs[75][\"name\"] = \"\"\"eq5\"\"\"\n self.vs[75][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[75][\"GUID__\"] = UUID('63513c17-c285-47ce-9b5c-e658df31b8bf')\n self.vs[76][\"name\"] = \"\"\"eq6\"\"\"\n self.vs[76][\"mm__\"] = \"\"\"Equation\"\"\"\n 
self.vs[76][\"GUID__\"] = UUID('dfd958e8-0fd4-4975-b28f-dab1df8a6858')\n self.vs[77][\"name\"] = \"\"\"eq7\"\"\"\n self.vs[77][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[77][\"GUID__\"] = UUID('1cd0e4a3-2b1a-42c8-bdf3-f98e156d8265')\n self.vs[78][\"name\"] = \"\"\"eq8\"\"\"\n self.vs[78][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[78][\"GUID__\"] = UUID('d7c1a1c4-4b83-4e3c-9e1f-2212a30343b1')\n self.vs[79][\"name\"] = \"\"\"eq9\"\"\"\n self.vs[79][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[79][\"GUID__\"] = UUID('aea37644-aa22-4e82-92a7-17d85ad5acf3')\n self.vs[80][\"name\"] = \"\"\"eq10\"\"\"\n self.vs[80][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[80][\"GUID__\"] = UUID('f7db1558-e110-4984-b825-62e4ce6f1324')\n self.vs[81][\"name\"] = \"\"\"eq11\"\"\"\n self.vs[81][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[81][\"GUID__\"] = UUID('a0722a1f-aaa4-4ac3-99d3-5bea37c15e79')\n self.vs[82][\"name\"] = \"\"\"eq12\"\"\"\n self.vs[82][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[82][\"GUID__\"] = UUID('ddbd74ac-21f7-4724-a2a8-b78c7389a8f4')\n self.vs[83][\"name\"] = \"\"\"eq13\"\"\"\n self.vs[83][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[83][\"GUID__\"] = UUID('a8fe40b1-4985-43d2-a874-0741d09ba4ae')\n self.vs[84][\"name\"] = \"\"\"eq14\"\"\"\n self.vs[84][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[84][\"GUID__\"] = UUID('281fd930-5f47-4b53-949b-e274ec95fdef')\n self.vs[85][\"name\"] = \"\"\"eq15\"\"\"\n self.vs[85][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[85][\"GUID__\"] = UUID('2e2199ae-3f44-4d76-b322-4b617a8c58db')\n self.vs[86][\"name\"] = \"\"\"eq16\"\"\"\n self.vs[86][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[86][\"GUID__\"] = UUID('25ad532f-5f8d-433a-bb65-507c97469275')\n self.vs[87][\"name\"] = \"\"\"isComposite\"\"\"\n self.vs[87][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[87][\"Type\"] = \"\"\"'Bool'\"\"\"\n self.vs[87][\"GUID__\"] = UUID('75b3e3d3-2cfc-4444-b65e-2fc5a8b7ae5d')\n self.vs[88][\"name\"] = \"\"\"literal\"\"\"\n self.vs[88][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[88][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[88][\"GUID__\"] = UUID('426aea1c-8a9f-4651-b297-9ec3c1c1352e')\n self.vs[89][\"name\"] = \"\"\"literal\"\"\"\n self.vs[89][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[89][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[89][\"GUID__\"] = UUID('284a3a1d-8a2d-4cef-9551-98d424afe038')\n self.vs[90][\"name\"] = \"\"\"literal\"\"\"\n self.vs[90][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[90][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[90][\"GUID__\"] = UUID('3b7a1cdc-9ffb-48db-994f-497c06449458')\n self.vs[91][\"name\"] = \"\"\"literal\"\"\"\n self.vs[91][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[91][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[91][\"GUID__\"] = UUID('40cff5ab-cab2-4fab-bbc1-c8039fe486ac')\n self.vs[92][\"name\"] = \"\"\"name\"\"\"\n self.vs[92][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[92][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[92][\"GUID__\"] = UUID('b9e0ab51-1690-44de-875b-773826f9e420')\n self.vs[93][\"name\"] = \"\"\"literal\"\"\"\n self.vs[93][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[93][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[93][\"GUID__\"] = UUID('708e489d-456a-4974-9198-73334eb3d1d8')\n self.vs[94][\"name\"] = \"\"\"literal\"\"\"\n self.vs[94][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[94][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[94][\"GUID__\"] = UUID('bdabcea3-c164-4f6b-a54f-be957abedb49')\n self.vs[95][\"name\"] = \"\"\"literal\"\"\"\n self.vs[95][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[95][\"Type\"] = \"\"\"'String'\"\"\"\n 
self.vs[95][\"GUID__\"] = UUID('22f79d9e-a9bf-41b5-9559-4560af4afc10')\n self.vs[96][\"name\"] = \"\"\"literal\"\"\"\n self.vs[96][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[96][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[96][\"GUID__\"] = UUID('56b242b2-5ebd-4a02-a1bb-829ecc6822a7')\n self.vs[97][\"name\"] = \"\"\"name\"\"\"\n self.vs[97][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[97][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[97][\"GUID__\"] = UUID('46680774-a892-41cb-8005-809b5eea2003')\n self.vs[98][\"name\"] = \"\"\"literal\"\"\"\n self.vs[98][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[98][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[98][\"GUID__\"] = UUID('c8c58f99-e94c-442b-a747-c873a43b903b')\n self.vs[99][\"name\"] = \"\"\"literal\"\"\"\n self.vs[99][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[99][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[99][\"GUID__\"] = UUID('18aa7445-341a-40e8-b09c-70904b3f9994')\n self.vs[100][\"name\"] = \"\"\"literal\"\"\"\n self.vs[100][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[100][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[100][\"GUID__\"] = UUID('9f63580a-288f-4d14-b275-b96062163c5a')\n self.vs[101][\"name\"] = \"\"\"pivot\"\"\"\n self.vs[101][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[101][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[101][\"GUID__\"] = UUID('c8777ba9-8c6e-4582-a082-81f2f34e6016')\n self.vs[102][\"name\"] = \"\"\"pivot\"\"\"\n self.vs[102][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[102][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[102][\"GUID__\"] = UUID('ce2a6aa7-c8ce-4cee-807c-cd4de96a08bf')\n self.vs[103][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[103][\"GUID__\"] = UUID('8119a747-1d59-4f48-83a6-16869a919672')\n self.vs[104][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[104][\"GUID__\"] = UUID('b7c5aeaf-7e59-4a81-9616-bb2474f2660f')\n self.vs[105][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[105][\"GUID__\"] = UUID('ced29f38-6ce7-449c-823f-34aaab43899b')\n self.vs[106][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[106][\"GUID__\"] = UUID('e29dc6da-439d-4a9d-9d40-e87aa9fbebd3')\n self.vs[107][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[107][\"GUID__\"] = UUID('af49357e-a46d-4ee5-ab4a-b6d6ef261df0')\n self.vs[108][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[108][\"GUID__\"] = UUID('ff49109b-ccc0-4635-9a33-d88c1d675bc6')\n self.vs[109][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[109][\"GUID__\"] = UUID('423ad2a2-0a19-4192-902d-706965800fef')\n self.vs[110][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[110][\"GUID__\"] = UUID('5864c11a-7792-4549-999f-bc86a4246314')\n self.vs[111][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[111][\"GUID__\"] = UUID('7182946d-d5f6-4a7c-acaa-d4eeb97133db')\n self.vs[112][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[112][\"GUID__\"] = UUID('d965f0b2-048d-490c-81f5-2b18446941de')\n self.vs[113][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[113][\"GUID__\"] = UUID('6e4c8ba9-6ab0-44d3-9cc6-c181772a1e3b')\n self.vs[114][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[114][\"GUID__\"] = UUID('5633c48b-1add-43eb-9789-1bece00f8079')\n self.vs[115][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[115][\"GUID__\"] = UUID('d2c598e2-09b1-4c12-acff-871f6662238a')\n self.vs[116][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[116][\"GUID__\"] = UUID('33a09bc8-cfa9-4367-834e-41bfae2fa7b6')\n self.vs[117][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[117][\"GUID__\"] = UUID('858b7fe0-edf0-4eda-ae81-6477f6499fb7')\n self.vs[118][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[118][\"GUID__\"] = UUID('a19a8472-6b86-4aee-b2bb-d66b4d26aeea')\n self.vs[119][\"name\"] = 
\"\"\"true\"\"\"\n self.vs[119][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[119][\"Type\"] = \"\"\"'Bool'\"\"\"\n self.vs[119][\"GUID__\"] = UUID('ba19f7ae-c0e3-43f5-9c87-2e08b3ff7d4e')\n self.vs[120][\"name\"] = \"\"\"sh\"\"\"\n self.vs[120][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[120][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[120][\"GUID__\"] = UUID('b78c45bc-2ecd-438a-a905-dbd90a4edeed')\n self.vs[121][\"name\"] = \"\"\"exit_in\"\"\"\n self.vs[121][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[121][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[121][\"GUID__\"] = UUID('0bbc3f31-d9e3-49a7-b213-d874f9d6e0ac')\n self.vs[122][\"name\"] = \"\"\"exack_in\"\"\"\n self.vs[122][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[122][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[122][\"GUID__\"] = UUID('e58ce45d-49e1-44b9-8a0a-78c1f1305afd')\n self.vs[123][\"name\"] = \"\"\"sh_in\"\"\"\n self.vs[123][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[123][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[123][\"GUID__\"] = UUID('34144527-9a72-44f3-8afe-f49bbe5fac47')\n self.vs[124][\"name\"] = \"\"\"C\"\"\"\n self.vs[124][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[124][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[124][\"GUID__\"] = UUID('61ed1583-c983-4369-b0de-0c3ca82aba52')\n self.vs[125][\"name\"] = \"\"\"enp\"\"\"\n self.vs[125][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[125][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[125][\"GUID__\"] = UUID('a4bfdfad-6e17-46b1-9939-685bd4cbfb62')\n self.vs[126][\"name\"] = \"\"\"exit_in\"\"\"\n self.vs[126][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[126][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[126][\"GUID__\"] = UUID('92007092-a080-4cb3-ba90-cbc8e6637732')\n self.vs[127][\"name\"] = \"\"\"exack_in\"\"\"\n self.vs[127][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[127][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[127][\"GUID__\"] = UUID('1a61b1e5-e926-45cd-bf6a-60adeef0d338')\n self.vs[128][\"name\"] = \"\"\"sh_in\"\"\"\n self.vs[128][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[128][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[128][\"GUID__\"] = UUID('95c52a1f-42ae-4384-bcfc-0cab537ee1cf')\n self.vs[129][\"name\"] = \"\"\"H\"\"\"\n self.vs[129][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[129][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[129][\"GUID__\"] = UUID('146f9ec3-3f2d-48a1-92ac-a5546268e069')\n self.vs[130][\"name\"] = \"\"\"exit_in\"\"\"\n self.vs[130][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[130][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[130][\"GUID__\"] = UUID('c52aec39-171b-4710-8150-b343a557bebf')\n self.vs[131][\"name\"] = \"\"\"exack_in\"\"\"\n self.vs[131][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[131][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[131][\"GUID__\"] = UUID('e2ab70c6-01a2-420a-9e96-bb238fe29689')\n self.vs[132][\"name\"] = \"\"\"sh_in\"\"\"\n self.vs[132][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[132][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[132][\"GUID__\"] = UUID('f476d190-6014-4c6a-a27f-c3f45b9d10ba')\n self.vs[133][\"name\"] = \"\"\"procdef\"\"\"\n self.vs[133][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[133][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[133][\"GUID__\"] = UUID('5a678e2c-8444-4e53-a430-5f0b1a603c07')\n self.vs[134][\"name\"] = \"\"\"localdefcompstate\"\"\"\n self.vs[134][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[134][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[134][\"GUID__\"] = UUID('dfac50b9-4956-45c0-b8a5-14f609e078e5')\n self.vs[135][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[135][\"GUID__\"] = 
UUID('632f235b-d18d-4939-b4b8-9d38a7505cc8')\n self.vs[136][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[136][\"GUID__\"] = UUID('b5724e21-522d-415c-8538-59b279583ff4')\n self.vs[137][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[137][\"GUID__\"] = UUID('a05b3ebc-4b86-43f0-adcd-d46d8c4d773e')\n self.vs[138][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[138][\"GUID__\"] = UUID('b43788ff-9ab6-4bec-b8ae-c76b10985fc3')\n self.vs[139][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[139][\"GUID__\"] = UUID('16c28ca0-6429-4540-9505-e8057aad958a')\n self.vs[140][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[140][\"GUID__\"] = UUID('e97eb3e2-8fca-41a6-9599-a173acee4c22')\n self.vs[141][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[141][\"GUID__\"] = UUID('eda0c12e-26c0-4296-9d34-62cbe764e151')\n self.vs[142][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[142][\"GUID__\"] = UUID('336e11b9-cbc3-41b4-9c07-041ed4ba1453')\n self.vs[143][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[143][\"GUID__\"] = UUID('31297722-e0a1-4e03-8c28-44abe1930256')\n self.vs[144][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[144][\"GUID__\"] = UUID('51fcd9e5-c817-4710-9a24-d080b3f8fa71')\n self.vs[145][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[145][\"GUID__\"] = UUID('7acc9f40-a78c-47ac-8e38-fc7f4647c2f1')\n self.vs[146][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[146][\"GUID__\"] = UUID('c94eef8e-f552-4b53-ba99-f21c13dfca4a')\n self.vs[147][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[147][\"GUID__\"] = UUID('09d8138f-8be9-4a30-af93-cc714a2570db')\n self.vs[148][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[148][\"GUID__\"] = UUID('792865ce-75f2-41cb-9c42-74f831e96a76')\n self.vs[149][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[149][\"GUID__\"] = UUID('fb0f0ebe-59c0-4ffc-8370-2ead7eb40f18')\n self.vs[150][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[150][\"GUID__\"] = UUID('23a9a8da-507e-4d11-a8e0-f5b721f01f96')\n self.vs[151][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[151][\"GUID__\"] = UUID('38fd864c-df5e-4e85-9838-2e665d75637c')", "def __init__(self, *args):\n this = _digital_swig.new_digital_simple_framer_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
constellation_decoder_cb(digital_constellation_sptr constellation) > digital_constellation_decoder_cb_sptr

Constellation Decoder.
def constellation_decoder_cb(*args, **kwargs):
    return _digital_swig.constellation_decoder_cb(*args, **kwargs)
[ "def decode_func():\n dec_outputs, _, dec_lengths = contrib_seq2seq.dynamic_decode(\n decoder=self.decoder(\n embeddings=embeddings,\n inputs=inputs,\n inputs_length=inputs_length,\n hiddens=hiddens,\n hiddens_length=hiddens_length,\n enc_state=enc_state,\n mode=mode,\n hparams=self._hparams,\n decoder_hparams=decoder_hparams,\n reuse=tf.AUTO_REUSE),\n impute_finished=impute_finished,\n maximum_iterations=decoder_iterations)\n return {\n \"rnn_output\": dec_outputs.rnn_output,\n \"sample_id\": dec_outputs.sample_id,\n \"length\": dec_lengths}", "def _get_conv_decoder(self, decoder_phase):\n if decoder_phase == \"past\":\n conv_decoder = self.conv_decoder\n elif decoder_phase == \"future\":\n conv_decoder = self.conv_decoder_future\n else:\n raise ValueError(\"Unknown decoder.\")\n\n return conv_decoder", "def decoder(self, decoder):\n\n self._decoder = decoder", "def _create_decoder(self):\n params = self.params['decoder_params']\n return self.params['decoder'](params=params, mode=self.mode, model=self)", "def __decryptvalchange(self,x,y,listread,grysc,count,msgencoded,endcheck,initcheck):\n\n\t\t\"\"\" If its a greyscale value, it creates a list and appends the greyscale value, otherwise it just converts the tuple to a list. It then reads the LSB and adds it to another list, once its read 8 bits it converts it to a character and apppends it to a new list of characters\"\"\"\n\t\tinit = '$a$a$'\n\t\tinitlist = list(init)\n\t\tfinal = '$a$a$'\n\t\tfinalist = list(final)\n\t\tif(grysc == 1):\n\t\t\tnewrgb = []\n\t\t\tdig = []\n\t\t\tdig.append(self.pic[x,y])\n\t\t\tnewrgb.append(self.pic[x,y])\n\t\telse:\n\t\t\tnewrgb = list(self.pic[x, y])\n\t\t\tdig = self.pic[x, y]\n\t\t\n\t\t\"\"\" Gets a pixel and gets the LSB, once 8 bits are got, it adds it to the message list\"\"\"\n\t\tfor z in range(0, len(newrgb)):\n\t\t\tif(count % 8 == 0) and (count != 0):\n\t\t\t\tasciival = 0\n\t\t\t\tpower = 7\n\t\t\t\tfor l in listread:\n\t\t\t\t\tasciival = asciival + l*(2**power)\n\t\t\t\t\tpower -= 1\n\t\t\t\tcharread = chr(asciival)\n\t\t\t\tdel listread\n\t\t\t\tlistread = []\n\t\t\t\tmsgencoded.append(charread)\n\t\t\t\tif(len(msgencoded) == 5):\n\t\t\t\t\tif(initlist != msgencoded):\n\t\t\t\t\t\traise ValueError(\"The direction provided to decrypt does not contain any data\")\n\t\t\t\tif(len(endcheck) != 0):\t\n\t\t\t\t\tendcheck.append(charread)\n\t\t\t\t\tif(len(endcheck) == 5):\n\t\t\t\t\t\tif(endcheck!=finalist):\n\t\t\t\t\t\t\tdel endcheck\n\t\t\t\t\t\t\tendcheck = []\n\t\t\t\t\t\telif(initcheck == 1):\n\t\t\t\t\t\t\treturn count, listread,endcheck\n\t\t\t\tif (charread == '$' and len(endcheck) == 0 and initcheck == 1):\t\n\t\t\t\t\tendcheck.append(charread)\n\t\t\tlistread.append((newrgb[z] % 2))\n\t\t\tcount = count + 1\n\t\tdel newrgb\n\t\tdel dig\n\t\treturn count,listread,endcheck", "def build_decoder(self):\n\n dec_input = self.build_decoder_input()\n dec_dense = self.build_decoder_dense(dec_input)\n dec_reshape = self.build_decoder_reshape(dec_dense)\n dec_conv = self.build_decoder_convs(dec_reshape)\n dec_output = self.build_decoder_output(dec_conv)\n\n self.decoder = Model(dec_input, dec_output,\n name='Decoder')", "def _create_decode_layer(self):\n with tf.name_scope(\"decoder\"):\n\n activation = tf.add(\n tf.matmul(self.encode, tf.transpose(self.W_)),\n self.bv_\n )\n\n if self.dec_act_func:\n self.reconstruction = self.dec_act_func(activation)\n else:\n self.reconstruction = activation", "def vaegan_decoder(z, args, reuse=False):\n with tf.variable_scope(\"decoder\", 
reuse=reuse):\n # number of filters in the last convulational layer of encoder\n num_last_conv_filters = 256\n\n # height and width to which the output of dense layer must\n # be reshaped to feed to subsequent deconv layers\n # 3 height downsampling layers in encoder = 8x reduction\n reshape_height = floor(args.crop_height / 8)\n # 3 width downsampling layers in encoder = 8x reduction\n reshape_width = floor(args.crop_width / 8)\n\n # 8x8x256 fc, batch norm, relu\n fc1 = tf.layers.dense(\n inputs=z,\n units=num_last_conv_filters*reshape_height*reshape_height,\n activation=None,\n use_bias=True\n )\n batch_norm_fc = tf.layers.batch_normalization(fc1)\n relu_fc = tf.nn.relu(batch_norm_fc)\n\n # reshape for deconv layers\n unflattened = tf.reshape(\n relu_fc,\n shape=(\n args.batch_size,\n reshape_height,\n reshape_width,\n num_last_conv_filters\n )\n )\n\n # if downsampled from even number shaped input,\n # then output_shape=2xinput_shape (same padding)\n # NOTE: same padding out = in * stride\n # else output_shape=2xinput_shape+1 (valid paddIing)\n # NOTE: valid padding out = (in-1) * stride + filter_size\n if floor(args.crop_height / 4) == reshape_height*2:\n padding = 'same'\n else:\n padding = 'valid'\n\n # 5x5 256 upsampling conv, batch norm, relu\n deconv1 = tf.layers.conv2d_transpose(\n inputs=unflattened,\n filters=256,\n kernel_size=5,\n strides=2,\n padding=padding,\n activation=None,\n use_bias=True\n )\n batch_norm1 = tf.layers.batch_normalization(deconv1)\n relu1 = tf.nn.relu(batch_norm1)\n\n # padding for current upsampling\n if floor(args.crop_height / 2) == relu1.shape[1]*2:\n padding = 'same'\n else:\n padding = 'valid'\n\n # 5x5 128 upsampling conv, batch norm, relu\n deconv2 = tf.layers.conv2d_transpose(\n inputs=relu1,\n filters=128,\n kernel_size=5,\n strides=2,\n padding=padding,\n activation=None,\n use_bias=True)\n batch_norm2 = tf.layers.batch_normalization(deconv2)\n relu2 = tf.nn.relu(batch_norm2)\n\n # padding for current upsampling\n if floor(args.crop_height) == relu2.shape[1]*2:\n padding = 'same'\n else:\n padding = 'valid'\n\n # 5x5 32 upsampling conv, batch norm, relu\n deconv3 = tf.layers.conv2d_transpose(\n inputs=relu2,\n filters=32,\n kernel_size=5,\n strides=2,\n padding=padding,\n activation=None,\n use_bias=True)\n batch_norm3 = tf.layers.batch_normalization(deconv3)\n relu3 = tf.nn.relu(batch_norm3)\n\n # 5x5 1 conv to get reconstructed frame\n recon_frame = tf.layers.conv2d(\n inputs=relu3,\n filters=1,\n kernel_size=5,\n strides=1,\n padding='same',\n activation=tf.nn.sigmoid,\n use_bias=True)\n return recon_frame", "def diff_decoder_bb(*args, **kwargs):\n return _digital_swig.diff_decoder_bb(*args, **kwargs)", "def h264_frame_cb(self, h264_frame):\n pass", "def decrypt_and_decode(self, data, **kwargs):\n return", "def _create_decode_layer(self):\n\n with tf.name_scope(\"Decode\"):\n if self.dec_act_func == 'sigmoid':\n _dec_act_func = tf.nn.sigmoid\n\n elif self.dec_act_func == 'tanh':\n _dec_act_func = tf.nn.tanh\n\n else:\n _dec_act_func = lambda x: x\n\n self.decode = _dec_act_func(tf.matmul(self.encode, tf.transpose(self.W_)) + self.bv_)\n\n tf.summary.histogram('weights', tf.transpose(self.W_))\n tf.summary.histogram('bias', self.bv_)\n tf.summary.histogram('decodings', self.decode)", "def build_decoder_input(self):\n decoder_input_layer = layers.Input(\n shape=(self.latent_dim,),\n name=\"decoder_input\")\n\n return decoder_input_layer", "def _decode_player_utterance(self, player_utterance):\n # THIS CAN'T BE DONE UNTIL WE SETTLE ON AN 
ELEGANT WAY OF INCORPORATING TENSORFLOW\n # INTO THE TALK OF THE TOWN FRAMEWORK", "def __init_decoder_params_fc(self):\n self.dec_params_fc = list(reversed(\n [self.__inshape[-1]]+self.layer_cfg[:-1]))", "def new_decoded_pad(self, decoder, pad, is_last):\n\t\tself.probe_id = pad.add_buffer_probe(self._buffer_probe)\n\t\tself.probed_pad = pad\n\t\tself.processing = True\n\t\tself.query_duration()", "def decode_card(card):\n return Card(card[0], card[1])", "def __init__(self, model_dimension, dropout_percentage, number_of_heads, feedforward_dimension, number_of_layers):\n super(Decoder, self).__init__()\n self.dec_layers = clone(DecoderLayer(model_dimension, dropout_percentage, number_of_heads, feedforward_dimension), number_of_layers)", "def mostlikelydecode(self):\n \treturn Cipher(self.mostlikelycodeword()).decode(self.ciphertext)", "def _decode(lconf, dconf, econf, fold):\n if fp.exists(_counts_file_path(lconf, econf, fold)):\n print(\"skipping %s/%s (already done)\" % (econf.learner.name,\n econf.decoder.name),\n file=sys.stderr)\n return\n\n fold_dir = _fold_dir_path(lconf, fold)\n if not os.path.exists(fold_dir):\n os.makedirs(fold_dir)\n args = FakeDecodeArgs(lconf, econf, fold)\n phrasebook = args_to_phrasebook(args)\n decoder = args_to_decoder(args)\n\n fold_attach, fold_relate =\\\n att.decode.select_fold(dconf.attach, dconf.relate,\n args, phrasebook)\n attach = DataAndModel(fold_attach,\n load_model(args.attachment_model))\n relate = DataAndModel(fold_relate,\n load_model(args.relation_model))\n threshold = args_to_threshold(attach.model, decoder,\n requested=args.threshold)\n config = DecoderConfig(phrasebook=phrasebook,\n threshold=threshold,\n post_labelling=False,\n use_prob=args.use_prob)\n\n att.decode.main_for_harness(args, config, decoder, attach, relate)\n args.cleanup()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_constellation_receiver_cb_sptr
__init__(self, p) > digital_constellation_receiver_cb_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_constellation_receiver_cb_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoCallback()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self.interface = \\\n {'initialization variables': None,\n 'input variables': None,\n 'input events': None,\n 'output events': None}", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, params=None):\n super(NetPositionsMe, self).__init__()\n self.params = params", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, name=None):\n self._mng = pn_messenger(name)", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoCallbackList()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoEventCallback()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_framer_sink_1_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, args, phase):\n self.args = args\n self.phase = phase", "def init(self, state: 'SoState') -> \"void\":\n return _coin.SoListenerDopplerElement_init(self, state)", "def __init__(self, callback = None, userdata = None):\n this = _coin.new_SbClip(callback, userdata)\n try: self.this.append(this)\n except: self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
constellation_receiver_cb(digital_constellation_sptr constellation, float loop_bw, float fmin, float fmax) > digital_constellation_receiver_cb_sptr

This block takes care of receiving generic modulated signals through phase, frequency, and symbol synchronization. It performs carrier frequency and phase locking as well as symbol timing recovery. The phase and frequency synchronization are based on a Costas loop that finds the error of the incoming signal point compared to its nearest constellation point. The frequency and phase of the NCO are updated according to this error.
def constellation_receiver_cb(*args, **kwargs):
    return _digital_swig.constellation_receiver_cb(*args, **kwargs)
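The query above describes a decision-directed Costas-style loop. The NumPy sketch below illustrates that mechanism only; it is not the block's implementation, and the second-order loop-gain formulas (alpha and beta derived from loop_bw) are standard control-loop expressions taken as assumptions.

# Hedged sketch of a decision-directed carrier phase/frequency loop.
import numpy as np

def decision_directed_loop(samples, points, loop_bw, fmin, fmax):
    points = np.asarray(points, dtype=complex)
    # Critically damped 2nd-order loop gains derived from loop_bw (assumed).
    damping = np.sqrt(2.0) / 2.0
    denom = 1.0 + 2.0 * damping * loop_bw + loop_bw ** 2
    alpha = 4.0 * damping * loop_bw / denom    # phase correction gain
    beta = 4.0 * loop_bw ** 2 / denom          # frequency correction gain
    phase, freq = 0.0, 0.0
    out = np.empty(len(samples), dtype=complex)
    for i, x in enumerate(samples):
        y = x * np.exp(-1j * phase)                      # NCO derotation
        nearest = points[np.argmin(np.abs(points - y))]  # nearest-point decision
        err = np.angle(y * np.conj(nearest))             # error vs decision
        freq = np.clip(freq + beta * err, fmin, fmax)    # update NCO frequency
        phase += freq + alpha * err                      # update NCO phase
        out[i] = y
    return out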
[ "def set_loop_bandwidth(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_set_loop_bandwidth(self, *args, **kwargs)", "def receiver_block(self):\n\t\t\n\t\t# first create a list of received signals\n\t\treceived_signals = list()\n\t\tfor src in self.sources:\n\t\t\tif (isinstance(src,sr.PointSource)): # no other source types available yet\n\t\t\t\tsrc_pos = src.position\n\t\t\t\tif (isinstance(src_pos,sr.LocalPosition)):\n\t\t\t\t\t# for LocalPosition sources, the signal is received as-is\n\t\t\t\t\treceived_signals.append(src.signal)\n\t\t\t\telif (isinstance(src_pos,sr.SkyPosition)):\n\t\t\t\t\t# add propagation delay; other effects can be added later\n\t\t\t\t\t# and can include atmospheric delay / attenuation, gain\n\t\t\t\t\t# pattern of the antenna, etc.\n\t\t\t\t\tl,m,n = src_pos.coords_lmn\n\t\t\t\t\tant_x,ant_y,ant_z = self.position\n\t\t\t\t\tdelay = -(l*ant_x + m*ant_y + n*ant_z)/const.c\n\t\t\t\t\tdelay_block = bl.AnalogDelay(delay)\n\t\t\t\t\tdelay_block.attach_source(src.signal)\n\t\t\t\t\treceived_signals.append(delay_block)\n\t\t\t\telif (isinstance(src_pos,sr.CartesianPosition)):\n\t\t\t\t\t# add propagation delay\n\t\t\t\t\tsrc_x,src_y,src_z = src_pos.coords\n\t\t\t\t\tant_x,ant_y,ant_z = self.position\n\t\t\t\t\tdelay = -np.sqrt((ant_x-src_x)**2 + (ant_y-src_y)**2 + (ant_z-src_z)**2)/const.c\n\t\t\t\t\tdelay_block = bl.AnalogDelay(delay)\n\t\t\t\t\tdelay_block.attach_source(src.signal)\n\t\t\t\t\treceived_signals.append(delay_block)\n\t\t\n\t\tresult = bl.AnalogCombiner()\n\t\tresult.attach_source(received_signals)\n\t\t\n\t\treturn result", "def reconstruct_pu(self, receivers):\n self.fpts = receivers\n # Initialize variables\n self.p_recon = np.zeros((self.fpts.coord.shape[0], len(self.controls.k0)), dtype=complex)\n self.uz_recon = np.zeros((self.fpts.coord.shape[0], len(self.controls.k0)), dtype=complex)\n # Initialize bar\n bar = tqdm(total = len(self.controls.k0), desc = 'Reconstructing sound field...')\n for jf, k0 in enumerate(self.controls.k0):\n # For smooth transition from continous to discrete k domain\n kappa = np.sqrt(self.delta_kx*self.delta_ky/(2*np.pi*k0**2))\n # compute kz\n kz_f = form_kz(k0, self.kx_f, self.ky_f)\n k_vec_ref = np.array([self.kx_f, self.ky_f, kz_f])\n # Reflected or radiating part\n fz_ref = self.f_ref * np.sqrt(k0/np.abs(kz_f))\n recs = np.array([self.fpts.coord[:,0], self.fpts.coord[:,1],\n self.fpts.coord[:,2]-self.zp]).T\n psi_ref = fz_ref * kappa * np.exp(-1j * recs @ k_vec_ref)\n # Incident part\n if self.f_inc != 0:\n k_vec_inc = np.array([self.kx_f, self.ky_f, -kz_f])\n fz_inc = self.f_inc * np.sqrt(k0/np.abs(kz_f))\n recs = np.array([self.fpts.coord[:,0], self.fpts.coord[:,1],\n self.fpts.coord[:,2]-self.zm]).T\n psi_inc = fz_inc * kappa * np.exp(-1j * recs @ k_vec_inc)\n # Forming the sensing matrix\n if self.f_inc == 0:\n h_mtx = psi_ref\n else:\n h_mtx = np.hstack((psi_inc, psi_ref))\n # Compute p and uz\n self.p_recon[:,jf] = h_mtx @ self.pk[:,jf]\n if self.f_inc == 0:\n self.uz_recon[:,jf] = -((np.divide(kz_f, k0)) * h_mtx) @ self.pk[:,jf]\n else:\n self.uz_recon[:,jf] = -((np.divide(np.concatenate((-kz_f, kz_f)), k0)) * h_mtx) @ self.pk[:,jf]\n bar.update(1)\n bar.close()", "def cavity(\n component: Component,\n coupler: Component = coupler,\n length: float = 0.1,\n gap: float = 0.2,\n wg_width: float = 0.5,\n) -> Component:\n mirror = pp.call_if_func(component)\n coupler = pp.call_if_func(coupler, length=length, gap=gap, wg_width=wg_width)\n\n c = pp.Component()\n cr = c << coupler\n ml = 
c << mirror\n mr = c << mirror\n\n ml.connect(\"W0\", destination=cr.ports[\"W1\"])\n mr.connect(\"W0\", destination=cr.ports[\"E1\"])\n c.add_port(\"W0\", port=cr.ports[\"W0\"])\n c.add_port(\"E0\", port=cr.ports[\"E0\"])\n return c", "def add_constant_signal(self,\n f_start,\n drift_rate,\n level,\n width,\n f_profile_type='sinc2',\n doppler_smearing=False):\n f_start = unit_utils.get_value(f_start, u.Hz)\n drift_rate = unit_utils.get_value(drift_rate, u.Hz / u.s)\n width = unit_utils.get_value(width, u.Hz)\n\n start_index = self.get_index(f_start)\n\n # Calculate the bounding box, to optimize signal insertion calculation\n px_width_offset = 2 * width / self.df\n if drift_rate < 0:\n px_width_offset = -px_width_offset\n px_drift_offset = self.dt * (self.tchans - 1) * drift_rate / self.df\n if doppler_smearing:\n px_drift_offset += drift_rate * self.dt / self.df\n\n bounding_start_index = start_index + int(-px_width_offset)\n bounding_stop_index = start_index + int(px_drift_offset + px_width_offset)\n\n bounding_min_index = max(min(bounding_start_index, bounding_stop_index), 0)\n bounding_max_index = min(max(bounding_start_index, bounding_stop_index), self.fchans)\n\n # Select common frequency profile types\n if f_profile_type == 'gaussian':\n f_profile = f_profiles.gaussian_f_profile(width)\n elif f_profile_type == 'lorentzian':\n f_profile = f_profiles.lorentzian_f_profile(width)\n elif f_profile_type == 'voigt':\n f_profile = f_profiles.voigt_f_profile(width, width)\n elif f_profile_type == 'sinc2':\n f_profile = f_profiles.sinc2_f_profile(width)\n elif f_profile_type == 'box':\n f_profile = f_profiles.box_f_profile(width)\n else:\n raise ValueError('Unsupported f_profile for constant signal!')\n \n return self.add_signal(path=paths.constant_path(f_start, drift_rate),\n t_profile=t_profiles.constant_t_profile(level),\n f_profile=f_profile,\n bp_profile=bp_profiles.constant_bp_profile(level=1),\n bounding_f_range=(self.get_frequency(bounding_min_index),\n self.get_frequency(bounding_max_index)),\n doppler_smearing=doppler_smearing,\n smearing_subsamples=int(np.ceil(drift_rate / self.unit_drift_rate)))", "def set_loop_bandwidth(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_loop_bandwidth(self, *args, **kwargs)", "def __init__(self, fft_length, cp_length, occupied_tones, snr, ks, carrier_map_bin, nc_filter, logging=False):\n\n\tgr.hier_block2.__init__(self, \"ofdm_receiver\",\n\t\t\t\tgr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature\n gr.io_signature2(2, 2, gr.sizeof_gr_complex*occupied_tones, gr.sizeof_char)) # Output signature\n\n bw = (float(occupied_tones) / float(fft_length)) / 2.0\n tb = bw*0.04\n print \"ofdm_receiver:__init__:occupied_tones %s fft_length %d \" % (occupied_tones, fft_length)\n \n chan_coeffs = filter.firdes.low_pass (1.0, # gain\n 1.0, # sampling rate\n bw+tb, # midpoint of trans. band\n tb, # width of trans. 
band\n filter.firdes.WIN_HAMMING) # filter type\n \n self.chan_filt = filter.fft_filter_ccc(1, chan_coeffs)\n\n # linklab, get ofdm parameters\n self._fft_length = fft_length\n self._occupied_tones = occupied_tones\n self._cp_length = cp_length\n self._nc_filter = nc_filter\n self._carrier_map_bin = carrier_map_bin\n \n win = [1 for i in range(fft_length)]\n \n # linklab, initialization function\n self.initialize(ks, self._carrier_map_bin)\n \n\n zeros_on_left = int(math.ceil((fft_length - occupied_tones)/2.0))\n ks0 = fft_length*[0,]\n ks0[zeros_on_left : zeros_on_left + occupied_tones] = ks[0]\n\n ks0 = np_fft.ifftshift(ks0)\n ks0time = np_fft.ifft(ks0)\n # ADD SCALING FACTOR\n ks0time = ks0time.tolist()\n\n SYNC = \"pn\"\n if SYNC == \"ml\":\n nco_sensitivity = -1.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_ml(fft_length,\n cp_length,\n snr,\n ks0time,\n logging)\n elif SYNC == \"pn\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pn(fft_length,\n cp_length,\n logging)\n elif SYNC == \"pnac\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pnac(fft_length,\n cp_length,\n ks0time,\n logging)\n # for testing only; do not user over the air\n # remove filter and filter delay for this\n elif SYNC == \"fixed\":\n self.chan_filt = gr.multiply_const_cc(1.0)\n nsymbols = 18 # enter the number of symbols per packet\n freq_offset = 0.0 # if you use a frequency offset, enter it here\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_fixed(fft_length,\n cp_length,\n nsymbols,\n freq_offset,\n logging)\n\n # Set up blocks\n\n # Create a delay line, linklab\n self.delay = blocks.delay(gr.sizeof_gr_complex, fft_length)\n\n self.nco = analog.frequency_modulator_fc(nco_sensitivity) # generate a signal proportional to frequency error of sync block\n self.sigmix = blocks.multiply_cc()\n self.sampler = gr_papyrus.ofdm_sampler(fft_length, fft_length+cp_length)\n self.fft_demod = gr_fft.fft_vcc(fft_length, True, win, True)\n self.ofdm_frame_acq = gr_papyrus.ofdm_frame_acquisition(occupied_tones,\n fft_length,\n cp_length, ks[0])\n # linklab, check current mode: non-contiguous OFDM or not\n if self._nc_filter:\n print '\\nMulti-band Filter Turned ON!'\n # linklab, non-contiguous filter\n self.ncofdm_filt = ncofdm_filt(self._fft_length, self._occupied_tones, self._carrier_map_bin)\n self.connect(self, self.chan_filt, self.ncofdm_filt)\n self.connect(self.ncofdm_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. offset output to derotate input signal\n self.connect(self.ncofdm_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n else :\n print '\\nMulti-band Filter Turned OFF!'\n self.connect(self, self.chan_filt)\n self.connect(self.chan_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. 
offset output to derotate input signal\n self.connect(self.chan_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n\n self.connect(self.sigmix, (self.sampler,0)) # sample off timing signal detected in sync alg\n self.connect((self.ofdm_sync,1), (self.sampler,1)) # timing signal to sample at\n\n self.connect((self.sampler,0), self.fft_demod) # send derotated sampled signal to FFT\n self.connect(self.fft_demod, (self.ofdm_frame_acq,0)) # find frame start and equalize signal\n self.connect((self.sampler,1), (self.ofdm_frame_acq,1)) # send timing signal to signal frame start\n self.connect((self.ofdm_frame_acq,0), (self,0)) # finished with fine/coarse freq correction,\n self.connect((self.ofdm_frame_acq,1), (self,1)) # frame and symbol timing, and equalization\n\n if logging:\n self.connect(self.chan_filt, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-chan_filt_c.dat\"))\n self.connect(self.fft_demod, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-fft_out_c.dat\"))\n self.connect(self.ofdm_frame_acq,\n gr.file_sink(gr.sizeof_gr_complex*occupied_tones, \"ofdm_receiver-frame_acq_c.dat\"))\n self.connect((self.ofdm_frame_acq,1), gr.file_sink(1, \"ofdm_receiver-found_corr_b.dat\"))\n self.connect(self.sampler, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-sampler_c.dat\"))\n self.connect(self.sigmix, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-sigmix_c.dat\"))\n self.connect(self.nco, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-nco_c.dat\"))", "def compCIBForeground(band,debug=False,verb=False):\n # MS - Because we treat the CIB as a grey-body, this code is\n # formally identical to that which computes the emission of the \n # telescope mirrors.\n # This is also due to the assumption that we consider that the \n # transmission factor due to the telescope mirrors is negligible\n # with respect to that due to the instrument, so we have applied\n # the same value to a light source on the telescope or beyond.\n # 02/06/2020 Danger of copy/paste. in the plot the emissivity was that\n # of the telescope..\n\n # First perform a input check\n if (checkBandForError(band) == False):\n # the band is not within the accepted band labels\n return\n \n # We will need the pixel size\n pixSizeBand = getPixSizeBand(over=useDef)\n # if over=True it will return the default pixel size, useDef is set in the\n # preamble.\n\n # Now define the function that we will need to integrate\n # first define the grid. We will use a single frequency grid (overkill)\n # we cover the range 30mic->500mic, corresponding to 600 GHz to 10 THz\n numEltsGrid = 10000\n waveGrid = np.linspace(6e11,1e13,num=numEltsGrid)\n # same but normalized by 10^11\n waveGridNorm = np.linspace(6.,100.,num=numEltsGrid)\n\n # Get the filter transmission for this grid\n filtBandpass = filterTrans(band,waveGrid,worf='f',debug=debug)\n \n # now let's build the function under the integral\n twoHNu3C2 = 1.47e-17 * waveGridNorm**3\n hNuKT = hoverk * (waveGrid / cibTemp)\n \n # as math.exp only takes a scalar as an argument I need a loop here\n powSpecDens = []\n for i in range(numEltsGrid):\n # avoid divergence in computation\n if (hNuKT[i]<1e-3):\n powSpecDens.append(filtBandpass[i] * cibEmissivity*cibCorrEm[useFGCase]\\\n * twoHNu3C2[i] * (1./hNuKT[i]))\n elif (hNuKT[i]<100.):\n powSpecDens.append(filtBandpass[i] * cibEmissivity*cibCorrEm[useFGCase]\\\n * twoHNu3C2[i] * (1. 
/ (math.exp(hNuKT[i])-1)))\n else:\n powSpecDens.append(filtBandpass[i] * cibEmissivity*cibCorrEm[useFGCase]\\\n * twoHNu3C2[i] * math.exp(-1*hNuKT[i]))\n\n # convert it as an array\n powSpecDens = np.asarray(powSpecDens)\n \n if (verb):\n print('Using Foreground prescription: ',foreCases[useFGCase])\n idx = np.abs(waveGrid - 3e8/bandWave[band-1]).argmin()\n fg = powSpecDens[idx] / 1e-20\n print('Foreground at {0:6.2f} micron is {1:5.2g} MJy/sr'.format(bandWave[band-1]*1e6,fg))\n\n if (debug):\n plt.figure(figsize=(12,6))\n plt.xscale('log')\n plt.yscale('log')\n plt.grid(True,which='both')\n plt.plot(1e6*lightSpeed/waveGrid,powSpecDens)\n plt.xlabel('Wavelength in $\\mu$m')\n plt.ylabel('Brightness in W.m$^{-2}$.sr$^{-1}$.Hz$^{-1}$')\n \n # now integrate that function\n power = np.trapz(powSpecDens,x=waveGrid)\n # multiply by the terms in the formula that had no dependency on frequency\n power *= instTrans\n power *= etaColdStop\n power *= telEffArea\n pixSolidAngle = ((np.pi*pixSizeBand[band-1])/(3600.*180.))**2\n power *= pixSolidAngle\n \n return power", "def get_loop_bandwidth(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_loop_bandwidth(self)", "def get_blockdim_and_loop_cycle(self):\n #block_num = tik.Dprofile().get_aicore_num()\n block_num = tbe_platform.cce_conf.get_soc_spec(tbe_platform.cce_conf.CORE_NUM)\n \n shape_y = self.input_dict.get(\"y\").get(\"shape\")\n limit_size_of_each_block = shape_y[2] * shape_y[3]\n total_channel = shape_y[0] * shape_y[1]\n each_block_num = constant.BLOCK_SIZE // self.dsize\n each_block_align = \\\n ((each_block_num + limit_size_of_each_block - 1) //\n limit_size_of_each_block) * limit_size_of_each_block\n if limit_size_of_each_block * self.dsize < constant.BLOCK_SIZE:\n all_size = total_channel * limit_size_of_each_block * self.dsize\n if all_size < constant.BLOCK_SIZE:\n block_num = 1\n return block_num, total_channel, 0\n\n limit_size_of_each_block = each_block_align\n limit_channel_of_each_block = limit_size_of_each_block // \\\n (shape_y[2] * shape_y[3])\n loop = (total_channel * shape_y[2] * shape_y[3]) // \\\n limit_size_of_each_block\n mod_channel = ((total_channel * shape_y[2] * shape_y[3]) % \\\n limit_size_of_each_block) // (shape_y[2] * shape_y[3])\n if loop <= block_num:\n block_num = loop\n inner_loop = limit_channel_of_each_block\n inner_loop_mod = mod_channel\n else:\n inner_loop = (loop // block_num) * limit_channel_of_each_block\n inner_loop_mod = (loop % block_num) * limit_channel_of_each_block \\\n + mod_channel\n if inner_loop_mod > block_num:\n inner_loop = inner_loop + inner_loop_mod // block_num\n inner_loop_mod = inner_loop_mod % block_num\n\n return block_num, inner_loop, inner_loop_mod", "def compCIBNEP(band,debug=True):\n \n # First perform a input check\n if (checkBandForError(band) == False):\n # the band is not within the accepted band labels\n return\n \n # We will need the pixel size\n pixSizeBand = getPixSizeBand(over=useDef)\n # if over=True it will return the default pixel size, useDef is set in the\n # preamble.\n\n # Now define the function that we will need to integrate\n # first define the grid. 
We will use a single frequency grid (overkill)\n # we cover the range 30mic->500mic, corresponding to 600 GHz to 10 THz\n numEltsGrid = 10000\n waveGrid = np.linspace(6e11,1e13,num=numEltsGrid)\n # same but normalized by 10^11\n waveGridNorm = np.linspace(6.,100.,num=numEltsGrid)\n \n # Get the filter transmission for this grid\n filtBandpass = filterTrans(band,waveGrid,worf='f',debug=False)\n \n # now let's build the function under the integral\n twoH2Nu4C2 = 9.74e-40 * waveGridNorm**4\n hNuKT = hoverk * (waveGrid / cibTemp)\n \n # as math.exp only takes a scalar as an argument I need a loop here\n nepSqInt = []\n for i in range(numEltsGrid):\n # avoid divergence in computation\n if (hNuKT[i]<1e-3):\n expTermInt = (1./hNuKT[i])\n elif (hNuKT[i]<100.):\n expTermInt = (1. / (math.exp(hNuKT[i])-1))\n else:\n expTermInt = (math.exp(-1*hNuKT[i]))\n # some terms have for the moment no frequency dependence so they can be\n # made into a single constant\n nepConst = cibEmissivity*cibCorrEm[useFGCase] * instTrans * etaColdStop\n nepSqInt.append(twoH2Nu4C2[i] * nepConst * filtBandpass[i] * expTermInt *\\\n (1 + nepConst * filtBandpass[i] * expTermInt))\n\n # convert it as an array\n nepSqInt = np.asarray(nepSqInt)\n \n if (debug):\n plt.figure(figsize=(12,6))\n plt.xscale('log')\n plt.yscale('log')\n plt.grid(True,which='both')\n plt.plot(1e6*lightSpeed/waveGrid,nepSqInt)\n plt.xlabel('Wavelength in $\\mu$m')\n plt.ylabel('Brightness in W.m$^{-2}$.sr$^{-1}$.Hz$^{-1/2}$')\n \n # now integrate that function\n nepSquare = np.trapz(nepSqInt,x=waveGrid)\n # multiply by the beam etendue\n nepSquare *= telEffArea\n pixSolidAngle = ((np.pi*pixSizeBand[band-1])/(3600.*180.))**2\n # Take into account that one bolometer branch fills only pixFrac of the full\n # pixel angle\n nepSquare *= (pixSolidAngle*pixFrac)\n \n # account for the existence of two polarisation state per photon energy state\n # 10/04/2020 I no longer return the NEP corresponding to the two polarisations\n # I return the NEP per branch of the bolometers.\n #nepSquare *=2\n \n return math.sqrt(nepSquare)", "def __conserve_circulation(self,xBlobInsideList,yBlobInsideList,gBlobInsideList):\n \n #----------------------------------------------------------------------\n # Determine parameters\n\n # convert the hardware flag into an int to use in _base_convection\n if self.lagrangian.blobs.velocityComputationParams['hardware'] == 'gpu': \n blobs_hardware = blobOptions.GPU_HARDWARE\n else: \n blobs_hardware = blobOptions.CPU_HARDWARE\n\n # convert the method flag into an int to use in _base_convection\n if self.lagrangian.blobs.velocityComputationParams['method'] == 'fmm': \n blobs_method = blobOptions.FMM_METHOD\n else: \n blobs_method = blobOptions.DIRECT_METHOD\n \n #----------------------------------------------------------------------\n\n #----------------------------------------------------------------------\n # Make references to all the blobs\n\n # Make references to vortex-blobs\n xBlobOutside, yBlobOutside, gBlobOutside = self.lagrangian.blobs.x, self.lagrangian.blobs.y, self.lagrangian.blobs.g \n \n # Concatenate all the blobs inside\n xBlobInside = _numpy.concatenate(xBlobInsideList)\n yBlobInside = _numpy.concatenate(yBlobInsideList)\n gBlobInside = _numpy.concatenate(gBlobInsideList)\n \n # Full set of blobs\n xBlobAll = _numpy.concatenate((xBlobOutside,xBlobInside)).copy()\n yBlobAll = _numpy.concatenate((yBlobOutside,yBlobInside)).copy()\n gBlobAll = _numpy.concatenate((gBlobOutside,gBlobInside)).copy()\n \n # Determine the total 
circulations\n gBlobAllTotal = gBlobAll.sum()\n \n # Determine the total circulation of globs inside each eulerian domain\n gBlobInsideTotalList = _numpy.array([listItem.sum() for listItem in gBlobInsideList])\n\n # Make references to panel collocation points (where no-slip b.c. is enforced.)\n xCP, yCP = self.lagrangian.panels.xyCPGlobalCat\n \n # Determine total eulerian circulation\n gTotalEulerianList = self.multiEulerian.gTotalInside() # of N eulerian bodies\n \n # Determine the total disregarded circulation from the eulerian domain\n gTotalDisregardedList = gTotalEulerianList - gBlobInsideTotalList\n \n # Testing: print info\n # print 'gTotalEulerianList : %s' % str(gTotalEulerianList)\n # print 'gBlobInsideTotalList : %s' % str(gBlobInsideTotalList)\n # print 'gBlobOutside : %g' % gBlobOutside.sum()\n # print 'gTotalDisregardedList : %s' % str(gTotalDisregardedList)\n #----------------------------------------------------------------------\n \n #----------------------------------------------------------------------\n # Solve for panel strenths\n \n # Determine the slip velocity on panel collocation points\n vxSlip, vySlip = _blobs_velocity(xBlobAll,yBlobAll,gBlobAll,self.lagrangian.blobs.sigma,\n xEval=xCP,yEval=yCP,hardware=blobs_hardware, \n method=blobs_method) \\\n + self.lagrangian.vInf.reshape(2,-1)\n \n # Solve for no-slip panel strengths, gPanelTotal should be negative of gTotalIgnored\n self.lagrangian.panels.solve(vxSlip, vySlip, gTotal=gTotalDisregardedList)\n \n #----------------------------------------------------------------------\n\n #----------------------------------------------------------------------\n # Conserve circulation\n\n # Determine total panel circulation (of all bodies)\n gPanelTotal = _numpy.sum(self.lagrangian.panels.gTotal)\n \n # Determine the total lagrangian circulation\n gLagrangianTotal = gBlobAllTotal + gPanelTotal\n \n if _numpy.abs(gLagrangianTotal) > self.lagrangian.blobs.gThresholdGlobal:\n # Standard-uniform correction\n # Circulation to be given to particles inside.\n gExtraPerBlob = gLagrangianTotal / xBlobInside.shape[0]\n \n # Add circulation to each blobs\n gBlobInsideCorrected = gBlobInside - gExtraPerBlob \n \n # Testing: print info\n # print 'gExtraPerBlob: %g' % gExtraPerBlob\n else:\n # If the error is less that gThresholdGlobal, no need for correction.\n gBlobInsideCorrected = gBlobInside\n \n # Testing: print info\n # print 'gPanelTotal: %g' % gPanelTotal\n # print 'gLagrangianTotal: %g' % gLagrangianTotal\n # print 'final total lagrangian circulation : %g' % (gBlobInsideCorrected.sum()+gBlobOutside.sum()+gPanelTotal)\n #---------------------------------------------------------------------- \n\n # return the new blob circulation \n return xBlobInside, yBlobInside, gBlobInsideCorrected", "def _run_nccf(self, original_audio, fs, downsampled_audio = None, downsample_rate = None):\n if self.params.is_two_pass_nccf:\n first_pass = self._first_pass_nccf(downsampled_audio, downsample_rate)\n nccf_results = self._second_pass_nccf(original_audio, fs, first_pass)\n return nccf_results, first_pass\n else:\n nccf_results = self._one_pass_nccf(original_audio, fs)\n return nccf_results, None", "def coil_combine_cmrr_sequential(chain):\n block = chain._block\n set = chain._block.set\n dataset = chain._dataset\n raw = chain.raw\n\n ncoils = raw.shape[1]\n nfids = raw.shape[2]\n dim0 = raw.shape[3]\n acqdim0 = dim0\n xaxis = range(dim0)\n\n flag_norm_to_sum = False # default for now\n\n dat_comb = np.ndarray([nfids,dim0], dtype=np.complex128)\n\n 
all_weight = np.ndarray([nfids,ncoils], dtype=np.float)\n all_phases = np.ndarray([nfids,ncoils], dtype=np.complex)\n\n for i in range(nfids):\n\n # determine weighting and phz for each coil\n # zero-order phase correction\n # correct for phase based on 1st point in 1st wref fid\n\n # for each average, calc phase and weights to correct for coil geometry\n chans = []\n weight = []\n phases = []\n \n for j in range(ncoils):\n chan = chain.raw[0,j,i,:].copy()\n \n magn = np.abs(chan[0])\n phas = np.conjugate(chan[0])/magn # normalized complex conj to cancel phase \n chan = phas * chan # Note. applying phase here NOT below as in Siemens\n \n # amplitude of zero order phased fid in time domain\n # using 9th order polynomial fit (based on Uzay's script)\n coeffs = np.polyfit(xaxis, np.absolute(chan), 9)\n \n weight.append(coeffs[-1]) # last entry is amplitude - zero order coeff\n phases.append(phas)\n chans.append(chan)\n \n # normalize weighting function based on spectro data \n tmp = np.sum([val*val for val in weight]) # sum squared values \n if tmp == 0.0: tmp = 1.0\n if flag_norm_to_sum:\n # sum of sensitivities\n lamda = np.sum(weight) / tmp \n else:\n # sqrt of sum of squared sensitivities\n lamda = 1.0 / np.sqrt(tmp)\n\n weight = [val*lamda for val in weight]\n\n all_weight[i,:] = weight\n all_phases[i,:] = phases\n \n # apply weighting ... phase corrections done above\n for j,chan in enumerate(chans):\n chans[j] = chan * weight[j]\n \n # sum corrected FIDs from each coil into one combined FID\n dat_comb[i,:] = np.sum(chans, axis=0) \n\n print_combine_stats(all_weight, all_phases, method='CMRR_Sequential')\n \n return normalize_shape(dat_comb), all_weight, all_phases", "def forster_coupling_extended_py(donor, acceptor, conditions, supercell, cell_incr, longitude=3, n_divisions=300):\n function_name = inspect.currentframe().f_code.co_name\n\n # donor <-> acceptor interaction symmetry\n hash_string = generate_hash(function_name, donor, acceptor, conditions, supercell, cell_incr)\n # hash_string = str(hash((donor, acceptor, function_name))) # No symmetry\n\n if hash_string in coupling_data:\n return coupling_data[hash_string]\n\n mu_d = donor.get_transition_moment(to_state=_ground_state_) # transition dipole moment (donor) e*angs\n mu_a = acceptor.get_transition_moment(to_state=donor.state.label) # transition dipole moment (acceptor) e*angs\n\n ref_index = conditions['refractive_index'] # refractive index of the material\n\n r_vector = intermolecular_vector(donor, acceptor, supercell, cell_incr) # position vector between donor and acceptor\n\n mu_ai = mu_a / n_divisions\n mu_di = mu_d / n_divisions\n\n k_e = 1.0 / (4.0 * np.pi * VAC_PERMITTIVITY)\n\n forster_coupling = 0\n for x in np.linspace(-0.5 + 0.5/n_divisions, 0.5 - 0.5/n_divisions, n_divisions):\n for y in np.linspace(-0.5 + 0.5/n_divisions, 0.5 - 0.5/ n_divisions, n_divisions):\n\n #print(x, y)\n dr_a = mu_a / np.linalg.norm(mu_a) * longitude * x\n dr_d = mu_d / np.linalg.norm(mu_d) * longitude * y\n r_vector_i = r_vector + dr_a + dr_d\n\n distance = np.linalg.norm(r_vector_i)\n\n k = orientation_factor(mu_ai, mu_di, r_vector_i) # orientation factor between molecules\n\n forster_coupling += k_e * k**2 * np.dot(mu_ai, mu_di) / (ref_index**2 * distance**3)\n\n coupling_data[hash_string] = forster_coupling # memory update for new couplings\n\n return forster_coupling", "def convolve(self, lc):\n #t = np.arange(lc.time.min() + len(self)*self.dt, lc.time.max()+self.dt, self.dt)\n t = lc.time[len(self)-1:] + self.time[0]\n r = 
scipy.signal.convolve(lc.rate, self.rate, mode='valid')\n return SimLightCurve(t=t, r=r)", "def compCMBForeground(band,debug=False,verb=False):\n # MS - Because we treat the CIB as a grey-body, this code is\n # formally identical to that which computes the emission of the \n # telescope mirrors.\n # This is also due to the assumption that we consider that the \n # transmission factor due to the telescope mirrors is negligible\n # with respect to that due to the instrument, so we have applied\n # the same value to a light source on the telescope or beyond.\n # 02/06/2020 Danger of copy/paste. in the plot the emissivity was that\n # of the telescope..\n\n # First perform a input check\n if (checkBandForError(band) == False):\n # the band is not within the accepted band labels\n return\n \n # We will need the pixel size\n pixSizeBand = getPixSizeBand(over=useDef)\n # if over=True it will return the default pixel size, useDef is set in the\n # preamble.\n\n # Now define the function that we will need to integrate\n # first define the grid. We will use a single frequency grid (overkill)\n # we cover the range 30mic->500mic, corresponding to 600 GHz to 10 THz\n numEltsGrid = 10000\n waveGrid = np.linspace(6e11,1e13,num=numEltsGrid)\n # same but normalized by 10^11\n waveGridNorm = np.linspace(6.,100.,num=numEltsGrid)\n\n # Get the filter transmission for this grid\n filtBandpass = filterTrans(band,waveGrid,worf='f',debug=debug)\n \n # now let's build the function under the integral\n twoHNu3C2 = 1.47e-17 * waveGridNorm**3\n hNuKT = hoverk * (waveGrid / cmbTemp)\n \n # as math.exp only takes a scalar as an argument I need a loop here\n powSpecDens = []\n for i in range(numEltsGrid):\n # avoid divergence in computation\n if (hNuKT[i]<1e-3):\n powSpecDens.append(filtBandpass[i] * cmbEmissivity * twoHNu3C2[i] * (1./hNuKT[i]))\n elif (hNuKT[i]<100.):\n powSpecDens.append(filtBandpass[i] * cmbEmissivity * twoHNu3C2[i] * (1. / (math.exp(hNuKT[i])-1)))\n else:\n powSpecDens.append(filtBandpass[i] * cmbEmissivity * twoHNu3C2[i] * math.exp(-1*hNuKT[i]))\n \n # convert it as an array\n powSpecDens = np.asarray(powSpecDens)\n \n if (verb):\n idx = np.abs(waveGrid - 3e8/bandWave[band-1]).argmin()\n fg = powSpecDens[idx] / 1e-20\n print('Foreground at {0:6.2f} micron is {1:5.2g} MJy/sr'.format(bandWave[band-1]*1e6,fg))\n\n if (debug):\n plt.figure(figsize=(12,6))\n plt.xscale('log')\n plt.yscale('log')\n plt.grid(True,which='both')\n plt.plot(1e6*lightSpeed/waveGrid,powSpecDens)\n plt.xlabel('Wavelength in $\\mu$m')\n plt.ylabel('Brightness in W.m$^{-2}$.sr$^{-1}$.Hz$^{-1}$')\n \n # now integrate that function\n power = np.trapz(powSpecDens,x=waveGrid)\n # multiply by other instrumental terms in the formula that had no dependency on frequency\n power *= instTrans\n power *= etaColdStop\n power *= telEffArea\n pixSolidAngle = ((np.pi*pixSizeBand[band-1])/(3600.*180.))**2\n power *= pixSolidAngle\n \n return power", "def compCMBNEP(band,debug=True):\n \n # First perform a input check\n if (checkBandForError(band) == False):\n # the band is not within the accepted band labels\n return\n \n # We will need the pixel size\n pixSizeBand = getPixSizeBand(over=useDef)\n # if over=True it will return the default pixel size, useDef is set in the\n # preamble.\n\n # Now define the function that we will need to integrate\n # first define the grid. 
We will use a single frequency grid (overkill)\n # we cover the range 30mic->500mic, corresponding to 600 GHz to 10 THz\n numEltsGrid = 10000\n waveGrid = np.linspace(6e11,1e13,num=numEltsGrid)\n # same but normalized by 10^11\n waveGridNorm = np.linspace(6.,100.,num=numEltsGrid)\n \n # Get the filter transmission for this grid\n filtBandpass = filterTrans(band,waveGrid,worf='f',debug=False)\n \n # now let's build the function under the integral\n twoH2Nu4C2 = 9.74e-40 * waveGridNorm**4\n hNuKT = hoverk * (waveGrid / cmbTemp)\n \n # as math.exp only takes a scalar as an argument I need a loop here\n nepSqInt = []\n for i in range(numEltsGrid):\n # avoid divergence in computation\n if (hNuKT[i]<1e-3):\n expTermInt = (1./hNuKT[i])\n elif (hNuKT[i]<100.):\n expTermInt = (1. / (math.exp(hNuKT[i])-1))\n else:\n expTermInt = (math.exp(-1*hNuKT[i]))\n # some terms have for the moment no frequency dependence so they can be\n # made into a single constant\n nepConst = cmbEmissivity * instTrans * etaColdStop\n nepSqInt.append(twoH2Nu4C2[i] * nepConst * filtBandpass[i] * expTermInt *\\\n (1 + nepConst * filtBandpass[i] * expTermInt))\n\n # convert it as an array\n nepSqInt = np.asarray(nepSqInt)\n \n if (debug):\n plt.figure(figsize=(12,6))\n plt.xscale('log')\n plt.yscale('log')\n plt.grid(True,which='both')\n plt.plot(1e6*lightSpeed/waveGrid,nepSqInt)\n plt.xlabel('Wavelength in $\\mu$m')\n plt.ylabel('Brightness in W.m$^{-2}$.sr$^{-1}$.Hz$^{-1/2}$')\n \n # now integrate that function\n nepSquare = np.trapz(nepSqInt,x=waveGrid)\n # multiply by the beam etendue\n nepSquare *= telEffArea\n pixSolidAngle = ((np.pi*pixSizeBand[band-1])/(3600.*180.))**2\n # Take into account that one bolometer branch fills only pixFrac of the full\n # pixel angle\n nepSquare *= (pixSolidAngle*pixFrac)\n \n # account for the existence of two polarisation state per photon energy state\n # 10/04/2020 I no longer return the NEP corresponding to the two polarisations\n # I return the NEP per branch of the bolometers.\n #nepSquare *=2\n \n return math.sqrt(nepSquare)", "def plot_chpi_snr_raw(raw, win_length, n_harmonics=None, show=True, *,\n verbose=None):\n import matplotlib.pyplot as plt\n try:\n from mne.chpi import get_chpi_info\n except ImportError:\n from mne.chpi import _get_hpi_info as get_chpi_info\n\n # plotting parameters\n legend_fontsize = 6\n title_fontsize = 10\n tick_fontsize = 10\n label_fontsize = 10\n\n # get some info from fiff\n sfreq = raw.info['sfreq']\n linefreq = raw.info['line_freq']\n if n_harmonics is not None:\n linefreqs = (np.arange(n_harmonics + 1) + 1) * linefreq\n else:\n linefreqs = np.arange(linefreq, raw.info['lowpass'], linefreq)\n buflen = int(win_length * sfreq)\n if buflen <= 0:\n raise ValueError('Window length should be >0')\n cfreqs = get_chpi_info(raw.info, verbose=False)[0]\n logger.info(f'Nominal cHPI frequencies: {cfreqs} Hz')\n logger.info(f'Sampling frequency: {sfreq:0.1f} Hz')\n logger.info(f'Using line freqs: {linefreqs} Hz')\n logger.info(f'Using buffers of {buflen} samples = '\n f'{buflen / sfreq:0.3f} seconds')\n\n pick_meg = pick_types(raw.info, meg=True, exclude=[])\n pick_mag = pick_types(raw.info, meg='mag', exclude=[])\n pick_grad = pick_types(raw.info, meg='grad', exclude=[])\n nchan = len(pick_meg)\n # grad and mag indices into an array that already has meg channels only\n pick_mag_ = np.in1d(pick_meg, pick_mag).nonzero()[0]\n pick_grad_ = np.in1d(pick_meg, pick_grad).nonzero()[0]\n\n # create general linear model for the data\n t = np.arange(buflen) / 
float(sfreq)\n model = np.empty((len(t), 2 + 2 * (len(linefreqs) + len(cfreqs))))\n model[:, 0] = t\n model[:, 1] = np.ones(t.shape)\n # add sine and cosine term for each freq\n allfreqs = np.concatenate([linefreqs, cfreqs])\n model[:, 2::2] = np.cos(2 * np.pi * t[:, np.newaxis] * allfreqs)\n model[:, 3::2] = np.sin(2 * np.pi * t[:, np.newaxis] * allfreqs)\n inv_model = linalg.pinv(model)\n\n # drop last buffer to avoid overrun\n bufs = np.arange(0, raw.n_times, buflen)[:-1]\n tvec = bufs / sfreq\n snr_avg_grad = np.zeros([len(cfreqs), len(bufs)])\n hpi_pow_grad = np.zeros([len(cfreqs), len(bufs)])\n snr_avg_mag = np.zeros([len(cfreqs), len(bufs)])\n resid_vars = np.zeros([nchan, len(bufs)])\n pb = ProgressBar(bufs, mesg='Buffer')\n for ind, buf0 in enumerate(pb):\n megbuf = raw[pick_meg, buf0:buf0 + buflen][0].T\n coeffs = np.dot(inv_model, megbuf)\n coeffs_hpi = coeffs[2 + 2 * len(linefreqs):]\n resid_vars[:, ind] = np.var(megbuf - np.dot(model, coeffs), 0)\n # get total power by combining sine and cosine terms\n # sinusoidal of amplitude A has power of A**2/2\n hpi_pow = (coeffs_hpi[0::2, :] ** 2 + coeffs_hpi[1::2, :] ** 2) / 2\n hpi_pow_grad[:, ind] = hpi_pow[:, pick_grad_].mean(1)\n # divide average HPI power by average variance\n snr_avg_grad[:, ind] = hpi_pow_grad[:, ind] / \\\n resid_vars[pick_grad_, ind].mean()\n snr_avg_mag[:, ind] = hpi_pow[:, pick_mag_].mean(1) / \\\n resid_vars[pick_mag_, ind].mean()\n logger.info('[done]')\n\n cfreqs_legend = ['%s Hz' % fre for fre in cfreqs]\n fig, axs = plt.subplots(4, 1, sharex=True)\n\n # SNR plots for gradiometers and magnetometers\n ax = axs[0]\n lines1 = ax.plot(tvec, 10 * np.log10(snr_avg_grad.T))\n lines1_med = ax.plot(tvec, 10 * np.log10(np.median(snr_avg_grad, axis=0)),\n lw=2, ls=':', color='k')\n ax.set_xlim([tvec.min(), tvec.max()])\n ax.set(ylabel='SNR (dB)')\n ax.yaxis.label.set_fontsize(label_fontsize)\n ax.set_title('Mean cHPI power / mean residual variance, gradiometers',\n fontsize=title_fontsize)\n ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)\n ax = axs[1]\n lines2 = ax.plot(tvec, 10 * np.log10(snr_avg_mag.T))\n lines2_med = ax.plot(tvec, 10 * np.log10(np.median(snr_avg_mag, axis=0)),\n lw=2, ls=':', color='k')\n ax.set_xlim([tvec.min(), tvec.max()])\n ax.set(ylabel='SNR (dB)')\n ax.yaxis.label.set_fontsize(label_fontsize)\n ax.set_title('Mean cHPI power / mean residual variance, magnetometers',\n fontsize=title_fontsize)\n ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)\n ax = axs[2]\n lines3 = ax.plot(tvec, hpi_pow_grad.T)\n lines3_med = ax.plot(tvec, np.median(hpi_pow_grad, axis=0),\n lw=2, ls=':', color='k')\n ax.set_xlim([tvec.min(), tvec.max()])\n ax.set(ylabel='Power (T/m)$^2$')\n ax.yaxis.label.set_fontsize(label_fontsize)\n ax.set_title('Mean cHPI power, gradiometers',\n fontsize=title_fontsize)\n ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)\n # residual (unexplained) variance as function of time\n ax = axs[3]\n cls = plt.get_cmap('plasma')(np.linspace(0., 0.7, len(pick_meg)))\n ax.set_prop_cycle(color=cls)\n ax.semilogy(tvec, resid_vars[pick_grad_, :].T, alpha=.4)\n ax.set_xlim([tvec.min(), tvec.max()])\n ax.set(ylabel='Var. 
(T/m)$^2$', xlabel='Time (s)')\n ax.xaxis.label.set_fontsize(label_fontsize)\n ax.yaxis.label.set_fontsize(label_fontsize)\n ax.set_title('Residual (unexplained) variance, all gradiometer channels',\n fontsize=title_fontsize)\n ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)\n tight_layout(pad=.5, w_pad=.1, h_pad=.2) # from mne.viz\n # tight_layout will screw these up\n ax = axs[0]\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n # order curve legends according to mean of data\n sind = np.argsort(snr_avg_grad.mean(axis=1))[::-1]\n handles = [lines1[i] for i in sind]\n handles.append(lines1_med[0])\n labels = [cfreqs_legend[i] for i in sind]\n labels.append('Median')\n leg_kwargs = dict(\n prop={'size': legend_fontsize}, bbox_to_anchor=(1.02, 0.5, ),\n loc='center left', borderpad=1, handlelength=1,\n )\n ax.legend(handles, labels, **leg_kwargs)\n ax = axs[1]\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n sind = np.argsort(snr_avg_mag.mean(axis=1))[::-1]\n handles = [lines2[i] for i in sind]\n handles.append(lines2_med[0])\n labels = [cfreqs_legend[i] for i in sind]\n labels.append('Median')\n ax.legend(handles, labels, **leg_kwargs)\n ax = axs[2]\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n sind = np.argsort(hpi_pow_grad.mean(axis=1))[::-1]\n handles = [lines3[i] for i in sind]\n handles.append(lines3_med[0])\n labels = [cfreqs_legend[i] for i in sind]\n labels.append('Median')\n ax.legend(handles, labels, **leg_kwargs)\n ax = axs[3]\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n if show:\n plt.show()\n\n return fig", "def read_block(self, lazy=False, cascade=True, n_starts=[None],\n n_stops=[None], channel_list=None, nsx=None, units=None,\n events=False, waveforms=False, corrections=False):\n\n # reading correction parameters from 'corrections.txt' file and saving them\n # gap_corrections = [gap_start_bin,gap_size_bins]\n gap_corrections = [None, None]\n if corrections:\n try:\n correction_file = open(\n os.path.dirname(__file__) + '/corrections.txt', 'r')\n for line in correction_file:\n if os.path.basename(self.filename) in line:\n numbers = [int(s) for s in line.split() if s.isdigit()]\n if len(numbers) == 2:\n gap_corrections = numbers * \\\n np.array(\n 1.0) * pq.CompoundUnit('1.0/%i*s' % (self.timestamp_res))\n else:\n warnings.warn(\n 'Wrong number of integers in corrections.txt for session %s' % os.path.basename(self.filename))\n break\n correction_file.close()\n except IOError:\n warnings.warn('No file \"corrections.txt\" found.')\n\n # correcting n_starts and n_stops for gap\n # listify if necessary\n n_starts_c = n_starts if type(n_starts) == list else [n_starts]\n n_stops_c = n_stops if type(n_stops) == list else [n_stops]\n\n # shift start and stop times to allow gap correction if gap is\n # known\n if gap_corrections[0] != None:\n for time_list in [n_starts_c, n_stops_c]:\n # iterate over all n_start and n_stops\n for i in range(len(time_list)):\n if time_list[i] >= gap_corrections[0]:\n time_list[i] += gap_corrections[1]\n\n # Load neo block\n block = BlackrockIO.read_block(self, lazy=lazy, cascade=cascade,\n n_starts=n_starts, n_stops=n_stops,\n channel_list=channel_list, nsx=nsx,\n units=units, events=events,\n waveforms=waveforms)\n\n # post correct gaps if gap is known\n if corrections and gap_corrections[0] != None:\n # correct alignment\n for i in 
range(len(block.segments)):\n\n # # use t_start if specified\n # t_start = None\n # if n_starts:\n # t_start = n_starts[i]\n #\n # # use t_stop if specified\n # t_stop = None\n # if n_stops:\n # t_stop = n_stops[i]\n\n # adjust spiketrains\n for j in range(len(block.segments[i].spiketrains)):\n st = block.segments[i].spiketrains[j]\n\n # adjusting t_start\n if st.t_start >= gap_corrections[0] + gap_corrections[1]:\n st.t_start -= gap_corrections[1]\n\n # correct for gap\n st = st - ((st > gap_corrections[0]) * gap_corrections[1])\n\n# # discard spikes before t_start\n# if t_start:\n# idx_valid = np.nonzero(st >= t_start)[0]\n# if len(idx_valid):\n# st = st[idx_valid[0]:]\n#\n# # discard spikes after t_stop\n# if t_stop:\n# idx_invalid = np.nonzero(st >= t_stop)[0]\n# if len(idx_invalid):\n# st = st[:idx_invalid[0]]\n\n # shallow copy from original spiketrain (annotations,\n # waveforms, etc.)\n st.__dict__ = block.segments[\n i].spiketrains[j].__dict__.copy()\n\n # adjusting t_stop\n if st.t_stop >= gap_corrections[0] + gap_corrections[1]:\n st.t_stop -= gap_corrections[1]\n\n # link block to new spiketrain\n block.segments[i].spiketrains[j] = st\n\n # TODO: odML <-> nev consistency checks? Low priority\n # condition, trials, SUA IDs, MUA IDs\n\n # Block annotations\n if self.condition is not None:\n block.annotate(condition=self.condition)\n\n # Add annotations of odML meta data info\n if self.odML_avail:\n # Annotate electrode id lists\n ff = lambda x: x.name.startswith('Electrode_')\n sobjs = [s for s in self.odML_doc.itersections(filter_func=ff)]\n\n # Annotate electrode id lists\n elid_list = [] # general available ids\n elid_list_ca = np.zeros((100)) - 1 # for connector alignement\n elid_list_ba = np.zeros((100)) - 1 # for brain alignement\n for s in sobjs:\n elid_list.append(s.properties['ID'].value.data)\n elid_list_ca[s.properties['IDca'].value.data - 1] = \\\n s.properties['ID'].value.data\n elid_list_ba[s.properties['IDba'].value.data - 1] = \\\n s.properties['ID'].value.data\n block.annotate(elid_list=sorted(elid_list))\n block.annotate(elid_list_ca=elid_list_ca)\n\n # Brain-aligned will be dropped\n# block.annotate(elid_list_ba=elid_list_ba)\n\n # Annotate performed task type\n ff = lambda x: x.name in 'TaskType' and \\\n 'Subsession' == x.parent.name\n pobjs = [p for p in self.odML_doc.iterproperties(filter_func=ff)]\n vdata = [p.value.data for p in pobjs if p.value.data]\n block.annotate(task_type=vdata[0])\n\n # Annotate trial type order (random, block, size of block)\n cue_task_types = ['Observation', 'OneCue', 'TwoCues']\n if block.annotations['task_type'] in cue_task_types:\n ff = lambda x: x.name in ['OrderGrip', 'OrderForce'] and \\\n 'TrialTypeSettings' in x.parent.name\n pobjs = [\n p for p in self.odML_doc.iterproperties(filter_func=ff)]\n block.annotate(\n random_grip=['random' == p.value.data for p in\n pobjs if p.name == 'OrderGrip'][0])\n block.annotate(\n random_force=['random' == p.value.data for p in\n pobjs if p.name == 'OrderForce'][0])\n\n if False in [block.annotations['random_grip'],\n block.annotations['random_force']]:\n ff = lambda x: x.name == 'BlockSize' and \\\n 'TrialTypeSettings' in x.parent.name\n pobjs = [p for p in\n self.odML_doc.iterproperties(filter_func=ff)]\n block.annotate(block_size=[p.value.data for p in pobjs][0])\n\n elif self.metadata_dir is not None:\n # If no odML is available, but metadata directory was specified,\n # read a bare minimum of information\n\n # Annotate available electrode id list for connector alignement\n einfo = 
rgodml.metadata_io.load_blackrock_electrodes_info(\n os.path.join(\n self.metadata_dir, 'source', 'blackrock',\n 'electrodes_info.txt'))\n elid_list = sorted([einfo[k]['ID']['data'] for k in einfo.keys()\n if einfo[k]['ID']['data'] > 0])\n elid_list_ca = list(np.ones(100, dtype=int) * -1)\n for elid in elid_list:\n elid_str = 'Electrode_%03d' % elid\n elid_list_ca[einfo[elid_str]['IDca']['data'] - 1] = elid\n block.annotate(elid_list=elid_list)\n block.annotate(elid_list_ca=elid_list_ca)\n # Annotate available electrode id list for brain alignement\n # Removed -- brainaligned will be dropped\n# einfo = rgodml.metadata_io.load_brain_aligned_elids(\n# os.path.join(\n# self.metadata_dir, 'source', 'monkey',\n# 'brain_aligned_elids.txt'))\n# elid_list_ba = list(np.ones(100, dtype=int) * -1)\n# for elid in elid_list:\n# elid_str = 'Electrode_%03d' % elid\n# elid_list_ba[einfo[elid_str]['IDba']['data'] - 1] = elid\n# block.annotate(elid_list_ba=elid_list_ba)\n\n # Add interpreted events to block\n if events:\n for seg_i, seg in enumerate(block.segments):\n # Find index of reach-grasp event in this segment\n for event_i in self.trial_events_str.keys():\n if n_starts[seg_i] == None:\n tstart = 0\n else:\n tstart = n_starts[seg_i] / self.nev_unit\n if n_stops[seg_i] == None:\n tstop = sys.maxsize\n else:\n tstop = n_stops[seg_i] / self.nev_unit\n idx = np.nonzero(np.logical_and(\n self.trial_data[:, event_i] >= tstart,\n self.trial_data[:, event_i] < tstop))[0]\n ev = neo.EventArray(\n times=pq.Quantity(self.trial_data[idx, event_i],\n units=self.nev_unit,\n dtype=\"int\"),\n labels=np.tile(self.trial_events_str[event_i],\n (len(idx))),\n name=self.trial_events_str[event_i],\n file_origin=self.associated_fileset,\n marker_id=event_i,\n digital_marker=True,\n analog_marker=False,\n analog_channel=0,\n event_name=self.trial_events_str[event_i])\n seg.eventarrays.append(ev)\n\n # Add annotations of spike sorting and odML meta data info\n for seg in block.segments:\n # Add annotations to analogsignals\n for asig in seg.analogsignals:\n if asig.annotations['channel_id'] <= 100:\n el_id = asig.annotations['channel_id']\n asig.annotations['electrode_id'] = el_id\n if self.odML_avail:\n # Annotate connector and brain aligned id\n idx = block.annotations['elid_list'].index(el_id)\n asig.annotations['ca_id'] = \\\n block.annotations['elid_list_ca'][idx]\n asig.annotations['ba_id'] = \\\n block.annotations['elid_list_ba'][idx]\n\n # Annotate if electrode should be rejected\n ff = lambda x: x.name == 'RejElectrodes' and \\\n 'RejectionsLFP' in x.get_path()\n pobjs = [p for p in\n self.odML_doc.iterproperties(filter_func=ff)]\n vdata = [v.data for v in pobjs[0].values]\n fc = pobjs[0].parent.name\n if el_id in vdata:\n asig.annotations['rej' + fc] = True\n else:\n asig.annotations['rej' + fc] = False\n elif self.metadata_dir is not None:\n # Annotate connector and brain aligned id\n asig.annotations['ca_id'] = block.annotations[\n 'elid_list_ca'].index(el_id) + 1\n\n # Add annotations to spiketrains\n for st in seg.spiketrains:\n if st.annotations['channel_id'] <= 100:\n el_id = st.annotations['channel_id']\n st.annotations['electrode_id'] = el_id\n if self.odML_avail:\n # Annotate connector and brain aligned id\n idx = block.annotations['elid_list'].index(el_id)\n st.annotations['ca_id'] = \\\n block.annotations['elid_list_ca'][idx]\n st.annotations['ba_id'] = \\\n block.annotations['elid_list_ba'][idx]\n\n # Annotate if electrode should be rejected\n ff = lambda x: x.name == 'RejElectrodes' and \\\n 
'RejectionsLFP' in x.get_path()\n pobjs = [p for p in\n self.odML_doc.iterproperties(filter_func=ff)]\n vdata = [v.data for v in pobjs[0].values]\n fc = pobjs[0].parent.name\n if el_id in vdata:\n st.annotations['rej' + fc] = True\n else:\n st.annotations['rej' + fc] = False\n elif self.metadata_dir is not None:\n # Annotate connector and brain aligned id\n st.annotations['ca_id'] = \\\n block.annotations['elid_list_ca'].index(el_id) + 1\n\n # Annotate if unit_id corresponds to sua or mua\n if st.annotations['unit_id'] in self.get_sua_ids(el_id):\n st.annotations['sua'] = True\n else:\n st.annotations['sua'] = False\n if st.annotations['unit_id'] in self.get_mua_ids(el_id):\n st.annotations['mua'] = True\n else:\n st.annotations['mua'] = False\n\n # Add annotations to units\n for unit in block.list_units:\n if unit.annotations['channel_id'] <= 100:\n el_id = unit.annotations['channel_id']\n unit.annotations['electrode_id'] = el_id\n if self.odML_avail:\n # Annotate connector and brain aligned id\n idx = block.annotations['elid_list'].index(el_id)\n unit.annotations['ca_id'] = \\\n block.annotations['elid_list_ca'][idx]\n st.annotations['ba_id'] = \\\n block.annotations['elid_list_ba'][idx]\n\n # Annotate if electrode should be rejected\n ff = lambda x: x.name == 'RejElectrodes' and \\\n 'RejectionsLFP' in x.get_path()\n pobjs = [p for p in\n self.odML_doc.iterproperties(filter_func=ff)]\n vdata = [v.data for v in pobjs[0].values]\n fc = pobjs[0].parent.name\n if el_id in vdata:\n unit.annotations['rej' + fc] = True\n else:\n unit.annotations['rej' + fc] = False\n\n # Annotate if unit_id corresponds to sua or mua\n if unit.annotations['unit_id'] in self.get_sua_ids(el_id):\n unit.annotations['sua'] = True\n else:\n unit.annotations['sua'] = False\n if unit.annotations['unit_id'] in self.get_mua_ids(el_id):\n unit.annotations['mua'] = True\n else:\n unit.annotations['mua'] = False\n\n return block" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_correlate_access_code_bb_sptr
__init__(self, p) -> digital_correlate_access_code_bb_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_correlate_access_code_bb_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self,kim_code,*args,**kwargs):\n super(VirtualMachine,self).__init__(kim_code,*args,**kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n _ida_pro.ea_pointer_swiginit(self, _ida_pro.new_ea_pointer(*args))", "def __init__(self,kim_code,*args,**kwargs):\n super(ReferenceDatum,self).__init__(kim_code,*args,**kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self.parameters = {}", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_access_code(self, string access_code) -> bool
def set_access_code(self, *args, **kwargs):
    return _digital_swig.digital_correlate_access_code_bb_sptr_set_access_code(self, *args, **kwargs)
[ "def set_access_code(self, *args, **kwargs):\n return _digital_swig.digital_correlate_access_code_tag_bb_sptr_set_access_code(self, *args, **kwargs)", "def set_code(self, code):\n self._code = code", "def setCode(self, c):\n\t\t\n\t\tself.code = c", "def setCode(self, code):\n if not utils.is_valid_code(code)[0]:\n raise ValueError, utils.mapping(_(\"Invalid code: $1\"),(str(code),))\n self.__code = code", "def UseCode(code):\n access_token = client.exchange_code_for_token(client_id=client_id, client_secret=secret, code=code)\n client.access_token = access_token\n open(\"access_token\", \"w\").write(access_token)\n callback(client)", "def set_code(self, key, value):\n self._code[key] = value", "def markAsCode(self,addr):\n return HopperLowLevel.markAsCode(self.__internal_segment_addr__,addr)", "def setAccessMode(self, mode): \n self.__accessMode = mode", "def sso_code(self, sso_code):\n\n self._sso_code = sso_code", "def update_code(self, new_code):\n self.code = new_code # code from __inti ___\n\n # Fill in the rest", "def acriss_code(self, acriss_code):\n if acriss_code is None:\n raise ValueError(\"Invalid value for `acriss_code`, must not be `None`\")\n\n self._acriss_code = acriss_code", "def validate_code(self, client_id, code, client, request, *args, **kwargs):\r\n client = client or self._clientgetter(client_id)\r\n log.debug(\r\n 'Validate code for client %r and code %r', client.client_id, code\r\n )\r\n grant = self._grantgetter(client_id=client.client_id, code=code)\r\n if not grant:\r\n log.debug('Grant not found.')\r\n return False\r\n if hasattr(grant, 'expires') and \\\r\n datetime.datetime.utcnow() > grant.expires:\r\n log.debug('Grant is expired.')\r\n return False\r\n\r\n request.state = kwargs.get('state')\r\n request.user = grant.user\r\n request.scopes = grant.scopes\r\n return True", "def allow_access(self, share, access, share_server):", "def save_authorization_code(self, client_id, code, request, *args, **kwargs):\n msg = \"Scopes in the request: {}\".format(request.scopes)\n current_app.logger.debug(msg)\n associations = {\n \"scopes\": request.scopes,\n \"redirect_uri\": request.redirect_uri,\n \"client_id\": client_id,\n \"state\": request.state,\n \"user\": request.user,\n }\n\n cache.set(code[\"code\"], associations, timeout=10 * 60)\n\n return", "def update_code(self, new_code):\n\n # Fill in the rest\n self.code = new_code\n # print(self.code) #for checking\n return self.code", "def modify_access_mode(self, snapshot_id, access_mode_list):\n\n try:\n changed = False\n for temp in access_mode_list:\n if temp['accessMode']:\n self.powerflex_conn.volume.set_access_mode_for_sdc(\n volume_id=snapshot_id, sdc_id=temp['sdc_id'],\n access_mode=temp['accessMode'])\n changed = True\n return changed\n except Exception as e:\n errormsg = \"Modify access mode of SDC %s operation failed \" \\\n \"with error %s\" % (temp['sdc_id'], str(e))\n LOG.error(errormsg)\n self.module.fail_json(msg=errormsg)", "def validate_code(self, client_id, code, client, request, *args, **kwargs):\n cached = cache.get(code)\n\n if cached is None:\n msg = \"validate_code - Code {} not found, possibly invalidated\".format(code)\n current_app.logger.info(msg)\n return False\n\n if cached.get(\"client_id\", None) != client_id:\n msg = \"validate_code - Client id in cache does not make supplied client id\"\n current_app.logger.info(msg)\n return False\n\n request.scopes = cached[\"scopes\"]\n msg = \"Scopes in the request: {}\".format(request.scopes)\n current_app.logger.debug(msg)\n request.user = 
cached[\"user\"]\n return True", "def verify_code(self, code: str) -> bool:\n self.verified = self.verification_code == code\n return self.verified", "def response_code(self, response_code: str):\n\n self._response_code = response_code", "def esri_access(self, value):\r\n if self._portal.is_arcgisonline:\r\n if value == True:\r\n ret = self._portal.update_user(self._user_id,\r\n user_type=\"both\")\r\n else:\r\n ret = self._portal.update_user(self._user_id,\r\n user_type=\"arcgisonly\")\r\n self._hydrate()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
correlate_access_code_bb(string access_code, int threshold) -> digital_correlate_access_code_bb_sptr

Examine input for specified access code, one bit at a time.
def correlate_access_code_bb(*args, **kwargs):
    return _digital_swig.correlate_access_code_bb(*args, **kwargs)
[ "def correlate_access_code_tag_bb(*args, **kwargs):\n return _digital_swig.correlate_access_code_tag_bb(*args, **kwargs)", "def set_access_code(self, *args, **kwargs):\n return _digital_swig.digital_correlate_access_code_tag_bb_sptr_set_access_code(self, *args, **kwargs)", "def test_03_bandpass_calibrator_analysis_flagging():\n\tcasalog.origin(\"test_03_bandpass_calibrator_analysis_flagging\")\n\tcasalog.post(\"starting\")\n\n\tflaglist = ['antenna=\"ea01,ea10,ea19,ea13\"',\n\t 'antenna=\"ea24\" spw=\"40,47~48\"',\n\t 'antenna=\"ea18\" spw=\"16~31\"']\n\tflagcmd(vis='G192_6s.ms', inpmode='list', inpfile=flaglist, \\\n\t action='apply', flagbackup=True)", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def _get_all_barcodes(accessions=EBI_STUDY_ACCESSIONS):\n acc_from_barcode = {}\n fields_list = ['sample_accession', 'library_name']\n for acc in accessions:\n ebi_info_set = get_ebi_info_set(accession=acc, fields_list=fields_list)\n for sample_info in ebi_info_set:\n # Notes on barcodes: The standard barcode seems to be 9 digits,\n # but many don't match this pattern. Most are probably blanks and\n # other controls. To be safe, we save information for all of them.\n barcode = sample_info['library_name'].split(':')[0]\n acc_from_barcode[barcode] = sample_info['sample_accession']\n return acc_from_barcode", "def get_access_information(access_code):\n reddit = get_reddit()\n try:\n access_information = reddit.get_access_information(access_code)\n except praw.errors.OAuthInvalidGrant:\n return INVALID_CODE_ERR_MSG\n else:\n return access_information", "def delegate_ghidra(extract_opcodes, extract_api_calls):\n if (not extract_opcodes and not extract_api_calls):\n return\n\n # Get all functions\n # pylint: disable=undefined-variable\n function_manager = currentProgram.getFunctionManager() # noqa\n functions = function_manager.getFunctions(True)\n\n # Iterate through functions\n opcodes = []\n apis = []\n for function in functions:\n listing = currentProgram.getListing() # noqa\n function_body = function.getBody()\n codeUnits = listing.getCodeUnits(function_body, True)\n\n # Get the opcodes for the current function\n if extract_opcodes:\n for codeUnit in codeUnits:\n stringified_instruction = str(codeUnit.toString())\n opcodes.append(stringified_instruction.split(\" \")[0].lower())\n\n # Get the API calls for the current function\n if extract_api_calls:\n called_functions = function.getCalledFunctions(TaskMonitor.DUMMY)\n\n for called_function in called_functions:\n # Save the API name if the parent namespace is not the\n # global one\n namespace = str(called_function.getParentNamespace())\n if namespace != GHIDRA_ANALYSIS_GLOBAL_NAMESPACE:\n apis.append(str(called_function.getName()))\n\n if extract_opcodes:\n opcodes_str = GHIDRA_ANALYSIS_ITEMS_DELIMITATOR.join(opcodes)\n print(GHIDRA_ANALYSIS_OPCODES_LINE_START + opcodes_str)\n if extract_api_calls:\n apis_str = GHIDRA_ANALYSIS_ITEMS_DELIMITATOR.join(apis)\n print(GHIDRA_ANALYSIS_APIS_LINE_START + apis_str)", "def capture_tomogram_flyscan(self, start_angle, end_angle,\n num_projections, ccd_readout=0.270,\n extra_projections=0):\n # Calculate angle parameters\n delta = (end_angle - start_angle) / (num_projections)\n total_time = num_projections * (self.exposure_time + ccd_readout)\n slew_speed = (end_angle - start_angle) / total_time\n # Set values for fly scan parameters\n self.Fly_ScanControl = \"Custom\"\n self.Fly_ScanDelta 
= delta\n self.Fly_StartPos = start_angle\n self.Fly_EndPos = end_angle\n self.Fly_SlewSpeed = slew_speed\n # Pause to let the values update\n time.sleep(3)\n # Update the value for the number of projections from instrument\n extra_projections = self.HDF1_NumCapture_RBV - num_projections\n log.debug('Acquiring %d extra projections (flat/dark)', extra_projections)\n calc_num_proj = math.ceil(self.Fly_Calc_Projections)\n if calc_num_proj is not None:\n num_projections = calc_num_proj\n log.debug('Fly scan resetting num_projections to %d (%d)',\n num_projections, extra_projections)\n # Logging\n # Prepare the instrument for scanning\n self.Reset_Theta = 1\n self.Cam1_TriggerMode = 'Overlapped'\n self.Cam1_NumImages = num_projections\n self.HDF1_NumCapture = num_projections + extra_projections\n self.Cam1_ImageMode = self.IMAGE_MODE_MULTIPLE\n self.Cam1_Acquire = self.DETECTOR_ACQUIRE\n self.wait_pv('Cam1_Status', self.DETECTOR_WAITING)\n # Execute the fly scan\n theta = []\n self.Cam1_FrameType = self.FRAME_DATA\n self.Fly_Taxi = 1\n self.wait_pv('Fly_Taxi', 0)\n self.Fly_Run = 1\n self.wait_pv('Fly_Run', 0, timeout=-1)\n # Clean up\n self.wait_pv('Cam1_Status', self.DETECTOR_IDLE)\n time.sleep(0.25)\n self.Proc_Theta = 1\n self.Fly_ScanControl = \"Standard\"\n # Retrieve the actual theta array to return\n pv_name = getattr(type(self), 'Theta_Array').pv_name(txm=self)\n theta = self.pv_get(pv_name, count=int(num_projections))\n if theta is None:\n # No theta array was retrieved, so calculate the angles instead\n warnings.warn(\"Could not retrieve actual angles, \"\n \"storing predicted values instead.\")\n theta = np.linspace(start_angle, end_angle, num=num_projections)\n return theta", "def __correlate_barcodes(self, reads_in_file: int, file_path: TextIO):\n\n # These lines will find the match \"barcode##\"\n barocde_index = re.search(\"(barcode)[0-9]{2}\", str(file_path).lower())\n unclassified_index = re.search(\"(unclassified)\", str(file_path).lower())\n folder_number = \"\"\n\n \"\"\"\n Barcodes are written in the format of `barcode##`, where ## are two integers. Alternatively, if the barcode is not matched,\n it will be `unclassified`.\n When trying to search for the barcode## or unclassified location in the file path, a .start() and .end() location will\n be returned if the location was found. If it was not found, the object will be None (i.e. 
barcode_index = None)\n We can use this to determine if the current path is a barcode## or unclassified\n \"\"\"\n # we have found a barcode## in the file path\n if barocde_index is not None:\n folder_number = str(file_path)[barocde_index.start(): barocde_index.end()]\n # we have not found a barcode##, see if we have found unclassified\n elif unclassified_index is not None:\n folder_number = str(file_path)[unclassified_index.start(): unclassified_index.end()]\n\n # we want to make sure we have found barcode## or unclassified in folder_number\n # if folder_number == \"\", we will simply pass over this section\n if folder_number != \"\":\n # we want to add a new entry if the current barcode has not been added\n if folder_number not in self.barcode_correlations.keys():\n self.barcode_correlations[folder_number] = reads_in_file\n\n # if the entry is already present, we want to add reads_in_file to the current value\n else:\n self.barcode_correlations[folder_number] += reads_in_file", "def AnalyzeDynamicADC(Codes,N=4096,SamplingRate=1, window = None):\r\n CodeArray = np.array(Codes)\r\n if window is not None:\r\n CodeArray = CodeArray * window \r\n CodeArray = (CodeArray-N/2)/N # normalize array \r\n \r\n X = np.abs(np.fft.fft(CodeArray)/(N/2))\r\n X = X[0:int(N/2)] # drop redundant half\r\n X_db = 20*np.log10(np.abs(X)) # want to plot in log scale\r\n #print(X[19:24])\r\n\r\n # calculate SNDR\r\n # SNDR is (input tone power) / (sum of all other bins)\r\n X_sndr = np.copy(X) # make copy for calculations\r\n X_sndr[0] = 0.0\r\n if window is not None:\r\n X_sndr[1] = 0.0 #probably need this when windowing\r\n InputBin = np.argmax(X_sndr) # where is the input tone\r\n if window is not None:\r\n leakage_bins = 20 \r\n leakage_power = np.sum(X_sndr[max(0,InputBin-leakage_bins):InputBin+leakage_bins+1]**2)\r\n InputPower = 10*np.log10(leakage_power)\r\n NoisePower = 10*np.log10(np.sum(X_sndr**2)-leakage_power)\r\n else:\r\n InputPower = 20*np.log10(X_sndr[InputBin])\r\n NoisePower = 10*np.log10(np.sum(X_sndr**2) - X_sndr[InputBin]**2)\r\n X_sndr[InputBin] = 0.0\r\n SNDR = InputPower - NoisePower\r\n\r\n # calculate ENOB\r\n ENOB = (SNDR - 1.76) / 6.02\r\n\r\n #calculate SFDR\r\n # SFDR is difference between input tone and highest spur\r\n PeakSpurBin = np.argmax(X_sndr)\r\n SFDR = InputPower - 20*np.log10(X_sndr[PeakSpurBin])\r\n \r\n # calculate THD\r\n # THD is measure of distortion in data without concern with noise\r\n # first get location of harmonics\r\n NumHarms = 10 # use first 10 harmonics\r\n AliasedHarms = FindAliasedHarmonics(N,InputBin,NumHarms) \r\n DistortionPower = 0.0\r\n for CurrentHarm in AliasedHarms:\r\n if (CurrentHarm != InputBin): # ignore fundamental harmonic\r\n DistortionPower += 20*np.log10(X_sndr[CurrentHarm])\r\n THD = DistortionPower - InputPower\r\n\r\n\r\n #plot results \r\n \r\n Freqs = np.linspace(0,0.5,int(N/2))\r\n if (SamplingRate != 1): # scaling value provided\r\n xLabelText = 'Frequency [MHz]'\r\n Freqs = Freqs * SamplingRate\r\n else:\r\n xLabelText = r'Normalized Frequency [$\\Omega$]'\r\n xFraction = 0.5 # how far along x-axis to put text\r\n FontSize = 10\r\n if window is not None:\r\n plt.plot(Freqs[2:Freqs.size],X_db[2:X_db.size]) # no DC component \r\n else:\r\n plt.plot(Freqs[1:Freqs.size],X_db[1:X_db.size]) # no DC component \r\n plt.xlabel(xLabelText)\r\n plt.text(xFraction*max(Freqs),max(X_db)-20,'Bins = %d' %(N), fontsize = FontSize)\r\n plt.text(xFraction*max(Freqs),max(X_db)-30,\r\n 'SNDR = %.2f dB' %(SNDR), fontsize = FontSize)\r\n 
plt.text(xFraction*max(Freqs),max(X_db)-40,\r\n 'ENOB = %.2f bits' %(ENOB), fontsize = FontSize)\r\n plt.text(xFraction*max(Freqs),max(X_db)-50,\r\n 'SFDR = %.2f dB' %(SFDR), fontsize = FontSize)\r\n #plt.text(xFraction*max(Freqs),max(X_db)-60,\r\n #'THD = %.2f dB' %(THD), fontsize = FontSize)\r\n plt.ylabel('Amplitude [dBFS]')\r\n \"\"\"\r\n fig = plt.figure()\r\n x0 = np.arange(0, 2048)*1./2E6 \r\n plt.title(\"Input codes (after windowing)\")\r\n plt.xlabel(\"time (s)\")\r\n plt.scatter(x0, CodeArray)\r\n\r\n fig = plt.figure()\r\n x0 = np.arange(0, 1024)\r\n plt.title(\"fft values\")\r\n plt.xlabel(\"freq (Mhz)\")\r\n plt.scatter(Freqs, X_db)\r\n\r\n plt.show()\r\n \"\"\"\r\n #plt.savefig('adc_dynamic_performace.png',format='png',dpi=300)\r\n return SNDR, ENOB, SFDR, THD, plt", "def test_determine_accession(self):\n survey_job = self.create_job_for_accession(\"E-MTAB-3050\")\n ae_surveyor = ArrayExpressSurveyor(survey_job)\n\n EXPERIMENTS_URL = \"https://www.ebi.ac.uk/arrayexpress/json/v3/experiments/\"\n SAMPLES_URL = EXPERIMENTS_URL + \"{}/samples\"\n\n ex_accessions = [\n \"E-MTAB-3050\",\n \"E-MEXP-669\",\n \"E-MEXP-2215\",\n \"E-MEXP-2288\",\n \"E-MEXP-2381\",\n \"E-MTAB-6739\",\n ]\n\n for ex_accession in ex_accessions:\n samples_endpoint = SAMPLES_URL.format(ex_accession)\n r = requests.get(samples_endpoint, timeout=60)\n samples = r.json()[\"experiment\"][\"sample\"]\n\n # An experiment can have many samples\n for sample in samples:\n\n # For some reason, this sample has no files associated with it.\n if \"file\" not in sample or len(sample[\"file\"]) == 0:\n continue\n\n # The accession code is not a simple matter to determine.\n sample_source_name = sample[\"source\"].get(\"name\", \"\")\n sample_assay_name = sample[\"assay\"].get(\"name\", \"\")\n\n has_raw = False\n for sub_file in sample[\"file\"]:\n\n # For ex: E-GEOD-15645\n if isinstance(sub_file[\"comment\"], list):\n sub_file_mod = sub_file\n sub_file_mod[\"comment\"] = sub_file[\"comment\"][0]\n else:\n sub_file_mod = sub_file\n\n if (\n sub_file_mod[\"type\"] == \"data\"\n and sub_file_mod[\"comment\"].get(\"value\", None) is not None\n ):\n has_raw = True\n if \"raw\" in sub_file_mod[\"comment\"].get(\"value\", \"\"):\n has_raw = True\n\n # Skip derived data if we have it raw.\n if has_raw and \"derived data\" in sub_file[\"type\"]:\n continue\n elif (not has_raw) and \"derived data\" not in sub_file[\"type\"]:\n # If there is a platform warning then we don't want raw data.\n has_raw = False\n continue\n filename = sub_file[\"name\"]\n\n sample_accession_code = ae_surveyor.determine_sample_accession(\n ex_accession, sample_source_name, sample_assay_name, filename\n )\n self.assertTrue(sample_accession_code is not None)", "def test_gc_map_short_sequences():\n assert gc_map.gc_map('ATGACTACGT', 4, 0.4) == 'atgaCTAC'\n assert gc_map.gc_map('ATGACTACGT', 4, 0.5) == 'atgaCTAC' # Must be greater than or equal to the threshold", "def correlate(self):\n self.logger.debug('correlate()')\n integration_time = self.server._integration_time\n self.logger.info(\"correlating for %0.2f seconds\" %integration_time)\n self.bee2.write_int('hb_cntto', integration_time+1)\n for baseline in self._include_baselines:\n raw = self.bee2.read('corr_out%d' %(int(baseline[1])-1), 128)\n self._correlations[baseline] = array(CORR_OUT.unpack(raw))\n self.logger.info('baseline %s, mean %d' %(baseline, self._correlations[baseline].mean()))\n self.bee2.write_int('corr_record', 0)\n self.bee2.write_int('corr_en', 0)\n self.bee2.write_int('corr_rst', 
1)\n self.bee2.write_int('corr_rst', 0)\n self.bee2.write_int('corr_en', 1)\n sleep(integration_time+1)\n self.bee2.write_int('corr_record', 1)", "def gene_finder(dna, threshold):\n \n # YOUR IMPLEMENTATION HERE\n result = find_all_ORFs_both_strands(dna)\n orfs = []\n AAs = []\n \n for i in result:\n if len(i) > threshold:\n orfs.append(i)\n for i in orfs:\n AAs.append(coding_strand_to_AA(i))\n return AAs", "def map_bb(*args, **kwargs):\n return _digital_swig.map_bb(*args, **kwargs)", "def _process_bitmapped_descriptor(self,\n func_process_string,\n func_process_codeflag,\n func_process_numeric,\n func_process_numeric_with_new_refval,\n marker_id,\n nbytes_new,\n nbits_offset,\n scale_offset,\n nbits_increment,\n scale_increment,\n refval_factor):\n\n idx_descriptor, bitmapped_descriptor = self.next_bitmapped_descriptor()\n self.bitmap_links[len(self.decoded_descriptors)] = idx_descriptor\n\n # difference statistical values marker has different refval and nbits values\n if marker_id == 225255:\n bitmapped_descriptor = MarkerDescriptor.from_element_descriptor(\n bitmapped_descriptor,\n marker_id,\n refval=-2 ** bitmapped_descriptor.nbits,\n nbits=bitmapped_descriptor.nbits + 1,\n )\n else:\n bitmapped_descriptor = MarkerDescriptor.from_element_descriptor(\n bitmapped_descriptor,\n marker_id,\n )\n\n if bitmapped_descriptor.unit == 'CCITT IA5':\n nbytes = nbytes_new if nbytes_new else bitmapped_descriptor.nbits // 8\n func_process_string(bitmapped_descriptor, nbytes)\n\n elif bitmapped_descriptor.unit in ('FLAG TABLE', 'CODE TABLE'):\n func_process_codeflag(bitmapped_descriptor, bitmapped_descriptor.nbits)\n\n else: # numeric\n nbits = bitmapped_descriptor.nbits + nbits_offset + nbits_increment\n scale = bitmapped_descriptor.scale + scale_offset + scale_increment\n scale_powered = 1.0 * 10 ** scale\n\n if bitmapped_descriptor.id not in self.refval_new:\n refval = bitmapped_descriptor.refval * refval_factor\n func_process_numeric(bitmapped_descriptor, nbits, scale_powered, refval)\n else:\n func_process_numeric_with_new_refval(\n bitmapped_descriptor, nbits, scale_powered, refval_factor)", "def perform_ransac(self,tags, samplesize=6, cutoff=3, refine =True):\n correspondence = self.correspondence[tags[2]]\n image1points = np.zeros((len(correspondence), 2))\n image2points = np.zeros((len(correspondence), 2))\n image1points = correspondence[:, 0:2]\n image2points = correspondence[:, 2:]\n count = 0\n listofinliersfinal =[]\n listofoutliersfinal = []\n homographyfinal =np.zeros((3,3))\n\n for iteration in range(self.ransactrials):\n print(str(iteration) + \" of \" + str(self.ransactrials))\n print(len(image1points))\n ip_index = np.random.randint(0, len(image1points), samplesize)\n image1sample = image1points[ip_index, :]\n image2sample = image2points[ip_index, :]\n H = self.calculate_lls_homography(image1sample, image2sample)\n dest_pts_estimate = np.zeros((image2points.shape), dtype='int')\n for index in range(len(image1points)):\n dest_pts_nonNorm = np.matmul(H, ([image1points[index, 0], image1points[index, 1], 1]))\n dest_pts_estimate[index, 0] = dest_pts_nonNorm[0] / dest_pts_nonNorm[-1]\n dest_pts_estimate[index, 1] = dest_pts_nonNorm[1] / dest_pts_nonNorm[-1]\n\n estimationerror = dest_pts_estimate - image2points\n errorsqaure = np.square(estimationerror)\n dist = np.sqrt(errorsqaure[:, 0] + errorsqaure[:, 1])\n validpointidx = np.where(dist <= cutoff)\n invalidpointidx = np.where(dist > cutoff)\n innlierlist=[]\n outlierlist =[]\n for i,element in enumerate(dist):\n if element 
<=cutoff:\n innlierlist.append([image1points[i][1],image1points[i][0],dest_pts_estimate[i][1],dest_pts_estimate[i][0] ])\n else:\n outlierlist.append([image1points[i][0], image1points[i][1], image2points[i][0], image2points[i][1]])\n\n Inliers = [1 for val in dist if (val < 3)]\n if len(Inliers) > count:\n count = len(Inliers)\n listofinliersfinal =innlierlist\n listofoutliersfinal =outlierlist\n homographyfinal = H\n\n if refine == True:\n print(\"Refining...\")\n self.homographydict[tags[2]] = self.refine_homography(homographyfinal, image1points, image2points)\n else:\n self.homographydict[tags[2]]=homographyfinal\n print(len(listofinliersfinal))\n print(len(listofoutliersfinal))\n self.draw_inliers_outliers(tags, correspondence, homographyfinal, 3)", "def run(self, exposure, catalog):\n bbox = exposure.getBBox()\n\n self.log.info(\"Measuring aperture corrections for %d flux fields\" % (len(self.toCorrect),))\n # First, create a subset of the catalog that contains only selected stars\n # with non-flagged reference fluxes.\n subset1 = [record for record in self.starSelector.selectStars(exposure, catalog).starCat\n if not record.get(self.refFluxKeys.flag)]\n\n apCorrMap = ApCorrMap()\n\n # Outer loop over the fields we want to correct\n for name, keys in self.toCorrect.iteritems():\n fluxName = name + \"_flux\"\n fluxSigmaName = name + \"_fluxSigma\"\n\n # Create a more restricted subset with only the objects where the to-be-correct flux\n # is not flagged.\n subset2 = [record for record in subset1 if not record.get(keys.flag)]\n\n # Check that we have enough data points that we have at least the minimum of degrees of\n # freedom specified in the config.\n if len(subset2) - 1 < self.config.minDegreesOfFreedom:\n raise RuntimeError(\"Only %d sources for calculation of aperture correction for '%s'; \"\n \"require at least %d.\"\n % (len(subset2), name, self.config.minDegreesOfFreedom+1))\n apCorrMap[fluxName] = ChebyshevBoundedField(bbox, numpy.ones((1,1), dtype=float))\n apCorrMap[fluxSigmaName] = ChebyshevBoundedField(bbox, numpy.zeros((1,1), dtype=float))\n continue\n\n # If we don't have enough data points to constrain the fit, reduce the order until we do\n ctrl = self.config.fitConfig.makeControl()\n while len(subset2) - ctrl.computeSize() < self.config.minDegreesOfFreedom:\n if ctrl.orderX > 0:\n ctrl.orderX -= 1\n if ctrl.orderY > 0:\n ctrl.orderY -= 1\n\n # Fill numpy arrays with positions and the ratio of the reference flux to the to-correct flux\n x = numpy.zeros(len(subset2), dtype=float)\n y = numpy.zeros(len(subset2), dtype=float)\n apCorrData = numpy.zeros(len(subset2), dtype=float)\n indices = numpy.arange(len(subset2), dtype=int)\n for n, record in enumerate(subset2):\n x[n] = record.getX()\n y[n] = record.getY()\n apCorrData[n] = record.get(self.refFluxKeys.flux)/record.get(keys.flux)\n\n for _i in range(self.config.numIter):\n\n # Do the fit, save it in the output map\n apCorrField = ChebyshevBoundedField.fit(bbox, x, y, apCorrData, ctrl)\n\n # Compute errors empirically, using the RMS difference between the true reference flux and the\n # corrected to-be-corrected flux.\n apCorrDiffs = apCorrField.evaluate(x, y)\n apCorrDiffs -= apCorrData\n apCorrErr = numpy.mean(apCorrDiffs**2)**0.5\n\n # Clip bad data points\n apCorrDiffLim = self.config.numSigmaClip * apCorrErr\n keep = numpy.fabs(apCorrDiffs) <= apCorrDiffLim\n x = x[keep]\n y = y[keep]\n apCorrData = apCorrData[keep]\n indices = indices[keep]\n\n # Final fit after clipping\n apCorrField = 
ChebyshevBoundedField.fit(bbox, x, y, apCorrData, ctrl)\n\n self.log.info(\"Aperture correction for %s: RMS %f from %d\" %\n (name, numpy.mean((apCorrField.evaluate(x, y) - apCorrData)**2)**0.5, len(indices)))\n\n # Save the result in the output map\n # The error is constant spatially (we could imagine being\n # more clever, but we're not yet sure if it's worth the effort).\n # We save the errors as a 0th-order ChebyshevBoundedField\n apCorrMap[fluxName] = apCorrField\n apCorrErrCoefficients = numpy.array([[apCorrErr]], dtype=float)\n apCorrMap[fluxSigmaName] = ChebyshevBoundedField(bbox, apCorrErrCoefficients)\n\n # Record which sources were used\n for i in indices:\n subset2[i].set(keys.used, True)\n\n return Struct(\n apCorrMap = apCorrMap,\n )", "def reconstruct(imprefixL1,imprefixL2,imprefixR1,imprefixR2,threshold1,threshold2,camL,camR):\n\n CLh,maskLh,cmaskL = decode(imprefixL1,imprefixL2,0,threshold1,threshold2)\n CLv,maskLv,_ = decode(imprefixL1,imprefixL2,20,threshold1,threshold2)\n CRh,maskRh,cmaskR = decode(imprefixR1,imprefixR2,0,threshold1,threshold2)\n CRv,maskRv,_ = decode(imprefixR1,imprefixR2,20,threshold1,threshold2)\n\n CL = CLh + 1024*CLv\n maskL = maskLh*maskLv*cmaskL\n CR = CRh + 1024*CRv\n maskR = maskRh*maskRv*cmaskR\n\n h = CR.shape[0]\n w = CR.shape[1]\n\n subR = np.nonzero(maskR.flatten())\n subL = np.nonzero(maskL.flatten())\n\n CRgood = CR.flatten()[subR]\n CLgood = CL.flatten()[subL]\n\n _,submatchR,submatchL = np.intersect1d(CRgood,CLgood,return_indices=True)\n\n matchR = subR[0][submatchR]\n matchL = subL[0][submatchL]\n\n xx,yy = np.meshgrid(range(w),range(h))\n xx = np.reshape(xx,(-1,1))\n yy = np.reshape(yy,(-1,1))\n\n pts2R = np.concatenate((xx[matchR].T,yy[matchR].T),axis=0)\n pts2L = np.concatenate((xx[matchL].T,yy[matchL].T),axis=0)\n \n #record the bvalues\n imageL= plt.imread(imprefixL1 +\"%02d\" % (1)+'.png')\n imageR = plt.imread(imprefixR1 +\"%02d\" % (1)+'.png')\n bvaluesL_list=[]\n bvaluesR_list=[]\n for i in range(pts2L.shape[1]):\n bvaluesL_list.append(imageL[pts2L[1][i]][pts2L[0][i]])\n bvaluesR_list.append(imageR[pts2R[1][i]][pts2R[0][i]])\n bvaluesL=np.array(bvaluesL_list).T\n bvaluesR=np.array(bvaluesR_list).T\n bvalues=(bvaluesL+bvaluesR)/2\n\n pts3 = triangulate(pts2L,camL,pts2R,camR)\n\n return pts2L,pts2R,pts3,bvalues", "def gene_finder(dna, threshold):\n\n all_ORFs = find_all_ORFs_both_strands(dna)\n amino_acids = []\n for i in range(0,len(all_ORFs)):\n if (len(all_ORFs[i])> threshold):\n aa = coding_strand_to_AA(all_ORFs[i]) #only if the ORF is longer than the threshold, it is considered a gene and decoded into amino acids\n amino_acids.append(aa)\n \n return amino_acids" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_correlate_access_code_tag_bb_sptr __init__(self, p) > digital_correlate_access_code_tag_bb_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self,kim_code,*args,**kwargs):\n super(VirtualMachine,self).__init__(kim_code,*args,**kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self,kim_code,*args,**kwargs):\n super(ReferenceDatum,self).__init__(kim_code,*args,**kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def correlate_access_code_tag_bb(*args, **kwargs):\n return _digital_swig.correlate_access_code_tag_bb(*args, **kwargs)", "def __init__(self,kim_code,*args,**kwargs):\n super(Test,self).__init__(kim_code,*args,**kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _coin.new_SbImage(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_access_code(self, string access_code) > bool
def set_access_code(self, *args, **kwargs): return _digital_swig.digital_correlate_access_code_tag_bb_sptr_set_access_code(self, *args, **kwargs)
[ "def set_code(self, code):\n self._code = code", "def setCode(self, c):\n\t\t\n\t\tself.code = c", "def setCode(self, code):\n if not utils.is_valid_code(code)[0]:\n raise ValueError, utils.mapping(_(\"Invalid code: $1\"),(str(code),))\n self.__code = code", "def UseCode(code):\n access_token = client.exchange_code_for_token(client_id=client_id, client_secret=secret, code=code)\n client.access_token = access_token\n open(\"access_token\", \"w\").write(access_token)\n callback(client)", "def set_code(self, key, value):\n self._code[key] = value", "def markAsCode(self,addr):\n return HopperLowLevel.markAsCode(self.__internal_segment_addr__,addr)", "def setAccessMode(self, mode): \n self.__accessMode = mode", "def sso_code(self, sso_code):\n\n self._sso_code = sso_code", "def update_code(self, new_code):\n self.code = new_code # code from __inti ___\n\n # Fill in the rest", "def acriss_code(self, acriss_code):\n if acriss_code is None:\n raise ValueError(\"Invalid value for `acriss_code`, must not be `None`\")\n\n self._acriss_code = acriss_code", "def validate_code(self, client_id, code, client, request, *args, **kwargs):\r\n client = client or self._clientgetter(client_id)\r\n log.debug(\r\n 'Validate code for client %r and code %r', client.client_id, code\r\n )\r\n grant = self._grantgetter(client_id=client.client_id, code=code)\r\n if not grant:\r\n log.debug('Grant not found.')\r\n return False\r\n if hasattr(grant, 'expires') and \\\r\n datetime.datetime.utcnow() > grant.expires:\r\n log.debug('Grant is expired.')\r\n return False\r\n\r\n request.state = kwargs.get('state')\r\n request.user = grant.user\r\n request.scopes = grant.scopes\r\n return True", "def allow_access(self, share, access, share_server):", "def save_authorization_code(self, client_id, code, request, *args, **kwargs):\n msg = \"Scopes in the request: {}\".format(request.scopes)\n current_app.logger.debug(msg)\n associations = {\n \"scopes\": request.scopes,\n \"redirect_uri\": request.redirect_uri,\n \"client_id\": client_id,\n \"state\": request.state,\n \"user\": request.user,\n }\n\n cache.set(code[\"code\"], associations, timeout=10 * 60)\n\n return", "def update_code(self, new_code):\n\n # Fill in the rest\n self.code = new_code\n # print(self.code) #for checking\n return self.code", "def modify_access_mode(self, snapshot_id, access_mode_list):\n\n try:\n changed = False\n for temp in access_mode_list:\n if temp['accessMode']:\n self.powerflex_conn.volume.set_access_mode_for_sdc(\n volume_id=snapshot_id, sdc_id=temp['sdc_id'],\n access_mode=temp['accessMode'])\n changed = True\n return changed\n except Exception as e:\n errormsg = \"Modify access mode of SDC %s operation failed \" \\\n \"with error %s\" % (temp['sdc_id'], str(e))\n LOG.error(errormsg)\n self.module.fail_json(msg=errormsg)", "def validate_code(self, client_id, code, client, request, *args, **kwargs):\n cached = cache.get(code)\n\n if cached is None:\n msg = \"validate_code - Code {} not found, possibly invalidated\".format(code)\n current_app.logger.info(msg)\n return False\n\n if cached.get(\"client_id\", None) != client_id:\n msg = \"validate_code - Client id in cache does not make supplied client id\"\n current_app.logger.info(msg)\n return False\n\n request.scopes = cached[\"scopes\"]\n msg = \"Scopes in the request: {}\".format(request.scopes)\n current_app.logger.debug(msg)\n request.user = cached[\"user\"]\n return True", "def verify_code(self, code: str) -> bool:\n self.verified = self.verification_code == code\n return self.verified", 
"def response_code(self, response_code: str):\n\n self._response_code = response_code", "def esri_access(self, value):\r\n if self._portal.is_arcgisonline:\r\n if value == True:\r\n ret = self._portal.update_user(self._user_id,\r\n user_type=\"both\")\r\n else:\r\n ret = self._portal.update_user(self._user_id,\r\n user_type=\"arcgisonly\")\r\n self._hydrate()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
correlate_access_code_tag_bb(string access_code, int threshold, string tag_name) > digital_correlate_access_code_tag_bb_sptr Examine input for specified access code, one bit at a time.
def correlate_access_code_tag_bb(*args, **kwargs): return _digital_swig.correlate_access_code_tag_bb(*args, **kwargs)
[ "def set_access_code(self, *args, **kwargs):\n return _digital_swig.digital_correlate_access_code_tag_bb_sptr_set_access_code(self, *args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def tag_seen_callback(llrpMsg):\n global tagReport, accessId, OpSpecsIdx, hexFileIdx\n tags = llrpMsg.msgdict['RO_ACCESS_REPORT']['TagReportData']\n\n if tags:\n smokesignal.emit('rfid', {\n 'tags': tags,})\n\n if len(tags):\n for tag in tags:\n # logger.info('saw!! tag(s): {}'.format(pprint.pformat(tags)))\n if(\"OpSpecResult\" in tags[0]):\n for ops in tag[\"OpSpecResult\"]:\n logger.info('saw tag(s): {}'.format(pprint.pformat(tags)))\n if (\"ReadData\" in tag[\"OpSpecResult\"][ops]):\n logger.info(\"Readdata = \" + tag[\"OpSpecResult\"][ops][\"ReadData\"] + \" accessType :\" + accessType)\n\n if (accessType == 'readWisp') :\n # AsscessSpec Reading message for WISP5\n logger.info(\"OpSpecsIdx : \" + str(OpSpecsIdx) + \" OpSpecs.__len__(): \" + str(OpSpecs.__len__()) )\n smokesignal.emit('rfid', {\n 'readWispTags': [{'readWisp' : tag[\"OpSpecResult\"][ops][\"ReadData\"]\n , 'EPCvalue' : tag[\"EPC-96\"]\n , 'OpSpecId' : tag[\"OpSpecResult\"][ops][\"OpSpecID\"] }],})\n\n if(OpSpecsIdx < OpSpecs.__len__()) :\n logger.info(\"ReadWisp : \")\n accessId += 1\n fac.nextAccessSpec(opSpecs = [OpSpecs[OpSpecsIdx], OpSpecs[OpSpecsIdx+1]],\n accessSpec = {'ID':accessId, 'StopParam': {'AccessSpecStopTriggerType': 1, 'OperationCountValue': 1,},})\n OpSpecsIdx += 2\n\n else :\n # Result for Normal tags\n smokesignal.emit('rfid', {\n 'readTags': [{'read' : tag[\"OpSpecResult\"][ops][\"ReadData\"]\n , 'EPCvalue' : tag[\"EPC-96\"] }],})\n\n\n elif(0 == tag[\"OpSpecResult\"][ops][\"NumWordsWritten\"]):\n if (accessType == 'readWisp') :\n OpSpecsIdx -= 2\n fac.nextAccessSpec(opSpecs = [OpSpecs[OpSpecsIdx], OpSpecs[OpSpecsIdx+1]],\n accessSpec = {'ID':accessId, 'StopParam': {'AccessSpecStopTriggerType': 1, 'OperationCountValue': 1,},})\n OpSpecsIdx += 2\n elif(accessType == 'writeWisp'):\n smokesignal.emit('rfid', {\n 'writeWispTags': [{'writeWisp' : hexFileLines[hexFileIdx]\n , 'EPCvalue' : tag[\"EPC-96\"]\n , 'OpSpecId' : tag[\"OpSpecResult\"][ops][\"OpSpecID\"]\n , 'status' : 'Failed'} ],})\n\n elif(2 < tag[\"OpSpecResult\"][ops][\"NumWordsWritten\"]):\n if (accessType == 'writeWisp') :\n # AsscessSpec Writing message for WISP5\n logger.info(\"hexFileLines : \" + hexFileLines[hexFileIdx] + \" hexFileIdx size: \" + str(hexFileIdx) + \" OpSpecSize : \" + str(len(OpSpecs)))\n\n smokesignal.emit('rfid', {\n 'writeWispTags': [{'writeWisp' : hexFileLines[hexFileIdx]\n , 'EPCvalue' : tag[\"EPC-96\"]\n , 'OpSpecId' : tag[\"OpSpecResult\"][ops][\"OpSpecID\"]\n , 'status' : 'Success'} ],})\n\n if (hexFileIdx == (len(OpSpecs) - 1)):\n logger.info(\" EOF reached.\")\n else:\n logger.info(\"WriteWisp : \" + str(hexFileIdx))\n accessId += 1\n hexFileIdx += 1\n fac.nextAccessSpec(opSpecs = [OpSpecs[hexFileIdx]],\n accessSpec = {'ID':accessId, 'StopParam': {'AccessSpecStopTriggerType': 1, 'OperationCountValue': 1,},})\n\n print getTimeMeasurement()\n else:\n logger.info('no tags seen')\n return\n for tag in tags:\n tagReport += tag['TagSeenCount'][0]", "def add_tags(read, guide_start=None, guide_length=None, expected_barcode=None,\n barcode_start=None, flag=None):\n\n # add bitwise flags\n if flag:\n read.flag+=flag\n\n if guide_start:\n #ged, seq = get_guide_edit_distance(read, start = guide_start, 
length=guide_length, extract=True)\n try:\n guide, ged = extract_subsequence(read, start = guide_start, end = guide_start+guide_length, count_ed=True)\n except:\n # if we hit an error, do not extract tad\n logging.error(\"Guide extraction failed for read: {}\".format(read.qname))\n return read\n\n if expected_barcode:\n barcode = extract_subsequence(read, start = barcode_start, end = barcode_start + len(expected_barcode), count_ed=False)\n if barcode is not None:\n #bed = levenshtein_distance(barcode, expected_barcode)\n bed = hamming_distance(barcode, expected_barcode)\n\n # add tags to read\n # set guide edit distance\n if guide_start:\n read.set_tag('YG', guide, value_type=\"Z\")\n read.set_tag('YH', ged, value_type=\"i\")\n if expected_barcode and barcode:\n read.set_tag(\"YB\", barcode, value_type = \"Z\")\n read.set_tag(\"YC\", bed, value_type = \"i\")\n\n\n return read", "def perform_ransac(self,tags, samplesize=6, cutoff=3, refine =True):\n correspondence = self.correspondence[tags[2]]\n image1points = np.zeros((len(correspondence), 2))\n image2points = np.zeros((len(correspondence), 2))\n image1points = correspondence[:, 0:2]\n image2points = correspondence[:, 2:]\n count = 0\n listofinliersfinal =[]\n listofoutliersfinal = []\n homographyfinal =np.zeros((3,3))\n\n for iteration in range(self.ransactrials):\n print(str(iteration) + \" of \" + str(self.ransactrials))\n print(len(image1points))\n ip_index = np.random.randint(0, len(image1points), samplesize)\n image1sample = image1points[ip_index, :]\n image2sample = image2points[ip_index, :]\n H = self.calculate_lls_homography(image1sample, image2sample)\n dest_pts_estimate = np.zeros((image2points.shape), dtype='int')\n for index in range(len(image1points)):\n dest_pts_nonNorm = np.matmul(H, ([image1points[index, 0], image1points[index, 1], 1]))\n dest_pts_estimate[index, 0] = dest_pts_nonNorm[0] / dest_pts_nonNorm[-1]\n dest_pts_estimate[index, 1] = dest_pts_nonNorm[1] / dest_pts_nonNorm[-1]\n\n estimationerror = dest_pts_estimate - image2points\n errorsqaure = np.square(estimationerror)\n dist = np.sqrt(errorsqaure[:, 0] + errorsqaure[:, 1])\n validpointidx = np.where(dist <= cutoff)\n invalidpointidx = np.where(dist > cutoff)\n innlierlist=[]\n outlierlist =[]\n for i,element in enumerate(dist):\n if element <=cutoff:\n innlierlist.append([image1points[i][1],image1points[i][0],dest_pts_estimate[i][1],dest_pts_estimate[i][0] ])\n else:\n outlierlist.append([image1points[i][0], image1points[i][1], image2points[i][0], image2points[i][1]])\n\n Inliers = [1 for val in dist if (val < 3)]\n if len(Inliers) > count:\n count = len(Inliers)\n listofinliersfinal =innlierlist\n listofoutliersfinal =outlierlist\n homographyfinal = H\n\n if refine == True:\n print(\"Refining...\")\n self.homographydict[tags[2]] = self.refine_homography(homographyfinal, image1points, image2points)\n else:\n self.homographydict[tags[2]]=homographyfinal\n print(len(listofinliersfinal))\n print(len(listofoutliersfinal))\n self.draw_inliers_outliers(tags, correspondence, homographyfinal, 3)", "def main():\n\n global args, summaryInstance\n from Bio import SeqIO\n import pysam\n import logging\n\n\n\n configureLogging('info')\n\n readArgs() # Argument parsing\n logging.info('Arguments read successfully')\n\n summaryInstance = Summary()\n\n # Generates a dictionary from concatenated .clstr file (stores in Summary instance)\n # dict1 = consensus barcode representing the cluster : non-consensus barcode in the same cluster\n # dict2 = master dictionary, concatenated 
from dict1\n\n readAndProcessClusters(args.input_clstr)\n\n logging.info('Cluster file processed successfully')\n\n infile = pysam.AlignmentFile(args.input_mapped_bam, 'rb')\n out = pysam.AlignmentFile(args.output_tagged_bam+'.bam', 'wb', template=infile)\n\n for read in infile.fetch(until_eof=True):\n read_bc = read.query_name.split()[0].split('_')[-1]\n consensus_seq = summaryInstance.master_barcode_dict[read_bc]\n read.set_tag('BC', str(consensus_seq),value_type='Z') # Stores as string, makes duplicate removal possible. Can do it as integer as well.\n read.query_name = (read.query_name + '_@BC:Z:' + str(consensus_seq))\n out.write(read)\n\n infile.close()\n out.close()\n\n logging.info('Tagging completed')", "def _process_bitmapped_descriptor(self,\n func_process_string,\n func_process_codeflag,\n func_process_numeric,\n func_process_numeric_with_new_refval,\n marker_id,\n nbytes_new,\n nbits_offset,\n scale_offset,\n nbits_increment,\n scale_increment,\n refval_factor):\n\n idx_descriptor, bitmapped_descriptor = self.next_bitmapped_descriptor()\n self.bitmap_links[len(self.decoded_descriptors)] = idx_descriptor\n\n # difference statistical values marker has different refval and nbits values\n if marker_id == 225255:\n bitmapped_descriptor = MarkerDescriptor.from_element_descriptor(\n bitmapped_descriptor,\n marker_id,\n refval=-2 ** bitmapped_descriptor.nbits,\n nbits=bitmapped_descriptor.nbits + 1,\n )\n else:\n bitmapped_descriptor = MarkerDescriptor.from_element_descriptor(\n bitmapped_descriptor,\n marker_id,\n )\n\n if bitmapped_descriptor.unit == 'CCITT IA5':\n nbytes = nbytes_new if nbytes_new else bitmapped_descriptor.nbits // 8\n func_process_string(bitmapped_descriptor, nbytes)\n\n elif bitmapped_descriptor.unit in ('FLAG TABLE', 'CODE TABLE'):\n func_process_codeflag(bitmapped_descriptor, bitmapped_descriptor.nbits)\n\n else: # numeric\n nbits = bitmapped_descriptor.nbits + nbits_offset + nbits_increment\n scale = bitmapped_descriptor.scale + scale_offset + scale_increment\n scale_powered = 1.0 * 10 ** scale\n\n if bitmapped_descriptor.id not in self.refval_new:\n refval = bitmapped_descriptor.refval * refval_factor\n func_process_numeric(bitmapped_descriptor, nbits, scale_powered, refval)\n else:\n func_process_numeric_with_new_refval(\n bitmapped_descriptor, nbits, scale_powered, refval_factor)", "def gene_finder(dna, threshold):\n\n all_ORFs = find_all_ORFs_both_strands(dna)\n amino_acids = []\n for i in range(0,len(all_ORFs)):\n if (len(all_ORFs[i])> threshold):\n aa = coding_strand_to_AA(all_ORFs[i]) #only if the ORF is longer than the threshold, it is considered a gene and decoded into amino acids\n amino_acids.append(aa)\n \n return amino_acids", "def count_good_constructs_and_barcodes(bam_file_path, \n library = None,\n guide_edit_threshold = 2,\n barcode_edit_threshold = 0,\n sample = \"Count\",\n output_counts_path = \"/dev/stdout\",\n output_barcodes_path = None):\n\n # set up logger\n log = logging.getLogger()\n\n if bam_file_path.endswith('bam'):\n method = \"rb\"\n else:\n method = \"r\"\n\n # open bam handle\n bam = pysam.AlignmentFile(bam_file_path, method)\n\n # get total read count and paired status\n log.info(\"Getting total read counts.\")\n total_count, paired = get_fragment_count(bam_file_path)\n log.debug(\"Total Frags: {}. 
Is Paired: {}\".format(total_count, paired))\n\n # initialize counters\n construct_counter = defaultdict(int)\n observed_barcodes = defaultdict(lambda: defaultdict(int))\n read_count = 0\n reads_considered = 0\n constructs_recognized = 0\n constructs_unrecognized = 0 \n guides_recognized = 0\n guides_unrecoginzed = 0\n valid_barcodes = 0\n invalid_barcodes = 0\n barcode_assigned = 0\n valid_constructs = 0\n\n for read, mate in mate_pair_bam_reader(bam_file_path, paired=paired):\n\n read_count += 1\n # do some qc checks \n if (read.is_unmapped) | \\\n (read.is_duplicate) | \\\n (paired and mate.is_unmapped) | \\\n (paired and not read.is_read1):\n continue\n\n # count the read\n reads_considered += 1 \n\n # get guide edit distance\n try:\n guide_ed_read1 = read.get_tag(\"YH\")\n except KeyError:\n # no edit distance tag, assume bad\n continue\n\n\n rname = read.reference_name.rstrip(\"_A\")\n #rname = read.reference_name\n\n # get construct\n if paired:\n mname = mate.reference_name.rstrip(\"_B\")\n #mname = mate.reference_name\n construct = get_construct_divider().join([rname, mname])\n # get mate edit distance\n try:\n guide_ed_read2 = mate.get_tag(\"YH\")\n except KeyError:\n # no edit distance tag, assume bad\n continue\n else:\n construct = rname\n guide_ed_read2 = None\n\n # TODO: fix this for no barcode reads\n # check barcode edit distance\n try:\n barcode = read.get_tag(\"YB\")\n barcode_distance = read.get_tag(\"YC\")\n except KeyError:\n # no barcode read, assume bad \n log.debug(\"No barcode tag found in read: {}\".format(read.qname))\n barcode = None \n barcode_distance = barcode_edit_threshold+1 # one more than threshold so it never passes\n pass\n\n # Tabulate construct counts\n # count good guides\n # were the guides recognized according to allowed edit distance\n if (guide_ed_read1<=guide_edit_threshold):\n guides_recognized += 1\n if paired:\n if (guide_ed_read2<=guide_edit_threshold):\n guides_recognized += 1\n valid_constructs += 1\n # report valid constructs\n if library is not None:\n if construct in library:\n constructs_recognized+=1\n else:\n constructs_unrecognized+=1\n # check the barcode\n if (barcode_distance <= barcode_edit_threshold):\n observed_barcodes[barcode][construct] += 1\n barcode_assigned += 1\n valid_barcodes += 1\n else:\n invalid_barcodes += 1\n\n # add to construct counter\n construct_counter[construct]+=1\n else:\n guides_unrecoginzed += 1\n\n else:\n construct_counter[construct]+=1\n valid_constructs += 1\n # report valid constructs\n if library is not None:\n if construct in library:\n constructs_recognized+=1\n else:\n constructs_unrecognized+=1\n\n # check the barcode\n if (barcode_distance <= barcode_edit_threshold):\n observed_barcodes[barcode][construct] += 1\n barcode_assigned += 1\n valid_barcodes += 1\n else:\n invalid_barcodes += 1\n else:\n guides_unrecoginzed += 1\n if paired:\n if (guide_ed_read2<=guide_edit_threshold):\n guides_recognized += 1\n else:\n guides_unrecoginzed += 1 \n\n # check the barcode\n if (barcode_distance <= barcode_edit_threshold):\n observed_barcodes[barcode][construct] += 1\n #barcode_assigned += 1 # guides are bad so dont count\n valid_barcodes += 1\n else:\n invalid_barcodes += 1\n\n if read_count==0:\n pct_valid = 0\n else:\n pct_valid = valid_constructs/read_count * 100\n\n\n log.info(\"Found {0} passing constructs out of {1} reads. 
{2:.2f}%.\".format(\n valid_constructs,\n read_count,\n pct_valid))\n log.info(\"Writing outputs.\")\n\n # finally write out counts and barcode paths\n with open(output_counts_path, 'w') as handle:\n handle.write(\"#Total Fragments: {}\\n\".format(total_count))\n handle.write(\"#Fragments Considered: {}\\n\".format(reads_considered))\n handle.write(\"#Guides Passing: {}\\n\".format(guides_recognized))\n handle.write(\"#Guides Failing: {}\\n\".format(guides_unrecoginzed))\n if library is not None:\n handle.write(\"#Constructs Recognized: {}\\n\".format(\n constructs_recognized))\n handle.write(\"#Constructs Unrecognized: {}\\n\".format(\n constructs_unrecognized))\n if valid_barcodes > 0:\n handle.write(\"#Barcodes Passing: {}\\n\".format(\n valid_barcodes))\n handle.write(\"#Barcodes Failing: {}\\n\".format(\n invalid_barcodes))\n handle.write(\"#Barcodes Assigned: {}\\n\".format(\n barcode_assigned))\n\n # write stats\n if library is not None:\n # use library to add write out extra columns\n # only write constructs we are interested in\n # TODO: this is hardcoded to two guides ... can we do singles?\n if paired: \n header = [\"construct_id\",\n \"target_a_id\",\"probe_a_id\",\n \"target_b_id\",\"probe_b_id\",\n sample]\n else:\n header = [\"construct_id\",\n \"target_a_id\",\"probe_a_id\",\n sample]\n print(paired) \n print(header)\n out_line = \"\\t\".join(header)\n handle.write(\"{}\\n\".format(out_line))\n for construct in sorted(library):\n info = library[construct]\n count = construct_counter[construct]\n to_write = [construct] \n to_write += [info[i] for i in header[1:-1]] \n to_write += [str(count)]\n out_line = \"\\t\".join(to_write)\n try:\n handle.write(\"{}\\n\".format(out_line))\n except UnicodeEncodeError:\n print(out_line)\n sys.exit()\n else:\n header = [\"construct_id\",\"count\"]\n out_line = \"\\t\".join(header)\n handle.write(\"{}\\n\".format(out_line))\n for construct in sorted(construct_counter):\n count = construct_counter[construct]\n to_write = [construct, str(count)]\n out_line = \"\\t\".join(to_write)\n handle.write(\"{}\\n\".format(out_line))\n\n if output_barcodes_path:\n # build ambigous barcodes path\n ambiguous = \"{}.ambiguous.txt\".format(\n os.path.splitext(output_barcodes_path)[0])\n with open(output_barcodes_path, 'w') as handle,\\\n open(ambiguous, 'w') as amb:\n handle.write(\"barcode\\tconstruct_id\\tcount\\n\")\n amb.write(\"barcode\\tconstruct_id\\tcount\\n\") \n for barcode, vals in observed_barcodes.items():\n constructs = [(i[0], i[1]) for i in vals.items()]\n if len(constructs)>1: \n log.debug(\"Abgious Barcode: {}. 
Maps to {} constructs\".\\\n format(barcode, len(constructs)))\n output = \"{}\\t\".format(barcode) \n for construct in constructs:\n output += \"{}\\t{}\\t\".format(construct[0],construct[1])\n amb.write(\"{}\\n\".format(output))\n else:\n # split out information\n construct = constructs[0][0] # name of constructs\n count = constructs[0][1]\n to_write = [barcode, construct, str(count)]\n out_line = \"\\t\".join(to_write)\n handle.write(\"{}\\n\".format(out_line))\n\n return 0", "def gene_finder(dna, threshold):\n \n # YOUR IMPLEMENTATION HERE\n result = find_all_ORFs_both_strands(dna)\n orfs = []\n AAs = []\n \n for i in result:\n if len(i) > threshold:\n orfs.append(i)\n for i in orfs:\n AAs.append(coding_strand_to_AA(i))\n return AAs", "def test_03_bandpass_calibrator_analysis_flagging():\n\tcasalog.origin(\"test_03_bandpass_calibrator_analysis_flagging\")\n\tcasalog.post(\"starting\")\n\n\tflaglist = ['antenna=\"ea01,ea10,ea19,ea13\"',\n\t 'antenna=\"ea24\" spw=\"40,47~48\"',\n\t 'antenna=\"ea18\" spw=\"16~31\"']\n\tflagcmd(vis='G192_6s.ms', inpmode='list', inpfile=flaglist, \\\n\t action='apply', flagbackup=True)", "def revoke_ag_access():\n\tschema = {\n\t\t\"ag_id\": {\"type\": \"integer\"},\n\t\t\"email\": {\"type\": \"string\"}\n\t}\n\n\temail = request.json.get(\"email\")\n\tag_id = request.json.get(\"ag_id\")\n\n\t# Checks if the request is a json\n\tif not request.is_json:\n\t\treturn bad_request(\"Missing JSON in request\")\n\n\t# Checks if any of the input is illegal\n\tif not validator(request.json, schema):\n\t\treturn bad_request(validator.errors)\n\n\t# Checks if the reader exists in the database\n\treader = Reader.query.filter_by(email=email).first()\n\tif not reader:\n\t\treturn bad_request(\"Reader does not exist!\")\n\n\tgives_access = db.session.query(gives_access_to, BelongsTo).filter(\n\t\tgives_access_to.c.ag_id == BelongsTo.ag_id,\n\t\tBelongsTo.ag_id == ag_id,\n\t\tBelongsTo.reader_id == reader.id\n\t).all()\n\n\tif not gives_access:\n\t\treturn bad_request(\"The reader does not have access to this access group\")\n\n\tBelongsTo.query.filter_by(reader_id=reader.id, ag_id=ag_id).delete()\n\n\tdb.session.commit()\n\treturn ok(\"Access to {0} has been removed for {1}\".format(ag_id, email))", "def fuzzy(self, tag, threshold=80):\n if isinstance(tag, string_types):\n tags = [tag]\n else:\n tags = tag\n\n matches = []\n\n for tag in tags:\n matches += [i[0] for i in process.extract(tag, self, limit=None)\n if i[1] > threshold]\n\n return tuple(matches)", "def TagFluorescenceAnalysis(tag_folder, tag_round, Roundness_threshold):\r\n tagprotein_cell_properties_dict = {}\r\n RoundNumberList, CoordinatesList, fileNameList = ProcessImage.retrive_scanning_scheme(tag_folder)\r\n\r\n for EachRound in RoundNumberList:\r\n \r\n if EachRound == tag_round:\r\n \r\n for EachCoord in CoordinatesList:\r\n \r\n # =============================================================================\r\n # For tag fluorescence:\r\n # ============================================================================= \r\n print(EachCoord)\r\n #-------------- readin image---------------\r\n for Eachfilename in enumerate(fileNameList):\r\n if EachCoord in Eachfilename[1] and EachRound in Eachfilename[1]: \r\n tag_imagefilename = os.path.join(tag_folder, Eachfilename[1])\r\n# print(tag_imagefilename)\r\n loaded_tag_image = imread(tag_imagefilename, as_gray=True)\r\n #------------------------------------------\r\n \r\n RegionProposalMask, RegionProposalOriginalImage = 
ProcessImage.generate_mask(loaded_tag_image, openingfactor=2, \r\n closingfactor=3, binary_adaptive_block_size=335)#256(151) 500(335)\r\n \r\n TagFluorescenceLookupBook = ProcessImage.Region_Proposal(loaded_tag_image, RegionProposalMask, smallest_size=800, biggest_size=3500, Roundness_thres = Roundness_threshold,\r\n DeadPixelPercentageThreshold = 0.14, lowest_region_intensity=0.16, contour_thres=0.001, contour_dilationparameter=11,\r\n cell_region_opening_factor=1, cell_region_closing_factor=2)\r\n \r\n tagprotein_cell_properties_dict['{}_{}'.format(EachRound, EachCoord)] = TagFluorescenceLookupBook\r\n \r\n # for Eachpos in tagprotein_cell_properties_dict:\r\n # cellnum = len(tagprotein_cell_properties_dict[Eachpos])\r\n # for eachcell in range(cellnum):\r\n # if str(tagprotein_cell_properties_dict[Eachpos][eachcell]['Mean intensity']) != 'nan':\r\n # tag_cell_mean_intensity.append(tagprotein_cell_properties_dict[Eachpos][eachcell]['Mean intensity'])\r\n # trace_back.append('{}_{}'.format(Eachpos, eachcell))\r\n \r\n return tagprotein_cell_properties_dict", "def add_artificial_cover(self, name, c_type, c_tag, extends=''):\n self.artificial[c_tag] = name\n self._codegens[name] = wrapper = Cover(\n name=name,\n from_=c_type,\n extends=extends,\n )", "def map_bb(*args, **kwargs):\n return _digital_swig.map_bb(*args, **kwargs)", "def map_tag(self, map_tag):\n\n self._map_tag = map_tag", "def timetagBasicCalibration(input, inpha, outtag,\n output, outcounts, outflash, outcsum,\n cl_args,\n info, switches, reffiles,\n wavecal_info):\n input_path = os.path.dirname(input)\n if info[\"obsmode\"] == \"TIME-TAG\":\n cosutil.printIntro(\"TIME-TAG calibration\")\n names = [(\"Input\", input),\n (\"OutTag\", outtag),\n (\"OutFlt\", output),\n (\"OutCounts\", outcounts)]\n if outflash is not None:\n names.append((\"OutFlash\", outflash))\n if outcsum is not None:\n names.append((\"OutCsum\", outcsum))\n cosutil.printFilenames(names,\n shift_file=cl_args[\"shift_file\"],\n stimfile=cl_args[\"stimfile\"],\n livetimefile=cl_args[\"livetimefile\"])\n cosutil.printMode(info)\n\n # Copy data from the input file to the output. Then open the output\n # file read/write.\n if info[\"obsmode\"] == \"TIME-TAG\":\n nrows = cosutil.writeOutputEvents(input, outtag)\n ofd = fits.open(outtag, mode=\"update\")\n if ofd[\"EVENTS\"].data is None:\n nrows = 0\n else:\n nrows = len(ofd[\"EVENTS\"].data)\n\n # events_hdu is a complete astropy.io.fits HDU object (i.e., header plus data),\n # while events (assigned below) is just the data, a recarray object.\n events_hdu = ofd[\"EVENTS\"]\n\n if nrows > 0 and info[\"obsmode\"] == \"TIME-TAG\":\n # Change orig_exptime to be the range of times in the TIME column.\n time64 = events_hdu.data.field(\"TIME\").astype(np.float64)\n if time64[-1] > time64[0]:\n info[\"orig_exptime\"] = time64[-1] - time64[0]\n del time64\n\n # Get a copy of the primary header. 
This copy will be modified and\n # written to the output image files.\n phdr = ofd[0].header\n # This list also includes the primary header, but we'll ignore this\n # copy of the primary header.\n if info[\"obsmode\"] == \"ACCUM\" and not info[\"corrtag_input\"]:\n headers = cosutil.getHeaders(input)\n # replace the first extension header so the headers of the\n # pseudo-corrtag table will be updated\n headers[1] = events_hdu.header\n else:\n headers = mkHeaders(phdr, events_hdu.header)\n\n # Update the switches and reference file names, so the output header\n # will reflect what was actually used.\n cosutil.overrideKeywords(phdr, headers[1], info, switches, reffiles)\n\n # Update keywords for FUV high voltage.\n if info[\"detector\"] == \"FUV\":\n updateHVKeywords(headers[1], info, reffiles)\n\n if nrows == 0:\n writeNull(input, ofd, output, outcounts, outcsum,\n cl_args, info, phdr, headers)\n ofd.close()\n return 1\n\n setCorrColNames(info[\"detector\"])\n\n events = events_hdu.data\n\n # For corrtag input, reinitialize the DQ column if dqicorr is perform.\n if info[\"corrtag_input\"] and switches[\"dqicorr\"] == \"PERFORM\":\n events.field(\"dq\")[:] = 0.\n\n # Set active_area, but note that this is preliminary because we haven't\n # done tempcorr or geocorr yet.\n setActiveArea(events, info, reffiles[\"brftab\"])\n\n doPhotcorr(info, switches, reffiles[\"imphttab\"], phdr, headers[1])\n\n badt = doBadtcorr(events, info, switches, reffiles, phdr)\n\n doRandcorr(events, info, switches, reffiles, phdr)\n\n (stim_param, stim_countrate, stim_livetime) = initTempcorr(events,\n input, info, switches, reffiles, headers[1],\n cl_args[\"stimfile\"])\n\n doTempcorr(stim_param, events, info, switches, reffiles, phdr)\n\n doGeocorr(events, info, switches, reffiles, phdr)\n\n doDgeocorr(events, info, switches, reffiles, phdr)\n\n # Set active_area based on (xcorr, ycorr) coordinates.\n setActiveArea(events, info, reffiles[\"brftab\"])\n\n #\n # The X and Y walk correction need to be independent, and applied to the\n # same xcorr/pha values\n if doWalkCorr(switches):\n\n xcorrection = doXWalkcorr(events, info, switches, reffiles, phdr)\n ycorrection = doYWalkcorr(events, info, switches, reffiles, phdr)\n applyWalkCorrection(events, xcorrection, ycorrection)\n\n updateGlobrate(info, headers[1])\n\n # Copy columns to xdopp, xfull, yfull so we'll have default values.\n if not info[\"corrtag_input\"]:\n copyColumns(events)\n\n initHelcorr(events, info, headers[1])\n\n doDeadcorr(events, input, info, switches, reffiles, phdr, headers[1],\n stim_countrate, stim_livetime, cl_args[\"livetimefile\"])\n\n # Write the calcos sum image.\n if info[\"obsmode\"] == \"TIME-TAG\":\n bursts = None\n (modified, gti) = recomputeExptime(input, bursts, badt, events,\n headers[1], info)\n if info[\"detector\"] == \"FUV\": # update keywords EXPTIME[AB]\n key = cosutil.segmentSpecificKeyword(\"exptime\", info[\"segment\"])\n headers[1][key] = info[\"exptime\"]\n if outcsum is not None:\n writeCsum(outcsum, events,\n info[\"detector\"], info[\"obsmode\"],\n phdr, headers[1],\n cl_args[\"raw_csum_coords\"],\n cl_args[\"binx\"], cl_args[\"biny\"],\n cl_args[\"compress_csum\"],\n cl_args[\"compression_parameters\"])\n\n doPhacorr(inpha, events, info, switches, reffiles, phdr, headers[1])\n\n doDoppcorr(events, info, switches, reffiles, phdr)\n\n if not (info[\"aperture\"] in APERTURE_NAMES or\n info[\"targname\"] == \"DARK\" and\n info[\"aperture\"] in OTHER_APERTURE_NAMES):\n ofd.close()\n raise BadApertureError(\"APERTURE = 
%s is not a valid aperture name.\" %\n info[\"aperture\"])\n\n if outcsum is not None and cl_args[\"only_csum\"]:\n return 0 # don't write flt and counts\n\n doFlatcorr(events, info, switches, reffiles, phdr, headers[1])\n\n phdr[\"wavecals\"] = \"\" # initial value\n if info[\"tagflash\"]:\n cosutil.printSwitch(\"WAVECORR\", switches)\n if switches[\"wavecorr\"] == \"PERFORM\":\n if info[\"tagflash\"]:\n (tl_time, shift1_vs_time, wavecorr) = \\\n concurrent.processConcurrentWavecal(events, \\\n outflash, cl_args[\"shift_file\"],\n info, switches, reffiles, phdr, headers[1])\n if wavecorr == \"COMPLETE\":\n filename = os.path.basename(input)\n if cl_args[\"shift_file\"] is not None:\n filename = filename + \" \" + cl_args[\"shift_file\"]\n phdr[\"wavecals\"] = filename\n else:\n # Value to assign to keyword in phdr (updateFromWavecal does\n # this), but this value can be overridden by noWavecal.\n wavecorr = \"COMPLETE\"\n if not wavecal_info:\n # The exposure is not tagflash and there's no auto/GO\n # wavecal, so create wavecal info with a default shift1,\n # or possibly with a value specified by the user.\n (wavecal_info, wavecorr) = noWavecal(input,\n cl_args[\"shift_file\"],\n info, switches, reffiles)\n # LP6 FUV data has no tagflash, so wavecal is done using wavecal\n # exposures before and after each science exposure. Long exposures\n # (>900s) need a simulated wavecal inserted 600s after the beginning of\n # the preceding waecal to model the non-linear\n # behaviour of shift vs time\n (tl_time, shift1_vs_time) = \\\n updateFromWavecal(events, wavecal_info, wavecorr,\n cl_args[\"shift_file\"],\n info, switches, reffiles, input_path, phdr, headers[1])\n # Compute wavelengths for the wavelength column (except for wavecals).\n if info[\"obstype\"] == \"SPECTROSCOPIC\" and \\\n info[\"exptype\"].find(\"WAVE\") == -1:\n computeWavelengths(events, info, reffiles,\n helcorr=switches[\"helcorr\"], hdr=None)\n else:\n time = cosutil.getColCopy(data=events, column=\"time\")\n tl_time = cosutil.timelineTimes(time[0], time[-1], dt=1.)\n shift1_vs_time = None\n del time\n\n if info[\"obsmode\"] == \"TIME-TAG\":\n bursts = doBurstcorr(events, info, switches, reffiles, phdr,\n cl_args[\"burstfile\"])\n (modified, gti) = recomputeExptime(input, bursts, badt, events,\n headers[1], info)\n if modified:\n saveNewGTI(ofd, gti)\n countBadEvents(events, bursts, badt, info, headers[1])\n\n if info[\"detector\"] == \"FUV\": # update keyword EXPTIMEA or EXPTIMEB\n key = cosutil.segmentSpecificKeyword(\"exptime\", info[\"segment\"])\n headers[1][key] = info[\"exptime\"]\n minmax_shift_dict = getWavecalOffsets(events, info, switches[\"wavecorr\"],\n reffiles[\"xtractab\"],\n reffiles[\"brftab\"])\n tracemask = createTraceMask(events, info, switches,\n reffiles['xtractab'], active_area)\n\n traceprofile = doTraceCorr(events, info, switches, reffiles, phdr,\n tracemask)\n\n #\n # Make sure we have a gti variable, and make one if we don't. 
None is OK, it will be\n # detected and filled in later if necessary\n try:\n temp_gti = gti\n except NameError:\n gti = None\n\n align = doProfileAlignmentCorr(events, input, info, switches, reffiles,\n phdr, headers[1], minmax_shift_dict,\n tracemask, traceprofile, gti)\n\n dq_array = doDqicorr(events, input, info, switches, reffiles,\n phdr, headers[1], minmax_shift_dict,\n traceprofile, gti)\n\n\n writeImages(events.field(xfull), events.field(yfull),\n events.field(\"epsilon\"), events.field(\"dq\"),\n phdr, headers,\n dq_array, info[\"npix\"], info[\"x_offset\"], info[\"exptime\"],\n outcounts, output)\n\n doStatflag(switches, output, outcounts)\n\n # Create or update a TIMELINE extension.\n timeline.createTimeline(input, ofd, info, reffiles,\n tl_time, shift1_vs_time,\n events.field(\"TIME\").astype(np.float64),\n events.field(xfull), events.field(yfull))\n\n ofd.close()\n\n return 0 # 0 is OK", "def generate_automated_labels_birdnet(audio_dir, isolation_parameters):\n annotations = analyze(audio_path=audio_dir, **isolation_parameters)\n return annotations" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_costas_loop_cc_sptr __init__(self, p) > digital_costas_loop_cc_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_costas_loop_cc_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, callback = None, userdata = None):\n this = _coin.new_SbClip(callback, userdata)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _coin.new_SbDPLine(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _coin.new_SbCylinder(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, args, phase):\n self.args = args\n self.phase = phase", "def __init__(self):\n this = _coin.new_SoClipPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _coin.new_SbCylinderPlaneProjector(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
costas_loop_cc(float loop_bw, int order) > digital_costas_loop_cc_sptr Carrier tracking PLL for QPSK
def costas_loop_cc(*args, **kwargs): return _digital_swig.costas_loop_cc(*args, **kwargs)
[ "def set_loop_bandwidth(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_set_loop_bandwidth(self, *args, **kwargs)", "def set_loop_bandwidth(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_loop_bandwidth(self, *args, **kwargs)", "def get_loop_bandwidth(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_loop_bandwidth(self)", "def main(ac,flow,lin):\n\n wing = ac.wing;\n htail = ac.htail;\n vtail = ac.vtail;\n fus = ac.fus;\n prop = ac.prop;\n # Numerical parameters initialisation\n tol = 1e-3; # Tolerance for the a.o.a. correction loop\n nIterMAX = 25; # Maximum number of iteration in the correction loop before introducing the numerical damping\n tolWarning = 2e-3; # Tolerance above which a warning is send to the promp because of lack of accuracy\n tolStop = 5e-2; # Tolerance above which the software is stopped because of lack of accuracy\n\n # Flight conditions\n Alphas = flow.getAlphas() * m.pi/180;\n Beta = flow.getBeta();\n V0 = flow.getV0();\n # Geometric characteristics\n sweepC4 = wing.getSweepC4();\n for i in range(len(sweepC4)/2):\n sweepC4[i] *= -1;\n dih = wing.getDih();\n tp = wing.getTwist();\n \n \n x = wing.getXP();\n y = wing.getYP();\n z = wing.getZP();\n c = wing.getChord();\n \n dA = 0.5*(wing.y[1:]-wing.y[:-1])*(wing.chordDistrib[1:]+wing.chordDistrib[:-1]);\n S = wing.getS();\n MAC = wing.getMac();\n \n yFus = fus.getYF();\n \n index = np.array(range(wing.getR()));\n indexV = [];\n \n [wclPanel, wcdPanel, wcmPanel, wcdaPanel,alphal0PanelVLM,cl_alphaPanel] = p.panelPolar(wing);\n # Induced velocity by the engine\n # tangencial increases the local speed and then the local lift\n # contribution \n vix = prop.vix;\n viy = prop.viy;\n viz = prop.viz;\n if prop.bool:\n dyp = prop.yp - ac.rp[1];\n dzp = prop.zp - ac.rp[2];\n else:\n dyp = 0;\n dzp = 0;\n prop.Tc = 0;\n \n # Aerodynamic contribution of fuselage\n L_F = np.zeros(len(Alphas),dtype = float);\n Y_F = 0.;\n N_F = 0.;\n D_F = np.zeros(len(Alphas),dtype = float);\n M_F = np.zeros(len(Alphas),dtype = float);\n \n if fus.bool:\n for i in range(wing.getR()):\n if (y[i] > yFus[0] and y[i] < yFus[1]):\n index[i] = -1;\n index = index[index >= 0];\n L_F= np.interp(flow.getAlphas(), u.rangeF(flow.aMin-20.,flow.aMax+20.,0.1), fus.getL());\n D_F= np.interp(flow.getAlphas(), u.rangeF(flow.aMin-20.,flow.aMax+20.,0.1), fus.getD());\n M_F= np.interp(flow.getAlphas(), u.rangeF(flow.aMin-20.,flow.aMax+20.,0.1), fus.getM());\n N_F = fus.getN();\n Y_F = fus.getY();\n \n if htail.bool:\n yTail = htail.getY();\n cTail = htail.getChordDist();\n index = np.concatenate([index,np.array(range(index[-1]+1,index[-1]+1+htail.getR()))]);\n dA = np.concatenate([dA , 0.5 * (cTail[1:] + cTail[:-1]) * (yTail[1:] - yTail[:-1])]);\n tp = np.concatenate([tp,htail.getTwist()]);\n dih = np.concatenate([dih,htail.getDih()]);\n sweepC4T = htail.getSweepC4();\n for i in range(htail.getR()/2):\n sweepC4T[i] *= -1;\n sweepC4 = np.concatenate([sweepC4,sweepC4T]);\n c = np.concatenate([c,htail.getChord()]);\n x = np.concatenate([x,htail.getXP()]);\n y = np.concatenate([y,htail.getYP()]);\n z = np.concatenate([z,htail.getZP()]);\n \n [wclPanelT, wcdPanelT, wcmPanelT, wcdaPanelT,alphal0PanelVLMT,cl_alphaPanelT] = p.panelPolar(htail);\n wclPanel = np.concatenate([wclPanel,wclPanelT],0);\n wcdPanel = np.concatenate([wcdPanel,wcdPanelT],0);\n wcmPanel = np.concatenate([wcmPanel,wcmPanelT],0);\n wcdaPanel = np.concatenate([wcdaPanel,wcdaPanelT],0);\n alphal0PanelVLM = 
np.concatenate([alphal0PanelVLM,alphal0PanelVLMT]);\n cl_alphaPanel= np.concatenate([cl_alphaPanel,cl_alphaPanelT]);\n \n if vtail.bool:\n CDV,CYV,CMV,ClV,CnV,CDiV,CD0V,Alphas = AeroPredVTail(vtail,flow,lin,S,MAC,ac.rp);\n # Construction of the A matrix\n \n [A,normal] = VLM.ICMatrix(ac,cl_alphaPanel);\n invA = np.linalg.inv(A);\n nbPan = len(y);\n nbA = len(Alphas);\n \n betaEff = sweepC4-Beta;\n Af = np.transpose(np.matlib.repmat(Alphas,nbPan,1));\n Af_eff = Af;\n \n twist_eff = tp;\n Af_twist = np.transpose(np.matlib.repmat(Alphas,nbPan,1)) + twist_eff;\n \n A0 = np.transpose(np.matlib.repmat(Alphas,nbPan,1)) - alphal0PanelVLM;\n \n # Physical flow velocities triangle\n vix0 = V0 * np.cos(betaEff) * np.cos(Af_eff);\n viy0 = V0 * np.sin(betaEff);\n viz0 = V0 * np.cos(betaEff) * np.sin(Af_eff);\n # Physical flow velocities triangle in the pannel frame\n vixT = V0 * np.cos(betaEff) * np.cos(Af_twist);\n viyT = viy0;\n vizT = V0 * np.cos(betaEff) * np.sin(Af_twist);\n # Flow velocities taken into account the zero-lift angle\n vixA0 = V0 * np.cos(betaEff) * np.cos(A0);\n vizA0 = V0 * np.cos(betaEff) * np.sin(A0);\n \n # Initialisation and beginning of the computation\n deltaAlpha = np.zeros(nbPan);\n # Intialization of data containers\n al_i = np.zeros([nbPan,nbA],dtype = float);\n CL = np.zeros(nbA,dtype = float);\n CY = np.zeros(nbA,dtype = float);\n CM = np.zeros(nbA,dtype = float);\n Cl = np.zeros(nbA,dtype = float);\n Cn = np.zeros(nbA,dtype = float);\n CDi = np.zeros(nbA,dtype = float);\n CD0 = np.zeros(nbA,dtype = float);\n \n #dV = np.zeros([3,nbPan],dtype = float);\n lonDist = np.sqrt((x-ac.rp[0])**2 + (z-ac.rp[2])**2);\n angle = np.arctan2(z-ac.rp[2],x-ac.rp[0])\n yDist = y - ac.rp[1];\n if lin:\n for ii in range(nbA):\n vin = viz+ viz0[ii,:];\n speedFactor = ((vix+vix0[ii])**2+vin**2)/(V0**2);\n vNref = np.concatenate([vizT[ii,index]+viz[index], viyT[indexV]+viy[indexV]]);\n vTref = np.concatenate([vixT[ii] + vix]);\n alphaRef = np.arctan2(vNref,vTref);\n a0z = viz + vizA0[ii];\n a0x = vix + vixA0[ii];\n a0 = np.arctan2(a0z,a0x);\n AOA = a0 + deltaAlpha - alphal0PanelVLM;\n ccl = VLM_Solve(invA, AOA ,normal , nbPan);\n clw = ccl/(c*np.cos(sweepC4));\n al_e = clw/(cl_alphaPanel);\n alphaLoc = alphaRef - (AOA - al_e);\n [clVisc,cd,cm,cda] = p.AeroCoef(wclPanel,wcdPanel,wcmPanel,wcdaPanel,alphaLoc);\n al_i[:,ii] = Alphas[ii] - alphaLoc + tp;\n dF = clw * dA * speedFactor;\n dCDi = np.sin(al_i[index,ii]) * dF[index];\n dCL = dF[index]* np.cos(al_i[index,ii])*np.cos(dih[index])**2 - cd[index] * dA[index]*speedFactor[index] * np.sin(al_i[index,ii]);\n dCD0 = cd[index] * dA[index]*speedFactor[index] * np.cos(al_i[index,ii]);\n dCM = - ( dCL * lonDist[index] * np.cos(angle[index]-Alphas[ii])) + \\\n (dCDi + dCD0) * lonDist[index] * np.sin(angle[index] - Alphas[ii]) + \\\n cm[index] * dA[index] * c[index] * speedFactor[index];\n Cl[ii] = -np.sum(dCL*yDist[index]) / (S*wing.getMac()) +ClV[ii];\n Cn[ii] = (np.sum((dCD0+dCDi)*yDist[index]) + N_F) / (S*wing.getMac())\\\n - np.sum(dyp*prop.Tc/wing.getMac()) + CnV[ii];\n CY[ii] = Y_F/S + CYV[ii];\n CL[ii] = (np.sum(dCL)+L_F[ii])/S + np.sum(prop.Tc) * m.sin(Alphas[ii]) ;\n CDi[ii] = np.sum(dCDi)/S + CDiV[ii];\n CD0[ii] = CD0V[ii] + (np.sum(dCD0)+D_F[ii])/S - np.sum(prop.Tc) * m.cos(Alphas[ii]);\n CM[ii] = CMV[ii] + (np.sum(dCM) + M_F[ii])/(S*wing.getMac()) - np.sum(dzp*prop.Tc/wing.getMac());\n al_i[:,ii] = 180./m.pi*(al_i[:,ii]);\n CD = CD0+CDi;\n Alphas = Alphas*180./m.pi;\n return CL,CD,CY,CM,Cl,Cn,CDi,CD0,Alphas\n else:\n for ii in 
range(nbA):\n K = np.ones(nbPan,dtype = float); # if possible no numerical damping to fasten the computation\n vin = viz+ viz0[ii,:];\n speedFactor = ((vix+vix0[ii])**2+vin**2)/(V0**2);\n vNref = np.concatenate([vizT[ii,index]+viz[index], viyT[indexV]+viy[indexV]]);\n vTref = np.concatenate([vixT[ii] + vix]);\n alphaRef = np.arctan2(vNref,vTref);\n a0z = viz + vizA0[ii];\n a0x = vix + vixA0[ii];\n a0 = np.arctan2(a0z,a0x);\n AOA = a0 + deltaAlpha - alphal0PanelVLM;\n ccl = VLM_Solve(invA, AOA ,normal , nbPan);\n clw = ccl/(c*np.cos(sweepC4));\n al_e = clw/(cl_alphaPanel);\n alphaLoc = alphaRef - (AOA - al_e);\n [clVisc,cd,cm,cda] = p.AeroCoef(wclPanel,wcdPanel,wcmPanel,wcdaPanel,alphaLoc);\n deltaAlpha = (clVisc-clw)/(cl_alphaPanel*K); # apparent inviscid difference of a.o.a. such that the inviscid lift at AOA+deltaAlphas\n # is the viscous lift at alphaLoc\n nIter = 0;\n nCorrection = 0;\n # Begining of the correction loop\n while np.amax(np.absolute(deltaAlpha))*2*m.pi > tol or np.any(np.isnan(deltaAlpha)):\n nIter = nIter +1;\n # If convergence too tough : introduction of numerical damping\n if nIter == nIterMAX or np.any(np.isnan(deltaAlpha)):\n tol = tol*2;\n nIter = 0;\n deltaAlpha = np.zeros(nbPan,dtype = float);\n K = 2*K;\n if tol > tolStop:\n tol = 1e-3;\n nCorrection = nCorrection +1;\n if nCorrection == 1:\n nIterMAX = 2*nIterMAX;\n K = 2*K;\n if ii == nbA-1:\n Alphas[ii] = (Alphas[ii]+Alphas[ii-1])/2;\n else:\n Alphas[ii] = (Alphas[ii]+Alphas[ii+1])/2;\n #vNref = np.concatenate([vizT[ii,index]+viz[index], viyT[indexV]+viy[indexV]]);\n #vTref = np.concatenate([vixT[ii] + vix]);\n #alphaRef = np.arctan2(vNref,vTref);\n elif nCorrection == 2:\n K = np.ones(nbPan,dtype = float);\n for kk in range(nbPan):\n indice = 0;\n for idi in range(ii):\n if np.isnan(al_i[kk,idi]):\n pass;\n else:\n indice = idi;\n alpha = (Alphas[indice]+ tp[kk])*180./m.pi-al_i[kk,indice];\n cl_alphaPanel[kk] = 180./m.pi*((np.polyval(np.polyder(wclPanel[kk,:]),alpha-0.2))+np.polyval(np.polyder(wclPanel[kk,:]),alpha-0.2))/(0.4);\n cl = np.polyval(wclPanel[kk,:],alpha);\n alphal0PanelVLM[kk] = -cl/cl_alphaPanel[kk] +alpha*m.pi/180;\n while np.absolute(cl_alphaPanel[kk]) <4:\n alpha = alpha+0.5;\n cl_alphaPanel[kk] = 180./m.pi*((np.polyval(np.polyder(wclPanel[kk,:]),alpha-0.2))+np.polyval(np.polyder(wclPanel[kk,:]),alpha-0.2))/(0.4);\n cl = np.polyval(wclPanel[kk,:],alpha);\n alphal0PanelVLM[kk] = -cl/cl_alphaPanel[kk] +alpha*m.pi/180;\n [A,normal] = VLM.ICMatrix(ac,cl_alphaPanel);\n invA = np.linalg.inv(A);\n elif nCorrection == 3:\n break;\n AOA += deltaAlpha;\n ccl = VLM_Solve(invA, AOA,normal, nbPan);\n clw = ccl/(c*np.cos(sweepC4));\n al_e = clw/(cl_alphaPanel);\n\n alphaLoc = alphaRef - (AOA - al_e);\n [clVisc,cd,cm,cda] = p.AeroCoef(wclPanel,wcdPanel,wcmPanel,wcdaPanel,alphaLoc);\n deltaAlpha = (clVisc-clw)/(cl_alphaPanel*K); # recovering lift and drag coeff\n al_i[:,ii] = Alphas[ii] - alphaLoc + tp;\n # Warning for user in case of bad coputation point\n if np.amax(np.absolute(deltaAlpha)) >= tolStop:\n print('Attention, the tolerance on cl for alpha = '+str(180./m.pi*Alphas[ii]-wing.getiW())+' has raised up to ',str(np.amax(np.absolute(deltaAlpha)*cl_alphaPanel)));\n print( 'Because of the too low precison, the calcul is stopped here');\n Alphas = 180./m.pi*(Alphas[:ii]);\n CL = CL[:ii];\n CDi = CDi[:ii];\n CD0 = CD0[:ii];\n CY = CY[:ii];\n CM = CM[:ii];\n Cl = Cl[:ii];\n Cn = Cn[:ii];\n CD = CDi + CD0;\n return CL,CD,CY,CM,Cl,Cn,CDi,CD0,Alphas;\n elif np.amax(abs(deltaAlpha)) >= tolWarning:\n 
print('Attention, the tolerance on cl for alpha = '+str(180./m.pi*Alphas[ii])+' has raised up to '+str(np.amax(np.absolute(deltaAlpha)*cl_alphaPanel)));\n nIterMAX = 100;\n tol = 1e-3; \n \n dF = clw * dA * speedFactor;\n dCDi = np.sin(al_i[index,ii]) * dF[index];\n dCL = dF[index]* np.cos(al_i[index,ii])*np.cos(dih[index])**2 - cd[index] * dA[index]*speedFactor[index] * np.sin(al_i[index,ii]);\n dCD0 = cd[index] * dA[index]*speedFactor[index] * np.cos(al_i[index,ii]);\n dCM = - ( dCL * lonDist[index] * np.cos(angle[index]-Alphas[ii])) + \\\n (dCDi + dCD0) * lonDist[index] * np.sin(angle[index] - Alphas[ii]) + \\\n cm[index] * dA[index] * c[index] * speedFactor[index];\n Cl[ii] = -np.sum(dCL*yDist[index]) / (S*wing.getMac()) +ClV[ii];\n Cn[ii] = (np.sum((dCD0+dCDi)*yDist[index]) + N_F) / (S*wing.getMac())\\\n - np.sum(dyp*prop.Tc/wing.getMac()) + CnV[ii];\n CY[ii] = Y_F/S + CYV[ii];\n CL[ii] = (np.sum(dCL)+L_F[ii])/S + np.sum(prop.Tc) * m.sin(Alphas[ii]) ;\n CDi[ii] = np.sum(dCDi)/S + CDiV[ii];\n CD0[ii] = CD0V[ii] + (np.sum(dCD0)+D_F[ii])/S - np.sum(prop.Tc) * m.cos(Alphas[ii]);\n CM[ii] = CMV[ii] + (np.sum(dCM) + M_F[ii])/(S*wing.getMac()) - np.sum(dzp*prop.Tc/wing.getMac());\n al_i[:,ii] = 180./m.pi*(al_i[:,ii]);\n \n CD = CDi+CD0;\n Alphas=180./m.pi*(Alphas);\n return CL,CD,CY,CM,Cl,Cn,CDi,CD0,Alphas;", "def coil_combine_cmrr_sequential(chain):\n block = chain._block\n set = chain._block.set\n dataset = chain._dataset\n raw = chain.raw\n\n ncoils = raw.shape[1]\n nfids = raw.shape[2]\n dim0 = raw.shape[3]\n acqdim0 = dim0\n xaxis = range(dim0)\n\n flag_norm_to_sum = False # default for now\n\n dat_comb = np.ndarray([nfids,dim0], dtype=np.complex128)\n\n all_weight = np.ndarray([nfids,ncoils], dtype=np.float)\n all_phases = np.ndarray([nfids,ncoils], dtype=np.complex)\n\n for i in range(nfids):\n\n # determine weighting and phz for each coil\n # zero-order phase correction\n # correct for phase based on 1st point in 1st wref fid\n\n # for each average, calc phase and weights to correct for coil geometry\n chans = []\n weight = []\n phases = []\n \n for j in range(ncoils):\n chan = chain.raw[0,j,i,:].copy()\n \n magn = np.abs(chan[0])\n phas = np.conjugate(chan[0])/magn # normalized complex conj to cancel phase \n chan = phas * chan # Note. applying phase here NOT below as in Siemens\n \n # amplitude of zero order phased fid in time domain\n # using 9th order polynomial fit (based on Uzay's script)\n coeffs = np.polyfit(xaxis, np.absolute(chan), 9)\n \n weight.append(coeffs[-1]) # last entry is amplitude - zero order coeff\n phases.append(phas)\n chans.append(chan)\n \n # normalize weighting function based on spectro data \n tmp = np.sum([val*val for val in weight]) # sum squared values \n if tmp == 0.0: tmp = 1.0\n if flag_norm_to_sum:\n # sum of sensitivities\n lamda = np.sum(weight) / tmp \n else:\n # sqrt of sum of squared sensitivities\n lamda = 1.0 / np.sqrt(tmp)\n\n weight = [val*lamda for val in weight]\n\n all_weight[i,:] = weight\n all_phases[i,:] = phases\n \n # apply weighting ... 
phase corrections done above\n for j,chan in enumerate(chans):\n chans[j] = chan * weight[j]\n \n # sum corrected FIDs from each coil into one combined FID\n dat_comb[i,:] = np.sum(chans, axis=0) \n\n print_combine_stats(all_weight, all_phases, method='CMRR_Sequential')\n \n return normalize_shape(dat_comb), all_weight, all_phases", "def _prepare_ligand_BC(self):\n if self.data['BC'].protocol == []:\n\n # Set up the force field\n params_o = self.system.paramsFromAlpha(1.0, 'BC', site=False)\n self.system.setParams(params_o)\n\n # Get starting configurations\n basename = os.path.basename(self.args.FNs['score'])\n basename = basename[:basename.find('.')]\n dirname = os.path.dirname(self.args.FNs['score'])\n minimizedB_FN = os.path.join(dirname, basename + '_minB.nc')\n if os.path.isfile(minimizedB_FN):\n from netCDF4 import Dataset\n dock6_nc = Dataset(minimizedB_FN, 'r')\n minimizedConfigurations = [\n dock6_nc.variables['confs'][n][self.top.inv_prmtop_atom_order_L, :]\n for n in range(dock6_nc.variables['confs'].shape[0])\n ]\n Es = dict([(key, dock6_nc.variables[key][:])\n for key in dock6_nc.variables.keys() if key != 'confs'])\n dock6_nc.close()\n else:\n (minimizedConfigurations, Es) = self._get_confs_to_rescore(site=False, minimize=True)\n\n from netCDF4 import Dataset\n dock6_nc = Dataset(minimizedB_FN, 'w')\n dock6_nc.createDimension('n_confs', len(minimizedConfigurations))\n dock6_nc.createDimension('n_atoms', minimizedConfigurations[0].shape[0])\n dock6_nc.createDimension('n_cartesian', 3)\n dock6_nc.createDimension('one', 1)\n dock6_nc.createVariable('confs', 'f8', ('n_confs', 'n_atoms', 'n_cartesian'))\n for n in range(len(minimizedConfigurations)):\n dock6_nc.variables['confs'][n] = minimizedConfigurations[n][self.top.prmtop_atom_order_L, :]\n for key in Es.keys():\n dock6_nc.createVariable(key, 'f8', ('one', 'n_confs'))\n dock6_nc.variables[key][:] = Es[key]\n dock6_nc.close()\n\n # initializes smart darting for BC\n # and sets the universe to the lowest energy configuration\n self.iterator.initializeSmartDartingConfigurations(\n minimizedConfigurations, 'BC', self.log, self.data)\n if len(minimizedConfigurations) > 0:\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, minimizedConfigurations[-1]))\n self.data['BC'].confs['starting_poses'] = minimizedConfigurations\n\n # Ramp the temperature from 0 to the desired starting temperature using HMC\n self._ramp_T(params_o['T'], normalize=True)\n\n # Run at starting temperature\n seeds = [np.copy(self.top.universe.configuration().array) \\\n for n in range(self.args.params['BC']['seeds_per_state'])]\n else:\n seeds = None\n return seeds", "def LamC2pKK ( self ) : \n from GaudiConfUtils.ConfigurableGenerators import DaVinci__N3BodyDecays\n #\n return self.make_selection (\n 'LambdaCpKK' ,\n DaVinci__N3BodyDecays ,\n ## inputs \n [ self.protons() , self.kaons() ] ,\n ##\n DecayDescriptor = \" [ Lambda_c+ -> p+ K- K+ ]cc\" ,\n ##\n Combination12Cut = \"\"\"\n ( AM < 2.5 * GeV ) &\n ( ACHI2DOCA(1,2) < 16 ) \n \"\"\" ,\n ## \n CombinationCut = \"\"\"\n ( ( ADAMASS ( 'Lambda_c+' ) < 65 * MeV ) \n | ( ADAMASS ( 'Xi_c+' ) < 65 * MeV ) ) &\n ( APT > %s ) & \n ( ACHI2DOCA(1,3) < 16 ) &\n ( ACHI2DOCA(2,2) < 16 ) \n \"\"\" % ( 0.95 * self[ 'pT(Lc+)' ] ) ,\n ##\n MotherCut = \"\"\"\n ( chi2vx < 25 ) &\n ( PT > %s ) &\n ( ( ADMASS ( 'Lambda_c+' ) < 55 * MeV ) \n | ( ADMASS ( 'Xi_c+' ) < 55 * MeV ) ) &\n ( ctau > 100 * micrometer ) \n \"\"\" % self [ 'pT(Lc+)']\n )", "def pilot_pll(xr, fq, fs, loop_type, bn, zeta):\n T = 1 / 
float(fs)\n # Set the VCO gain in Hz/V \n Kv = 1.0\n # Design a lowpass filter to remove the double freq term\n Norder = 5\n b_lp, a_lp = signal.butter(Norder, 2 * (fq / 2.) / float(fs))\n fstate = np.zeros(Norder) # LPF state vector\n\n Kv = 2 * np.pi * Kv # convert Kv in Hz/v to rad/s/v\n\n if loop_type == 1:\n # First-order loop parameters\n fn = bn\n Kt = 2 * np.pi * fn # loop natural frequency in rad/s\n elif loop_type == 2:\n # Second-order loop parameters\n fn = 1 / (2 * np.pi) * 2 * bn / (zeta + 1 / (4 * zeta)) # given Bn in Hz\n Kt = 4 * np.pi * zeta * fn # loop natural frequency in rad/s\n a = np.pi * fn / zeta\n else:\n print('Loop type must be 1 or 2')\n\n # Initialize integration approximation filters\n filt_in_last = 0\n filt_out_last = 0\n vco_in_last = 0\n vco_out = 0\n vco_out_last = 0\n\n # Initialize working and final output vectors\n n = np.arange(0, len(xr))\n theta = np.zeros(len(xr))\n ev = np.zeros(len(xr))\n phi_error = np.zeros(len(xr))\n # Normalize total power in an attemp to make the 19kHz sinusoid\n # component have amplitude ~1.\n # xr = xr/(2/3*std(xr));\n # Begin the simulation loop\n for kk in range(len(n)):\n # Sinusoidal phase detector (simple multiplier)\n phi_error[kk] = 2 * xr[kk] * np.sin(vco_out)\n # LPF to remove double frequency term\n phi_error[kk], fstate = signal.lfilter(b_lp, a_lp, np.array([phi_error[kk]]), zi=fstate)\n pd_out = phi_error[kk]\n # pd_out = 0\n # Loop gain\n gain_out = Kt / Kv * pd_out # apply VCO gain at VCO\n # Loop filter\n if loop_type == 2:\n filt_in = a * gain_out\n filt_out = filt_out_last + T / 2. * (filt_in + filt_in_last)\n filt_in_last = filt_in\n filt_out_last = filt_out\n filt_out = filt_out + gain_out\n else:\n filt_out = gain_out\n # VCO\n vco_in = filt_out + fq / (Kv / (2 * np.pi)) # bias to quiescent freq.\n vco_out = vco_out_last + T / 2. * (vco_in + vco_in_last)\n vco_in_last = vco_in\n vco_out_last = vco_out\n vco_out = Kv * vco_out # apply Kv\n # Measured loop signals\n ev[kk] = filt_out\n theta[kk] = np.mod(vco_out, 2 * np.pi); # The vco phase mod 2pi\n return theta, phi_error", "def CCM(wl, R_V=3.1):\n\n\n a = np.zeros(np.shape(wl))\n b = np.zeros(np.shape(wl))\n F_a = np.zeros(np.shape(wl))\n F_b = np.zeros(np.shape(wl))\n x = np.zeros(np.shape(wl))\n y = np.zeros(np.shape(wl))\n q = np.zeros(np.shape(wl))\n\n x = 10000. / wl\n y = 10000. / wl - 1.82\n\n # Far-Ultraviolet: 8 <= x <= 10 ; 1000 -> 1250 Angs\n i = np.bitwise_and(x >= 8, x <= 10)\n\n a[i] = -1.073 - 0.628 * (x[i] - 8.) + 0.137 * (x[i] - 8.)**2 - 0.070 * (x[i] - 8.)**3\n b[i] = 13.670 + 4.257 * (x[i] - 8.) 
- 0.420 * (x[i] - 8.)**2 + 0.374 * (x[i] - 8.)**3\n\n # Ultraviolet: 3.3 <= x <= 8 ; 1250 -> 3030 Angs\n i = np.bitwise_and(x >= 5.9, x < 8)\n F_a[i] = -0.04473 * (x[i] - 5.9)**2 - 0.009779 * (x[i] - 5.9)**3\n F_b[i] = 0.2130 * (x[i] - 5.9)**2 + 0.1207 * (x[i] - 5.9)**3\n\n i = np.bitwise_and(x >= 3.3, x < 8)\n\n a[i] = 1.752 - 0.316 * x[i] - 0.104 / ((x[i] - 4.67)**2 + 0.341) + F_a[i]\n b[i] = -3.090 + 1.825 * x[i] + 1.206 / ((x[i] - 4.62)**2 + 0.263) + F_b[i]\n\n # Optical/NIR: 1.1 <= x <= 3.3 ; 3030 -> 9091 Angs ;\n i = np.bitwise_and(x >= 1.1, x < 3.3)\n\n a[i] = 1.+ 0.17699 * y[i] - 0.50447 * y[i]**2 - 0.02427 * y[i]**3 + \\\n 0.72085 * y[i]**4 + 0.01979 * y[i]**5 - 0.77530 * y[i]**6 + 0.32999 * y[i]**7\n b[i] = 1.41338 * y[i] + 2.28305 * y[i]**2 + 1.07233 * y[i]**3 - \\\n 5.38434 * y[i]**4 - 0.62251 * y[i]**5 + 5.30260 * y[i]**6 - 2.09002 * y[i]**7\n\n\n # Infrared: 0.3 <= x <= 1.1 ; 9091 -> 33333 Angs ;\n i = np.bitwise_and(x >= 0.3, x < 1.1)\n\n a[i] = 0.574 * x[i]**1.61\n b[i] = -0.527 * x[i]**1.61\n\n q = a + b / R_V\n\n return q", "def cz_rule(gate, platform):\n return platform.create_CZ_pulse_sequence(gate.qubits)", "def calculate_cable_delay(self, frequencies=None, z_data=None):\n delay_upper_bound = 100e-9\n \n if z_data is None:\n z_data = self.z_data_raw\n frequencies = self.frequencies\n # normalize data\n # first part - clculate cable delay by minimizing the angle distance between the two parts of the signal\n # def residuals(delay, frequencies, z_data):\n # z_data_ = self.correctdelay(frequencies, z_data, delay[0])\n # angles_at_limits = np.unwrap([np.angle(z_data_[0]), np.angle(z_data_[-1])])\n # angle_distance = angles_at_limits[0] - angles_at_limits[1]\n # res = angle_distance\n # \n # \n if self.delay_rough_estimation <= delay_upper_bound:\n initial_guess = self.delay_rough_estimation\n else:\n initial_guess = delay_upper_bound\n if self.config == 'T':\n def residuals(delay, frequencies, z_data):\n z_data_ = self.correctdelay(frequencies, z_data, delay[0])\n xc, yc, r0 = self.fit_circle(z_data_)\n # calculating the distance from radius of each point (will be zero for perfect circle)\n distance_from_radius = np.sqrt((z_data_.real - xc) ** 2 + (z_data_.imag - yc) ** 2) - r0\n # calculating the angle distance between the first and last points\n res = distance_from_radius / r0\n return res\n optimized = optimize.least_squares(residuals, initial_guess, args=(frequencies, z_data),\n bounds=(0, delay_upper_bound), xtol=5e-16, ftol=5e-16, gtol=1e-12)\n cable_delay = optimized.x[0]\n\n elif self.config == 'circulator':\n def residuals(delay, frequencies, z_data):\n z_data_ = self.correctdelay(frequencies, z_data, delay[0])\n xc, yc, r0 = self.fit_circle(z_data_)\n # calculating the distance from radius of each point (will be zero for perfect circle)\n distance_from_radius = np.sqrt((z_data_.real - xc) ** 2 + (z_data_.imag - yc) ** 2) - r0\n # calculating the angle distance between the first and last points\n angles_at_limits = np.unwrap([np.angle(z_data_[0]), np.angle(z_data_[-1])])\n angle_distance = angles_at_limits[0] - angles_at_limits[1]\n # returning residuls while taking both circle parameters into accout:\n # distance from radius and, and complition of a circle, in case of a small circle the residulas normalized\n # by the radius to increase the value of the function\n res = angle_distance\n return res\n\n optimized = optimize.least_squares(residuals, initial_guess, args=(frequencies, z_data),\n bounds=(0, delay_upper_bound), xtol=5e-16, ftol=5e-16, gtol=1e-12)\n 
cable_delay = optimized.x[0]\n initial_guess = cable_delay\n\n def residuals_(delay, frequencies, z_data):\n z_data_ = self.correctdelay(frequencies, z_data, delay[0])\n xc, yc, r0 = self.fit_circle(z_data_)\n # calculating the distance from radius of each point (will be zero for perfect circle)\n distance_from_radius = np.sqrt((z_data_.real - xc) ** 2 + (z_data_.imag - yc) ** 2) - r0\n # calculating the angle distance between the first and last points\n angles_at_limits = np.unwrap([np.angle(z_data_[0]), np.angle(z_data_[-1])])\n angle_distance = angles_at_limits[0] - angles_at_limits[1]\n # returning residuls while taking both circle parameters into accout:\n # distance from radius and, and complition of a circle, in case of a small circle the residulas normalized\n # by the radius to increase the value of the function\n res = distance_from_radius\n return res\n optimized = optimize.least_squares(residuals_, initial_guess, args=(frequencies, z_data),\n bounds=(0.9*initial_guess, 1.1*initial_guess), xtol=5e-16, ftol=5e-16, gtol=1e-12)\n cable_delay = optimized.x[0]\n \n self.delay = cable_delay\n logger.info(f\"Calculated cable delay is: {cable_delay:.5E}\")\n return cable_delay", "def compute_gains(Q, R, W, V, dt):\n\n data = np.empty((N,), dtype=controller_t)\n\n # Loop over all speeds for which we have system dynamics\n for i in range(N):\n data['theta_R_dot'][i] = theta_R_dot[i]\n data['dt'][i] = dt\n # Convert the bike dynamics to discrete time using a zero order hold\n data['A'][i], data['B'][i], _, _, _ = cont2discrete(\n (A_w[i], B_w[i, :], eye(4), zeros((4, 1))), dt)\n data['plant_evals_d'][i] = la.eigvals(data['A'][i])\n data['plant_evals_c'][i] = np.log(data['plant_evals_d'][i]) / dt\n \n # Bicycle measurement matrices\n # - steer angle\n # - roll rate\n data['C_m'][i] = C_w[i, :2, :]\n # - yaw rate\n data['C_z'][i] = C_w[i, 2, :]\n\n A = data['A'][i]\n B = data['B'][i, :, 2].reshape((4, 1))\n C_m = data['C_m'][i]\n C_z = data['C_z'][i]\n\n # Controllability from steer torque\n data['ctrb_plant'][i] = ctrb(A, B)\n u, s, v = la.svd(data['ctrb_plant'][i])\n assert(np.all(s > 1e-13))\n\n # Solve discrete algebraic Ricatti equation associated with LQI problem\n P_c = dare(A, B, R, Q)\n \n # Optimal feedback gain using solution of Ricatti equation\n K_c = -la.solve(R + dot(B.T, dot(P_c, B)),\n dot(B.T, dot(P_c, A)))\n data['K_c'][i] = K_c\n data['A_c'][i] = A + dot(B, K_c)\n data['B_c'][i] = B\n data['controller_evals'][i] = la.eigvals(data['A_c'][i])\n data['controller_evals_c'][i] = np.log(data['controller_evals'][i]) / dt\n assert(np.all(abs(data['controller_evals'][i]) < 1.0))\n\n # Observability from steer angle and roll rate measurement\n # Note that (A, C_m * A) must be observable in the \"current estimator\"\n # formulation\n data['obsv_plant'][i] = obsv(A, dot(C_m, A))\n u, s, v = la.svd(data['obsv_plant'][i])\n assert(np.all(s > 1e-13))\n\n # Solve Riccati equation\n P_e = dare(A.T, C_m.T, V, W)\n # Compute Kalman gain\n K_e = dot(P_e, dot(C_m.T, la.inv(dot(C_m, dot(P_e, C_m.T)) + V)))\n data['K_e'][i] = K_e\n data['A_e'][i] = dot(eye(4) - dot(K_e, C_m), A)\n data['B_e'][i] = np.hstack((dot(eye(4) - dot(K_e, C_m), B), K_e))\n data['estimator_evals'][i] = la.eigvals(data['A_e'][i])\n data['estimator_evals_c'][i] = np.log(data['estimator_evals'][i]) / dt\n # Verify that Kalman estimator eigenvalues are stable\n assert(np.all(abs(data['estimator_evals'][i]) < 1.0))\n\n # Closed loop state space equations\n A_cl = np.zeros((8, 8))\n A_cl[:4, :4] = A\n A_cl[:4, 4:] = dot(B, 
K_c)\n A_cl[4:, :4] = dot(K_e, dot(C_m, A))\n A_cl[4:, 4:] = A - A_cl[4:, :4] + A_cl[:4, 4:]\n data['A_cl'][i] = A_cl\n data['closed_loop_evals'][i] = la.eigvals(A_cl)\n assert(np.all(abs(data['closed_loop_evals'][i]) < 1.0))\n\n B_cl = np.zeros((8, 1))\n B_cl[:4, 0] = B.reshape((4,))\n B_cl[4:, 0] = dot(eye(4) - dot(K_e, C_m), B).reshape((4,))\n data['B_cl'][i] = B_cl\n\n C_cl = np.hstack((C_z, np.zeros((1, 4))))\n data['C_cl'][i] = C_cl\n\n # Transfer functions from r to yaw rate\n num, den = ss2tf(A_cl, B_cl, C_cl, 0)\n data['w_r_to_psi_dot'][i], y = freqz(num[0], den)\n data['w_r_to_psi_dot'][i] /= (dt * 2.0 * np.pi)\n data['mag_r_to_psi_dot'][i] = 20.0 * np.log10(abs(y))\n data['phase_r_to_psi_dot'][i] = np.unwrap(np.angle(y)) * 180.0 / np.pi\n\n # Open loop transfer function from e to yaw rate (PI loop not closed,\n # but LQR/LQG loop closed.\n inner_cl = ss(A_cl, B_cl, C_cl, 0)\n pi_block = ss([[1]], [[1]], [[data['Ki_fit'][i]*dt]], [[data['Kp_fit'][i]]])\n e_to_psi_dot = series(pi_block, inner_cl)\n num, den = ss2tf(e_to_psi_dot.A, e_to_psi_dot.B, e_to_psi_dot.C, e_to_psi_dot.D)\n data['w_e_to_psi_dot'][i], y = freqz(num[0], den)\n data['w_e_to_psi_dot'][i] /= (dt * 2.0 * np.pi)\n data['mag_e_to_psi_dot'][i] = 20.0 * np.log10(abs(y))\n data['phase_e_to_psi_dot'][i] = np.unwrap(np.angle(y)) * 180.0 / np.pi\n\n\n\n\n return data", "def get_loop_bandwidth(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_loop_bandwidth(self)", "def _calc_WaterCirculation(heat_load, CT_design, WBT, DBT, fixedCWT_ctrl, pump_ctrl, ignore_CT_eff, max_CT_eff=0.85):\r\n nTime = len(WBT)\r\n nCT = CT_design.shape[0]\r\n # .......................................................... 1) Calc CWT (based on WBT) and approach\r\n # i) CWT\r\n if fixedCWT_ctrl:\r\n raise NotImplementedError\r\n # This ctrl is not as simple as setting CWT to rated, because what if ambient WBT + min approach is above this?\r\n # CWT fixed at design value\r\n # CWT = Q_(np.tile(CT_design['CWT [°C]'].values, (Nsimul, 1)), 'degC')\r\n else:\r\n # CWT from CT performance curves\r\n perf_m = CT_design['CT perf slope'].values\r\n perf_b = CT_design['CT perf y-int'].values\r\n\r\n # time x CT\r\n CWT = Q_(np.outer(WBT, perf_m) + np.tile(perf_b, (nTime, 1)), 'degC')\r\n\r\n # ii) Approach\r\n WBT2 = Q_(np.transpose(np.tile(WBT, (nCT, 1))), 'degC')\r\n approach = CWT - WBT2\r\n\r\n # .......................................................... 
2) Calc water circulation loop\r\n # (calc deltaT, waterflow, assuming loaded)\r\n # Forms a time-invariant array with shape (time x CT) and as a Pint quantity\r\n tile_and_pint = lambda arr, units: Q_(np.tile(arr, (nTime, 1)), units)\r\n\r\n HWT_r = tile_and_pint(CT_design['HWT [°C]'].values, 'degC')\r\n waterflow_r = tile_and_pint(CT_design['water flow [kg/s]'].values, 'kg/s')\r\n\r\n if pump_ctrl == 'fixed HWT':\r\n deltaT = HWT_r - CWT\r\n waterflow = (heat_load / (cp_water * deltaT)).to_base_units()\r\n\r\n elif pump_ctrl == 'range limit':\r\n # Calc range as if HWT = HWT_r\r\n deltaT = HWT_r - CWT\r\n\r\n # i) Adjust deltaT\r\n deltaT_min = np.tile(CT_design['Min Range [C°]'].values, (nTime, 1))\r\n deltaT = Q_(np.clip((deltaT).magnitude, deltaT_min, None), 'delta_degC')\r\n\r\n # ii) Calc water flow\r\n waterflow = (heat_load / (cp_water * deltaT)).to_base_units()\r\n\r\n elif pump_ctrl == 'c':\r\n # Calc range & water flow as if HWT = HWT_r\r\n deltaT = HWT_r - CWT\r\n waterflow = (heat_load / (cp_water * deltaT)).to_base_units()\r\n waterflow_units = waterflow.units\r\n\r\n # i) Adjust water flow\r\n # Clip violating values\r\n waterflow_ub = np.tile((CT_design['Max per unit water flow'] * CT_design['water flow [kg/s]']).values,\r\n (nTime, 1))\r\n waterflow_lb = np.tile((CT_design['Min per unit water flow'] * CT_design['water flow [kg/s]']).values,\r\n (nTime, 1))\r\n\r\n _wf = np.clip(waterflow.magnitude, waterflow_lb, waterflow_ub)\r\n # Back to pint\r\n waterflow = Q_(_wf, waterflow_units)\r\n\r\n # ii) Calc deltaT\r\n deltaT = (heat_load / (cp_water * waterflow)).to('delta_degC')\r\n\r\n else:\r\n waterflow = waterflow_r\r\n deltaT = (heat_load / (cp_water * waterflow)).to('delta_degC')\r\n\r\n # .......................................................... 3) No-load fix\r\n # This part is necessary for all conrtol modes because the operational limits applied\r\n # in the step 2 assumed loaded operation. After this step, water flow and deltaT are final.\r\n CT_load_mask = (heat_load != 0).astype('int') # 0 if no load, 1 otherwise\r\n waterflow = waterflow * CT_load_mask\r\n deltaT = deltaT * CT_load_mask\r\n HWT = CWT + deltaT\r\n\r\n # .......................................................... 4) HWT and CWT adjustment\r\n # HWT cannot be less than DBT; in which case, HWT is limited to DBT and CWT rises.\r\n # Vectorize DBT into (time x CT)\r\n DBT = np.tile(DBT, (nCT, 1)).transpose()\r\n\r\n HWT = Q_(np.maximum(HWT.magnitude, DBT), 'degC')\r\n CWT = HWT - deltaT\r\n\r\n # .......................................................... 5) Checks and return\r\n assert waterflow.units == ureg.kg / ureg.s\r\n assert deltaT.units == ureg.delta_degC, deltaT.units\r\n\r\n # Check that CT efficiency is realistic. 
In practice, efficiency is 65-70% (normal operating conditions)\r\n CT_eff = deltaT / (deltaT + approach)\r\n assert ignore_CT_eff or np.all(CT_eff < max_CT_eff), \\\r\n \"CT efficiency exceeded the limit: {}\".format(CT_eff)\r\n\r\n assert all(obj.shape == (nTime, nCT) for obj in (HWT, CWT, waterflow, deltaT, approach, CT_eff))\r\n # Check energy balance\r\n assert np.allclose(heat_load.magnitude, (cp_water * deltaT * waterflow).to(heat_load.units).magnitude)\r\n\r\n res = {\r\n 'HWT': HWT,\r\n 'CWT': CWT,\r\n 'water flow': waterflow,\r\n 'range': deltaT,\r\n 'approach': approach,\r\n 'CT_eff': CT_eff,\r\n }\r\n\r\n return res", "def update_cps(free_cps, busy_cps,\n waiting_queue, current_time_step, by_time, within_opening_hours, log):\n # if outside of opening hours disconnect all vehicles\n if not within_opening_hours:\n cps_to_remove = []\n for cp in busy_cps:\n cp.disconnect_vehicle()\n cps_to_remove.append(cp)\n if log:\n logging.info(' Disconnect: %s', cp)\n for cp in cps_to_remove:\n busy_cps.remove(cp)\n free_cps.add(cp)\n return\n\n temp_switch_cps = []\n # if parking time is overdue: disconnect vehicle\n if by_time is True:\n for cp in busy_cps:\n connected_vehicle = cp.connected_vehicle\n\n if connected_vehicle['leaving_time'] <= current_time_step:\n if log:\n logging.info(' Disconnect: %s', cp)\n cp.disconnect_vehicle()\n temp_switch_cps.append(cp)\n\n # immediately connect next waiting car\n if waiting_queue.size() > 0:\n cp.connect_vehicle(waiting_queue.dequeue())\n # temporary store cps to switch later so busy_cp set does not change size\n temp_switch_cps = temp_switch_cps[:-1]\n # Put charging point from available to busy\n if log:\n logging.info(' Connect: %s from queue.', cp)\n\n for cp in temp_switch_cps:\n busy_cps.remove(cp)\n free_cps.add(cp)\n\n # if SOC limit is reached: disconnect vehicle\n else:\n for cp in busy_cps:\n connected_vehicle = cp.connected_vehicle\n\n soc = connected_vehicle['soc']\n soc_target = connected_vehicle['soc_target']\n\n if round(soc, 3) >= soc_target:\n if log:\n logging.info(' Disconnect: %s', cp)\n cp.disconnect_vehicle()\n\n temp_switch_cps.append(cp)\n\n # immediately connect next waiting car\n if waiting_queue.size() > 0:\n cp.connect_vehicle(waiting_queue.dequeue())\n # temporary store cps to switch later so set does not change size\n temp_switch_cps = temp_switch_cps[:-1]\n if log:\n logging.info(' Connect: %s from queue.', cp)\n # Put charging point from available to busy\n for cp in temp_switch_cps:\n busy_cps.remove(cp)\n free_cps.add(cp)", "def get_blockdim_and_loop_cycle(self):\n #block_num = tik.Dprofile().get_aicore_num()\n block_num = tbe_platform.cce_conf.get_soc_spec(tbe_platform.cce_conf.CORE_NUM)\n \n shape_y = self.input_dict.get(\"y\").get(\"shape\")\n limit_size_of_each_block = shape_y[2] * shape_y[3]\n total_channel = shape_y[0] * shape_y[1]\n each_block_num = constant.BLOCK_SIZE // self.dsize\n each_block_align = \\\n ((each_block_num + limit_size_of_each_block - 1) //\n limit_size_of_each_block) * limit_size_of_each_block\n if limit_size_of_each_block * self.dsize < constant.BLOCK_SIZE:\n all_size = total_channel * limit_size_of_each_block * self.dsize\n if all_size < constant.BLOCK_SIZE:\n block_num = 1\n return block_num, total_channel, 0\n\n limit_size_of_each_block = each_block_align\n limit_channel_of_each_block = limit_size_of_each_block // \\\n (shape_y[2] * shape_y[3])\n loop = (total_channel * shape_y[2] * shape_y[3]) // \\\n limit_size_of_each_block\n mod_channel = ((total_channel * shape_y[2] * 
shape_y[3]) % \\\n limit_size_of_each_block) // (shape_y[2] * shape_y[3])\n if loop <= block_num:\n block_num = loop\n inner_loop = limit_channel_of_each_block\n inner_loop_mod = mod_channel\n else:\n inner_loop = (loop // block_num) * limit_channel_of_each_block\n inner_loop_mod = (loop % block_num) * limit_channel_of_each_block \\\n + mod_channel\n if inner_loop_mod > block_num:\n inner_loop = inner_loop + inner_loop_mod // block_num\n inner_loop_mod = inner_loop_mod % block_num\n\n return block_num, inner_loop, inner_loop_mod", "def setup_cycles(params):\n\n # Read out upper and lower voltage bounds\n phi_bounds = np.array((params['lower-cutoff'], params['upper-cutoff']))\n\n # Read out the sweep rate, and convert units as necessary to (V/s)\n R = read_sweep_rate(params['sweep-rate'])\n\n # Time for one complete sweep between the upper and lower bounds:\n dt_sweep = (phi_bounds[1] - phi_bounds[0])/R\n\n # Direction of the initial sweep: positive-going or negative-going?\n if params['initial-sweep'] == 'positive':\n direction = 1\n elif params['initial-sweep'] == 'negative':\n direction = -1\n\n # Initial potential:\n if params['initial-potential'] == 'ocv':\n phi_0 = params['ocv']\n else:\n phi_0 = params[\"initial-potential\"]\n\n # Find the first time where the potential hits one of the limits:\n t_limit_0 = -direction*(phi_0 - phi_bounds[int(0.5*(1. + direction))])/R\n\n # Make an array containing all of the times when the voltage limits are hit:\n t_events = np.arange(t_limit_0, \n t_limit_0 + dt_sweep*(2.*params[\"n_cycles\"])+1, \n dt_sweep)\n \n # Calculate the CV end time (s): \n t_final = t_limit_0 + dt_sweep*(2.*params[\"n_cycles\"])\n\n # Concatenate all times into a single array:\n times = np.concatenate((np.array([0.]), t_events, np.array([t_final]),))\n \n # Initialize the array of potentials:\n potentials = np.zeros_like(times)\n\n # Load the initial potential:\n potentials[0] = phi_0\n\n # Use the sweep rate, sweep direction, and the `times` array to determine \n # the other potentials. Each time a voltage limit is hit, the sweep \n # changes direction.\n for i, t in enumerate(times[1:]):\n potentials[i+1] = potentials[i] + direction*(t - times[i])*R\n direction *= -1\n\n return potentials, times", "def setBoundaryCondition(self):\n \n \n if self.grid.bc == 'constant' and self.t == 0.0:\n # conditions are fixed to their starting values at edges\n self.__qR__ = np.array([[self.q[0][-1]],[self.q[1][-1]],[self.q[2][-1]]])\n self.__qL__ = np.array([[self.q[0][0]] ,[self.q[1][0]] ,[self.q[2][0]]])\n \n self.__fR__ = np.array([[self.f[0][-1]],[self.f[1][-1]],[self.f[2][-1]]])\n self.__fL__ = np.array([[self.f[0][0]] ,[self.f[1][0]] ,[self.f[2][0]]])\n \n \n elif self.grid.bc == 'periodic':\n self.__qR__ = np.array([[self.q[0][0]],[self.q[1][0]],[self.q[2][0]]])\n self.__qL__ = np.array([[self.q[0][-1]],[self.q[1][-1]],[self.q[2][-1]]])\n\n self.__fR__ = np.array([[self.f[0][0]],[self.f[1][0]],[self.f[2][0]]])\n self.__fL__ = np.array([[self.f[0][-1]],[self.f[1][-1]],[self.f[2][-1]]])\n \n elif not self.grid.bc == 'constant':\n print \"nothing set with boundary conditions... 
check bc settings\"", "def compiler(circuit):\n #initialize an empty circuit with the same size of qubits and clbits \n decomposedCircuit = QuantumCircuit(circuit.num_qubits)\n if circuit.num_clbits >0:\n decomposedCircuit.add_register(ClassicalRegister(circuit.num_clbits))\n \n #extract the gates to compile them from the data list\n for item in circuit.data:\n \n #the gate object\n gate=item[0] \n \n #number of qubits of the gate\n numOfQubits=len(item[1]) \n \n #the indices that the gate applied on\n positions=[qubit.index for qubit in item[1]] \n \n #check if the gate is a single qubit gate\n if numOfQubits==1:\n #decompose the single qubit gate\n decomposition=oneQubitDecomppser(gate)\n #extract the decomposition gates from the received circuit\n gates=[item[0] for item in decomposition.data]\n #append each gate to the new circuit at the same position note: len(positions)=1 \"single qubit gate\"\n [decomposedCircuit.append(gate,positions) for gate in gates]\n \n #check if the gate is a two qubit gate\n elif numOfQubits==2:\n #decompose the gate\n decomposition=twoQubitDecomppser(gate)\n #extract the decomposition gates from the received circuit\n for item in decomposition.data:\n gate=item[0]\n if len(item[1])==2:\n #append each gate to the new circuit at the same positions note: len(positions)=2\n decomposedCircuit.append(gate,positions)\n else:\n #append a single qubit gate to the new circuit\n #get the index (0 or 1) means the gate is applied to the 1st qubit or the 2nd qubit from the positions list \n decomposedCircuit.append(gate,[positions[item[1][0].index]]) \n \n return decomposedCircuit", "def bpcg(H, B, Fx, Fy, Qh, Qs, x, y, prec, maxit, show): \n \n from Epetra import Vector\n from numpy import sqrt\n verbose = (H.Comm().MyPID()==0) \n\n r1 = Vector(x)\n r2 = Vector(y)\n\n # r1_0 = Fx - H * x - B * y\n tr1 = Vector(Fx)\n H.Multiply(False, x, r1)\n tr1.Update(-1., r1, 1.)\n B.Multiply(False, y, r1)\n tr1.Update(-1., r1, 1.)\n \n # r2_0 = F_y - B' * x\n tr2 = Vector(Fy) \n B.Multiply(True, x, r2)\n tr2.Update(-1., r2, 1.)\n\n # r0 = G r_check_0\n # with G = [inv(Qh) 0 \n # B*inv(Qh) - I]\n r1.Multiply(1., Qh, tr1, 0.)\n \n B.Multiply(True, r1, r2)\n r2.Update(-1., tr2, 1.)\n \n # norm evaluation\n res = sqrt(r1.Norm2()**2 + r2.Norm2()**2)\n nF = sqrt(Fx.Norm2()**2 + Fy.Norm2()**2)\n\n # pre-alloc\n z1 = Vector(x)\n z2 = Vector(y)\n \n w1 = Vector(x)\n w2 = Vector(y)\n \n q1 = Vector(x)\n q2 = Vector(y)\n \n d = Vector(x)\n ###########################################\n # MAIN LOOP\n ###########################################\n k = 0\n while ((res > prec * nF) and (k <= maxit)):\n \n #solve the \\tilde{K} z^k = r^k \n\t z2.Multiply(1., Qs, r2, 0.)\n z1.Update(1., r1, 0.)\n \n # d = H * r_1^k\n H.Multiply(False, r1, d)\n \n\t #beta^n_k = <d,r_1^k> -<r_check_1^k,r_1^k> +<z_2^k,r_2^k>\n bet_n = d.Dot(r1) - tr1.Dot(r1) + z2.Dot(r2);\n \n if k == 0:\n bet = 0.\n p1 = Vector(z1)\n p2 = Vector(z2)\n s = Vector(d)\n else:\n # beta_k = beta^n_k /beta^n_{k-1}\n bet = bet_n / bet_n1\n \n # p^k = z^k + beta_k* p^{k-1}\n p1.Update(1., z1, bet)\n p2.Update(1., z2, bet)\n \n # s^k = d + beta_k* s^{k-1}\n s.Update(1., d, bet) \n \n # q = [s;0] + [B' p2^k ; B * p1^k]\n\t B.Multiply(False, p2, q1) \n\t q1.Update(1., s, 1.) 
\n B.Multiply(True, p1, q2)\n \n # w = [Qh^{-1}q1 ; B'Qh^{-1}q1 -q2 ] \n w1.Multiply(1., Qh, q1, 0.)\n #w2 = B.T*w1-q2\n B.Multiply(True, w1, w2)\n\t w2.Update(-1., q2, 1.)\n \n #alpha_k^d = <w_1,s^k>-<q_1,p_1^k> + <w_2,p_2^k> \n alp_d = w1.Dot(s) - q1.Dot(p1) + w2.Dot(p2)\n \n # alpha_k = beta^n_k / alpha_k^d\n alp = bet_n / alp_d\n \n # v^{k+1} = v^k + alpha_k p^k\n x.Update(alp, p1, 1.)\n y.Update(alp, p2, 1.)\n \n # r^{k+1} = r^k - alpha_k w\n r1.Update(-alp, w1, 1.)\n r2.Update(-alp, w2, 1.)\n \n # r_check_1^{k+1} = r_check_1^k - alpha_k q_1\n tr1.Update(-alp, q1, 1.)\n \n # update\n bet_n1 = bet_n\n k += 1\n \n res = sqrt(r1.Norm2()**2 + r2.Norm2()**2)\n if show and (k % 10 == 0) and verbose:\n\t print '%d %.3e '% (k, res/nF)\n \n H.Multiply(False, x, tr1)\n B.Multiply(False, y, r1)\n tr1.Update(1., r1, -1., Fx, 1.)\n\n B.Multiply(True, x, tr2)\n tr2.Update(-1., Fy, 1.)\n residu=sqrt(tr1.Norm2()**2 + tr2.Norm2()**2)\n return residu/nF, k" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_cpmmod_bc_sptr __init__(self, p) > digital_cpmmod_bc_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_cpmmod_bc_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _coin.new_SbDPMatrix(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, mem, inp, outp):\n self.pc = 0\n self.mem = mem\n self.inp = inp\n self.outp = outp", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_ScXMLCoinMultiplyOpExprDataObj()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_doublep()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
primitive_connect(self, gr_basic_block_sptr block) primitive_connect(self, gr_basic_block_sptr src, int src_port, gr_basic_block_sptr dst, int dst_port)
def primitive_connect(self, *args): return _digital_swig.digital_cpmmod_bc_sptr_primitive_connect(self, *args)
[ "def connect_wire( self, dest=None, src=None ):\n\n self._connect_signal( src, dest ) # expects the src first", "def _connect_signal( self, left_port, right_port ):\n\n # Can't connect a port to itself!\n assert left_port != right_port\n # Create the connection\n connection_edge = ConnectionEdge( left_port, right_port )\n\n # Add the connection to the Model's connection list\n if not connection_edge:\n raise Exception( \"Invalid Connection!\")\n self._connections.add( connection_edge )", "def _connect_bundle( self, left_bundle, right_bundle ):\n\n # Can't connect a port to itself!\n assert left_bundle != right_bundle\n\n ports = zip( left_bundle.get_ports(), right_bundle.get_ports() )\n\n for left, right in ports:\n self._connect_signal( left, right )", "def connect_inline(target, source):\n dependents = source.dependent(nuke.INPUTS | nuke.HIDDEN_INPUTS)\n target.setInput(0, source)\n\n if target.maxOutputs():\n for node in dependents:\n print node.fullName()\n for i in xrange(node.inputs()):\n print \"setting input {0}\".format(i)\n print node.input(i)\n if node.input(i) == source:\n print \"setting that input\"\n node.setInput(i, target)", "def connect(self, layer1, layer2):\n return NotImplemented", "def _install_flow (self, p, c, n, port_src, port_dst = None,\n **kw):\n\n node_p = core.Outband.t.name(p)\n node_c = core.Outband.t.name(c)\n node_n = core.Outband.t.name(n)\n inport = node_c.port(node_p)\n outport = node_c.port(node_n)\n if not inport:\n log.error('%s->%s: not found' % (node_c.name, node_p.name))\n return\n if not outport:\n log.error('%s->%s: not found' % (node_c.name, node_n.name))\n return\n\n nw_src = nw_dst = None\n info_src = info_dst = \"\"\n if port_src:\n nw_src = port_src.ip\n info_src = \"%s(%s) => \" % (port_src.parent.name, nw_src)\n if port_dst:\n nw_dst = port_dst.ip\n info_dst = \" => %s(%s)\" % (port_dst.parent.name, nw_dst)\n\n backport = node_n.port(node_c)\n if backport:\n mac = backport.mac\n else:\n log.error('%s->%s: link not found' % (node_n.name, node_c.name))\n return\n\n str_from = \"%s.%s\" % (dpid_to_str(node_c.dpid), c)\n str_out = \"%s.%s\" % (dpid_to_str(node_n.dpid), n)\n eth_in, eth_out = '', ''\n if mac:\n eth_out = '!'\n\n if not outport or outport < 0 or not inport.num or inport.num < 0:\n log.error('unknown port: %s %s->%s %s' %\n (str_from, inport.num, outport.num, str_out))\n return\n\n actions = []\n if not mac and 'add_eth_label' in kw:\n mac = ETH_LABEL\n eth_out = '+'\n if not mac and 'del_eth_label' in kw:\n mac = ETHER_BROADCAST\n eth_out = '-'\n if mac:\n actions.append(of.ofp_action_dl_addr.set_dst(mac))\n actions.append(of.ofp_action_output(port = outport.num))\n\n match = of.ofp_match(in_port = inport.num,\n #nw_proto = ipv4.TCP_PROTOCOL,\n #dl_vlan = 1301,\n #dl_type = ethernet.VLAN_TYPE,\n dl_type = ethernet.IP_TYPE,\n nw_src = nw_src, #None,\n nw_dst = nw_dst )\n match.adjust_wildcards = False\n if 'with_eth_label' in kw or 'del_eth_label' in kw:\n match.dl_dst = ETH_LABEL\n eth_in = '*'\n\n if port_src and port_src.mac:\n match.dl_src = port_src.mac\n else:\n #log.error('unknown port_src.mac')\n return\n\n priority = of.OFP_DEFAULT_PRIORITY\n if 'add_eth_label' in kw:\n priority = FAILOVER_PRIORITY\n if mac:\n priority = of.OFP_DEFAULT_PRIORITY + 1 + outport.num\n if 'priority' in kw:\n priority = kw['priority']\n\n if 'failover_entry' in kw:\n mark = '=>'\n else:\n mark = '->'\n\n if 'udp' in kw:\n match.nw_proto = ipv4.UDP_PROTOCOL\n\n log.info('%s%s %i%s%s%s%i %s%s',\n info_src, str_from, \n inport.num, eth_in, 
mark, eth_out, outport.num,\n str_out, info_dst)\n\n msg = of.ofp_flow_mod(command=of.OFPFC_ADD,\n idle_timeout=of.OFP_FLOW_PERMANENT,\n hard_timeout=of.OFP_FLOW_PERMANENT,\n actions=actions,\n match=match,\n priority=priority\n )\n if 'failover_entry' in kw:\n self._add_failover_entry(c, msg)\n else:\n core.openflow.sendToDPID(node_c.dpid, msg.pack())\n\n if (not ('udp' in kw)) and outport.mac:\n #sending to destination, separte udp traffic \n self._install_flow(p, c, n, port_src, port_dst,\n udp = True, priority = of.OFP_DEFAULT_PRIORITY + 99,\n **kw)\n\n return", "def src_sink(self) -> SrcSink:\n pass", "def _connect(self, v1, v2):\n v1.neighbours.append(v2)\n v2.neighbours.append(v1)", "def connectVector(cls, plug, src, *args, **kwargs):\r\n pm.mel.cgfxShader_connectVector(plug, src)", "def add_link (self, src, dst):\n raise NotImplementedError(\"Not implemented yet!\")", "def add_sglink (self, src, dst):\n raise NotImplementedError(\"Not implemented yet!\")", "def _connect(self, start, end):\n if self._game.is_resolving():\n return\n if not self._playing:\n return\n self._grid_view.draw_connection(start, end,\n self._game.grid[start].get_dot().get_kind())", "def onConnectAttr(self, srcattr, dstattr, opts):\n pass", "def copy_(self, src, non_blocking=False): # real signature unknown; restored from __doc__\n pass", "def vnnConnect(disconnect=bool):\n pass", "def change_edge_src(graph: dace.graph.graph.OrderedDiGraph,\n node_a: Union[dace.graph.nodes.Node, dace.graph.graph.\n OrderedMultiDiConnectorGraph],\n node_b: Union[dace.graph.nodes.Node, dace.graph.graph.\n OrderedMultiDiConnectorGraph]):\n\n # Create new outgoing edges from node B, by copying the outgoing edges from\n # node A and setting their source to node B.\n edges = list(graph.out_edges(node_a))\n for e in edges:\n # Delete the outgoing edges from node A from the graph.\n graph.remove_edge(e)\n # Insert the new edges to the graph.\n if isinstance(e, gr.MultiConnectorEdge):\n # src_conn = e.src_conn\n # if e.src_conn is not None:\n # # Remove connector from node A.\n # node_a.remove_out_connector(e.src_conn)\n # # Insert connector to node B.\n # if (not node_b.add_out_connector(src_conn) and isinstance(\n # node_b, (dace.graph.nodes.CodeNode,\n # dace.graph.nodes.MapExit))):\n # while not node_b.add_out_connector(src_conn):\n # src_conn = src_conn + '_'\n # graph.add_edge(node_b, src_conn, e.dst, e.dst_conn, e.data)\n graph.add_edge(node_b, e.src_conn, e.dst, e.dst_conn, e.data)\n else:\n graph.add_edge(node_b, e.dst, e.data)", "def connect(connect_from, connect_to):\r\n connect_to_name = connect_to.layer_name\r\n for node_from in range(len(connect_from.nodes)):\r\n for node_to in range(len(connect_to.nodes)):\r\n connect_from.nodes[node_from].connections.append(Connection(connect_to_name, node_to, random.random()*2 - 1))", "def vnnCopy(sourceNode=\"string\"):\n pass", "def connectInput(self, input, output, other):\n\n if not input.name in self.inputs:\n raise ValueError(\"Input is not part of this block\")\n\n if not output.name in other.outputs:\n raise ValueError(\"Output is not part of target block\")\n\n if input.maxConnections > -1 and (not len(input.targets) < input.maxConnections):\n raise ValueError(\"Too many connections to input '%s'\" % input.name)\n\n if output.maxConnections > -1 and (not len(output.targets) < output.maxConnections):\n raise ValueError(\"Too many connections from output '%s'\" % output.name)\n\n input.targets.append(output)\n output.targets.append(input)", "def copyConnection(self, fromfield: 
'SoField') -> \"void\":\n return _coin.SoField_copyConnection(self, fromfield)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
primitive_disconnect(self, gr_basic_block_sptr block) primitive_disconnect(self, gr_basic_block_sptr src, int src_port, gr_basic_block_sptr dst, int dst_port)
def primitive_disconnect(self, *args): return _digital_swig.digital_cpmmod_bc_sptr_primitive_disconnect(self, *args)
[ "def disconnect(self, device):", "def disconnect(self, *args) -> \"void\":\n return _coin.SoField_disconnect(self, *args)", "def l2cap_disconnect(self, conn_handle: memoryview, cid: memoryview, /) -> None:", "def disconnect(self, endpoint):\n raise NotImplementedError", "def test_DisconnectNode(self):\n graph = mGraph.Graph()\n sumNode_1 = graph.createNode(mNode.SumNode)\n sumNode_2 = graph.createNode(mNode.SumNode)\n sumNode_3 = graph.createNode(mNode.SumNode)\n\n sumNode_1.getOutputPort(\"result\").connect(sumNode_3.getInputPort(\"value1\"))\n sumNode_2.getOutputPort(\"result\").connect(sumNode_3.getInputPort(\"value2\"))\n\n sumNode_1.portsIn[0].value = 1.0\n sumNode_1.portsIn[1].value = 1.5\n sumNode_2.portsIn[1].value = 2.25\n sumNode_2.portsIn[0].value = 3.0\n negNode = graph.createNode(mNode.NegateNode)\n sumNode_3.getOutputPort(\"result\").connect(negNode.getInputPort(\"value\"))\n negNode.evaluate()\n self.assertEqual(negNode.portsOut[0].value, -7.75, \"Output from Negate Node incorrect\")\n\n heads = graph.getNetworkHeads()\n self.assertEqual(len(heads), 1)\n tails = graph.getNetworkTails()\n self.assertEqual(len(tails), 2)\n\n sumNode_2.getOutputPort(\"result\").disconnect(sumNode_3.getInputPort(\"value2\"))\n self.assertFalse(sumNode_2.getOutputPort(\"result\").isConnected())\n self.assertFalse(sumNode_3.getInputPort(\"value2\").isConnected())\n self.assertEqual(sumNode_3.getInputPort(\"value2\").value, 5.25, \"Disconnected port values should be equal to there last connected input\")\n\n negNode.evaluate()\n self.assertEqual(negNode.portsOut[0].value, -7.75, \"Output from Negate Node incorrect\")\n\n heads = graph.getNetworkHeads()\n self.assertEqual(len(heads), 2)", "def onDisconnectAttr(self, srcattr, dstattr, opts):\n pass", "def user32_DdeDisconnect(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hConv\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def disconnect(self, reason: str = ''):\r\n reason_b = reason.encode(ENCODING)\r\n self.send(self.Enum.INF_DISCONNECT, reason_b) # Send inform_disconnect message with 'reason'\r\n self.transport.close() # Close the connection. Waits to send all data first. 
No data received hereon.\r", "def disconnect_nodes(parent_obj, parent_plug, child_obj, child_plug):\n\n parent_plug = get_plug(parent_obj, parent_plug)\n child_plug = get_plug(child_obj, child_plug)\n mdg_mod = maya.api.OpenMaya.MDGModifier()\n mdg_mod.disconnect(parent_plug, child_plug)\n mdg_mod.doIt()", "def __disconnect_field_signal(self, node):\n field = node.elem\n if field != None:\n if field.id != None:\n field.view.disconnect(field.id)", "def disconnectUnits(self, a, b):\n if self.verbose >= 1:\n print \"Remove edge:\", a.vectorStr(), b.vectorStr()\n a.edges.remove(a.getEdgeTo(b))\n b.edges.remove(b.getEdgeTo(a))", "def did_disconnect(self, target: \"SoCTarget\", resume: bool) -> None:\n pass", "def disconnectOutputs(self, node):\r\n node = self.convertToPyNode(node)\r\n if not node.isReferenced():\r\n output = node.outputs(c=1, p=1)\r\n for o in output:\r\n disconnectAttr(o[0], o[1])", "def disconnect(self):\n self.blnkt_dev.disconnect()", "def will_disconnect(self, target: \"SoCTarget\", resume: bool) -> None:\n pass", "def _disconnect_input(self, step_arg_name):\n self._connected_inputs[step_arg_name] = False", "def disconnect_node(G,node1,node2,text_id,idx):\n\tedge = G[node1][node2]\n\tedge_idx_list = edge['paths'][text_id]['word_positions']\n\tidx_pos = edge_idx_list.index(idx)\n\tedge_idx_list.pop(idx_pos)\n\tedge['weight'] -=1\n\tif not len(edge_idx_list):\n\t\tdel edge['paths'][text_id]\n\tif not len(edge['paths']):\n\t\tG.remove_edge(node1,node2)", "def detach_port(self, instance_obj, network_obj):\n raise NotImplementedError()", "def disconnect_connector(self): \n if self.itemA is not None:\n if self in self.itemA.connectorList:\n self.itemA.connectorList.remove(self)\n if self.itemB is not None:\n if self in self.itemB.connectorList:\n self.itemB.connectorList.remove(self)", "def disconnectJoint(attachHandleMode=bool, deleteHandleMode=bool):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
cpmmod_bc(int type, float h, unsigned int samples_per_sym, unsigned int L, double beta = 0.3) > digital_cpmmod_bc_sptr Generic CPM modulator.
def cpmmod_bc(*args, **kwargs): return _digital_swig.cpmmod_bc(*args, **kwargs)
[ "def update_bc(\n betas,\n ses,\n err_corr,\n C,\n M,\n delta_m,\n bc,\n scales,\n Theta_0_inv,\n iteration,\n annot_len,\n annot_vec,\n annot_map,\n Vjm_scale,\n):\n\n for c in range(1, C):\n count = 0\n mu_lhs = 0\n var_q = 0\n mu_rhs = 0 * betas[0, :]\n mu_rhs = mu_rhs.T\n for var_idx in range(0, M):\n if delta_m[iteration, var_idx] == c:\n count += 1\n varannot = annot_vec[var_idx]\n annot_idx = [\n i for i in range(0, annot_len) if annot_map[i] == varannot\n ][0]\n Vjm = calculate_Vjm(ses, var_idx, err_corr, Vjm_scale)\n Vjminv = np.linalg.inv(Vjm)\n q1 = scales[iteration - 1, annot_idx] * Vjminv\n q2 = (\n np.sqrt(scales[iteration - 1, annot_idx])\n * Vjminv\n * betas[var_idx, :].T\n )\n q3 = scales[iteration - 1, annot_idx] * Vjminv\n if count == 1:\n mu_lhs, mu_rhs, var_q = q1, q2, q3\n else:\n mu_lhs += q1\n mu_rhs += q2\n var_q += q3\n mu_lhs += Theta_0_inv\n var_q += Theta_0_inv\n mean_param = np.ravel(np.linalg.inv(mu_lhs) * mu_rhs)\n var_param = np.linalg.inv(var_q)\n bc[iteration, c, :] = np.random.multivariate_normal(mean_param, var_param)\n return bc", "def bpcg(H, B, Fx, Fy, Qh, Qs, x, y, prec, maxit, show): \n \n from Epetra import Vector\n from numpy import sqrt\n verbose = (H.Comm().MyPID()==0) \n\n r1 = Vector(x)\n r2 = Vector(y)\n\n # r1_0 = Fx - H * x - B * y\n tr1 = Vector(Fx)\n H.Multiply(False, x, r1)\n tr1.Update(-1., r1, 1.)\n B.Multiply(False, y, r1)\n tr1.Update(-1., r1, 1.)\n \n # r2_0 = F_y - B' * x\n tr2 = Vector(Fy) \n B.Multiply(True, x, r2)\n tr2.Update(-1., r2, 1.)\n\n # r0 = G r_check_0\n # with G = [inv(Qh) 0 \n # B*inv(Qh) - I]\n r1.Multiply(1., Qh, tr1, 0.)\n \n B.Multiply(True, r1, r2)\n r2.Update(-1., tr2, 1.)\n \n # norm evaluation\n res = sqrt(r1.Norm2()**2 + r2.Norm2()**2)\n nF = sqrt(Fx.Norm2()**2 + Fy.Norm2()**2)\n\n # pre-alloc\n z1 = Vector(x)\n z2 = Vector(y)\n \n w1 = Vector(x)\n w2 = Vector(y)\n \n q1 = Vector(x)\n q2 = Vector(y)\n \n d = Vector(x)\n ###########################################\n # MAIN LOOP\n ###########################################\n k = 0\n while ((res > prec * nF) and (k <= maxit)):\n \n #solve the \\tilde{K} z^k = r^k \n\t z2.Multiply(1., Qs, r2, 0.)\n z1.Update(1., r1, 0.)\n \n # d = H * r_1^k\n H.Multiply(False, r1, d)\n \n\t #beta^n_k = <d,r_1^k> -<r_check_1^k,r_1^k> +<z_2^k,r_2^k>\n bet_n = d.Dot(r1) - tr1.Dot(r1) + z2.Dot(r2);\n \n if k == 0:\n bet = 0.\n p1 = Vector(z1)\n p2 = Vector(z2)\n s = Vector(d)\n else:\n # beta_k = beta^n_k /beta^n_{k-1}\n bet = bet_n / bet_n1\n \n # p^k = z^k + beta_k* p^{k-1}\n p1.Update(1., z1, bet)\n p2.Update(1., z2, bet)\n \n # s^k = d + beta_k* s^{k-1}\n s.Update(1., d, bet) \n \n # q = [s;0] + [B' p2^k ; B * p1^k]\n\t B.Multiply(False, p2, q1) \n\t q1.Update(1., s, 1.) 
\n B.Multiply(True, p1, q2)\n \n # w = [Qh^{-1}q1 ; B'Qh^{-1}q1 -q2 ] \n w1.Multiply(1., Qh, q1, 0.)\n #w2 = B.T*w1-q2\n B.Multiply(True, w1, w2)\n\t w2.Update(-1., q2, 1.)\n \n #alpha_k^d = <w_1,s^k>-<q_1,p_1^k> + <w_2,p_2^k> \n alp_d = w1.Dot(s) - q1.Dot(p1) + w2.Dot(p2)\n \n # alpha_k = beta^n_k / alpha_k^d\n alp = bet_n / alp_d\n \n # v^{k+1} = v^k + alpha_k p^k\n x.Update(alp, p1, 1.)\n y.Update(alp, p2, 1.)\n \n # r^{k+1} = r^k - alpha_k w\n r1.Update(-alp, w1, 1.)\n r2.Update(-alp, w2, 1.)\n \n # r_check_1^{k+1} = r_check_1^k - alpha_k q_1\n tr1.Update(-alp, q1, 1.)\n \n # update\n bet_n1 = bet_n\n k += 1\n \n res = sqrt(r1.Norm2()**2 + r2.Norm2()**2)\n if show and (k % 10 == 0) and verbose:\n\t print '%d %.3e '% (k, res/nF)\n \n H.Multiply(False, x, tr1)\n B.Multiply(False, y, r1)\n tr1.Update(1., r1, -1., Fx, 1.)\n\n B.Multiply(True, x, tr2)\n tr2.Update(-1., Fy, 1.)\n residu=sqrt(tr1.Norm2()**2 + tr2.Norm2()**2)\n return residu/nF, k", "def m_c(mcmc, scale, f, alphasMZ=0.1185, loop=3):\n if scale == mcmc:\n return mcmc # nothing to do\n _sane(scale, f)\n crd = rundec.CRunDec()\n alphas_mc = alpha_s(mcmc, 4, alphasMZ=alphasMZ, loop=loop)\n if f == 4:\n alphas_scale = alpha_s(scale, f, alphasMZ=alphasMZ, loop=loop)\n return crd.mMS2mMS(mcmc, alphas_mc, alphas_scale, f, loop)\n elif f == 3:\n crd.nfMmu.Mth = 1.3\n crd.nfMmu.muth = 1.3\n crd.nfMmu.nf = 4\n return crd.mH2mL(mcmc, alphas_mc, mcmc, crd.nfMmu, scale, loop)\n elif f == 5:\n crd.nfMmu.Mth = 4.8\n crd.nfMmu.muth = 4.8\n crd.nfMmu.nf = 5\n return crd.mL2mH(mcmc, alphas_mc, mcmc, crd.nfMmu, scale, loop)\n else:\n raise ValueError(f\"Invalid input: f={f}, scale={scale}\")", "def periodicBC(particle, fieldset, time, dt):\n # from tutorials\n if particle.lon < fieldset.halo_west:\n particle.lon += fieldset.halo_east - fieldset.halo_west\n elif particle.lon > fieldset.halo_east:\n particle.lon -= fieldset.halo_east - fieldset.halo_west", "def descrambler_bb(*args, **kwargs):\n return _digital_swig.descrambler_bb(*args, **kwargs)", "def sigma_der_C_l(bias, n_z, Omega_m , sig_8):\n \n alpha_s = sig_8/10.0\n \n C_sig_1 = C_l(bias, n_z, Omega_m, sig_8+alpha_s)[1]\n C_sig_2 = C_l(bias, n_z, Omega_m , sig_8-alpha_s)[1]\n \n sig_der = (C_sig_1 - C_sig_2)/(2.0*alpha_s)\n\n return sig_der", "def _get_ghash_clmul():\n\n if not _cpu_features.have_clmul():\n return None\n try:\n api = _ghash_api_template.replace(\"%imp%\", \"clmul\")\n lib = load_pycryptodome_raw_lib(\"Crypto.Hash._ghash_clmul\", api)\n result = _build_impl(lib, \"clmul\")\n except OSError:\n result = None\n return result", "def run_mixed_bp(y,M,Omega,epsilon): \r\n return ABSmixed.bp(y,M,Omega,epsilon, numpy.zeros(Omega.shape[0]))", "def _generateBetaParams(self, N, pcomplex=0.35, seed=8595,\n\t\tchiAlpha=1, chiBeta=1, chiMin=0, chiMax=np.pi,\n\t\tdepthAlpha=1, depthBeta=1, depthMin=-50, depthMax=50,\n\t\tfluxAlpha=1, fluxBeta=1, fluxMin=0.01, fluxMax=1,\n\t\tnoiseAlpha=1, noiseBeta=1, noiseMin=0.01, noiseMax=1.0):\n\n\n\t\t# ===========================================\n\t\t#\tSet the random seed\n\t\t# ===========================================\n\t\tnp.random.seed(seed)\n\n\t\t# ===========================================\n\t\t#\tGenerate parameters for the first comp.\n\t\t# ===========================================\n\t\tdepth = self.__randDepth(N, depthMin=depthMin, depthMax=depthMax).astype('object')\n\t\tflux = np.ones(N).astype('object')\n\t\tchi = self.__randChi(N, chiMin=chiMin, chiMax=chiMax).astype('object')\n\t\tsig = self.__randBetaNoise(N, alpha=noiseAlpha, 
beta=noiseBeta, noiseMin=noiseMin, noiseMax=noiseMax)\n\n\t\t# ===========================================\n\t\t#\tArray of labels (1 = complex, 0 = single)\n\t\t# ===========================================\n\t\tlabel = np.random.binomial(1, pcomplex, N)\n\n\t\t# ===========================================\n\t\t#\tGenerate random flux, depth, chi, and\n\t\t#\tsigma for the two component case\n\t\t# ===========================================\n\t\tloc = np.where(label == 1)[0]\n\t\tsize = len(loc)\n\n\t\tdepth[loc] = list(zip( depth[loc], depth[loc] + self.__randBetaDepth(size, alpha=depthAlpha, beta=depthBeta, depthMax=depthMax)))\n\t\tflux[loc] = list(zip( flux[loc], self.__randBetaFlux(size, alpha=fluxAlpha, beta=fluxBeta, fluxMin=fluxMin, fluxMax=fluxMax)))\n\t\tchi[loc] = list(zip( chi[loc], np.mod(chi[loc] + self.__randBetaChi(size, alpha=chiAlpha, beta=chiBeta, chiMin=chiMin, chiMax = chiMax), chiMax)))\n\n\n\t\t# ===========================================\n\t\t#\tStore the results\n\t\t# ===========================================\n\t\tself.depth_ = depth\n\t\tself.flux_ = flux\n\t\tself.chi_ = chi\n\t\tself.sig_ = sig\n\t\tself.label_ = label", "def bmc_j2(t=2001):\n x = time.time()\n tt = min(5,max(1,.05*t))\n abc('bmc3 -r -T %0.2f'%tt)\n if is_sat():\n## print 'cex found in %0.2f sec at frame %d'%((time.time()-x),cex_frame())\n return RESULT[get_status()]\n## abc('bmc3 -T 1')\n N = n_bmc_frames()\n N = max(1,N)\n## print bmc_depth()\n## abc('bmc3 -C 1000000 -T %f -S %d'%(t,int(1.5*max(3,max_bmc))))\n## cmd = 'bmc3 -J 2 -D 4000 -C 1000000 -T %f -S %d'%(t,2*N)\n cmd = 'bmc3 -r -C 2000 -J %d'%(2*N+2)\n## print cmd\n abc(cmd)\n## if is_sat():\n## print 'cex found in %0.2f sec at frame %d'%((time.time()-x),cex_frame())\n gs = prob_status()\n if not gs in [0,1,-1]:\n print 'bmc_j2 returned %s'%str(gs)\n return RESULT[get_status()]", "def build_bkg(self):\n try:\n self.param_bphi.x\n print(\"Bphi already built!\")\n except:\n self.calc_field()\n\n print(\"Build bkg\")\n\n R_temp = np.linspace(self.eqdsk.rboxleft, self.eqdsk.rboxleft+self.eqdsk.rboxlength+self.extend_psi_R, self.nR)\n z_temp = np.linspace(-self.eqdsk.zboxlength/2., self.eqdsk.zboxlength/2., self.nz)\n #R_temp = np.linspace(float(np.around(np.min(self.R_w), decimals=2)), float(np.around(np.max(self.R_w), decimals=2)), self.nR)\n #z_temp = np.linspace(float(np.around(np.min(self.z_w), decimals=2)), float(np.around(np.max(self.z_w), decimals=2)), self.nz)\n\n psitemp = self.psi_coeff(R_temp, z_temp)\n\n bphitemp = self.param_bphi(R_temp, z_temp)\n\n self.bkg={'type':'magn_bkg', 'phi0':0, 'nsector':0, 'nphi_per_sector':1,\\\n 'ncoil':0, 'zero_at_coil':1,\\\n 'R':R_temp,'z':z_temp, \\\n 'phimap_toroidal':0, 'phimap_poloidal':0, \\\n 'psi':[],\\\n 'Bphi':bphitemp, 'BR':self.Br, 'Bz':self.Bz, \\\n 'Bphi_pert':self.Bphi_pert, 'BR_pert':self.BR_pert, 'Bz_pert':self.Bz_pert} \n\n self.bkg['psi'] = psitemp*2*np.pi #in ASCOT Bfield, the psi is divided by 2*pi and reverses sign. 
This prevents it from happening \n print(\"remember: I am multiplying psi times 2pi since in ascot it divides by it!\")", "def enc_mul_const(pub, m, c): # to do\n return powmod(m, c, pub.n_sq) # m^c mod n^2", "def chunks_to_symbols_bc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bc(*args, **kwargs)", "def get_constant_bn(self, t_slice=0):\n from pgmpy.models import BayesianNetwork\n\n edges = [\n (\n str(u[0]) + \"_\" + str(u[1] + t_slice),\n str(v[0]) + \"_\" + str(v[1] + t_slice),\n )\n for u, v in self.edges()\n ]\n new_cpds = []\n for cpd in self.cpds:\n new_vars = [\n str(var) + \"_\" + str(time + t_slice) for var, time in cpd.variables\n ]\n new_cpds.append(\n TabularCPD(\n variable=new_vars[0],\n variable_card=cpd.cardinality[0],\n values=cpd.get_values(),\n evidence=new_vars[1:],\n evidence_card=cpd.cardinality[1:],\n )\n )\n\n bn = BayesianNetwork(edges)\n bn.add_cpds(*new_cpds)\n return bn", "def _pmrapmdec(self,*args,**kwargs):\n lbdvrpmllpmbb= self._lbdvrpmllpmbb(*args,**kwargs)\n return coords.pmllpmbb_to_pmrapmdec(lbdvrpmllpmbb[:,4],\n lbdvrpmllpmbb[:,5],\n lbdvrpmllpmbb[:,0],\n lbdvrpmllpmbb[:,1],degree=True)", "def LamC2pKK ( self ) : \n from GaudiConfUtils.ConfigurableGenerators import DaVinci__N3BodyDecays\n #\n return self.make_selection (\n 'LambdaCpKK' ,\n DaVinci__N3BodyDecays ,\n ## inputs \n [ self.protons() , self.kaons() ] ,\n ##\n DecayDescriptor = \" [ Lambda_c+ -> p+ K- K+ ]cc\" ,\n ##\n Combination12Cut = \"\"\"\n ( AM < 2.5 * GeV ) &\n ( ACHI2DOCA(1,2) < 16 ) \n \"\"\" ,\n ## \n CombinationCut = \"\"\"\n ( ( ADAMASS ( 'Lambda_c+' ) < 65 * MeV ) \n | ( ADAMASS ( 'Xi_c+' ) < 65 * MeV ) ) &\n ( APT > %s ) & \n ( ACHI2DOCA(1,3) < 16 ) &\n ( ACHI2DOCA(2,2) < 16 ) \n \"\"\" % ( 0.95 * self[ 'pT(Lc+)' ] ) ,\n ##\n MotherCut = \"\"\"\n ( chi2vx < 25 ) &\n ( PT > %s ) &\n ( ( ADMASS ( 'Lambda_c+' ) < 55 * MeV ) \n | ( ADMASS ( 'Xi_c+' ) < 55 * MeV ) ) &\n ( ctau > 100 * micrometer ) \n \"\"\" % self [ 'pT(Lc+)']\n )", "def test_band_structure_bc(ph_nacl):\n ph_nacl.run_band_structure(\n _get_band_qpoints(), with_group_velocities=False, is_band_connection=True\n )\n ph_nacl.get_band_structure_dict()", "def beta (self, r) :\n \n return ( 2 * mu_0 * self.p(r) ) / self.modB(r, r)**2", "def get_pbc(self):\n return self.atoms.get_pbc()", "def _get_bn_params(model, bn_layer) -> libpymo.BnParamsBiasCorr():\n\n bn_params = libpymo.BnParamsBiasCorr()\n bn_params.beta = BNUtils.get_beta_as_numpy_data(model, bn_layer).reshape(-1)\n bn_params.gamma = BNUtils.get_gamma_as_numpy_data(model, bn_layer).reshape(-1)\n\n return bn_params" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
update_crc32(unsigned int crc, string buf) > unsigned int update running CRC32 Update a running CRC with the bytes buf[0..len-1] The CRC should be initialized to all 1's, and the transmitted value is the 1's complement of the final running CRC. The resulting CRC should be transmitted in big endian order.
def update_crc32(*args, **kwargs): return _digital_swig.update_crc32(*args, **kwargs)
[ "def crc32(self, val):\n\t\treturn binascii.crc32(str(self)+str(self.getCoord()), val)", "def calc_crc32(data):\n return binascii.crc32(data) & 0xFFFFFFFF", "def CRC32data(data):\n from binascii import crc32\n buf = (crc32(data) & 0xFFFFFFFF)\n return \"{:08X}\".format(buf)", "def crc32_hash(value: str) -> int:\n return crc32(bytes(value.encode(\"utf-8\")))", "def crc32(polynom, data, value=0, reverse=False):\n if reverse:\n if sys.version_info >= (3, ):\n return crc32_idata_rev(polynom, (d & 0xff for d in data), value)\n else:\n return crc32_idata_rev(polynom, (ord(d) for d in data), value)\n else:\n if sys.version_info >= (3, ):\n return crc32_idata(polynom, (d & 0xff for d in data), value)\n else:\n return crc32_idata(polynom, (ord(d) for d in data), value)", "def _gen_crc(crc):\n for j in range(8):\n if crc & 1:\n crc = (crc >> 1) ^ 0xEDB88320\n else:\n crc >>= 1\n return crc", "def test_crc32_fr():\n eq_(2750076964, crc32(u'parl\\u00e9 Fran\\u00e7ais'))", "def Checksum(cls, string):\n # Get the last 10 bits\n c = crc32(string.encode('utf-8')) & (2 ** 10 - 1)\n return (cls.BASE32_ALPHABET[c >> cls.BASE32_BIT_WIDTH] +\n cls.BASE32_ALPHABET[c & (2 ** cls.BASE32_BIT_WIDTH - 1)])", "def str_crc_example(self, crcdict, message=None):\n \n crclen = crcdict['crclen']\n\n packstr = self.crcdict_to_packstr(crcdict)\n\n example_str = \"import struct\\n\"\n if crclen == 1:\n example_str += \"from crccheck.crc import Crc8Base\\ncrc = Crc8Base\\n\"\n elif crclen == 2:\n example_str += \"from crccheck.crc import Crc16Base\\ncrc = Crc16Base\\n\"\n else:\n example_str += \"from crccheck.crc import Crc32Base\\ncrc = Crc32Base\\n\"\n \n example_str += \"def my_crc(message):\\n\"\n example_str += \" crc._poly = 0x%X\\n\"%crcdict['poly'] +\\\n \" crc._reflect_input = %r\\n\"%crcdict['reflectin'] +\\\n \" crc._reflect_output = %r\\n\"%crcdict['reflectout'] +\\\n \" crc._initvalue = 0x%0X\\n\"%crcdict['init'] +\\\n \" crc._xor_output = 0x%0X\\n\"%crcdict['xor_output']\n\n example_str += \" output_int = crc.calc(message)\\n\"\n example_str += ' output_bytes = struct.pack(\"%s\", output_int)\\n'%packstr\n example_str += \" output_list = list(output_bytes)\\n\"\n example_str += \" return (output_int, output_bytes, output_list)\\n\"\n \n if message:\n example_str += \"\\n\"\n example_str += \"m = %r\\n\"%message\n example_str += \"output = my_crc(m)\\n\"\n example_str += \"print(hex(output[0]))\"\n \n return example_str", "def test_crc32_ja():\n eq_(696255294, crc32(u'\\u6709\\u52b9'))", "def calculate_checksum(buf):\n checksum = 0\n for byte in bytearray(buf):\n checksum = (checksum + byte) & 0x0000FFFF\n\n return checksum", "def add_crc(self, path):\n with open(path, 'rb') as f:\n data = f.read()\n\n # Calculate our CRC\n calced_crc = binascii.crc32(data)\n self.logger.debug('Adding checksum {} to {}'.format(calced_crc, path))\n # Convert the integer to binary with little endian\n bin_crc = struct.pack('<L', calced_crc)\n\n # Write the new CRC with the data back to the file\n with open(path, 'wb') as f:\n f.write(bin_crc + data)", "def calculate_crc16(data):\r\n crc = 0xffff\r\n for byte in data:\r\n crc = crc ^ byte\r\n for i in range(8):\r\n x = crc & 1\r\n crc = crc >> 1\r\n crc = crc & 0x7fff\r\n if x:\r\n crc = crc ^ 0xa001\r\n return crc & 0xffff", "def crc(fileName):\n file_without_ext, ext = os.path.splitext(fileName)\n\n # cue + bin\n if ext == '.cue':\n with open(fileName) as f:\n # Get first bin file from first or second cue sheet line\n for i, line in enumerate(f):\n if 'CATALOG' in line:\n 
continue\n else:\n bin_file = line[6:-9]\n break\n\n # Read first 2KB of first bin file of CUE sheet\n fileName = f'{os.path.dirname(fileName)}/{bin_file}'\n prev = 0\n with open(fileName, 'rb') as f:\n f.read(16)\n b = f.read(2048)\n prev = zlib.crc32(b, prev)\n return \"%X\" % (prev & 0xFFFFFFFF)\n\n # ROMs\n else:\n prev = 0\n for eachLine in open(fileName, \"rb\"):\n prev = zlib.crc32(eachLine, prev)\n return \"%X\" % (prev & 0xFFFFFFFF)", "def calc_crc32_for_file(path: pathlib.Path) -> int:\n with open(str(path), 'rb') as f:\n return zlib.crc32(f.read())", "def conv18bitToInt32(threeByteBuffer): \n if len(threeByteBuffer) != 3:\n raise Valuerror(\"Input should be 3 bytes long.\")\n\n prefix = 0;\n\n # if LSB is 1, negative number, some hasty unsigned to signed conversion to do\n if threeByteBuffer[2] & 0x01 > 0:\n prefix = 0b11111111111111;\n return ((prefix << 18) | (threeByteBuffer[0] << 16) | (threeByteBuffer[1] << 8) | threeByteBuffer[2]) | ~0xFFFFFFFF\n else:\n return (prefix << 18) | (threeByteBuffer[0] << 16) | (threeByteBuffer[1] << 8) | threeByteBuffer[2]", "def test_crc32(self):\n self.assertEqual(\"4B8E39EF\", self.file_path.crc32)", "def crc(self, refresh=False):\r\n if refresh or self._crc is None:\r\n c = 0\r\n with open(self.path, 'rb') as fp:\r\n chunk = fp.read(1024)\r\n while chunk:\r\n c = binascii.crc32(chunk, c)\r\n chunk = fp.read(1024)\r\n self._crc = c\r\n return self._crc", "def crc_check(self, data):\n\n crc = calc_crc(data)\n if crc != 0:\n print('Failed CRC. Errors in data received')", "def checksum(sentence):\n crc = 0\n for c in sentence:\n crc = crc ^ ord(c)\n crc = crc & 0xFF\n return crc" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_descrambler_bb_sptr __init__(self, p) > digital_descrambler_bb_sptr
def __init__(self, *args): this = _digital_swig.new_digital_descrambler_bb_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def descrambler_bb(*args, **kwargs):\n return _digital_swig.descrambler_bb(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def scrambler_bb(*args, **kwargs):\n return _digital_swig.scrambler_bb(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, algorithm: GeneratorAlgorithm) -> None:\n self.algorithm = algorithm", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, a, b):\n self.a = make_generator(a)\n self.b = make_generator(b)", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoBlinker()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n self.codeword = Codeword.generate_random_codeword()\n self.transmission = \"\"", "def __init__(self, name, smarts, score) -> None:\n ...", "def __init__(self, process_chain, showWarnings=1, maxsec_rttrace=7200, analysis_overlap=0): # ppc\n self.process_chain = process_chain\n super(PadGenerator, self).__init__(showWarnings)\n self.show_warnings = showWarnings\n self.maxsec_rttrace = maxsec_rttrace # in seconds for EACH (x,y,z) rt_trace\n #self.scale_factor = scale_factor # ppc\n self.analysis_interval = self.process_chain.analysis_interval # ppc\n self.analysis_overlap = analysis_overlap\n self.analysis_samples = None\n self.starttime = None\n if showWarnings:\n self.warnfiltstr = 'always'\n else:\n self.warnfiltstr = 'ignore'", "def __init__(self, *args):\n this = _coin.new_SbDPLine(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
descrambler_bb(int mask, int seed, int len) > digital_descrambler_bb_sptr Descramble an input stream using an LFSR. This block works on the LSB only of the input data stream, i.e., on an "unpacked binary" stream, and produces the same format on its output.
def descrambler_bb(*args, **kwargs): return _digital_swig.descrambler_bb(*args, **kwargs)
[ "def scrambler_bb(*args, **kwargs):\n return _digital_swig.scrambler_bb(*args, **kwargs)", "def bpsk(input_bits, noise):\n modulator = Modulator()\n demodulator = Demodulator()\n channel = Channel()\n signal = modulator.make_bpsk_mod(input_bits)\n\n signal = channel.send_signal(signal, noise)\n\n result_bits = demodulator.make_bpsk_demod(signal, channel)\n return result_bits", "def additive_scrambler_bb(*args, **kwargs):\n return _digital_swig.additive_scrambler_bb(*args, **kwargs)", "def diff_decoder_bb(*args, **kwargs):\n return _digital_swig.diff_decoder_bb(*args, **kwargs)", "def hrsbias(rawpath, outpath, link=False, mem_limit=1e9, sdb=None, clobber=True):\n if not os.path.isdir(rawpath): return \n\n image_list = ImageFileCollection(rawpath)\n if len(image_list.files)==0: return\n\n #make output directory\n if not os.path.isdir(outpath): os.mkdir(outpath)\n \n \n obsdate=get_obsdate(image_list.summary['file'][0])\n \n\n #process the red bias frames\n matches = (image_list.summary['obstype'] == 'Bias') * (image_list.summary['detnam'] == 'HRDET')\n rbias_list = []\n for fname in image_list.summary['file'][matches]:\n ccd = red_process(rawpath+fname)\n rbias_list.append(ccd)\n if sdb is not None: dq_ccd_insert(rawpath + fname, sdb)\n\n if rbias_list:\n if os.path.isfile(\"{0}/RBIAS_{1}.fits\".format(outpath, obsdate)) and clobber: \n os.remove(\"{0}/RBIAS_{1}.fits\".format(outpath, obsdate))\n rbias = ccdproc.combine(rbias_list, method='median', output_file=\"{0}/RBIAS_{1}.fits\".format(outpath, obsdate), mem_limit=mem_limit)\n del rbias_list\n\n #process the red bias frames\n matches = (image_list.summary['obstype'] == 'Bias') * (image_list.summary['detnam'] == 'HBDET')\n hbias_list = []\n for fname in image_list.summary['file'][matches]:\n ccd = blue_process(rawpath+fname)\n hbias_list.append(ccd)\n if sdb is not None: dq_ccd_insert(rawpath + fname, sdb)\n\n if hbias_list:\n if os.path.isfile(\"{0}/HBIAS_{1}.fits\".format(outpath, obsdate)) and clobber: \n os.remove(\"{0}/HBIAS_{1}.fits\".format(outpath, obsdate))\n hbias = ccdproc.combine(hbias_list, method='median', output_file=\"{0}/HBIAS_{1}.fits\".format(outpath, obsdate), mem_limit=mem_limit)\n del hbias_list\n\n\n #provide the link to the bias frame\n if link:\n ldir = '/salt/HRS_Cals/CAL_BIAS/{0}/{1}/'.format(obsdate[0:4], obsdate[4:8])\n if not os.path.isdir(ldir): os.mkdir(ldir)\n ldir = '/salt/HRS_Cals/CAL_BIAS/{0}/{1}/product'.format(obsdate[0:4], obsdate[4:8])\n if not os.path.isdir(ldir): os.mkdir(ldir)\n \n infile=\"{0}/RBIAS_{1}.fits\".format(outpath, obsdate)\n link='/salt/HRS_Cals/CAL_BIAS/{0}/{1}/product/RBIAS_{2}.fits'.format(obsdate[0:4], obsdate[4:8], obsdate)\n if os.path.islink(link) and clobber: os.remove(link)\n os.symlink(infile, link)\n infile=\"{0}/HBIAS_{1}.fits\".format(outpath, obsdate)\n link='/salt/HRS_Cals/CAL_BIAS/{0}/{1}/product/HBIAS_{2}.fits'.format(obsdate[0:4], obsdate[4:8], obsdate)\n if os.path.islink(link) and clobber: os.remove(link)\n os.symlink(infile, link)", "def maskDead(smr, verbose=False):\n if verbose:\n print('(*) Perform masking of the dead/bad pixel')\n smr.errorType = 'F'\n #\n # mask blinded pixels\n #\n smr.blinded = np.empty((smr.numPixels,), dtype=bool)\n smr.blinded[:] = False\n\n id_list = np.array(list(range(10)) + list(range(1024-10, 1024)))\n smr.blinded[0+id_list] = True # channel 1\n smr.blinded[1024+id_list] = True # channel 2\n smr.blinded[2048+id_list] = True # channel 3\n smr.blinded[3072+id_list] = True # channel 4\n smr.blinded[4096+id_list] = True # channel 5\n 
smr.blinded[5120+id_list] = True # channel 6\n smr.blinded[6144+id_list] = True # channel 7\n smr.blinded[7168+id_list] = True # channel 8\n #\n # mask dead pixels\n #\n i_masked = smr.spectra.mask.sum()\n smr.spectra = ma.masked_equal(smr.spectra, 0, copy=False)\n if verbose:\n masked = smr.spectra.mask.sum()\n print('* Info: masked %6.1f pixels/spectrum with zero signal'\n % ((masked - i_masked) / float(smr.numSpectra)))\n i_masked = masked\n\n smr.spectra = ma.masked_where((smr.spectra / smr.coaddf) >= 65535.,\n smr.spectra, copy=False)\n if verbose:\n masked = smr.spectra.mask.sum()\n print('* Info: masked %6.1f pixels/spectrum with saturated signal'\n % ((masked - i_masked) / float(smr.numSpectra)))\n i_masked = masked", "def get_ULRB_scramble():\n return _MEGA_SCRAMBLER.call(\"megaScrambler.getSkewbULRBScramble\")", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def rle_decode(rle, shape):\n\trle = list(map(int, rle.split()))\n\trle = np.array(rle, dtype=np.int32).reshape([-1, 2])\n\trle[:, 1] += rle[:, 0]\n\trle -= 1\n\tmask = np.zeros([shape[0] * shape[1]], np.bool)\n\tfor s, e in rle:\n\t assert 0 <= s < mask.shape[0]\n\t assert 1 <= e <= mask.shape[0], \"shape: {} s {} e {}\".format(shape, s, e)\n\t mask[s:e] = 1\n\t# Reshape and transpose\n\tmask = mask.reshape([shape[1], shape[0]]).T\n\treturn mask", "def _banded_beads(y, freq_cutoff=0.005, lam_0=1.0, lam_1=1.0, lam_2=1.0, asymmetry=6,\n filter_type=1, use_v2_loss=True, max_iter=50, tol=1e-2, eps_0=1e-6,\n eps_1=1e-6, smooth_half_window=0):\n num_y = y.shape[0]\n d1_diags = np.zeros((5, num_y))\n d2_diags = np.zeros((5, num_y))\n A, B = _high_pass_filter(num_y, freq_cutoff, filter_type, False)\n # the number of lower and upper diagonals for both A and B\n ab_lu = (filter_type, filter_type)\n # the shape of A and B, and D.T*D matrices in their full forms rather than banded forms\n full_shape = (num_y, num_y)\n A_lower = A[filter_type:]\n BTB = _banded_dot_banded(B, B, ab_lu, ab_lu, full_shape, full_shape, True)\n # number of lower and upper diagonals of A.T * (D.T * D) * A\n num_diags = (2 * filter_type + 2, 2 * filter_type + 2)\n\n # line 2 of Table 3 in beads paper\n d = (\n _banded_dot_vector(\n np.asfortranarray(BTB),\n solveh_banded(A_lower, y, check_finite=False, lower=True),\n (2 * filter_type, 2 * filter_type), full_shape\n )\n - _banded_dot_vector(\n A, np.full(num_y, lam_0 * (1 - asymmetry) / 2), ab_lu, full_shape\n )\n )\n gamma = np.empty(num_y)\n gamma_factor = lam_0 * (1 + asymmetry) / 2 # 2 * lam_0 * (1 + asymmetry) / 4\n x = y\n d1_x, d2_x = _abs_diff(x, smooth_half_window)\n cost_old = 0\n abs_x = np.abs(x)\n big_x = abs_x > eps_0\n tol_history = np.empty(max_iter + 1)\n for i in range(max_iter + 1):\n # calculate line 6 of Table 3 in beads paper using banded matrices rather\n # than sparse matrices since it is much faster; Gamma + D.T * Lambda * D\n\n # row 1 and 3 instead of 0 and 2 to account for zeros on top and bottom\n d1_diags[1][1:] = d1_diags[3][:-1] = -_beads_weighting(d1_x, use_v2_loss, eps_1)\n d1_diags[2] = -(d1_diags[1] + d1_diags[3])\n\n d2_diags[0][2:] = d2_diags[-1][:-2] = _beads_weighting(d2_x, use_v2_loss, eps_1)\n d2_diags[1] = 2 * (d2_diags[0] - np.roll(d2_diags[0], -1, 0)) - 4 * d2_diags[0]\n d2_diags[-2][:-1] = d2_diags[1][1:]\n d2_diags[2] = -(d2_diags[0] + d2_diags[1] + d2_diags[-1] + d2_diags[-2])\n\n d_diags = lam_1 * d1_diags + lam_2 * d2_diags\n\n gamma[~big_x] = gamma_factor / 
eps_0\n gamma[big_x] = gamma_factor / abs_x[big_x]\n d_diags[2] += gamma\n\n temp = _banded_dot_banded(\n _banded_dot_banded(A, d_diags, ab_lu, (2, 2), full_shape, full_shape),\n A, (filter_type + 2, filter_type + 2), ab_lu, full_shape, full_shape, True\n )\n temp[2:-2] += BTB\n\n # cannot use solveh_banded since temp is not guaranteed to be positive-definite\n # and diagonally-dominant\n x = _banded_dot_vector(\n A,\n solve_banded(num_diags, temp, d, overwrite_ab=True, check_finite=False),\n ab_lu, full_shape\n )\n\n abs_x, big_x, theta = _beads_theta(x, asymmetry, eps_0)\n d1_x, d2_x = _abs_diff(x, smooth_half_window)\n h = _banded_dot_vector(\n B,\n solveh_banded(A_lower, y - x, check_finite=False, overwrite_b=True, lower=True),\n ab_lu, full_shape\n )\n cost = (\n 0.5 * h.dot(h)\n + lam_0 * theta\n + lam_1 * _beads_loss(d1_x, use_v2_loss, eps_1).sum()\n + lam_2 * _beads_loss(d2_x, use_v2_loss, eps_1).sum()\n )\n cost_difference = relative_difference(cost_old, cost)\n tol_history[i] = cost_difference\n if cost_difference < tol:\n break\n cost_old = cost\n\n diff = y - x\n baseline = (\n diff\n - _banded_dot_vector(\n B,\n solveh_banded(A_lower, diff, check_finite=False, overwrite_ab=True, lower=True),\n ab_lu, full_shape\n )\n )\n\n return baseline, {'signal': x, 'tol_history': tol_history[:i + 1]}", "def decode_mask(mask): # real signature unknown; restored from __doc__\n pass", "def decode_strand(read_flag, mask):\n\n strand_flag = (read_flag & mask == 0)\n if strand_flag:\n return \"+\"\n else:\n return \"-\"", "def rle_decode(self, rle, shape):\n rle = list(map(int, rle.split()))\n rle = np.array(rle, dtype=np.int32).reshape([-1, 2])\n rle[:, 1] += rle[:, 0]\n rle -= 1\n mask = np.zeros([shape[0] * shape[1]], np.bool)\n for s, e in rle:\n assert 0 <= s < mask.shape[0]\n assert 1 <= e <= mask.shape[0], \"shape: {} s {} e {}\".format(shape, s, e)\n mask[s:e] = 1\n # Reshape and transpose\n mask = mask.reshape([shape[1], shape[0]]).T\n return mask", "def get_binLF(self,Lbin=0.3,zbin=None,plot_fig=False):\n \n if self.data_loaded is False:\n self.load_sample() \n if zbin is None:\n z1=KdeLF.z1\n z2=KdeLF.z2\n else:\n if type(zbin)==list and len(zbin)==2:\n z1,z2=zbin\n else:\n print(\"'zbin' input error, 'zbin' should be a list of two real numbers, e.g., [0.1,0.5]\")\n return\n \n Lmin=KdeLF.Lmin \n Lmax=KdeLF.Lmax \n nLbin=int((Lmax-Lmin)/Lbin) \n Ls=np.empty(nLbin)\n lgphi=np.empty(nLbin)\n nums=np.empty(nLbin)\n zz1=max(1e-6,z1)\n L0=max(Lmin,self.f_lim(zz1))\n f_lim_z2=self.f_lim(z2)\n #print('L0',L0)\n L1=L0\n L2=L1+Lbin \n k=0\n for i in range(nLbin):\n num=0\n for j in range(KdeLF.ndata): \n if KdeLF.red[j]>=z1 and KdeLF.red[j]<z2 and KdeLF.lum[j]>=L1 and KdeLF.lum[j]<L2:\n #if KdeLF.lum[j]>=L1 and KdeLF.lum[j]<L2:\n num=num+1\n #print('L1,L2,num',L1,L2,num)\n Lo=self.f_lim((z1+z2)/2)\n Lc=L1+Lbin/2\n if num>0: #and Lc>=Lo:\n if L2<f_lim_z2:\n x0=optimize.brentq(self.f,zz1,z2,args=(L2))\n #print('x0',x0)\n else:\n x0=z2\n area = dblquad(self.rho, z1, x0, lambda z: max(self.f_lim(z),L1), lambda z: L2)\n #print('area',area)\n phi=num/area[0]\n Ls[k]=Lc\n lgphi[k]=phi #np.log10(phi)\n nums[k]=num\n #print(k,L[k],lgphi[k])\n k=k+1\n L1=L1+Lbin\n L2=L2+Lbin\n if L1>=Lmax:\n break \n #result=np.empty((2,k)) \n #result[0]=L[0:k];result[1]=lgphi[0:k] #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ \n Ls=Ls[0:k]\n lgphi=lgphi[0:k] \n nums=nums[0:k] \n \n # Calculate errorbars on our binned LF. 
These have been estimated\n # using Equations 1 and 2 of Gehrels 1986 (ApJ 303 336), as\n # implemented in astropy.stats.poisson_conf_interval. The\n # interval='frequentist-confidence' option to that astropy function is\n # exactly equal to the Gehrels formulas. (also see Kulkarni et al. 2019, MNRAS, 488, 1035) \n \n nlims = pci(nums,interval='frequentist-confidence')\n nlims *= lgphi/nums\n lgphi=np.log10(lgphi) \n uperr = np.log10(nlims[1]) - lgphi \n downerr = lgphi - np.log10(nlims[0])\n left=Ls*0+Lbin/2\n right=Ls*0+Lbin/2 \n \n if plot_fig is True: \n plt.figure(figsize=(8,6)) \n ax=plt.axes([0.13,0.1, 0.82, 0.85])\n ax.tick_params(direction='in', top=True, right=True, labelsize=12) \n plt.plot(Ls,lgphi,'o',mfc='white',mec='black',ms=9,mew=1.0,alpha=0.7,label=r'$\\hat{\\phi}_{\\mathrm{bin}}$') # plot_bin\n ax.errorbar(Ls, lgphi, ecolor='k', capsize=0, xerr=np.vstack((left, right)), \n yerr=np.vstack((uperr, downerr)),fmt='None', zorder=4, alpha=0.5)\n\n if self.absolute_magnitude is True:\n #tx=lmax-(lmax-KdeLF.Lmin)*0.618\n #ty=y1+(y2-y1)*0.88\n #ax.text(tx,ty,'z='+'%.3f' %z,fontsize=13,bbox=dict(boxstyle='square,pad=0.3', fc='yellow', ec='k',lw=0.5 ,alpha=0.4)) \n plt.ylabel(r'$\\log_{10}( \\phi(z,M) ~/~ {\\rm Mpc}^{-3} ~ \\mathrm{mag}^{-1} )$',fontsize=18)\n plt.xlabel(r'$M$',fontsize=18) \n else: \n plt.ylabel(r'$\\log_{10}( \\phi(z,L) ~/~ {\\rm Mpc}^{-3} ~ \\Delta L^{-1} )$',fontsize=18)\n plt.xlabel(r'$\\log_{10} ~ L$',fontsize=18)\n plt.legend(loc='best', fontsize=12)\n plt.savefig('binLF.png')\n plt.show() \n \n return Ls,left,right,lgphi,uperr,downerr", "def detail_mask(clip: vs.VideoNode,\n sigma: float = 1.0, rxsigma: List[int] = [50, 200, 350],\n pf_sigma: Optional[float] = 1.0,\n rad: int = 3, brz: Tuple[int, int] = (2500, 4500),\n rg_mode: int = 17,\n ) -> vs.VideoNode:\n from kagefunc import kirsch\n\n bits, clip = _get_bits(clip)\n\n clip_y = get_y(clip)\n pf = core.bilateral.Gaussian(clip_y, sigma=pf_sigma) if pf_sigma else clip_y\n ret = core.retinex.MSRCP(pf, sigma=rxsigma, upper_thr=0.005)\n\n blur_ret = core.bilateral.Gaussian(ret, sigma=sigma)\n blur_ret_diff = core.std.Expr([blur_ret, ret], \"x y -\")\n blur_ret_dfl = core.std.Deflate(blur_ret_diff)\n blur_ret_ifl = iterate(blur_ret_dfl, core.std.Inflate, 4)\n blur_ret_brz = core.std.Binarize(blur_ret_ifl, brz[0])\n blur_ret_brz = core.morpho.Close(blur_ret_brz, size=8)\n\n kirsch_mask = kirsch(clip_y).std.Binarize(brz[1])\n kirsch_ifl = kirsch_mask.std.Deflate().std.Inflate()\n kirsch_brz = core.std.Binarize(kirsch_ifl, brz[1])\n kirsch_brz = core.morpho.Close(kirsch_brz, size=4)\n\n merged = core.std.Expr([blur_ret_brz, kirsch_brz], \"x y +\")\n rm_grain = core.rgvs.RemoveGrain(merged, rg_mode)\n return rm_grain if bits == 16 else depth(rm_grain, bits)", "def make_buffer_from_bit_pattern(pattern, DATASIZE, freqs, off_freq):\n # the key's middle value is the bit's value and the left and right bits are the bits before and after\n # the buffers are enveloped to cleanly blend into each other\n\n last_bit = pattern[-1]\n output_buffer = []\n offset = 0\n counter = 1\n\n for i in range(len(pattern)):\n bit = pattern[i]\n if i < len(pattern) - 1:\n next_bit = pattern[i+1]\n else:\n next_bit = pattern[0]\n\n freq = freqs[counter] if bit == '1' else off_freq\n tone = ttone(freq, DATASIZE, offset=offset)\n # output_buffer += envelope(tone, left=last_bit=='0', right=next_bit=='0')\n output_buffer.append(tone)\n # offset += DATASIZE\n last_bit = bit\n\n if counter == 8:\n counter = 1\n else:\n counter += 1\n\n 
output_buffer = [struct.pack('f'*len(frame), *frame) for frame in output_buffer]\n # print output_buffer\n\n # return struct.pack('s'*len(output_buffer), *output_buffer)\n return output_buffer", "def build_maskrcnn(input_specs: tf.keras.layers.InputSpec,\n model_config: maskrcnn_cfg.MaskRCNN,\n l2_regularizer: tf.keras.regularizers.Regularizer = None):\n backbone = backbones.factory.build_backbone(\n input_specs=input_specs,\n model_config=model_config,\n l2_regularizer=l2_regularizer)\n\n decoder = decoder_factory.build_decoder(\n input_specs=backbone.output_specs,\n model_config=model_config,\n l2_regularizer=l2_regularizer)\n\n rpn_head_config = model_config.rpn_head\n roi_generator_config = model_config.roi_generator\n roi_sampler_config = model_config.roi_sampler\n roi_aligner_config = model_config.roi_aligner\n detection_head_config = model_config.detection_head\n generator_config = model_config.detection_generator\n norm_activation_config = model_config.norm_activation\n num_anchors_per_location = (\n len(model_config.anchor.aspect_ratios) * model_config.anchor.num_scales)\n\n rpn_head = dense_prediction_heads.RPNHead(\n min_level=model_config.min_level,\n max_level=model_config.max_level,\n num_anchors_per_location=num_anchors_per_location,\n num_convs=rpn_head_config.num_convs,\n num_filters=rpn_head_config.num_filters,\n use_separable_conv=rpn_head_config.use_separable_conv,\n activation=norm_activation_config.activation,\n use_sync_bn=norm_activation_config.use_sync_bn,\n norm_momentum=norm_activation_config.norm_momentum,\n norm_epsilon=norm_activation_config.norm_epsilon,\n kernel_regularizer=l2_regularizer)\n\n detection_head = instance_heads.DetectionHead(\n num_classes=model_config.num_classes,\n num_convs=detection_head_config.num_convs,\n num_filters=detection_head_config.num_filters,\n use_separable_conv=detection_head_config.use_separable_conv,\n num_fcs=detection_head_config.num_fcs,\n fc_dims=detection_head_config.fc_dims,\n activation=norm_activation_config.activation,\n use_sync_bn=norm_activation_config.use_sync_bn,\n norm_momentum=norm_activation_config.norm_momentum,\n norm_epsilon=norm_activation_config.norm_epsilon,\n kernel_regularizer=l2_regularizer)\n\n roi_generator_obj = roi_generator.MultilevelROIGenerator(\n pre_nms_top_k=roi_generator_config.pre_nms_top_k,\n pre_nms_score_threshold=roi_generator_config.pre_nms_score_threshold,\n pre_nms_min_size_threshold=(\n roi_generator_config.pre_nms_min_size_threshold),\n nms_iou_threshold=roi_generator_config.nms_iou_threshold,\n num_proposals=roi_generator_config.num_proposals,\n test_pre_nms_top_k=roi_generator_config.test_pre_nms_top_k,\n test_pre_nms_score_threshold=(\n roi_generator_config.test_pre_nms_score_threshold),\n test_pre_nms_min_size_threshold=(\n roi_generator_config.test_pre_nms_min_size_threshold),\n test_nms_iou_threshold=roi_generator_config.test_nms_iou_threshold,\n test_num_proposals=roi_generator_config.test_num_proposals,\n use_batched_nms=roi_generator_config.use_batched_nms)\n\n roi_sampler_obj = roi_sampler.ROISampler(\n mix_gt_boxes=roi_sampler_config.mix_gt_boxes,\n num_sampled_rois=roi_sampler_config.num_sampled_rois,\n foreground_fraction=roi_sampler_config.foreground_fraction,\n foreground_iou_threshold=roi_sampler_config.foreground_iou_threshold,\n background_iou_high_threshold=(\n roi_sampler_config.background_iou_high_threshold),\n background_iou_low_threshold=(\n roi_sampler_config.background_iou_low_threshold))\n\n roi_aligner_obj = roi_aligner.MultilevelROIAligner(\n 
crop_size=roi_aligner_config.crop_size,\n sample_offset=roi_aligner_config.sample_offset)\n\n detection_generator_obj = detection_generator.DetectionGenerator(\n apply_nms=True,\n pre_nms_top_k=generator_config.pre_nms_top_k,\n pre_nms_score_threshold=generator_config.pre_nms_score_threshold,\n nms_iou_threshold=generator_config.nms_iou_threshold,\n max_num_detections=generator_config.max_num_detections,\n use_batched_nms=generator_config.use_batched_nms)\n\n if model_config.include_mask:\n mask_head = instance_heads.MaskHead(\n num_classes=model_config.num_classes,\n upsample_factor=model_config.mask_head.upsample_factor,\n num_convs=model_config.mask_head.num_convs,\n num_filters=model_config.mask_head.num_filters,\n use_separable_conv=model_config.mask_head.use_separable_conv,\n activation=model_config.norm_activation.activation,\n norm_momentum=model_config.norm_activation.norm_momentum,\n norm_epsilon=model_config.norm_activation.norm_epsilon,\n kernel_regularizer=l2_regularizer,\n class_agnostic=model_config.mask_head.class_agnostic)\n\n mask_sampler_obj = mask_sampler.MaskSampler(\n mask_target_size=(\n model_config.mask_roi_aligner.crop_size *\n model_config.mask_head.upsample_factor),\n num_sampled_masks=model_config.mask_sampler.num_sampled_masks)\n\n mask_roi_aligner_obj = roi_aligner.MultilevelROIAligner(\n crop_size=model_config.mask_roi_aligner.crop_size,\n sample_offset=model_config.mask_roi_aligner.sample_offset)\n else:\n mask_head = None\n mask_sampler_obj = None\n mask_roi_aligner_obj = None\n\n model = maskrcnn_model.MaskRCNNModel(\n backbone=backbone,\n decoder=decoder,\n rpn_head=rpn_head,\n detection_head=detection_head,\n roi_generator=roi_generator_obj,\n roi_sampler=roi_sampler_obj,\n roi_aligner=roi_aligner_obj,\n detection_generator=detection_generator_obj,\n mask_head=mask_head,\n mask_sampler=mask_sampler_obj,\n mask_roi_aligner=mask_roi_aligner_obj)\n return model", "def rle_to_binary_mask(rle):\n binary_array = np.zeros(np.prod(rle.get('size')), dtype=bool)\n counts = rle.get('counts')\n \n start = 0\n for i in range(len(counts)-1):\n start += counts[i] \n end = start + counts[i+1] \n binary_array[start:end] = (i + 1) % 2\n \n binary_mask = binary_array.reshape(*rle.get('size'), order='F')\n\n return binary_mask", "def Hamming_7_4_dec(stream):\n\n if (len(stream) % 7) != 0:\n exit(\"Aborted decoding: non valid number of bits\")\n\n synd_tab = {\"000\" : np.array([0, 0, 0, 0]),\n \"001\" : np.array([0, 0, 0, 0]),\n \"010\" : np.array([0, 0, 0, 0]),\n \"100\" : np.array([0, 0, 0, 0]),\n \"101\" : np.array([1, 0, 0, 0]),\n \"110\" : np.array([0, 1, 0, 0]),\n \"111\" : np.array([0, 0, 1, 0]),\n \"011\" : np.array([0, 0, 0, 1])\n }\n\n G = np.array([[1, 0, 0, 0, 1, 0, 1], [0, 1, 0, 0, 1, 1, 0], [0, 0, 1, 0, 1, 1, 1], [0, 0, 0, 1, 0, 1, 1]])\n stream = [stream[k:k+7] for k in range(0, len(stream), 7)]\n for i, word in enumerate(stream):\n sig = word[0:4]\n par = word[4:]\n\n sig = np.asarray([int(i) for i in sig])\n par = np.asarray([int(i) for i in par])\n\n code_word = sig.dot(G) % 2\n par_word = code_word[4:]\n\n syndrom = (par + par_word) % 2\n synd = ''\n for bit in syndrom:\n synd += str(bit)\n\n mpep = synd_tab[synd]\n stream[i] = (sig + mpep) % 2\n\n return stream" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_diff_decoder_bb_sptr __init__(self, p) > digital_diff_decoder_bb_sptr
def __init__(self, *args): this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def diff_decoder_bb(*args, **kwargs):\n return _digital_swig.diff_decoder_bb(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_simple_framer_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_framer_sink_1_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def make(self, *args, **kwargs):\n return _frame_detection_swig.deinterleaver_bb_sptr_make(self, *args, **kwargs)", "def __init__(self):\n this = _coin.new_SoMFVec2b()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_fff_sptr(*args)\n try: self.this.append(this)\n except: self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
diff_decoder_bb(unsigned int modulus) > digital_diff_decoder_bb_sptr y[0] = (x[0] - x[-1]) % M Uses current and previous symbols and the alphabet modulus to perform differential decoding.
def diff_decoder_bb(*args, **kwargs): return _digital_swig.diff_decoder_bb(*args, **kwargs)
[ "def diff_encoder_bb(*args, **kwargs):\n return _digital_swig.diff_encoder_bb(*args, **kwargs)", "def decode(self, y):\n assert(y.size == self.p.n)\n # Setup!\n beta = np.zeros(self.p.M * self.p.L) # beta_0 = 0\n z = y # z_0 = y\n s = beta + self.Ay(z)\n tau_sq = np.dot(z,z) / self.p.n\n tau_sq_prev = tau_sq + 1\n\n # Iterate!\n t = 1\n decoding_threshold = 5*self.p.Palloc[self.p.L-1]\n while tau_sq_prev - tau_sq >= decoding_threshold:\n #print('t = {}, tau_sq = {}, avg(beta^2) = {}'.format(t, tau_sq, np.dot(beta, beta) / self.p.n))\n \n # Calculate beta^t = eta^(t-1) (s_(t-1))\n beta = self.eta(s, tau_sq)\n\n # Calculate z_t = y - A beta^t - z_(t-1) / tau_(t-1)^2 * (P_total - (beta^t)^2 / n)\n z = y - self.Ax(beta) + z / tau_sq * (self.P_total - np.dot(beta, beta) / self.p.n)\n\n # Calculate s^t = beta^t + A^T z^(t)\n s = beta + self.Ay(z)\n\n # Calculate tau_t^2 = z_t^2 / n\n tau_sq_prev = tau_sq\n tau_sq = np.dot(z,z) / self.p.n\n\n t += 1\n\n # Declare the maximum value in each section to be the decoded '1'.\n x = np.zeros(self.p.L, dtype=int)\n\n for l in range(self.p.L):\n index = beta[l * self.p.M : (l+1) * self.p.M].argmax()\n x[l] = index\n return x", "def decrypt_key(e1, modulus):\r\n\r\n d = 1 #Setting d to 1\r\n finished = False #To check if we found a value for d\r\n while finished != True: #We check to see if we have a value for d\r\n newD = e1 * d #Works out the remainder using the equation ed (mod modulus) = 1\r\n finalD = newD % modulus\r\n \r\n if finalD == 1: #If the remainder is 1 we have a value for d!!\r\n finished = True\r\n else:\r\n d = d + 1\r\n\r\n\r\n return d", "def encB(self, enc, m1=1, m2=1):\n print('Moving '+str((enc/18))+ ' rotations(s) backwards')\n enc_tgt(m1, m2, enc)\n bwd()\n time.sleep(1 * (enc / 18)+.4)", "def decrypt(ciphertext,private_exponent,modulus):\n return pow(ciphertext,private_exponent,modulus) # cipher^private mod modulus", "def descrambler_bb(*args, **kwargs):\n return _digital_swig.descrambler_bb(*args, **kwargs)", "def modulus(x):\n return np.abs(x)", "def de00(bgr1, bgr2,ret_bool:bool=False):\n bgr1 = np.array([[bgr1]], dtype=np.uint8)\n bgr2 = np.array([[bgr2]], dtype=np.uint8)\n \n lab1 = _cvt_bgr2lab(bgr1)[0,0].tolist()\n lab2 = _cvt_bgr2lab(bgr2)[0,0].tolist()\n \n L1, a1, b1 = lab1[0], lab1[1], lab1[2]\n L2, a2, b2 = lab2[0], lab2[1], lab2[2]\n \n ##### CALCULATE Ci_p , hi_p\n # (2) \n C1 = (a1**2 + b1**2) ** 0.5\n C2 = (a2**2 + b2**2) ** 0.5\n \n # (3)\n mean_C = (C1 + C2) / 2\n \n # (4)\n G = 0.5 * (1 - (mean_C**7 / (mean_C**7 + 25**7))**0.5)\n \n # (5)\n a1_p = (1+G)*a1\n a2_p = (1+G)*a2\n \n # (6)\n C1_p = (a1_p**2 + b1**2) ** 0.5\n C2_p = (a2_p**2 + b2**2) ** 0.5\n \n # (7)\n h1_p = deg(atan2(b1,a1_p)) % 360\n h2_p = deg(atan2(b2,a2_p)) % 360 \n \n ##### CALCULATE Delta(s) of L, C, H\n # (8)\n delta_L_p = L2 - L1\n \n # (9)\n delta_C_p = C2_p - C1_p\n \n # (10)\n raw_delta_h = h2_p - h1_p\n abs_delta_h = abs(raw_delta_h)\n \n if C1_p * C2_p == 0:\n delta_h_p = 0\n elif abs_delta_h <= 180:\n delta_h_p = raw_delta_h\n elif raw_delta_h > 180:\n delta_h_p = raw_delta_h - 360\n elif raw_delta_h < -180:\n delta_h_p = raw_delta_h + 360\n \n # (11)\n delta_H_p = (C1_p * C2_p) ** 0.5 * sin( rad(delta_h_p) /2 ) * 2\n \n ##### CALCULATE CIE E2000\n # (12)\n mean_L_p = (L1 + L2) / 2\n \n # (13)\n mean_C_p = (C1_p + C2_p) / 2\n \n # (14)\n sum_h_p = h1_p + h2_p\n \n if C1_p * C2_p == 0:\n mean_h_p = sum_h_p\n elif abs_delta_h <= 180:\n mean_h_p = sum_h_p / 2\n elif sum_h_p < 360:\n mean_h_p = (sum_h_p + 360 ) / 2\n elif sum_h_p >= 
360:\n mean_h_p = (sum_h_p - 360 ) / 2\n \n # (15)\n T = 1 - 0.17*cos(rad(mean_h_p - 30)) + 0.24*cos(rad(2*mean_h_p))\n T += 0.32*cos(rad(3*mean_h_p+6)) - 0.2*cos(rad(4*mean_h_p-63))\n \n # (16)\n delta_theta = 30*exp(-((mean_h_p - 275) / 25 )**2)\n \n # (17)\n Rc = 2 * (mean_C_p**7 / (mean_C_p**7 + 25**7))**0.5\n \n # (18)\n Sl = 1 + (0.015 * (mean_L_p - 50)**2 ) / (20+ (mean_L_p - 50)**2) ** 0.5\n \n # (19)\n Sc = 1 + 0.045 * mean_C_p\n \n # (20)\n Sh = 1 + 0.015 * mean_C_p * T\n \n # (21)\n Rt = -sin( rad(2 * delta_theta) ) * Rc\n \n # (22)\n kl = kc = kh = 1 # Unity by default\n delta_E2000 = (delta_L_p / (kl * Sl)) ** 2 \n delta_E2000 += (delta_C_p / (kc * Sc)) ** 2 \n delta_E2000 += (delta_H_p / (kh * Sh)) ** 2 \n delta_E2000 += Rt * (delta_C_p / (kc * Sc)) * (delta_H_p / (kh * Sh))\n delta_E2000 **= 0.5\n \n if ret_bool:\n noticable_diff = delta_E2000 >= 2\n return delta_E2000, noticable_diff\n else:\n return delta_E2000", "def modinv(b, modulus):\n x0, _ = xgcd(b, modulus)\n if x0 > 0:\n return x0\n else:\n return x0 + modulus # b^-1", "def test_decrypt2_old(benchmark):\n benchmark(td.decrypt2_old, _enc_data)", "def decrypt_block(block, tester):\n random = bytearray(urandom(16))\n i = b'\\x00' * 16\n test = xor(random, i)\n\n while tester(test + block) is False:\n i = inc(i)\n test = xor(random, i)\n\n j = 1\n\n tweaked = tweak(test[:], j-1)\n\n while tester(tweaked + block) is True:\n j += 1\n tweaked = tweak(tweaked, j-1)\n\n l = 17 - j\n known = bytearray([b ^ l for b in test[-l:]])[::-1]\n\n while l != 16:\n random = bytearray(urandom(16 - l))\n i = b'\\x00' * (16 - l)\n pad = xor(bytearray([l + 1]) * l, known)\n\n head = xor(random, i)\n\n while tester(head + pad + block) is False:\n i = inc(i)\n head = xor(random, i)\n\n known = bytearray([head[-1] ^ (l+1)]) + known\n l += 1\n\n return known", "def __rmod__(self, y):\n return self._binary_operation(y, \"__rmod__\")", "def zmod(_a: int, _m: int) -> int:\n return _a % _m", "def decrypt(encrypted_msg, shift):\n # the body of function\n decrypted_msg=''\n new_index=0\n \n for i in encrypted_msg:\n \n index=ALPHABET.find(i)\n \n alpha_index=index-shift\n \n if abs(alpha_index)>=len(ALPHABET):\n new_index = alpha_index%len(ALPHABET)\n else:\n new_index = alpha_index\n \n \n decrypted_msg+=ALPHABET[new_index]\n \n return decrypted_msg", "def decrypt(encrypted_msg, shift):\n # remove this pass statement and write the body of your function\n newStr = ''\n for i in range(len(encrypted_msg)):\n for j in range(len(ALPHABET)):\n if encrypted_msg[i]==ALPHABET[j]:\n index = j\n break\n newStr += ALPHABET[(index - shift) % len(ALPHABET)]\n return newStr", "def __decryptvalchange(self,x,y,listread,grysc,count,msgencoded,endcheck,initcheck):\n\n\t\t\"\"\" If its a greyscale value, it creates a list and appends the greyscale value, otherwise it just converts the tuple to a list. 
It then reads the LSB and adds it to another list, once its read 8 bits it converts it to a character and apppends it to a new list of characters\"\"\"\n\t\tinit = '$a$a$'\n\t\tinitlist = list(init)\n\t\tfinal = '$a$a$'\n\t\tfinalist = list(final)\n\t\tif(grysc == 1):\n\t\t\tnewrgb = []\n\t\t\tdig = []\n\t\t\tdig.append(self.pic[x,y])\n\t\t\tnewrgb.append(self.pic[x,y])\n\t\telse:\n\t\t\tnewrgb = list(self.pic[x, y])\n\t\t\tdig = self.pic[x, y]\n\t\t\n\t\t\"\"\" Gets a pixel and gets the LSB, once 8 bits are got, it adds it to the message list\"\"\"\n\t\tfor z in range(0, len(newrgb)):\n\t\t\tif(count % 8 == 0) and (count != 0):\n\t\t\t\tasciival = 0\n\t\t\t\tpower = 7\n\t\t\t\tfor l in listread:\n\t\t\t\t\tasciival = asciival + l*(2**power)\n\t\t\t\t\tpower -= 1\n\t\t\t\tcharread = chr(asciival)\n\t\t\t\tdel listread\n\t\t\t\tlistread = []\n\t\t\t\tmsgencoded.append(charread)\n\t\t\t\tif(len(msgencoded) == 5):\n\t\t\t\t\tif(initlist != msgencoded):\n\t\t\t\t\t\traise ValueError(\"The direction provided to decrypt does not contain any data\")\n\t\t\t\tif(len(endcheck) != 0):\t\n\t\t\t\t\tendcheck.append(charread)\n\t\t\t\t\tif(len(endcheck) == 5):\n\t\t\t\t\t\tif(endcheck!=finalist):\n\t\t\t\t\t\t\tdel endcheck\n\t\t\t\t\t\t\tendcheck = []\n\t\t\t\t\t\telif(initcheck == 1):\n\t\t\t\t\t\t\treturn count, listread,endcheck\n\t\t\t\tif (charread == '$' and len(endcheck) == 0 and initcheck == 1):\t\n\t\t\t\t\tendcheck.append(charread)\n\t\t\tlistread.append((newrgb[z] % 2))\n\t\t\tcount = count + 1\n\t\tdel newrgb\n\t\tdel dig\n\t\treturn count,listread,endcheck", "def decrypt_digram(digram, table):\n assert digram[0]!=digram[1]\n apos = get_position(digram[0], table)\n bpos = get_position(digram[1], table)\n if apos[0]==bpos[0]: #same row, decrease column by 1\n apos[1] = (apos[1]-1)%5\n bpos[1] = (bpos[1]-1)%5\n elif apos[1]==bpos[1]: #same column, decrease row by 1\n apos[0] = (apos[0]-1)%5\n bpos[0] = (bpos[0]-1)%5\n else:\n temp = apos[1]\n apos[1] = bpos[1]\n bpos[1] = temp\n return table[apos[0]][apos[1]]+table[bpos[0]][bpos[1]]", "def _calc_lz_mod(mod, modules, dt, shift, start, end):\n x, _ = psd.moving_average(modules[mod], dt, shift, start, end)\n binx = (x > x.mean()).astype(int)\n return LZ76(binx)", "def decrypt_step(ciphertext, i, cross_total, key_i, plaintext):\n try:\n c = chr(ciphertext[i] + cross_total - key_i)\n except ValueError:\n # out of range with certain guesses of key. return an invalid\n # char which won't get past constraint check\n c = '?'\n s1 = md5((\"\".join(plaintext)+c).encode('utf-8')).hexdigest()[0:16]\n s2 = md5(str(cross_total).encode('utf-8')).hexdigest()[0:16]\n return (c, eval_cross_total(s1 + s2))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_diff_encoder_bb_sptr __init__(self, p) > digital_diff_encoder_bb_sptr
def __init__(self, *args): this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def diff_encoder_bb(*args, **kwargs):\n return _digital_swig.diff_encoder_bb(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_framer_sink_1_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, a, b):\n self.a = make_generator(a)\n self.b = make_generator(b)", "def __init_encoder_params_fc(self):\n self.enc_params_fc = self.layer_cfg", "def diff_decoder_bb(*args, **kwargs):\n return _digital_swig.diff_decoder_bb(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_probe_density_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def make(self, *args, **kwargs):\n return _frame_detection_swig.deinterleaver_bb_sptr_make(self, *args, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
diff_encoder_bb(unsigned int modulus) > digital_diff_encoder_bb_sptr y[0] = (x[0] + y[-1]) % M Uses current and previous symbols and the alphabet modulus to perform differential encoding.
def diff_encoder_bb(*args, **kwargs): return _digital_swig.diff_encoder_bb(*args, **kwargs)
[ "def diff_decoder_bb(*args, **kwargs):\n return _digital_swig.diff_decoder_bb(*args, **kwargs)", "def encB(self, enc, m1=1, m2=1):\n print('Moving '+str((enc/18))+ ' rotations(s) backwards')\n enc_tgt(m1, m2, enc)\n bwd()\n time.sleep(1 * (enc / 18)+.4)", "def decrypt_key(e1, modulus):\r\n\r\n d = 1 #Setting d to 1\r\n finished = False #To check if we found a value for d\r\n while finished != True: #We check to see if we have a value for d\r\n newD = e1 * d #Works out the remainder using the equation ed (mod modulus) = 1\r\n finalD = newD % modulus\r\n \r\n if finalD == 1: #If the remainder is 1 we have a value for d!!\r\n finished = True\r\n else:\r\n d = d + 1\r\n\r\n\r\n return d", "def modulus(x):\n return np.abs(x)", "def decode(self, y):\n assert(y.size == self.p.n)\n # Setup!\n beta = np.zeros(self.p.M * self.p.L) # beta_0 = 0\n z = y # z_0 = y\n s = beta + self.Ay(z)\n tau_sq = np.dot(z,z) / self.p.n\n tau_sq_prev = tau_sq + 1\n\n # Iterate!\n t = 1\n decoding_threshold = 5*self.p.Palloc[self.p.L-1]\n while tau_sq_prev - tau_sq >= decoding_threshold:\n #print('t = {}, tau_sq = {}, avg(beta^2) = {}'.format(t, tau_sq, np.dot(beta, beta) / self.p.n))\n \n # Calculate beta^t = eta^(t-1) (s_(t-1))\n beta = self.eta(s, tau_sq)\n\n # Calculate z_t = y - A beta^t - z_(t-1) / tau_(t-1)^2 * (P_total - (beta^t)^2 / n)\n z = y - self.Ax(beta) + z / tau_sq * (self.P_total - np.dot(beta, beta) / self.p.n)\n\n # Calculate s^t = beta^t + A^T z^(t)\n s = beta + self.Ay(z)\n\n # Calculate tau_t^2 = z_t^2 / n\n tau_sq_prev = tau_sq\n tau_sq = np.dot(z,z) / self.p.n\n\n t += 1\n\n # Declare the maximum value in each section to be the decoded '1'.\n x = np.zeros(self.p.L, dtype=int)\n\n for l in range(self.p.L):\n index = beta[l * self.p.M : (l+1) * self.p.M].argmax()\n x[l] = index\n return x", "def modinv(b, modulus):\n x0, _ = xgcd(b, modulus)\n if x0 > 0:\n return x0\n else:\n return x0 + modulus # b^-1", "def decrypt(ciphertext,private_exponent,modulus):\n return pow(ciphertext,private_exponent,modulus) # cipher^private mod modulus", "def _calc_lz_mod(mod, modules, dt, shift, start, end):\n x, _ = psd.moving_average(modules[mod], dt, shift, start, end)\n binx = (x > x.mean()).astype(int)\n return LZ76(binx)", "def descrambler_bb(*args, **kwargs):\n return _digital_swig.descrambler_bb(*args, **kwargs)", "def decrypt(encrypted_msg, shift):\n # the body of function\n decrypted_msg=''\n new_index=0\n \n for i in encrypted_msg:\n \n index=ALPHABET.find(i)\n \n alpha_index=index-shift\n \n if abs(alpha_index)>=len(ALPHABET):\n new_index = alpha_index%len(ALPHABET)\n else:\n new_index = alpha_index\n \n \n decrypted_msg+=ALPHABET[new_index]\n \n return decrypted_msg", "def zmod(_a: int, _m: int) -> int:\n return _a % _m", "def __rmod__(self, y):\n return self._binary_operation(y, \"__rmod__\")", "def decrypt(encrypted_msg, shift):\n # remove this pass statement and write the body of your function\n newStr = ''\n for i in range(len(encrypted_msg)):\n for j in range(len(ALPHABET)):\n if encrypted_msg[i]==ALPHABET[j]:\n index = j\n break\n newStr += ALPHABET[(index - shift) % len(ALPHABET)]\n return newStr", "def de00(bgr1, bgr2,ret_bool:bool=False):\n bgr1 = np.array([[bgr1]], dtype=np.uint8)\n bgr2 = np.array([[bgr2]], dtype=np.uint8)\n \n lab1 = _cvt_bgr2lab(bgr1)[0,0].tolist()\n lab2 = _cvt_bgr2lab(bgr2)[0,0].tolist()\n \n L1, a1, b1 = lab1[0], lab1[1], lab1[2]\n L2, a2, b2 = lab2[0], lab2[1], lab2[2]\n \n ##### CALCULATE Ci_p , hi_p\n # (2) \n C1 = (a1**2 + b1**2) ** 0.5\n C2 = (a2**2 + b2**2) ** 
0.5\n \n # (3)\n mean_C = (C1 + C2) / 2\n \n # (4)\n G = 0.5 * (1 - (mean_C**7 / (mean_C**7 + 25**7))**0.5)\n \n # (5)\n a1_p = (1+G)*a1\n a2_p = (1+G)*a2\n \n # (6)\n C1_p = (a1_p**2 + b1**2) ** 0.5\n C2_p = (a2_p**2 + b2**2) ** 0.5\n \n # (7)\n h1_p = deg(atan2(b1,a1_p)) % 360\n h2_p = deg(atan2(b2,a2_p)) % 360 \n \n ##### CALCULATE Delta(s) of L, C, H\n # (8)\n delta_L_p = L2 - L1\n \n # (9)\n delta_C_p = C2_p - C1_p\n \n # (10)\n raw_delta_h = h2_p - h1_p\n abs_delta_h = abs(raw_delta_h)\n \n if C1_p * C2_p == 0:\n delta_h_p = 0\n elif abs_delta_h <= 180:\n delta_h_p = raw_delta_h\n elif raw_delta_h > 180:\n delta_h_p = raw_delta_h - 360\n elif raw_delta_h < -180:\n delta_h_p = raw_delta_h + 360\n \n # (11)\n delta_H_p = (C1_p * C2_p) ** 0.5 * sin( rad(delta_h_p) /2 ) * 2\n \n ##### CALCULATE CIE E2000\n # (12)\n mean_L_p = (L1 + L2) / 2\n \n # (13)\n mean_C_p = (C1_p + C2_p) / 2\n \n # (14)\n sum_h_p = h1_p + h2_p\n \n if C1_p * C2_p == 0:\n mean_h_p = sum_h_p\n elif abs_delta_h <= 180:\n mean_h_p = sum_h_p / 2\n elif sum_h_p < 360:\n mean_h_p = (sum_h_p + 360 ) / 2\n elif sum_h_p >= 360:\n mean_h_p = (sum_h_p - 360 ) / 2\n \n # (15)\n T = 1 - 0.17*cos(rad(mean_h_p - 30)) + 0.24*cos(rad(2*mean_h_p))\n T += 0.32*cos(rad(3*mean_h_p+6)) - 0.2*cos(rad(4*mean_h_p-63))\n \n # (16)\n delta_theta = 30*exp(-((mean_h_p - 275) / 25 )**2)\n \n # (17)\n Rc = 2 * (mean_C_p**7 / (mean_C_p**7 + 25**7))**0.5\n \n # (18)\n Sl = 1 + (0.015 * (mean_L_p - 50)**2 ) / (20+ (mean_L_p - 50)**2) ** 0.5\n \n # (19)\n Sc = 1 + 0.045 * mean_C_p\n \n # (20)\n Sh = 1 + 0.015 * mean_C_p * T\n \n # (21)\n Rt = -sin( rad(2 * delta_theta) ) * Rc\n \n # (22)\n kl = kc = kh = 1 # Unity by default\n delta_E2000 = (delta_L_p / (kl * Sl)) ** 2 \n delta_E2000 += (delta_C_p / (kc * Sc)) ** 2 \n delta_E2000 += (delta_H_p / (kh * Sh)) ** 2 \n delta_E2000 += Rt * (delta_C_p / (kc * Sc)) * (delta_H_p / (kh * Sh))\n delta_E2000 **= 0.5\n \n if ret_bool:\n noticable_diff = delta_E2000 >= 2\n return delta_E2000, noticable_diff\n else:\n return delta_E2000", "def scrambler_bb(*args, **kwargs):\n return _digital_swig.scrambler_bb(*args, **kwargs)", "def common_modulus_attack(modulus, exp1, exp2, msg1, msg2):\n g, s, t = gmpy2.gcdext(exp1, exp2)\n if g != 1:\n print(\"Error: GCD of the two exponents is not 1!\", file=sys.stderr)\n sys.exit(1)\n tmp1 = gmpy2.powmod(msg1, s, modulus)\n tmp2 = gmpy2.powmod(msg2, t, modulus)\n return int(gmpy2.mod(tmp1 * tmp2, modulus))", "def mod(_a: int, _m: int) -> int:\n return zmod(_a - 1, _m) + 1", "def calculate_yy(bin_edges,arrays,region,version,cov_versions,beam_version,\n effective_freq,overwrite,maxval,unsanitized_beam=False,do_weights=False,\n pa1_shift = None,\n pa2_shift = None,\n pa3_150_shift = None,\n pa3_090_shift = None,\n no_act_color_correction=False, ccor_exp = -1,\n sim_splits=None,unblind=False,all_analytic=False,beta_samples=None):\n arrays = arrays.split(',')\n narrays = len(arrays)\n if sim_splits is not None: assert not(unblind)\n def warn(): print(\"WARNING: no bandpass file found. Assuming array \",dm.c['id'],\" has no response to CMB, tSZ and CIB.\")\n aspecs = tutils.ASpecs().get_specs\n bandpasses = not(effective_freq)\n savedir = tutils.get_save_path(version,region)\n assert len(cov_versions)==3\n covdirs = [tutils.get_save_path(cov_versions[i],region) for i in range(3)]\n for covdir in covdirs: assert os.path.exists(covdir)\n if not(overwrite):\n assert not(os.path.exists(savedir)), \\\n \"This version already exists on disk. 
Please use a different version identifier.\"\n try: os.makedirs(savedir)\n except:\n if overwrite: pass\n else: raise\n\n\n mask = enmap.read_map(covdir+\"tilec_mask.fits\")\n\n\n from scipy.ndimage.filters import gaussian_filter as smooth\n pm = enmap.read_map(\"/scratch/r/rbond/msyriac/data/planck/data/pr2/COM_Mask_Lensing_2048_R2.00_car_deep56_interp_order0.fits\")\n wcs = pm.wcs\n mask = enmap.enmap(smooth(pm,sigma=10),wcs) * mask\n\n\n shape,wcs = mask.shape,mask.wcs\n Ny,Nx = shape\n modlmap = enmap.modlmap(shape,wcs)\n omodlmap = modlmap.copy()\n ells = np.arange(0,modlmap.max())\n minell = maps.minimum_ell(shape,wcs)\n sel = np.where(np.logical_and(modlmap>=bin_edges[0]-minell,modlmap<=bin_edges[-1]+minell))\n modlmap = modlmap[sel]\n\n bps = []\n lbeams = []\n kbeams = []\n shifts = []\n cfreqs = []\n lmins = []\n lmaxs = []\n names = []\n for i,qid in enumerate(arrays):\n dm = sints.models[sints.arrays(qid,'data_model')](region=mask,calibrated=True)\n if dm.name=='act_mr3':\n season,array1,array2 = sints.arrays(qid,'season'),sints.arrays(qid,'array'),sints.arrays(qid,'freq')\n array = '_'.join([array1,array2])\n elif dm.name=='planck_hybrid':\n season,patch,array = None,None,sints.arrays(qid,'freq')\n else:\n raise ValueError\n lmin,lmax,hybrid,radial,friend,cfreq,fgroup,wrfit = aspecs(qid)\n lmins.append(lmin)\n lmaxs.append(lmax)\n names.append(qid)\n cfreqs.append(cfreq)\n if bandpasses:\n try: \n fname = dm.get_bandpass_file_name(array) \n bps.append(\"data/\"+fname)\n if (pa1_shift is not None) and 'PA1' in fname:\n shifts.append(pa1_shift)\n elif (pa2_shift is not None) and 'PA2' in fname:\n shifts.append(pa2_shift)\n elif (pa3_150_shift is not None) and ('PA3' in fname) and ('150' in fname):\n shifts.append(pa3_150_shift)\n elif (pa3_090_shift is not None) and ('PA3' in fname) and ('090' in fname):\n shifts.append(pa3_90_shift)\n else:\n shifts.append(0)\n\n except:\n warn()\n bps.append(None)\n else:\n try: bps.append(cfreq)\n except:\n warn()\n bps.append(None)\n\n kbeam = tutils.get_kbeam(qid,modlmap,sanitize=not(unsanitized_beam),version=beam_version,planck_pixwin=True)\n if dm.name=='act_mr3':\n lbeam = tutils.get_kbeam(qid,ells,sanitize=not(unsanitized_beam),version=beam_version,planck_pixwin=False) # note no pixwin but doesnt matter since no ccorr for planck\n elif dm.name=='planck_hybrid':\n lbeam = None\n else:\n raise ValueError\n lbeams.append(lbeam)\n kbeams.append(kbeam.copy())\n # Make responses\n responses = {}\n\n def _get_response(comp,param_override=None):\n if bandpasses:\n if no_act_color_correction:\n r = tfg.get_mix_bandpassed(bps, comp, bandpass_shifts=shifts,\n param_dict_override=param_override)\n else:\n r = tfg.get_mix_bandpassed(bps, comp, bandpass_shifts=shifts,\n ccor_cen_nus=cfreqs, ccor_beams=lbeams, \n ccor_exps = [ccor_exp] * narrays,\n param_dict_override=param_override)\n else:\n r = tfg.get_mix(bps, comp,param_dict_override=param_override)\n return r\n\n for comp in ['tSZ','CMB','CIB']:\n responses[comp] = _get_response(comp,None)\n\n\n \n from tilec.utils import is_planck\n ilcgens = []\n okcoadds = []\n for splitnum in range(2):\n covdir = covdirs[splitnum]\n kcoadds = []\n for i,qid in enumerate(arrays):\n lmin = lmins[i]\n lmax = lmaxs[i]\n\n if is_planck(qid):\n dm = sints.models[sints.arrays(qid,'data_model')](region=mask,calibrated=True)\n\n _,kcoadd,_ = kspace.process(dm,region,qid,mask,\n skip_splits=True,\n splits_fname=sim_splits[i] if sim_splits is not None else None,\n inpaint=False,fn_beam = None,\n plot_inpaint_path = 
None,\n split_set=splitnum)\n else:\n kcoadd_name = covdir + \"kcoadd_%s.npy\" % qid\n kcoadd = enmap.enmap(np.load(kcoadd_name),wcs)\n\n kmask = maps.mask_kspace(shape,wcs,lmin=lmin,lmax=lmax)\n dtype = kcoadd.dtype\n kcoadds.append((kcoadd.copy()*kmask)[sel])\n\n kcoadds = enmap.enmap(np.stack(kcoadds),wcs)\n okcoadds.append(kcoadds.copy())\n\n\n # Read Covmat\n ctheory = ilc.CTheory(modlmap)\n nells = kcoadds[0].size\n cov = np.zeros((narrays,narrays,nells))\n for aindex1 in range(narrays):\n for aindex2 in range(aindex1,narrays):\n qid1 = names[aindex1]\n qid2 = names[aindex2]\n if is_planck(names[aindex1]) or is_planck(names[aindex2]) or all_analytic:\n lmin,lmax,hybrid,radial,friend,f1,fgroup,wrfit = aspecs(qid1)\n lmin,lmax,hybrid,radial,friend,f2,fgroup,wrfit = aspecs(qid2)\n # If both are Planck and same array, get white noise from last bin\n icov = ctheory.get_theory_cls(f1,f2,a_cmb=1,a_gal=0.8)*kbeams[aindex1]*kbeams[aindex2]\n if aindex1==aindex2:\n pcov = enmap.enmap(np.load(covdirs[2]+\"tilec_hybrid_covariance_%s_%s.npy\" % (names[aindex1],names[aindex2])),wcs)\n pbin_edges = np.append(np.arange(500,3000,200) ,[3000,4000,5000,5800])\n pbinner = stats.bin2D(omodlmap,pbin_edges)\n w = pbinner.bin(pcov)[1][-1]\n icov = icov + w\n else:\n icov = np.load(covdir+\"tilec_hybrid_covariance_%s_%s.npy\" % (names[aindex1],names[aindex2]))[sel]\n if aindex1==aindex2: \n icov[modlmap<lmins[aindex1]] = maxval\n icov[modlmap>lmaxs[aindex1]] = maxval\n cov[aindex1,aindex2] = icov\n cov[aindex2,aindex1] = icov\n\n assert np.all(np.isfinite(cov))\n\n ilcgen = ilc.HILC(modlmap,np.stack(kbeams),cov=cov,responses=responses,invert=True)\n ilcgens.append(ilcgen)\n \n\n solutions = ['tSZ','tSZ-CMB','tSZ-CIB']\n ypowers = {}\n w2 = np.mean(mask**2.)\n binner = stats.bin2D(modlmap,bin_edges)\n np.random.seed(100)\n blinding = np.random.uniform(0.8,1.2) if not(unblind) else 1\n\n\n def _get_ypow(sname,dname,dresponse=None,dcmb=False):\n\n if dresponse is not None:\n assert dname is not None\n for splitnum in range(2):\n ilcgens[splitnum].add_response(dname,dresponse)\n\n ykmaps = []\n for splitnum in range(2):\n if dcmb:\n assert dname is not None\n ykmap = ilcgens[splitnum].multi_constrained_map(okcoadds[splitnum],sname,[dname,\"CMB\"])\n else:\n if dname is None:\n ykmap = ilcgens[splitnum].standard_map(okcoadds[splitnum],sname)\n else:\n ykmap = ilcgens[splitnum].constrained_map(okcoadds[splitnum],sname,dname)\n ykmaps.append(ykmap.copy())\n\n ypower = (ykmaps[0]*ykmaps[1].conj()).real / w2\n return binner.bin(ypower)[1] * blinding\n\n\n # The usual solutions\n for solution in solutions:\n\n sols = solution.split('-')\n if len(sols)==2:\n sname = sols[0]\n dname = sols[1]\n elif len(sols)==1:\n sname = sols[0]\n dname = None\n else:\n raise ValueError\n\n ypowers[solution] = _get_ypow(sname,dname,dresponse=None)\n\n\n # The CIB SED samples\n if beta_samples is not None:\n y_bsamples = []\n y_bsamples_cmb = []\n for beta in beta_samples:\n pdict = tfg.default_dict.copy()\n pdict['beta_CIB'] = beta\n response = _get_response(\"CIB\",param_override=pdict)\n y_bsamples.append( _get_ypow(\"tSZ\",\"iCIB\",dresponse=response,dcmb=False) )\n y_bsamples_cmb.append( _get_ypow(\"tSZ\",\"iCIB\",dresponse=response,dcmb=True) )\n else:\n y_bsamples = None\n y_bsamples_cmb = None\n\n\n return binner.centers,ypowers,y_bsamples,y_bsamples_cmb", "def split_Bregman(sig, mask, initial_d, initial_b, mu, lamda, ninnner,nouter, max_cg):\n sigT=sig[np.newaxis].transpose()\n\n maskT = mask.transpose()\n\n uk=np.dot(maskT, 
sigT)\n\n dk_x=initial_d[np.newaxis].transpose()\n\n bk_x=initial_b[np.newaxis].transpose()\n fk = sigT\n for jouter in xrange (nouter):\n for jinner in xrange(ninnner):\n ukp=uk\n ifkt=np.dot(maskT, sigT)\n rhs=mu*ifkt+lamda*(dk_x-bk_x)\n\n ruk = np.dot(mask, uk)\n iukt = np.dot(maskT,ruk)\n r = rhs - mu * iukt -lamda *uk\n p = r\n rsold = np.dot(r.transpose(), r)\n\n for i in xrange(max_cg):\n rp=np.dot(mask,p)\n irpt = np.dot(maskT ,rp)\n Ap = mu * irpt + lamda *p\n\n alpha = rsold / np.dot(p.transpose(),Ap)\n uk = uk + alpha * p\n r = r - alpha * Ap\n rsnew = np.dot(r.transpose(),r)\n if rsnew < 1e-32:\n break\n\n p = r + rsnew / rsold * p;\n rsold = rsnew\n\n sk_x = uk + bk_x\n dk_x = np.maximum(np.abs(sk_x)-1/lamda,0)*np.sign(sk_x)\n bk_x = sk_x-dk_x\n\n fk = fk + sigT - np.dot(mask, uk)\n rec_tv = uk\n\n return (uk)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_diff_phasor_cc_sptr __init__(self, p) > digital_diff_phasor_cc_sptr
def __init__(self, *args): this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, args, phase):\n self.args = args\n self.phase = phase", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoShaderParameter2i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _coin.new_SbCylinderPlaneProjector(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SbLineProjector()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoShaderParameter1i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoShaderParameter2f()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, coeff):\n self.coeff = coeff", "def __init__(self):\n this = _coin.new_SoClipPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoClipPlaneManip()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_fll_band_edge_cc_sptr __init__(self, p) > digital_fll_band_edge_cc_sptr
def __init__(self, *args): this = _digital_swig.new_digital_fll_band_edge_cc_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_fff_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _coin.new_SbDPLine(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(HCompositeState2ProcDef, self).__init__(name='HCompositeState2ProcDef', num_nodes=152, edges=[])\n \n # Add the edges\n self.add_edges([(8, 39), (39, 1), (1, 40), (40, 6), (6, 41), (41, 9), (9, 42), (42, 11), (8, 43), (43, 13), (6, 44), (44, 14), (6, 45), (45, 15), (6, 46), (46, 16), (11, 47), (47, 17), (11, 48), (48, 18), (11, 49), (49, 19), (11, 50), (50, 20), (9, 51), (51, 12), (12, 52), (52, 21), (12, 53), (53, 22), (12, 54), (54, 23), (71, 55), (55, 119), (72, 56), (56, 120), (73, 57), (57, 121), (74, 58), (58, 122), (75, 59), (59, 123), (76, 60), (60, 124), (77, 61), (61, 125), (78, 62), (62, 126), (79, 63), (63, 127), (80, 64), (64, 128), (81, 65), (65, 129), (82, 66), (66, 130), (83, 67), (67, 131), (84, 68), (68, 132), (85, 69), (69, 133), (86, 70), (70, 134), (13, 24), (24, 88), (14, 25), (25, 89), (15, 26), (26, 90), (16, 27), (27, 91), (11, 28), (28, 92), (17, 29), (29, 93), (18, 30), (30, 94), (19, 31), (31, 95), (20, 32), (32, 96), (12, 33), (33, 97), (21, 34), (34, 98), (22, 35), (35, 99), (23, 36), (36, 100), (8, 37), (37, 101), (1, 38), (38, 102), (5, 0), (0, 135), (0, 136), (0, 137), (0, 138), (0, 139), (0, 140), (0, 141), (0, 142), (0, 143), (0, 144), (0, 145), (0, 146), (0, 147), (0, 148), (0, 149), (0, 150), (0, 151), (136, 1), (7, 2), (2, 4), (4, 3), (3, 87), (10, 4), (7, 5), (137, 6), (71, 103), (72, 104), (73, 105), (74, 106), (75, 107), (76, 108), (77, 109), (78, 110), (79, 111), (80, 112), (81, 113), (82, 114), (83, 115), (84, 116), (85, 117), (86, 118), (135, 8), (138, 9), (139, 13), (140, 14), (141, 15), (142, 16), (143, 11), (144, 12), (145, 17), (146, 18), (147, 19), (148, 20), (149, 21), (150, 22), (151, 23), (8, 10), (103, 87), (104, 88), (105, 89), (106, 90), (107, 91), (108, 92), (109, 93), (110, 94), (111, 95), (112, 96), (113, 97), (114, 98), (115, 99), (116, 100), 
(117, 101), (118, 102)])\n # Set the graph attributes\n self[\"mm__\"] = pickle.loads(\"\"\"(lp1\nS'UMLRT2Kiltera_MM'\np2\na.\"\"\")\n self[\"name\"] = \"\"\"CompositeState2ProcDef\"\"\"\n self[\"GUID__\"] = UUID('d5e9d5a2-c202-49ef-a74d-abc96e53b4fe')\n \n # Set the node attributes\n self.vs[0][\"mm__\"] = \"\"\"ApplyModel\"\"\"\n self.vs[0][\"GUID__\"] = UUID('4f03b792-e84e-4c84-bbae-3072cf6a293c')\n self.vs[1][\"name\"] = \"\"\"localdef1\"\"\"\n self.vs[1][\"classtype\"] = \"\"\"LocalDef\"\"\"\n self.vs[1][\"mm__\"] = \"\"\"LocalDef\"\"\"\n self.vs[1][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[1][\"GUID__\"] = UUID('00ff12a2-181f-4200-81a2-75850a58d99f')\n self.vs[2][\"mm__\"] = \"\"\"match_contains\"\"\"\n self.vs[2][\"GUID__\"] = UUID('938cefd8-a8a4-4aaf-be3a-e728f6d4b308')\n self.vs[3][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[3][\"GUID__\"] = UUID('a1001fa8-fbfb-4491-a555-e688afae9a35')\n self.vs[4][\"name\"] = \"\"\"state1\"\"\"\n self.vs[4][\"classtype\"] = \"\"\"State\"\"\"\n self.vs[4][\"mm__\"] = \"\"\"State\"\"\"\n self.vs[4][\"cardinality\"] = \"\"\"+\"\"\"\n self.vs[4][\"GUID__\"] = UUID('2de4b186-4d1b-49c5-a24d-837430de86c3')\n self.vs[5][\"mm__\"] = \"\"\"paired_with\"\"\"\n self.vs[5][\"GUID__\"] = UUID('6864a62e-0c16-41ec-85cb-5304c66b2167')\n self.vs[6][\"name\"] = \"\"\"new1\"\"\"\n self.vs[6][\"classtype\"] = \"\"\"New\"\"\"\n self.vs[6][\"mm__\"] = \"\"\"New\"\"\"\n self.vs[6][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[6][\"GUID__\"] = UUID('6e918d39-761f-4145-980d-e035e8956e4c')\n self.vs[7][\"mm__\"] = \"\"\"MatchModel\"\"\"\n self.vs[7][\"GUID__\"] = UUID('9d3c9ff3-d943-45c5-9a68-4b94f8ae4f55')\n self.vs[8][\"name\"] = \"\"\"procdef1\"\"\"\n self.vs[8][\"classtype\"] = \"\"\"ProcDef\"\"\"\n self.vs[8][\"mm__\"] = \"\"\"ProcDef\"\"\"\n self.vs[8][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[8][\"GUID__\"] = UUID('b36423c7-5f8e-4565-9124-9dedad23d1e1')\n self.vs[9][\"name\"] = \"\"\"par1\"\"\"\n self.vs[9][\"classtype\"] = \"\"\"Par\"\"\"\n self.vs[9][\"mm__\"] = \"\"\"Par\"\"\"\n self.vs[9][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[9][\"GUID__\"] = UUID('64a7af82-a641-4084-b5c3-db88c40c7b99')\n self.vs[10][\"type\"] = \"\"\"ruleDef\"\"\"\n self.vs[10][\"mm__\"] = \"\"\"backward_link\"\"\"\n self.vs[10][\"GUID__\"] = UUID('869d5d52-235c-4240-af78-31e36a1f47d7')\n self.vs[11][\"name\"] = \"\"\"inst1\"\"\"\n self.vs[11][\"classtype\"] = \"\"\"Inst\"\"\"\n self.vs[11][\"mm__\"] = \"\"\"Inst\"\"\"\n self.vs[11][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[11][\"GUID__\"] = UUID('a4079b80-e123-4015-96c9-8e664b15e053')\n self.vs[12][\"name\"] = \"\"\"inst2\"\"\"\n self.vs[12][\"classtype\"] = \"\"\"Inst\"\"\"\n self.vs[12][\"mm__\"] = \"\"\"Inst\"\"\"\n self.vs[12][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[12][\"GUID__\"] = UUID('a3eef854-3648-462d-be65-3eca75bdebf7')\n self.vs[13][\"name\"] = \"\"\"name1\"\"\"\n self.vs[13][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[13][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[13][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[13][\"GUID__\"] = UUID('9b94a56a-dd11-415e-8663-6f429c2c0753')\n self.vs[14][\"name\"] = \"\"\"name2\"\"\"\n self.vs[14][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[14][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[14][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[14][\"GUID__\"] = UUID('d90c8a9c-eee1-48af-9308-abbb6052af8f')\n self.vs[15][\"name\"] = \"\"\"name3\"\"\"\n self.vs[15][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[15][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[15][\"cardinality\"] = \"\"\"1\"\"\"\n 
self.vs[15][\"GUID__\"] = UUID('8e53fe34-6fcc-4059-8042-db911db6e812')\n self.vs[16][\"name\"] = \"\"\"name4\"\"\"\n self.vs[16][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[16][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[16][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[16][\"GUID__\"] = UUID('4f23669c-d236-4a8d-b52b-1f37ba406f94')\n self.vs[17][\"name\"] = \"\"\"name5\"\"\"\n self.vs[17][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[17][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[17][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[17][\"GUID__\"] = UUID('91bc841f-2211-4638-a340-584da8347c98')\n self.vs[18][\"name\"] = \"\"\"name6\"\"\"\n self.vs[18][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[18][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[18][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[18][\"GUID__\"] = UUID('8a109a2d-2d70-4318-8a72-46c784206075')\n self.vs[19][\"name\"] = \"\"\"name7\"\"\"\n self.vs[19][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[19][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[19][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[19][\"GUID__\"] = UUID('5a95e461-d2f8-435b-9e77-af581d91ee29')\n self.vs[20][\"name\"] = \"\"\"name8\"\"\"\n self.vs[20][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[20][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[20][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[20][\"GUID__\"] = UUID('c600b1fb-8c9c-4ef2-b597-8137d9bdfb08')\n self.vs[21][\"name\"] = \"\"\"name9\"\"\"\n self.vs[21][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[21][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[21][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[21][\"GUID__\"] = UUID('708cd8f1-6e3d-4dfa-af00-18e9d43a01a4')\n self.vs[22][\"name\"] = \"\"\"name10\"\"\"\n self.vs[22][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[22][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[22][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[22][\"GUID__\"] = UUID('132e8292-4471-498d-a202-3d2abc7ab5ca')\n self.vs[23][\"name\"] = \"\"\"name11\"\"\"\n self.vs[23][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[23][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[23][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[23][\"GUID__\"] = UUID('fdb484f0-a8b5-4b9e-86a6-b679b1012005')\n self.vs[24][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[24][\"GUID__\"] = UUID('2a8418a3-cb80-496b-a1e0-7419de2ae33f')\n self.vs[25][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[25][\"GUID__\"] = UUID('4f37af75-2b77-45c1-93d1-8aae7cf14cc8')\n self.vs[26][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[26][\"GUID__\"] = UUID('54ef6fcc-cb9a-494e-aa36-f44525e4a0b0')\n self.vs[27][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[27][\"GUID__\"] = UUID('22858e97-7bbe-460d-b44b-14652852a592')\n self.vs[28][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[28][\"GUID__\"] = UUID('c3fcdb66-34da-4c82-b163-e5ab5f04e5c0')\n self.vs[29][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[29][\"GUID__\"] = UUID('88c90884-ae83-49af-96da-74f03c7f80ce')\n self.vs[30][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[30][\"GUID__\"] = UUID('1e3c412d-8372-4ba5-8a56-9d82407b79d0')\n self.vs[31][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[31][\"GUID__\"] = UUID('a500f0c7-1535-40ed-802e-a883517bbc64')\n self.vs[32][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[32][\"GUID__\"] = UUID('ed658c5a-81c3-4938-920e-98953de205ba')\n self.vs[33][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[33][\"GUID__\"] = UUID('49be0f69-494e-4f45-8923-582778c6828a')\n self.vs[34][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[34][\"GUID__\"] = UUID('e3709cc9-ed04-44f9-b8a7-a8f9f5939f3b')\n self.vs[35][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n 
self.vs[35][\"GUID__\"] = UUID('8a657ede-e29d-4a28-9c1c-4c95a3ecd3b6')\n self.vs[36][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[36][\"GUID__\"] = UUID('b3cd8a7c-7deb-4b8c-9ed2-4a22bd6b5a39')\n self.vs[37][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[37][\"GUID__\"] = UUID('2287628a-d22b-427b-bdfd-d24d04bd46ad')\n self.vs[38][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[38][\"GUID__\"] = UUID('65083504-7423-4b8f-8b3e-7dc369fa08db')\n self.vs[39][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[39][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[39][\"GUID__\"] = UUID('dd5a6c0f-e438-4f23-ad0f-acd02dd4afe8')\n self.vs[40][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[40][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[40][\"GUID__\"] = UUID('d4bcb4b5-37a3-4d04-895f-d689ea89c825')\n self.vs[41][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[41][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[41][\"GUID__\"] = UUID('b860cc3c-a70a-4c66-9bb9-c1fd1395b23c')\n self.vs[42][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[42][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[42][\"GUID__\"] = UUID('97c4f558-4e1a-4a85-82e4-e0500374d80f')\n self.vs[43][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[43][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[43][\"GUID__\"] = UUID('58acb66a-2008-4ef3-975f-1db1219bd830')\n self.vs[44][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[44][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[44][\"GUID__\"] = UUID('5e14b29f-f5e6-4d6d-bfac-8616df51ab56')\n self.vs[45][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[45][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[45][\"GUID__\"] = UUID('57ac3f37-c63f-4a74-bc90-a846fb38e370')\n self.vs[46][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[46][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[46][\"GUID__\"] = UUID('9fc39a10-40e0-47f4-93c6-eccc9fdbd594')\n self.vs[47][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[47][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[47][\"GUID__\"] = UUID('00e09455-e8b5-414e-8eee-abbe55b7a65d')\n self.vs[48][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[48][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[48][\"GUID__\"] = UUID('17170197-069c-44fa-9239-dec8622935ee')\n self.vs[49][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[49][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[49][\"GUID__\"] = UUID('a4654b49-ee9c-4f69-a4e2-b8101c7086d2')\n self.vs[50][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[50][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[50][\"GUID__\"] = UUID('f9e0515c-b37c-4c22-8fe9-49c98acd152d')\n self.vs[51][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[51][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[51][\"GUID__\"] = UUID('2c60fd52-acfa-4cba-8c04-53c9affdc4db')\n self.vs[52][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[52][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[52][\"GUID__\"] = UUID('f8f3ccd7-1cd5-4a57-b6a8-d35ba5bef6e4')\n self.vs[53][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[53][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[53][\"GUID__\"] = UUID('7c94a074-10cb-4087-acd1-09f74b36fee5')\n self.vs[54][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[54][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[54][\"GUID__\"] = UUID('857117de-5cb0-4717-8c19-a916f3913d44')\n self.vs[55][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[55][\"GUID__\"] = UUID('be66b7a4-a420-4307-9c3e-15a25480f612')\n self.vs[56][\"mm__\"] = \"\"\"rightExpr\"\"\"\n 
self.vs[56][\"GUID__\"] = UUID('8b06f23c-dc76-480c-a91b-2a89628187bb')\n self.vs[57][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[57][\"GUID__\"] = UUID('a30e8284-77ae-44b5-83fe-950b7a7cf134')\n self.vs[58][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[58][\"GUID__\"] = UUID('d79efc53-0195-4578-9e6e-f325fa1b9347')\n self.vs[59][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[59][\"GUID__\"] = UUID('4c20c97d-c715-4ddc-ba86-f4b8f93342f2')\n self.vs[60][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[60][\"GUID__\"] = UUID('b6badd99-bce6-4ecb-95f2-2a56eb8e31ec')\n self.vs[61][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[61][\"GUID__\"] = UUID('784aca61-7263-4894-ada3-514b7dc1263c')\n self.vs[62][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[62][\"GUID__\"] = UUID('b751aba0-9035-400e-81b0-a05af5ff13f8')\n self.vs[63][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[63][\"GUID__\"] = UUID('f5e9aa39-f124-44ff-bf9e-835d8231fa1c')\n self.vs[64][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[64][\"GUID__\"] = UUID('adb9f451-c62d-4218-aebc-d7065b89a497')\n self.vs[65][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[65][\"GUID__\"] = UUID('71250a4b-2989-43ad-8a29-d2c8f7011af6')\n self.vs[66][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[66][\"GUID__\"] = UUID('ef32cf77-f92d-4364-b997-484a66740660')\n self.vs[67][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[67][\"GUID__\"] = UUID('c3c01696-8c64-45f7-a598-6e443991711f')\n self.vs[68][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[68][\"GUID__\"] = UUID('0481036c-254e-4f46-a7c3-6f4a865fe7bd')\n self.vs[69][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[69][\"GUID__\"] = UUID('f98b92f3-81c2-403a-ba4b-29cb117d561a')\n self.vs[70][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[70][\"GUID__\"] = UUID('c32d7a5a-e311-48d5-b3fc-2a284673c4aa')\n self.vs[71][\"name\"] = \"\"\"eq1\"\"\"\n self.vs[71][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[71][\"GUID__\"] = UUID('0abf26da-d349-4bad-be96-014c8959a4cd')\n self.vs[72][\"name\"] = \"\"\"eq2\"\"\"\n self.vs[72][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[72][\"GUID__\"] = UUID('af92b37e-0c63-4fe5-a906-7cd312cad172')\n self.vs[73][\"name\"] = \"\"\"eq3\"\"\"\n self.vs[73][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[73][\"GUID__\"] = UUID('108e8752-a98c-44df-b24a-3b958c450846')\n self.vs[74][\"name\"] = \"\"\"eq4\"\"\"\n self.vs[74][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[74][\"GUID__\"] = UUID('340c5b78-fbbc-4734-ac7d-8a1f953679e3')\n self.vs[75][\"name\"] = \"\"\"eq5\"\"\"\n self.vs[75][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[75][\"GUID__\"] = UUID('63513c17-c285-47ce-9b5c-e658df31b8bf')\n self.vs[76][\"name\"] = \"\"\"eq6\"\"\"\n self.vs[76][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[76][\"GUID__\"] = UUID('dfd958e8-0fd4-4975-b28f-dab1df8a6858')\n self.vs[77][\"name\"] = \"\"\"eq7\"\"\"\n self.vs[77][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[77][\"GUID__\"] = UUID('1cd0e4a3-2b1a-42c8-bdf3-f98e156d8265')\n self.vs[78][\"name\"] = \"\"\"eq8\"\"\"\n self.vs[78][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[78][\"GUID__\"] = UUID('d7c1a1c4-4b83-4e3c-9e1f-2212a30343b1')\n self.vs[79][\"name\"] = \"\"\"eq9\"\"\"\n self.vs[79][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[79][\"GUID__\"] = UUID('aea37644-aa22-4e82-92a7-17d85ad5acf3')\n self.vs[80][\"name\"] = \"\"\"eq10\"\"\"\n self.vs[80][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[80][\"GUID__\"] = UUID('f7db1558-e110-4984-b825-62e4ce6f1324')\n self.vs[81][\"name\"] = \"\"\"eq11\"\"\"\n self.vs[81][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[81][\"GUID__\"] = UUID('a0722a1f-aaa4-4ac3-99d3-5bea37c15e79')\n 
self.vs[82][\"name\"] = \"\"\"eq12\"\"\"\n self.vs[82][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[82][\"GUID__\"] = UUID('ddbd74ac-21f7-4724-a2a8-b78c7389a8f4')\n self.vs[83][\"name\"] = \"\"\"eq13\"\"\"\n self.vs[83][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[83][\"GUID__\"] = UUID('a8fe40b1-4985-43d2-a874-0741d09ba4ae')\n self.vs[84][\"name\"] = \"\"\"eq14\"\"\"\n self.vs[84][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[84][\"GUID__\"] = UUID('281fd930-5f47-4b53-949b-e274ec95fdef')\n self.vs[85][\"name\"] = \"\"\"eq15\"\"\"\n self.vs[85][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[85][\"GUID__\"] = UUID('2e2199ae-3f44-4d76-b322-4b617a8c58db')\n self.vs[86][\"name\"] = \"\"\"eq16\"\"\"\n self.vs[86][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[86][\"GUID__\"] = UUID('25ad532f-5f8d-433a-bb65-507c97469275')\n self.vs[87][\"name\"] = \"\"\"isComposite\"\"\"\n self.vs[87][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[87][\"Type\"] = \"\"\"'Bool'\"\"\"\n self.vs[87][\"GUID__\"] = UUID('75b3e3d3-2cfc-4444-b65e-2fc5a8b7ae5d')\n self.vs[88][\"name\"] = \"\"\"literal\"\"\"\n self.vs[88][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[88][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[88][\"GUID__\"] = UUID('426aea1c-8a9f-4651-b297-9ec3c1c1352e')\n self.vs[89][\"name\"] = \"\"\"literal\"\"\"\n self.vs[89][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[89][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[89][\"GUID__\"] = UUID('284a3a1d-8a2d-4cef-9551-98d424afe038')\n self.vs[90][\"name\"] = \"\"\"literal\"\"\"\n self.vs[90][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[90][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[90][\"GUID__\"] = UUID('3b7a1cdc-9ffb-48db-994f-497c06449458')\n self.vs[91][\"name\"] = \"\"\"literal\"\"\"\n self.vs[91][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[91][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[91][\"GUID__\"] = UUID('40cff5ab-cab2-4fab-bbc1-c8039fe486ac')\n self.vs[92][\"name\"] = \"\"\"name\"\"\"\n self.vs[92][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[92][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[92][\"GUID__\"] = UUID('b9e0ab51-1690-44de-875b-773826f9e420')\n self.vs[93][\"name\"] = \"\"\"literal\"\"\"\n self.vs[93][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[93][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[93][\"GUID__\"] = UUID('708e489d-456a-4974-9198-73334eb3d1d8')\n self.vs[94][\"name\"] = \"\"\"literal\"\"\"\n self.vs[94][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[94][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[94][\"GUID__\"] = UUID('bdabcea3-c164-4f6b-a54f-be957abedb49')\n self.vs[95][\"name\"] = \"\"\"literal\"\"\"\n self.vs[95][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[95][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[95][\"GUID__\"] = UUID('22f79d9e-a9bf-41b5-9559-4560af4afc10')\n self.vs[96][\"name\"] = \"\"\"literal\"\"\"\n self.vs[96][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[96][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[96][\"GUID__\"] = UUID('56b242b2-5ebd-4a02-a1bb-829ecc6822a7')\n self.vs[97][\"name\"] = \"\"\"name\"\"\"\n self.vs[97][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[97][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[97][\"GUID__\"] = UUID('46680774-a892-41cb-8005-809b5eea2003')\n self.vs[98][\"name\"] = \"\"\"literal\"\"\"\n self.vs[98][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[98][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[98][\"GUID__\"] = UUID('c8c58f99-e94c-442b-a747-c873a43b903b')\n self.vs[99][\"name\"] = \"\"\"literal\"\"\"\n self.vs[99][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[99][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[99][\"GUID__\"] = 
UUID('18aa7445-341a-40e8-b09c-70904b3f9994')\n self.vs[100][\"name\"] = \"\"\"literal\"\"\"\n self.vs[100][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[100][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[100][\"GUID__\"] = UUID('9f63580a-288f-4d14-b275-b96062163c5a')\n self.vs[101][\"name\"] = \"\"\"pivot\"\"\"\n self.vs[101][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[101][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[101][\"GUID__\"] = UUID('c8777ba9-8c6e-4582-a082-81f2f34e6016')\n self.vs[102][\"name\"] = \"\"\"pivot\"\"\"\n self.vs[102][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[102][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[102][\"GUID__\"] = UUID('ce2a6aa7-c8ce-4cee-807c-cd4de96a08bf')\n self.vs[103][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[103][\"GUID__\"] = UUID('8119a747-1d59-4f48-83a6-16869a919672')\n self.vs[104][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[104][\"GUID__\"] = UUID('b7c5aeaf-7e59-4a81-9616-bb2474f2660f')\n self.vs[105][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[105][\"GUID__\"] = UUID('ced29f38-6ce7-449c-823f-34aaab43899b')\n self.vs[106][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[106][\"GUID__\"] = UUID('e29dc6da-439d-4a9d-9d40-e87aa9fbebd3')\n self.vs[107][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[107][\"GUID__\"] = UUID('af49357e-a46d-4ee5-ab4a-b6d6ef261df0')\n self.vs[108][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[108][\"GUID__\"] = UUID('ff49109b-ccc0-4635-9a33-d88c1d675bc6')\n self.vs[109][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[109][\"GUID__\"] = UUID('423ad2a2-0a19-4192-902d-706965800fef')\n self.vs[110][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[110][\"GUID__\"] = UUID('5864c11a-7792-4549-999f-bc86a4246314')\n self.vs[111][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[111][\"GUID__\"] = UUID('7182946d-d5f6-4a7c-acaa-d4eeb97133db')\n self.vs[112][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[112][\"GUID__\"] = UUID('d965f0b2-048d-490c-81f5-2b18446941de')\n self.vs[113][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[113][\"GUID__\"] = UUID('6e4c8ba9-6ab0-44d3-9cc6-c181772a1e3b')\n self.vs[114][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[114][\"GUID__\"] = UUID('5633c48b-1add-43eb-9789-1bece00f8079')\n self.vs[115][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[115][\"GUID__\"] = UUID('d2c598e2-09b1-4c12-acff-871f6662238a')\n self.vs[116][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[116][\"GUID__\"] = UUID('33a09bc8-cfa9-4367-834e-41bfae2fa7b6')\n self.vs[117][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[117][\"GUID__\"] = UUID('858b7fe0-edf0-4eda-ae81-6477f6499fb7')\n self.vs[118][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[118][\"GUID__\"] = UUID('a19a8472-6b86-4aee-b2bb-d66b4d26aeea')\n self.vs[119][\"name\"] = \"\"\"true\"\"\"\n self.vs[119][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[119][\"Type\"] = \"\"\"'Bool'\"\"\"\n self.vs[119][\"GUID__\"] = UUID('ba19f7ae-c0e3-43f5-9c87-2e08b3ff7d4e')\n self.vs[120][\"name\"] = \"\"\"sh\"\"\"\n self.vs[120][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[120][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[120][\"GUID__\"] = UUID('b78c45bc-2ecd-438a-a905-dbd90a4edeed')\n self.vs[121][\"name\"] = \"\"\"exit_in\"\"\"\n self.vs[121][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[121][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[121][\"GUID__\"] = UUID('0bbc3f31-d9e3-49a7-b213-d874f9d6e0ac')\n self.vs[122][\"name\"] = \"\"\"exack_in\"\"\"\n self.vs[122][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[122][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[122][\"GUID__\"] = UUID('e58ce45d-49e1-44b9-8a0a-78c1f1305afd')\n self.vs[123][\"name\"] = \"\"\"sh_in\"\"\"\n 
self.vs[123][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[123][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[123][\"GUID__\"] = UUID('34144527-9a72-44f3-8afe-f49bbe5fac47')\n self.vs[124][\"name\"] = \"\"\"C\"\"\"\n self.vs[124][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[124][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[124][\"GUID__\"] = UUID('61ed1583-c983-4369-b0de-0c3ca82aba52')\n self.vs[125][\"name\"] = \"\"\"enp\"\"\"\n self.vs[125][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[125][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[125][\"GUID__\"] = UUID('a4bfdfad-6e17-46b1-9939-685bd4cbfb62')\n self.vs[126][\"name\"] = \"\"\"exit_in\"\"\"\n self.vs[126][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[126][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[126][\"GUID__\"] = UUID('92007092-a080-4cb3-ba90-cbc8e6637732')\n self.vs[127][\"name\"] = \"\"\"exack_in\"\"\"\n self.vs[127][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[127][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[127][\"GUID__\"] = UUID('1a61b1e5-e926-45cd-bf6a-60adeef0d338')\n self.vs[128][\"name\"] = \"\"\"sh_in\"\"\"\n self.vs[128][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[128][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[128][\"GUID__\"] = UUID('95c52a1f-42ae-4384-bcfc-0cab537ee1cf')\n self.vs[129][\"name\"] = \"\"\"H\"\"\"\n self.vs[129][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[129][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[129][\"GUID__\"] = UUID('146f9ec3-3f2d-48a1-92ac-a5546268e069')\n self.vs[130][\"name\"] = \"\"\"exit_in\"\"\"\n self.vs[130][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[130][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[130][\"GUID__\"] = UUID('c52aec39-171b-4710-8150-b343a557bebf')\n self.vs[131][\"name\"] = \"\"\"exack_in\"\"\"\n self.vs[131][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[131][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[131][\"GUID__\"] = UUID('e2ab70c6-01a2-420a-9e96-bb238fe29689')\n self.vs[132][\"name\"] = \"\"\"sh_in\"\"\"\n self.vs[132][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[132][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[132][\"GUID__\"] = UUID('f476d190-6014-4c6a-a27f-c3f45b9d10ba')\n self.vs[133][\"name\"] = \"\"\"procdef\"\"\"\n self.vs[133][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[133][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[133][\"GUID__\"] = UUID('5a678e2c-8444-4e53-a430-5f0b1a603c07')\n self.vs[134][\"name\"] = \"\"\"localdefcompstate\"\"\"\n self.vs[134][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[134][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[134][\"GUID__\"] = UUID('dfac50b9-4956-45c0-b8a5-14f609e078e5')\n self.vs[135][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[135][\"GUID__\"] = UUID('632f235b-d18d-4939-b4b8-9d38a7505cc8')\n self.vs[136][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[136][\"GUID__\"] = UUID('b5724e21-522d-415c-8538-59b279583ff4')\n self.vs[137][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[137][\"GUID__\"] = UUID('a05b3ebc-4b86-43f0-adcd-d46d8c4d773e')\n self.vs[138][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[138][\"GUID__\"] = UUID('b43788ff-9ab6-4bec-b8ae-c76b10985fc3')\n self.vs[139][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[139][\"GUID__\"] = UUID('16c28ca0-6429-4540-9505-e8057aad958a')\n self.vs[140][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[140][\"GUID__\"] = UUID('e97eb3e2-8fca-41a6-9599-a173acee4c22')\n self.vs[141][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[141][\"GUID__\"] = UUID('eda0c12e-26c0-4296-9d34-62cbe764e151')\n self.vs[142][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[142][\"GUID__\"] = 
UUID('336e11b9-cbc3-41b4-9c07-041ed4ba1453')\n self.vs[143][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[143][\"GUID__\"] = UUID('31297722-e0a1-4e03-8c28-44abe1930256')\n self.vs[144][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[144][\"GUID__\"] = UUID('51fcd9e5-c817-4710-9a24-d080b3f8fa71')\n self.vs[145][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[145][\"GUID__\"] = UUID('7acc9f40-a78c-47ac-8e38-fc7f4647c2f1')\n self.vs[146][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[146][\"GUID__\"] = UUID('c94eef8e-f552-4b53-ba99-f21c13dfca4a')\n self.vs[147][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[147][\"GUID__\"] = UUID('09d8138f-8be9-4a30-af93-cc714a2570db')\n self.vs[148][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[148][\"GUID__\"] = UUID('792865ce-75f2-41cb-9c42-74f831e96a76')\n self.vs[149][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[149][\"GUID__\"] = UUID('fb0f0ebe-59c0-4ffc-8370-2ead7eb40f18')\n self.vs[150][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[150][\"GUID__\"] = UUID('23a9a8da-507e-4d11-a8e0-f5b721f01f96')\n self.vs[151][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[151][\"GUID__\"] = UUID('38fd864c-df5e-4e85-9838-2e665d75637c')", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoMFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_framer_sink_1_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n _itkQuadEdgeMeshPointPython.itkQuadEdgeMeshPointF2GQEULLULLBBT_swiginit(self, _itkQuadEdgeMeshPointPython.new_itkQuadEdgeMeshPointF2GQEULLULLBBT(*args))", "def __init__(self, position, spectrum, brightness):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_samples_per_symbol(self, float sps) Set the number of samples per symbol. Sets the number of samples per symbol the system should use. This value is used to calculate the filter taps, and changing it will force a recalculation.
def set_samples_per_symbol(self, *args, **kwargs): return _digital_swig.digital_fll_band_edge_cc_sptr_set_samples_per_symbol(self, *args, **kwargs)
[ "def set_sample_number(self):\r\n self.n_samples = self.exprs.shape[0]", "def change_sampling_rate(self, sampling_rate):\n\n self.sampling_rate = sampling_rate", "def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricISS3ISS3_SetNumberOfSpatialSamples(self, num)", "def audio_sampling_rate_test(self, audio_sampling_rate_test):\n\n self._audio_sampling_rate_test = audio_sampling_rate_test", "def set_sample_rate(self, sample_rate):\n self.dtg.write('TBAS:FREQ {0:e}'.format(sample_rate))\n return self.get_sample_rate()", "def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricISS2ISS2_SetNumberOfSpatialSamples(self, num)", "def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricIF3IF3_SetNumberOfSpatialSamples(self, num)", "def set_sampling_rate(address, name, sampling_rate):\n explore = explorepy.explore.Explore()\n explore.connect(mac_address=address, device_name=name)\n explore.set_sampling_rate(int(sampling_rate))", "def set_sweep(self, sweep_len: int, start_freq: float,\n\t\t\t\t stepsize: float) -> None:\n\t\tif not isinstance(self.instrument, SignalHound_USB_SA124B):\n\t\t\traise RuntimeError(\"'FrequencySweep' is only implemented\"\n\t\t\t\t\t\t\t \"for 'SignalHound_USB_SA124B'\")\n\t\tend_freq = start_freq + stepsize*(sweep_len-1)\n\t\tfreq_points = tuple(np.linspace(start_freq, end_freq, sweep_len))\n\t\tself.setpoints = (freq_points,)\n\t\tself.shape = (sweep_len,)\n\t\tself.instrument._trace_updated = True", "def set_reg_symb_setting(self, symb, name, setting):\n self._set_reg_symb_setting(symb, name.encode(), setting.encode())", "def setSamplingTime(self, time):\n self.samplingTime = time", "def setSamplingPoints(self, samplingPoints) -> None:\n ...", "def set_frequency(self, frequency, phase, ttls=0):\n \"Sets the frequency generator to a desired frequency (Hz)\"\n s_content = '<analogout id=\"0\" f=\"%f\" phase=\"%f\"/>' % (frequency, phase)\n if ttls != 0:\n s_content += '<ttlout value=\"0x%06x\"/>' % ttls\n self.state_list.append(StateSimple(2e-6, s_content))", "def put_symbol(self, s):\n self._check(pn_data_put_symbol(self._data, s))", "def setScanFreq(self,newFreq):\n if newFreq == 'LOW':\n\n self.scanFreq = newFreq\n\n elif newFreq == 'MEDIUM':\n\n self.scanFreq = newFreq\n\n elif newFreq == 'HIGH':\n\n self.scanFreq = newFreq", "def set_frequency(self, freq):\n\n if self.shape in [\"SIN\", \"SQU\"]:\n if freq > 15e6:\n print('Specified frequency is too high. No change')\n return\n else:\n if freq > 100e3:\n print('Specified frequency is too high. 
No change')\n return\n\n self.gpib.write(\"FREQ %.2E\" % freq)", "def set_Df_sweep(instrument, f_start, f_stop, unit='MHZ', channel_num=1):\n command1 = ':SENSe%d:FREQuency:STARt %G %s' % (channel_num, f_start, unit)\n command2 = ':SENSe%d:FREQuency:STOP %G %s' % (channel_num, f_stop, unit)\n instrument.write(command1)\n instrument.write(command2)", "def ni845xSpiScriptNumBitsPerSample(self, numbitspersample=0):\r\n returnvalue = self.ni8452.ni845xSpiScriptNumBitsPerSample(self.script_handle, c.c_uint16(numbitspersample))\r\n if returnvalue != 0:\r\n self.ni845xStatusToString(returnvalue)", "def setup_symbols_for_species_pKs(self, sid_list):\n new_variable_index = 0\n self.variable_vector_dict = {}\n for species_id in sid_list:\n pK_data_val = self.get_pK_val(species_id) \n self.variable_vector_dict[species_id] = [symbols('x[%d]'%new_variable_index), pK_data_val]\n new_variable_index += 1\n #for each species_id, set up the sequence of species that eventually lead to least protonated state, for binding constant calculation\n self.compounds_species_id_sequence = {}\n for species_id in self.compounds_data_dict.keys():\n self.compounds_species_id_sequence[species_id] = self.get_sequence_of_species_ids(species_id)", "def set_frequency(self, frequency):\r\n self.obs.centerFreqHz = float(frequency)\r\n self.ref.centerFreqHz = float(frequency)\r\n self.ave.centerFreqHz = float(frequency)\r\n self.hot.centerFreqHz = float(frequency)\r\n self.cold.centerFreqHz = float(frequency)\r\n deltaNu = self.obs.bandwidthHz/float(self.vlen)\r\n n0 = self.obs.centerFreqHz - (self.obs.bandwidthHz/2.)\r\n nu = n0\r\n print(\"Setting Frequency: %10.0f Hz\" % (self.obs.centerFreqHz))\r\n nx = len( self.obs.xdata)\r\n if nx != self.vlen:\r\n self.update_len(self.obs)\r\n for iii in range(self.vlen):\r\n self.obs.xdata[iii] = nu\r\n nu = nu + deltaNu" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_rolloff(self, float rolloff) Set the rolloff factor of the shaping filter. This sets the rolloff factor used in the pulse shaping filter and in the calculation of the filter taps; changing it will force a recalculation of the taps. It should be the same value used in the transmitter's pulse shaping filter. It must be between 0 and 1 and is usually between 0.2 and 0.5 (0.22 and 0.35 are commonly used values).
def set_rolloff(self, *args, **kwargs): return _digital_swig.digital_fll_band_edge_cc_sptr_set_rolloff(self, *args, **kwargs)
[ "def rolloff_curvefit(self, angles, rolloff):\n\n return curve_fit(self.rolloff_polynomial, angles, rolloff)", "def rolloff(self):\n absSpectrum = abs(self)\n spectralSum = numpy.sum(absSpectrum)\n\n rolloffSum = 0\n rolloffIndex = 0\n for i in range(0, len(self)):\n rolloffSum = rolloffSum + absSpectrum[i]\n if rolloffSum > (0.85 * spectralSum):\n rolloffIndex = i\n break\n\n # Convert the index into a frequency\n frequency = rolloffIndex * (self.sampleRate / 2.0) / len(self)\n return frequency", "def set_stop_wavelength(self,val): #documented\n if self.__is_int_or_float(val) and self.__is_between(val,600,1800):\n if val < self.get_start_wavelength():\n self.__verbose_output( \"error: stop wavelength can not be set to < start wavelength\",1)\n else:\n self.send_message(\"STO %.1f\"%(val)) \n else:\n self.__verbose_output( \"error: set_stop_wavelength() - invalid argument\",1)", "def set_frequency(self):\n if self.laser_status:\n self._fiber_shooting_logic.set_frequency(self._mw.frequency_spinBox.value())\n else:\n pass\n return", "def setFlyingVelocityMod(self, flying):\n getHandle().setFlyingVelocityMod(flying)", "def set_flux(self, flux):\n self.flux = flux", "def change_filter_freq(self, value):\n self.filter_frequency = value\n self.filtered_voltage = self.filtering(self.signal[:, 1])", "def set_scaling(self, factor=None):\n if factor is None:\n factor = self.get_default_scaling_factor()\n\n factor = float(factor)\n if np.isnan(factor) or factor == 1:\n return\n log.debug(f\"Applying scaling factor {factor:.3f}\")\n self.gain /= factor", "def adjust_cuts_scroll(self, plot, event):\n bm = self.fitsimage.get_bindings()\n pct = -self.scroll_pct\n if event.step > 0:\n pct = -pct\n bm.cut_pct(self.fitsimage, pct)", "def set_sweep(self, sweep_len: int, start_freq: float,\n\t\t\t\t stepsize: float) -> None:\n\t\tif not isinstance(self.instrument, SignalHound_USB_SA124B):\n\t\t\traise RuntimeError(\"'FrequencySweep' is only implemented\"\n\t\t\t\t\t\t\t \"for 'SignalHound_USB_SA124B'\")\n\t\tend_freq = start_freq + stepsize*(sweep_len-1)\n\t\tfreq_points = tuple(np.linspace(start_freq, end_freq, sweep_len))\n\t\tself.setpoints = (freq_points,)\n\t\tself.shape = (sweep_len,)\n\t\tself.instrument._trace_updated = True", "def setThrottle(self, throttle):\n \n self._throttle = float(throttle) \n absThrottle = abs(self._throttle)\n \n if absThrottle > Motor.MAX_THROTTLE: \n self._throttle = Motor.MAX_THROTTLE if self._throttle >= 0.0 else -Motor.MAX_THROTTLE\n\n self._log(\"throttle: {0}\".format(self._throttle))", "def SetLengthSliding(self, *args):\n return _FairCurve.FairCurve_DistributionOfTension_SetLengthSliding(self, *args)", "def setDopplerFactor(self, factor: 'float') -> \"void\":\n return _coin.SoVRMLSound_setDopplerFactor(self, factor)", "def ramp_reactor_level(self, target_val=None, duration=None, slope=None):\n label = 'ReactorLevelSP'\n self.ramp_setpoint(label, target_val, duration, slope)", "def setVoltageSlewRate(self, rise, fall):\n # TODO: This doesn't work\n self.instr.write(\"RISE %f\" % float(rise))\n self.instr.write(\"FALL %f\" % float(fall))", "def set_reimers_wind_efficiency(self, index_of_the_star, reimers_wind_efficiency):\n return self.set_control(index_of_the_star,'reimers_scaling_factor',reimers_wind_efficiency)", "def wfe_roll_drift(self, value):\n self._wfe_roll_drift = value\n if value!=0:\n self.wfe_drift = True", "def setWvlCutoffs(self, wvlLowerLimit=7000, wvlUpperLimit=16000):\n self.wvlLowerLimit = wvlLowerLimit\n self.wvlUpperLimit = wvlUpperLimit", "def 
set_scale(self, scale: float) -> None:\n lib.wlr_output_set_scale(self._ptr, scale)", "def setThrottle(self, throttle):\n \n self._throttle = float(throttle) \n absThrottle = abs(self._throttle)\n \n #Fordwards or backwards movement\n #TODO: 20181114 DPM: This is not required to do if the throttle sign was not changed\n if self._throttle >= 0.0:\n SysfsWriter.writeOnce(\"0\", \"/sys/class/gpio/gpio{0}/value\".format(self._gpioId))\n else:\n SysfsWriter.writeOnce(\"1\", \"/sys/class/gpio/gpio{0}/value\".format(self._gpioId))\n \n\n #Throttle\n if absThrottle > 0.0 and absThrottle <= Motor.MAX_THROTTLE: \n \n self._duty = int((self._rangeDuty * absThrottle) + self._minDuty)\n \n elif absThrottle == 0.0:\n self._setNeutralThrottle()\n \n else: # absThrottle > Motor.MAX_THROTTLE\n self._duty = int((self._rangeDuty * Motor.MAX_THROTTLE) + self._minDuty)\n self._throttle = Motor.MAX_THROTTLE if self._throttle > 0.0 else -Motor.MAX_THROTTLE\n\n self._sysfsWriter.write(str(self._duty))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_filter_size(self, int filter_size) Set the number of taps in the filter. This sets the number of taps in the band-edge filters. Setting this will force a recalculation of the filter taps. This should be about the same number of taps used in the transmitter's shaping filter and also not very large. A large number of taps results in a large delay between the input and the frequency estimation, and so the estimate will not be as accurate. Between 30 and 70 taps is usual.
def set_filter_size(self, *args, **kwargs): return _digital_swig.digital_fll_band_edge_cc_sptr_set_filter_size(self, *args, **kwargs)
[ "def smooth(self,size=10):\n from scipy.ndimage import median_filter\n self.n = median_filter(self.n,size)\n self.k = median_filter(self.k,size)", "def features_size(self, features_size):\n\n self._features_size = features_size", "def sampled_frame_size_test(self, sampled_frame_size_test):\n\n self._sampled_frame_size_test = sampled_frame_size_test", "def set_buffer_size(self, buffer_size):\n self.buffer_size = buffer_size", "def change_filter_freq(self, value):\n self.filter_frequency = value\n self.filtered_voltage = self.filtering(self.signal[:, 1])", "def __init__(self, size, **kwargs):\n self.active_size = 0\n super(DigitalRFSizeRingbufferHandler, self).__init__(\n threshold=size, **kwargs\n )", "def initialize_filter(self, f_size, scale=1.0):\n stddev = scale / np.sqrt(np.prod(f_size))\n return np.random.normal(loc=0, scale=stddev, size=f_size)", "def setPopulationSize(self, size):\n\n self.popSize = size", "def shuffle_buffer_size(self, shuffle_buffer_size):\n self._shuffle_buffer_size = shuffle_buffer_size", "def set_timeseries_chunk_size(self, new_chunk_size):\n _chunk_size = int(new_chunk_size)\n if _chunk_size < 1:\n raise ValueError(\"Chunk size must be greater than 0\")\n self._timeseries_chunk_size = slice(0, _chunk_size)\n logger.info(\"New chunk for timeseries size has been set to %d\", new_chunk_size)\n self._grid_kwargs.update({\"timeseries_chunk_size\": self._timeseries_chunk_size})", "def enableStopSize(self, size=0):\n\n if type(size) != int:\n print(\"Error: size must be an integer\")\n _sys.exit(1)\n \n self._stopSize = size", "def setFilmSize(self, size):\n self.filmSize = size", "def update_size(self, size):\n self.batch_size_estimation = size\n self.trust_batch_estimation = True", "def set_size_threshold(self, size_threshold: int) -> 'FileCompactStrategy.Builder':\n self._j_builder.setSizeThreshold(size_threshold)\n return self", "def setSize(self, newsize: 'SbVec2s') -> \"void\":\n return _coin.SoEventManager_setSize(self, newsize)", "def set_floor_size(self, floor_size):\n self.add_item(conf.FLOOR_SIZE_KEY, floor_size)", "def setFilter(self, type: int, filter: bool) -> None:\n ...", "def set_cache_size(self, size):\n pass", "def batch_size(self, new_batch_size):\n\n self._batch_size = int(new_batch_size)", "def rescale(self, s):\n if s == 1:\n return self\n assert not self.isloaded(), \"Filters can only be applied prior to load() - Try calling flush() first\"\n self.shape(shape=(int(np.round(self.height()*float(np.ceil(s*1e6)/1e6))), int(np.round(self.width()*float(np.ceil(s*1e6)/1e6))))) # update the known shape \n self._ffmpeg = self._ffmpeg.filter('scale', 'iw*%1.6f' % float(np.ceil(s*1e6)/1e6), 'ih*%1.6f' % float(np.ceil(s*1e6)/1e6)) # ceil last significant digit to avoid off by one\n return self" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_samples_per_symbol(self) -> float Returns the number of samples per symbol used for the filter.
def get_samples_per_symbol(self): return _digital_swig.digital_fll_band_edge_cc_sptr_get_samples_per_symbol(self)
[ "def sample_frequency(self):\n return self._sample_frequency", "def n_profile_samples(self):\n return self.__n_profile_samples", "def packet_get_samples_per_frame(cls, data: bytes) -> int:\n return _lib.opus_packet_get_samples_per_frame(data, cls.SAMPLING_RATE)", "def getSamplingRate(self):\n return self.samp[0]", "def n_samples(self):\n return len(self.sampler)", "def get_num_samples(self, split_name):", "def sample_size(self):\n\t\treturn _get_sample_size(self._device)", "def getNumSamples(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return SliceSamplerBase.getNumSamples(self)", "def getNumSamples(sound):\n return getLength(sound)", "def sample_unit_size(self):\n raise NotImplementedError()", "def sample_rate(self) -> float:\n return self._rate", "def num_samples(self):\r\n return self.snapshots[0].num_samples", "def sampling_rate(self):\n return self.file.sampling_rate", "def sample_rate(self):\n return (len(self) - 1) * self.df * 2.0", "def getSampleRate(self) -> \"int\":\n return _coin.SoVRMLAudioClip_getSampleRate(self)", "def symbol_count (self):\n \n raise NotImplementedError", "def get_sampling_rate(self):\n\n return self.sampling_rate", "def sample_unit_size(self):\n return np.array([t['length'] for t in self.sample_units])", "def get_min_sample_size(self):\n\n pass", "def num_symbols(self):\r\n return self['sh_size'] // self['sh_entsize']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_rolloff(self) -> float Returns the rolloff factor used for the filter.
def get_rolloff(self): return _digital_swig.digital_fll_band_edge_cc_sptr_get_rolloff(self)
[ "def rolloff(self):\n absSpectrum = abs(self)\n spectralSum = numpy.sum(absSpectrum)\n\n rolloffSum = 0\n rolloffIndex = 0\n for i in range(0, len(self)):\n rolloffSum = rolloffSum + absSpectrum[i]\n if rolloffSum > (0.85 * spectralSum):\n rolloffIndex = i\n break\n\n # Convert the index into a frequency\n frequency = rolloffIndex * (self.sampleRate / 2.0) / len(self)\n return frequency", "def getThrottle(self) -> float:\n return self.getRawAxis(self.axes[self.AxisType.kThrottle])", "def getDopplerFactor(self) -> \"float\":\n return _coin.SoVRMLSound_getDopplerFactor(self)", "def getDopplerVelocity(self) -> \"float\":\n return _coin.SoVRMLSound_getDopplerVelocity(self)", "def freq_kep(self):\n return self.vel_ff/self.rads", "def relative_rate(self):\n return _wavelet_swig.wvps_ff_sptr_relative_rate(self)", "def relative_rate(self):\n return _wavelet_swig.wavelet_ff_sptr_relative_rate(self)", "def relative_rate(self):\n return _raw_util.raw_divide_ff_sptr_relative_rate(self)", "def rolloff_curvefit(self, angles, rolloff):\n\n return curve_fit(self.rolloff_polynomial, angles, rolloff)", "def relative_rate(self):\n return _wavelet_swig.squash_ff_sptr_relative_rate(self)", "def get_filter_slope(self):\n return self.slopes[np.int(self.query(\"OFSL?\"))]", "def get_stop_px(self):\n return float(self.stopSpin.get())", "def wavelength_rel(self) -> float:\n wavelength_rel = (\n sc.h\n / np.sqrt(\n 2 * sc.m_e * sc.e * 1000 * self.voltage * (1 + (sc.e * 1000 * self.voltage) / (2 * sc.m_e * sc.c**2))\n )\n * (10**10)\n )\n return wavelength_rel", "def vel_coef(self):\n return self._vel_coef", "def _get_slope(self):\n return self._slope", "def yaw_pitch_roll(self):\n \n return self.__tilt", "def get_calibration_factor(self):\n\n return float(self.inst.query(\"INPut:OFFSet?\").strip())", "def get_linear_intensity(self):\n return self.linear_polarization", "def ramp_rate(self):\n return self.field_control_params[2]", "def getThrottleChannel(self) -> int:\n return self.axes[self.Axis.kThrottle]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_filter_size(self) -> int Returns the number of taps of the filter.
def get_filter_size(self): return _digital_swig.digital_fll_band_edge_cc_sptr_get_filter_size(self)
[ "def size(self) -> \"unsigned int\":\n return _coin.SbFifo_size(self)", "def input_filters(self) -> int:\n return self.__input_filters", "def incoming_spike_buffer_size(self):\n return self.__incoming_spike_buffer_size", "def get_size():\n l = self.linfeats.get_size()\n return 2*l + (l *(l-1)) / 2", "def _get_count(self) -> \"size_t\" :\n return _core.ToolbarTabList__get_count(self)", "def get_size(self) -> int:\n return len([i for i in self.window if i is not None])", "def fft_size(self):\n return int(2**(self._spectrum_fftselector+6))", "def _get_count(self) -> \"size_t\" :\n return _core.ToolbarTabs__get_count(self)", "def action_size(self):\n return self.datasets[\"main\"].ACTION_SIZE", "def _get_count(self) -> \"size_t\" :\n return _core.ToolbarPanelList__get_count(self)", "def test_len(self):\n self.f.add_filter(self.TEST_FILTER_KEY, self.TEST_FILTER_VALUE_1)\n self.assertEqual(1, len(self.f))", "def state_size(self):\n return self._state_size", "def _get_count(self) -> \"size_t\" :\n return _core.FavoriteAppearances__get_count(self)", "def filter_life(self) -> int:\n try:\n return int(self.details['filter_life'])\n except KeyError:\n return 0", "def _get_count(self) -> \"size_t\" :\n return _core.ToolbarPanels__get_count(self)", "def state_size(self):\n #############################################\n # TODO: YOUR CODE HERE #\n #############################################\n params = self.params\n return params[0]+params[1]\n \n #raise NotImplementedError('Please edit this function.')", "def find_NumSamplesPerSweep(self,):\n\n \n\n # just return length of analogSignal\n return len(self.sweep_data[0]) if len(self.sweep_data) > 0 else 0", "def num_bands(self):\n return self._data.shape[2]", "def size(self):\n return len(self.FDs)", "def get_n_state_features(self):\n return self.n_features" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
print_taps(self) Print the taps to screen.
def print_taps(self): return _digital_swig.digital_fll_band_edge_cc_sptr_print_taps(self)
[ "def print_tiles(self, tiles_to_print):\r\n for cur_tile in tiles_to_print:\r\n print(cur_tile)", "def print(self, *args):\n print(*args, file=self.dump_file)", "def print_board(self):\n for cell in self.board:\n print(\"current step: {}, ladder top: {}, snake_tail: {}\".\n format(cell.current_step, cell.ladder_top, cell.snake_tail))", "def printTotalGaps(self, name=None):\n if name is None:\n name = \"Gaps_{}.txt\".format(os.path.splitext(os.path.basename(self.resultsFile))[0])\n \n with open(name, \"w\") as gaps_file:\n print >> gaps_file, \"index\\tgap length\"\n for index, gap in enumerate(self.total_gaps):\n print >> gaps_file, \"{}\\t{}\".format(index, gap)", "def print_plosives():\n\n print(\"p\")\n print(\"t\")\n print(\"k\")\n print(\"b\")\n print(\"d\")\n print(\"g\")", "def _print_packets(self):\n controller = self.get_controller()\n print \"PENDING PACKETS\"\n for p in controller.get_pending_packets():\n print \" - \" + str(p.get_pos()) + \" \" + str(p)\n print \"BUFFER PACKETS\"\n buf = controller.get_buffer()\n for p in buf:\n print \" [\" + str(buf.index(p)) + \"] \" + str(p.get_pos()) + \" \" + str(p)", "def print_screen( self ):\n\t\tself.msg(1,\"Printing the current TN3270 buffer:\")\n\t\tprint self.get_screen()", "def print(self):\n print(RenderTree(self.root_node).by_attr(\"text\"))\n for paragraph in self.annotation_list:\n print(paragraph['text'])", "def time_track_print():\n\tglobal _time_track_dict\n#\tif not _time_track_dict.values(): return\n\tmax_time = max(_time_track_dict.values())\n\ttupel_list = [(fn_name, \"%.2f%%\" % (100*exe_time/max_time), \"%fs\" % exe_time) for (fn_name, exe_time) in sorted(_time_track_dict.items(), key=operator.itemgetter(1), reverse=True)]\n\tmax_len_item_1 = max([len(x) for (x,_,_) in tupel_list])\n\tmax_len_item_2 = max([len(x) for (_,x,_) in tupel_list])\n\tmax_len_item_3 = max([len(x) for (_,_,x) in tupel_list])\n\tfor (x,y,z) in tupel_list:\n\t\tprint x.ljust(max_len_item_1 + 3), y.rjust(max_len_item_2), z.rjust(max_len_item_3 + 3)", "def prints(self):\n print('Trip\\n\\tstart date: {}\\n\\tfinal date: {}\\n\\tgasoline: {}'.\n format(time.strftime(\"%Y.%m.%d %H:%M\",\n time.localtime(self.start_date)),\n time.strftime(\"%Y.%m.%d %H:%M\",\n time.localtime(self.end_date)),\n self.fuel))", "def Print(self,*args,**kwargs):\n print(*args,**kwargs)", "def print_to_stdout(text, reps):\r\n\tfor i in range(reps): # use: range(size), range(begin, end) or range(begin, end, step_size)\r\n\t\tprint(text)", "def print_timeline(self):\n tweets = self.get_tweets_from_timeline()\n tweets = MakingActions.get_text_from_list(tweets)\n for items in tweets:\n print items", "def print_board(self):\n util_print_board(board_dict=self.to_printable_dict(),\n message=self.__hash__(),\n compact=False)", "def prints(clock_list):\n i = 0\n for item in clock_list:\n print(\"Frame #\" + str(i) + \" contains \" + str(item.printer()))\n i += 1", "def print(self, *args):\n print(*args, file=self.output_file)", "def mprint_stack(self, none=None):\n for _ in range(self.count):\n self.print_stack()\n time.sleep(self.interval)\n return", "def _print_power(activity: Activity):\n lrp = LeftRightPrinter(left_width=60)\n _print_power_data(activity, lrp)\n _print_power_zones(activity, lrp)\n lrp.print()", "def finish_printing():\n set_extruder_temp(0,0)\n set_extruder_temp(0,1)\n set_bed_temp()\n drop_bed(100)\n home_axis('x')\n disable_motors()", "def mtprint_stack(self):\n th = threading.Thread(target=self.mprint_stack, args=(self,))\n th.start()\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fll_band_edge_cc(float samps_per_sym, float rolloff, int filter_size, float bandwidth) -> digital_fll_band_edge_cc_sptr Frequency Lock Loop using band-edge filters. The frequency lock loop derives a band-edge filter that covers the upper and lower bandwidths of a digitally-modulated signal. The bandwidth range is determined by the excess bandwidth (e.g., rolloff factor) of the modulated signal. The placement in frequency of the band edges is determined by the oversampling ratio (number of samples per symbol) and the excess bandwidth. The size of the filters should be fairly large so as to average over a number of symbols. The FLL works by filtering the upper and lower band edges into x_u(t) and x_l(t), respectively. These are combined to form cc(t) = x_u(t) + x_l(t) and ss(t) = x_u(t) - x_l(t). Combining these to form the signal e(t) = Re{cc(t) × ss(t)^*} (where ^* is the complex conjugate) provides an error signal at the DC term that is directly proportional to the carrier frequency. We then make a second-order loop using the error signal that is the running average of e(t).
def fll_band_edge_cc(*args, **kwargs): return _digital_swig.fll_band_edge_cc(*args, **kwargs)
[ "def set_loop_bandwidth(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_set_loop_bandwidth(self, *args, **kwargs)", "def set_loop_bandwidth(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_loop_bandwidth(self, *args, **kwargs)", "def __init__(self, fft_length, cp_length, occupied_tones, snr, ks, carrier_map_bin, nc_filter, logging=False):\n\n\tgr.hier_block2.__init__(self, \"ofdm_receiver\",\n\t\t\t\tgr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature\n gr.io_signature2(2, 2, gr.sizeof_gr_complex*occupied_tones, gr.sizeof_char)) # Output signature\n\n bw = (float(occupied_tones) / float(fft_length)) / 2.0\n tb = bw*0.04\n print \"ofdm_receiver:__init__:occupied_tones %s fft_length %d \" % (occupied_tones, fft_length)\n \n chan_coeffs = filter.firdes.low_pass (1.0, # gain\n 1.0, # sampling rate\n bw+tb, # midpoint of trans. band\n tb, # width of trans. band\n filter.firdes.WIN_HAMMING) # filter type\n \n self.chan_filt = filter.fft_filter_ccc(1, chan_coeffs)\n\n # linklab, get ofdm parameters\n self._fft_length = fft_length\n self._occupied_tones = occupied_tones\n self._cp_length = cp_length\n self._nc_filter = nc_filter\n self._carrier_map_bin = carrier_map_bin\n \n win = [1 for i in range(fft_length)]\n \n # linklab, initialization function\n self.initialize(ks, self._carrier_map_bin)\n \n\n zeros_on_left = int(math.ceil((fft_length - occupied_tones)/2.0))\n ks0 = fft_length*[0,]\n ks0[zeros_on_left : zeros_on_left + occupied_tones] = ks[0]\n\n ks0 = np_fft.ifftshift(ks0)\n ks0time = np_fft.ifft(ks0)\n # ADD SCALING FACTOR\n ks0time = ks0time.tolist()\n\n SYNC = \"pn\"\n if SYNC == \"ml\":\n nco_sensitivity = -1.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_ml(fft_length,\n cp_length,\n snr,\n ks0time,\n logging)\n elif SYNC == \"pn\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pn(fft_length,\n cp_length,\n logging)\n elif SYNC == \"pnac\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pnac(fft_length,\n cp_length,\n ks0time,\n logging)\n # for testing only; do not user over the air\n # remove filter and filter delay for this\n elif SYNC == \"fixed\":\n self.chan_filt = gr.multiply_const_cc(1.0)\n nsymbols = 18 # enter the number of symbols per packet\n freq_offset = 0.0 # if you use a frequency offset, enter it here\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_fixed(fft_length,\n cp_length,\n nsymbols,\n freq_offset,\n logging)\n\n # Set up blocks\n\n # Create a delay line, linklab\n self.delay = blocks.delay(gr.sizeof_gr_complex, fft_length)\n\n self.nco = analog.frequency_modulator_fc(nco_sensitivity) # generate a signal proportional to frequency error of sync block\n self.sigmix = blocks.multiply_cc()\n self.sampler = gr_papyrus.ofdm_sampler(fft_length, fft_length+cp_length)\n self.fft_demod = gr_fft.fft_vcc(fft_length, True, win, True)\n self.ofdm_frame_acq = gr_papyrus.ofdm_frame_acquisition(occupied_tones,\n fft_length,\n cp_length, ks[0])\n # linklab, check current mode: non-contiguous OFDM or not\n if self._nc_filter:\n print '\\nMulti-band Filter Turned ON!'\n # linklab, non-contiguous filter\n self.ncofdm_filt = ncofdm_filt(self._fft_length, self._occupied_tones, self._carrier_map_bin)\n self.connect(self, self.chan_filt, self.ncofdm_filt)\n self.connect(self.ncofdm_filt, self.ofdm_sync) # into the synchronization alg.\n 
self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. offset output to derotate input signal\n self.connect(self.ncofdm_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n else :\n print '\\nMulti-band Filter Turned OFF!'\n self.connect(self, self.chan_filt)\n self.connect(self.chan_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. offset output to derotate input signal\n self.connect(self.chan_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n\n self.connect(self.sigmix, (self.sampler,0)) # sample off timing signal detected in sync alg\n self.connect((self.ofdm_sync,1), (self.sampler,1)) # timing signal to sample at\n\n self.connect((self.sampler,0), self.fft_demod) # send derotated sampled signal to FFT\n self.connect(self.fft_demod, (self.ofdm_frame_acq,0)) # find frame start and equalize signal\n self.connect((self.sampler,1), (self.ofdm_frame_acq,1)) # send timing signal to signal frame start\n self.connect((self.ofdm_frame_acq,0), (self,0)) # finished with fine/coarse freq correction,\n self.connect((self.ofdm_frame_acq,1), (self,1)) # frame and symbol timing, and equalization\n\n if logging:\n self.connect(self.chan_filt, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-chan_filt_c.dat\"))\n self.connect(self.fft_demod, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-fft_out_c.dat\"))\n self.connect(self.ofdm_frame_acq,\n gr.file_sink(gr.sizeof_gr_complex*occupied_tones, \"ofdm_receiver-frame_acq_c.dat\"))\n self.connect((self.ofdm_frame_acq,1), gr.file_sink(1, \"ofdm_receiver-found_corr_b.dat\"))\n self.connect(self.sampler, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-sampler_c.dat\"))\n self.connect(self.sigmix, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-sigmix_c.dat\"))\n self.connect(self.nco, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-nco_c.dat\"))", "def bandpass_ifft(t, flux, low_cutoff, high_cutoff, sample=1, \n M=None, inv_box=False, gf_sig = 1, Filter='box', Plot=''): \n #perform fft\n spectrum = np.fft.rfft(flux) \n freq = np.fft.rfftfreq(len(flux), sample)\n freq_sort = np.sort(spectrum)\n \n #calculate the index of the cut off points\n lc = np.abs(freq) < Low_cutoff\n hc = np.abs(freq) > High_cutoff\n between = ~(lc + hc)\n \n ps = np.abs(spectrum)**2\n if ('PS' in Plot) or ('All' in Plot):\n plt.plot(freq, ps)\n plt.title(\"power spectrum\")\n plt.xlabel('Frequency (1/day)')\n plt.ylabel('Power Spectral Density')\n #plt.xlim(0,100)\n #plt.savefig('Figures/spec.png', bbox_inches='tight', pad_inches=0.5)\n plt.show()\n\n if ('DFT' in Plot) or ('All' in Plot):\n plt.plot(freq, spectrum)\n #plt.plot(freq[between], spectrum[between], alpha=0.5)\n plt.title(\"real fourier transform \")\n plt.xlabel('Frequency (1/day)')\n plt.ylabel('Amplitude')\n #plt.xlim(0,100)\n #plt.savefig('Figures/fft.png', bbox_inches='tight', pad_inches=0.5)\n plt.show()\n \n \n \n if Filter == 'box':\n \n #filtered_spectrum = spectrum.copy()\n \n if inv_box == True:\n x_1 = np.arange(0, Low_cutoff, 0.1)\n x_2 = np.arange(High_cutoff, np.max(freq), 0.1)\n plt.plot(freq, spectrum)\n plt.fill_between(x_1, [plt.ylim()[0]] * len(x_1), \n [plt.ylim()[1]] * len(x_1), color='r', alpha=0.3)\n plt.fill_between(x_2, [plt.ylim()[0]] * len(x_2), \n [plt.ylim()[1]] * len(x_2), color='r', alpha=0.3)\n plt.title(\"range to suppress\")\n plt.figure()\n filtered_spectrum[lc] = 0.\n filtered_spectrum[hc] = 0.\n else:\n x_ = 
np.arange(Low_cutoff, High_cutoff, 0.1)\n plt.plot(freq, spectrum)\n plt.fill_between(x_, [plt.ylim()[0]] * len(x_), \n [plt.ylim()[1]] * len(x_), color='r', alpha=0.3)\n plt.title(\"range to suppress\")\n plt.figure()\n filtered_spectrum[between] = 0.\n \n if Filter == 'Gaussian':\n ig = invgaussian(1, np.median([low_cutoff,high_cutoff]), gf_sig, freq)\n filtered_spectrum = spectrum * ig\n if ('filter' in Plot) or ('All' in Plot):\n plt.plot(freq, ig)\n plt.title('Gaussian Filter')\n #plt.savefig('Figures/gfilter.png')\n #plt.xlim(0,100)\n plt.figure()\n\n if ('spec_filtered' in Plot) or ('All' in Plot):\n plt.plot(freq, filtered_spectrum, label=\"filtered spectrum\")\n plt.plot(freq, spectrum, c='k', ls=\"--\", label=\"spectrum\", alpha=0.5)\n plt.title(\"Unfiltered vs. Filtered Spectrum\")\n plt.xlabel('Frequency (1/day)')\n plt.ylabel('Amplitude')\n ldg = plt.legend(fontsize=12)\n #plt.xlim(0,100)\n #plt.savefig('Figures/filter_compare.png', bbox_inches='tight', pad_inches=0.5)\n plt.figure()\n\n filtered_signal = np.fft.irfft(filtered_spectrum) # Construct filtered signal\n\n if ('signal_filtered' in Plot) or ('All' in Plot):\n fig = plt.figure(figsize=(15,10)) \n plt.plot(t, filtered_signal, label=\"filtered signal\")\n plt.plot(t, flux, c='k', ls=\"--\", label=\"original signal\", alpha=0.5)\n plt.xlabel('Time')\n plt.ylabel('Amplitude')\n plt.title(\"Unfiltered vs. Filtered Signal\")\n #plt.savefig('Figures/filtered_signal.png', bbox_inches='tight', pad_inches=0.5)\n plt.legend()\n #Filtered_signal = np.zeros_like(Filtered_signal)\n return spectrum, freq, filtered_spectrum, filtered_signal, Low_cutoff, High_cutoff", "def MBfilter_CF(st, frequencies,\n CN_HP, CN_LP,\n filter_norm, filter_npoles=2,\n var_w=True,\n CF_type='envelope', CF_decay_win=1.0,\n hos_order=4,\n rosenberger_decay_win=1.0,\n rosenberger_filter_power=1.0,\n rosenberger_filter_threshold=None,\n rosenberger_normalize_each=False,\n wave_type='P',\n hos_sigma=None,\n rec_memory=None,\n full_output=False):\n delta = st[0].stats.delta\n Tn = 1. 
/ frequencies\n Nb = len(frequencies)\n CF_decay_nsmps = CF_decay_win / delta\n rosenberger_decay_nsmps = rosenberger_decay_win / delta\n\n if hos_sigma is None:\n hos_sigma = -1.\n\n # Single component analysis\n if len(st) < 2:\n # Use just the first trace in stream\n tr = st[0]\n y = tr.data\n\n YN1 = np.zeros((Nb, len(y)), float)\n CF1 = np.zeros((Nb, len(y)), float)\n\n for n in range(Nb):\n if rec_memory is not None:\n rmem = rec_memory[(tr.id, wave_type)][n]\n else:\n rmem = None\n\n YN1[n] = recursive_filter(y, CN_HP[n], CN_LP[n],\n filter_npoles, rmem)\n YN1[n] /= filter_norm[n]\n\n if var_w and CF_type == 'envelope':\n CF_decay_nsmps_mb = (Tn[n]/delta) * CF_decay_nsmps\n else:\n CF_decay_nsmps_mb = CF_decay_nsmps\n\n # Define the decay constant\n CF_decay_constant = 1 / CF_decay_nsmps_mb\n\n # Calculates CF for each MBF signal\n if CF_type == 'envelope':\n CF1[n] = recursive_rms(YN1[n], CF_decay_constant, rmem)\n\n if CF_type == 'kurtosis':\n CF1[n] = recursive_hos(YN1[n], CF_decay_constant,\n hos_order, hos_sigma, rmem)\n\n # 2 (horizontal) components analysis\n elif len(st) == 2:\n # Assumes that 2 horizontal components are used\n tr1 = st.select(channel='*[E,W,1]')[0]\n tr2 = st.select(channel='*[N,S,2]')[0]\n\n y1 = tr1.data\n y2 = tr2.data\n\n # Initializing arrays\n YN_E = np.zeros((Nb, len(y1)), float)\n YN_N = np.zeros((Nb, len(y1)), float)\n YN1 = np.zeros((Nb, len(y1)), float)\n CF1 = np.zeros((Nb, len(y1)), float)\n\n for n in range(Nb):\n if rec_memory is not None:\n rmem1 = rec_memory[(tr1.id, wave_type)][n]\n rmem2 = rec_memory[(tr2.id, wave_type)][n]\n else:\n rmem1 = None\n rmem2 = None\n\n YN_E[n] = recursive_filter(y1, CN_HP[n], CN_LP[n],\n filter_npoles, rmem1)\n YN_E[n] /= filter_norm[n]\n YN_N[n] = recursive_filter(y2, CN_HP[n], CN_LP[n],\n filter_npoles, rmem2)\n YN_N[n] /= filter_norm[n]\n # Combining horizontal components\n YN1[n] = np.sqrt(np.power(YN_E[n], 2) + np.power(YN_N[n], 2))\n\n if var_w and CF_type == 'envelope':\n CF_decay_nsmps_mb = (Tn[n] / delta) * CF_decay_nsmps\n else:\n CF_decay_nsmps_mb = CF_decay_nsmps\n\n # Define the decay constant\n CF_decay_constant = 1 / CF_decay_nsmps_mb\n\n # Calculates CF for each MBF signal\n if CF_type == 'envelope':\n CF1[n] = recursive_rms(YN1[n], CF_decay_constant, rmem1)\n\n if CF_type == 'kurtosis':\n CF1[n] = recursive_hos(YN1[n], CF_decay_constant,\n hos_order, hos_sigma, rmem1)\n\n # 3 components analysis, includes polarization P and S decomposition\n else:\n # Vertical\n tr1 = st.select(channel='*[Z,U,D]')[0]\n # Horizontals\n tr2 = st.select(channel='*[E,W,1]')[0]\n tr3 = st.select(channel='*[N,S,2]')[0]\n\n y1 = tr1.data\n y2 = tr2.data\n y3 = tr3.data\n\n # Initializing arrays\n YN1 = np.zeros((Nb, len(y1)), float)\n YN2 = np.zeros((Nb, len(y1)), float)\n YN3 = np.zeros((Nb, len(y1)), float)\n CF1 = np.zeros((Nb, len(y1)), float)\n filteredDataP = np.zeros((Nb, len(y1)), float)\n filteredDataS = np.zeros((Nb, len(y1)), float)\n if full_output:\n CF2 = np.zeros((Nb, len(y1)), float)\n\n for n in range(Nb):\n if rec_memory is not None:\n rmem1 = rec_memory[(tr1.id, wave_type)][n]\n rmem2 = rec_memory[(tr2.id, wave_type)][n]\n rmem3 = rec_memory[(tr3.id, wave_type)][n]\n else:\n rmem1 = None\n rmem2 = None\n rmem3 = None\n\n YN1[n] = recursive_filter(y1, CN_HP[n], CN_LP[n],\n filter_npoles, rmem1)\n YN1[n] /= filter_norm[n]\n YN2[n] = recursive_filter(y2, CN_HP[n], CN_LP[n],\n filter_npoles, rmem2)\n YN2[n] /= filter_norm[n]\n YN3[n] = recursive_filter(y3, CN_HP[n], CN_LP[n],\n filter_npoles, rmem3)\n 
YN3[n] /= filter_norm[n]\n\n # Define the decay constant\n rosenberger_decay_constant = 1 / rosenberger_decay_nsmps\n\n # print('Rosenberger in process {}/{}\\r'.format(n+1, Nb),\n # sys.stdout.flush())\n\n # third value returned by rosenberger() is the polarizaion filter,\n # which we do not use here\n filt_dataP, filt_dataS, _ =\\\n rosenberger(YN2[n], YN3[n], YN1[n],\n rosenberger_decay_constant,\n pol_filter_power=rosenberger_filter_power,\n pol_filter_threshold=rosenberger_filter_threshold,\n normalize_each=rosenberger_normalize_each)\n\n # Use vertical component for P data\n filteredDataP[n] = filt_dataP[0, :]\n # Use vector composition of the two horizontal component for S data\n filteredDataS[n] = np.sqrt(np.power(filt_dataS[1, :], 2) +\n np.power(filt_dataS[2, :], 2))\n\n if var_w and CF_type == 'envelope':\n CF_decay_nsmps_mb = (Tn[n]/delta) * CF_decay_nsmps\n else:\n CF_decay_nsmps_mb = CF_decay_nsmps\n\n # Define the decay constant\n CF_decay_constant = 1 / CF_decay_nsmps_mb\n\n if CF_type == 'envelope':\n if wave_type == 'P':\n CF1[n] = recursive_rms(filteredDataP[n],\n CF_decay_constant, rmem1)\n if full_output:\n CF2[n] = recursive_rms(filteredDataS[n],\n CF_decay_constant, rmem2)\n else:\n CF1[n] = recursive_rms(filteredDataS[n],\n CF_decay_constant, rmem1)\n if full_output:\n CF2[n] = recursive_rms(filteredDataP[n],\n CF_decay_constant, rmem2)\n\n if CF_type == 'kurtosis':\n if wave_type == 'P':\n CF1[n] = recursive_hos(filteredDataP[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem1)\n if full_output:\n CF2[n] = recursive_hos(filteredDataS[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem2)\n else:\n CF1[n] = recursive_hos(filteredDataS[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem1)\n if full_output:\n CF2[n] = recursive_hos(filteredDataP[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem2)\n\n if full_output:\n return YN1, CF1, CF2, Tn, Nb, filteredDataP, filteredDataS\n else:\n return YN1, CF1, Tn, Nb", "def get_loop_bandwidth(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_loop_bandwidth(self)", "def coherence_over_band(a, b, fs, low=20*Hz, high=55*Hz, plot=True, mag_squared=True,\n overlap=0.5, window='hamming', len_seg=None, axis=0):\n a = a.reshape(a.shape[0],)\n b = b.reshape(b.shape[0],)\n if len_seg == None:\n noverlap = overlap * int(fs)\n freqs, coherency = signal.coherence(a, b, fs, window=window, nperseg=int(fs),\n noverlap=noverlap, axis=axis)\n else:\n noverlap = overlap * len_seg\n freqs, coherency = signal.coherence(a, b, fs, window=window, nperseg=len_seg,\n noverlap=noverlap, axis=axis)\n df = (freqs[1] - freqs[0])/Hz\n low, high = int(low), int(high)\n if low%df == 0 and high%df == 0:\n low_int_bound = int(low/df)\n high_int_bound = int(high/df) + 1\n if not mag_squared:\n coherency = map(lambda x: np.sqrt(x), coherency)\n coh_integral = np.trapz(coherency[low_int_bound:high_int_bound], dx=df, axis=0)\n coh_max = max(coherency[low_int_bound:high_int_bound])\n coh_integral, coh_max = coh_integral.item(), coh_max\n else:\n raise ValueError(\"The low and high bounds of freqband, \" + str(low) + \"-\" + str(high) + \"must be divisible by the frequency increment, \" + str(df) + \"for numpy.trapz to work.\")\n coh_integral, coh_max = 'undefined'\n print(\"----- Welch's method used to calculate coherence in the frequency band \" + str(low) + \" Hz to \" + str(high) + \" Hz -----\")\n print(\" Maximum coherence value = \" + str(coh_max))\n print(\" Integral over band = \" + str(coh_integral))\n if plot:\n plt.plot(freqs, 
np.sqrt(coherency))\n plt.show()\n return coh_integral, coh_max", "def fn_buildFilters(params, fs):\n bandPassRange = params.bpRanges\n params.filtType = 'bandpass'\n params.filterSignal = True\n \n # Handle different filter cases:\n # 1) low pass\n if params.bpRanges[0] == 0:\n # they only specified a top freqency cutoff, so we need a low pass\n # filter\n bandPassRange = params.bpRanges[1]\n params.filtType = 'low'\n if bandpassRange == fs/2:\n # they didn't specify any cutoffs, so we need no filter\n params.filterSignal = False\n \n # 2) High passs\n if params.bpRanges[1] == fs/2 and params.filterSignal:\n # they only specified a lower freqency cutoff, so we need a high pass\n # filter\n bandPassRange = params.bpRanges[0]\n params.filtType = 'high'\n \n if params.filterSignal:\n params.fB, params.fA = signal.butter(params.filterOrder, bandPassRange/(fs/2),btype=params.filtType)\n \n # filtTaps = length(fB)\n previousFs = fs\n \n params.fftSize = int(math.ceil(fs * params.frameLengthUs / 10**6))\n if params.fftSize % 2 == 1:\n params.fftSize = params.fftSize - 1 # Avoid odd length of fft\n\n params.fftWindow = signal.windows.hann(params.fftSize)\n\n lowSpecIdx = int(params.bpRanges[0]/fs*params.fftSize)\n highSpecIdx = int(params.bpRanges[1]/fs*params.fftSize)\n\n params.specRange = np.arange(lowSpecIdx, highSpecIdx+1)\n params.binWidth_Hz = fs / params.fftSize\n params.binWidth_kHz = params.binWidth_Hz / 1000\n params.freq_kHz = params.specRange*params.binWidth_kHz # calculate frequency axis\n return previousFs, params", "def compute_coherence(sig, fs=5000, fband=[1.0, 4.0], lag=0, lag_step=0,\n fft_win=1):\n\n if type(sig) != np.ndarray:\n raise TypeError(\"Signals have to be in numpy arrays!\")\n\n if lag == 0:\n lag_step = 1\n nstep_lag = int(lag * 2 / lag_step)\n\n fft_win = int(fft_win*fs)\n hz_bins = (fft_win/2)/(fs/2)\n fc1 = int(fband[0]*hz_bins)\n fc2 = int(fband[1]*hz_bins)\n\n sig1_w = sig[0]\n sig2_w = sig[1]\n\n sig1_wl = sig1_w[lag:len(sig1_w) - lag]\n\n coh_win = []\n for i in range(0, nstep_lag + 1):\n ind1 = i * lag_step\n ind2 = ind1 + len(sig1_wl)\n\n sig2_wl = sig2_w[ind1:ind2]\n\n f, coh = coherence(sig1_wl, sig2_wl, fs, nperseg=fft_win)\n coh_win.append(np.mean(coh[fc1:fc2]))\n\n return np.max(coh_win), coh_win.index(max(coh_win))", "def tf_agc(d, sr, t_scale=0.5, f_scale=1.0, causal_tracking=True, plot=False):\n\n hop_size = 0.032 # in seconds\n\n # Make STFT on ~32 ms grid\n ftlen = int(2 ** np.round(np.log(hop_size * sr) / np.log(2.)))\n winlen = ftlen\n hoplen = winlen / 2\n D = stft(d, winlen, hoplen) # using my code\n ftsr = sr / hoplen\n ndcols = D.shape[1]\n\n # Smooth in frequency on ~ mel resolution\n # Width of mel filters depends on how many you ask for,\n # so ask for fewer for larger f_scales\n nbands = max(10, 20 / f_scale) # 10 bands, or more for very fine f_scale\n mwidth = f_scale * nbands / 10 # will be 2.0 for small f_scale\n (f2a_tmp, _) = fft2melmx(ftlen, sr, int(nbands), mwidth)\n f2a = f2a_tmp[:, :ftlen / 2 + 1]\n audgram = np.dot(f2a, np.abs(D))\n\n if causal_tracking:\n # traditional attack/decay smoothing\n fbg = np.zeros(audgram.shape)\n # state = zeros(size(audgram,1),1);\n state = np.zeros(audgram.shape[0])\n alpha = np.exp(-(1. 
/ ftsr) / t_scale)\n for i in range(audgram.shape[1]):\n state = np.maximum(alpha * state, audgram[:, i])\n fbg[:, i] = state\n\n else:\n # noncausal, time-symmetric smoothing\n # Smooth in time with tapered window of duration ~ t_scale\n tsd = np.round(t_scale * ftsr) / 2\n htlen = 6 * tsd # Go out to 6 sigma\n twin = np.exp(-0.5 * (((np.arange(-htlen, htlen + 1)) / tsd) ** 2)).T\n\n # reflect ends to get smooth stuff\n AD = audgram\n x = np.hstack((np.fliplr(AD[:, :htlen]),\n AD,\n np.fliplr(AD[:, -htlen:]),\n np.zeros((AD.shape[0], htlen))))\n fbg = signal.lfilter(twin, 1, x, 1)\n\n # strip \"warm up\" points\n fbg = fbg[:, twin.size + np.arange(ndcols)]\n\n # map back to FFT grid, flatten bark loop gain\n sf2a = np.sum(f2a, 0)\n sf2a_fix = sf2a\n sf2a_fix[sf2a == 0] = 1.\n E = np.dot(np.dot(np.diag(1. / sf2a_fix), f2a.T), fbg)\n # Remove any zeros in E (shouldn't be any, but who knows?)\n E[E <= 0] = np.min(E[E > 0])\n\n # invert back to waveform\n y = istft(D / E, winlen, hoplen, window=np.ones(winlen)) # using my code\n\n if plot:\n try:\n import matplotlib.pyplot as plt\n plt.subplot(3, 1, 1)\n plt.imshow(20. * np.log10(np.flipud(np.abs(D))))\n plt.subplot(3, 1, 2)\n plt.imshow(20. * np.log10(np.flipud(np.abs(E))))\n A = stft(y, winlen, hoplen) # using my code\n plt.subplot(3, 1, 3)\n plt.imshow(20. * np.log10(np.flipud(np.abs(A))))\n plt.show()\n except Exception, e:\n print \"Failed to plot results\"\n print e\n\n return (y, D, E)", "def extract_cochlear_subbands(nets, SIGNAL_SIZE, SR, LOW_LIM, HIGH_LIM, N, SAMPLE_FACTOR, pad_factor, rFFT, custom_filts, erb_filter_kwargs, include_all_keys, compression_function):\n\n # make the erb filters tensor\n nets['filts_tensor'] = make_filts_tensor(SIGNAL_SIZE, SR, LOW_LIM, HIGH_LIM, N, SAMPLE_FACTOR, use_rFFT=rFFT, pad_factor=pad_factor, custom_filts=custom_filts, erb_filter_kwargs=erb_filter_kwargs)\n\n # make subbands by multiplying filts with fft of input\n nets['subbands'] = tf.multiply(nets['filts_tensor'],nets['fft_input'],name='mul_subbands')\n\n # make the time the keys in the graph if we are returning all keys (otherwise, only return the subbands in fourier domain)\n if include_all_keys:\n if not rFFT:\n nets['subbands_ifft'] = tf.real(tf.ifft(nets['subbands'],name='ifft_subbands'),name='ifft_subbands_r')\n else:\n nets['subbands_ifft'] = tf.spectral.irfft(nets['subbands'],name='ifft_subbands')\n nets['subbands_time'] = nets['subbands_ifft']\n\n return nets", "def vamsi_OFDMCP_ff_make(*args, **kwargs):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_make(*args, **kwargs)", "def opt_lf_num_bits(lf_params, min_bits, max_bits, rms_filt_error=0.1, noise_figure=1,\n sim_steps=1000, fpoints=512, mode=\"tdc\", sigma_ph=0.1):\n print(\"\\n********************************************************\")\n print(\"Optimizing loop filter digital direct form-I implementation for\")\n print(\"number of bits in fixed point data words utilized\")\n sign_bits = 1\n # fint number of integer bits needed\n int_bits = n_int_bits(lf_params)\n print(\"\\n* Integer bits = %d\"%int_bits)\n\n \"\"\" Optimization for quantization noise\n \"\"\"\n print(\"\\n* Optimizing for quantization noise:\")\n # find optimal number of bits for quantization noise\n lf_ideal = LoopFilterIIRPhase(ignore_clk=True, **lf_params)\n w = np.floor(np.random.normal(0, 0.1*lf_params[\"m\"], sim_steps))\n pow_ntdc_post_lf = var_ntdc_post_lf(lf_params, mode=mode) # variance of TDC noise at loop filter\n\n x_ideal = np.zeros(sim_steps)\n for n in range(sim_steps):\n x_ideal[n] = 
lf_ideal.update(w[n], 0)\n\n mses = []\n bit_range = range(min_bits-int_bits-1, max_bits-int_bits)\n for frac_bits in bit_range:\n # use a large number of int bits to avoid overflow. Tuning here is with frac bits as\n lf_quant = LoopFilterIIRPhase(ignore_clk=True, int_bits=32, frac_bits=frac_bits, quant_filt=False, **lf_params)\n x_quant = np.zeros(sim_steps)\n for n in range(sim_steps):\n x_quant[n] = lf_quant.update(w[n], 0)\n mse = np.var(x_ideal-x_quant)\n print(\"\\tN bits = %d\\tQuant noise power = %E LSB^2\"%(frac_bits+int_bits+sign_bits, mse))\n mses.append(mse)\n n = len(mses)-1\n threshold = (10**(noise_figure/10.0) - 1)*pow_ntdc_post_lf\n print(\"!&!&&!\", threshold, pow_ntdc_post_lf)\n while n>=0:\n if mses[n] > threshold:\n n = n+1 if n < len(mses) - 1 else len(mses) - 1\n break\n n -= 1\n opt_frac_bits_qn = bit_range[n]\n print(\"* Optimum int bits = %d, frac bits = %d, sign bits = 1, quant noise = %.3f LSB^2\"%(int_bits, opt_frac_bits_qn, mses[n]))\n\n \"\"\" Optimization for filter accuracy\n \"\"\"\n print(\"\\n* Optimizing for filter design accuracy:\")\n fmin = 1e2\n fclk = lf_params[\"fclk\"]\n\n a = [lf_params[\"a0\"], lf_params[\"a1\"]]\n b = [lf_params[\"b0\"], lf_params[\"b1\"], lf_params[\"b2\"]]\n f, h_ideal = scipy.signal.freqz(a, b, np.geomspace(fmin, fclk/2, fpoints), fs=fclk)\n s = 2j*np.pi*f\n l = (lf_params[\"m\"]/lf_params[\"n\"])*lf_params[\"kdco\"]*h_ideal/s\n g = l/(1+l)\n bit_range = range(min_bits-int_bits-1, max_bits-int_bits)\n mses = []\n for frac_bits in bit_range:\n _lf_params = quant_lf_params(lf_params, int_bits, frac_bits)\n a = [_lf_params[\"a0\"], _lf_params[\"a1\"]]\n b = [_lf_params[\"b0\"], _lf_params[\"b1\"], _lf_params[\"b2\"]]\n f, h = scipy.signal.freqz(a, b, np.geomspace(fmin, fclk/2, fpoints), fs=fclk)\n s = 2j*np.pi*f\n l = (_lf_params[\"m\"]/_lf_params[\"n\"])*_lf_params[\"kdco\"]*h/s\n g = l/(1+l)\n # w, h = scipy.signal.freqz(a, b, points)\n mses.append(np.var(20*np.log10(np.abs(h[1:]))-20*np.log10(np.abs(h_ideal[1:]))))\n print(\"\\tN bits = %d\\tMSE = %E dB^2\"%(frac_bits+int_bits+sign_bits, mses[-1]))\n n = len(mses)-1\n while n>=0:\n if mses[n] > rms_filt_error**2:\n n = n+1 if n < len(mses) - 1 else len(mses) - 1\n break\n n -= 1\n opt_frac_bits_filt_acc = bit_range[n]\n print(\"* Optimum int bits = %d, frac bits = %d, sign_bits=1, quant noise = %E LSB^2\"%(int_bits, opt_frac_bits_filt_acc, mses[n]))\n\n frac_bits = max(opt_frac_bits_qn, opt_frac_bits_filt_acc)\n print(\"\\n* Optimization complete:\")\n print(\"\\tInt bits = %d, frac bits = %d, sign bits = 1\"%(int_bits, frac_bits))\n print(\"\\tTotal number bits = %d\"%(int_bits+frac_bits+sign_bits))\n return int_bits, frac_bits", "def butter_demod(x, y, fp, cutoff, bl = 3, GP = 1, GS = 10):\n x = np.asarray(x)\n y = np.asarray(y)\n x = x - x[0]\n dt = x[1] - x[0]\n nyq = 0.5 * (1. / dt)\n if x.ndim < y.ndim:\n x = x.repeat(y.shape[1])\n x = x.reshape(y.shape)\n \n if y.dtype.kind != 'c':\n yc = y * np.exp(-1j * 2.* np.pi * fp * x)\n yn = butter_filter(x, yc, cutoff, Btype = 'low', bl = bl, GP = GP, GS = GS)\n amp = 2.0*np.sqrt(yn.real**2 + yn.imag**2)\n phase = np.arctan2(yn.imag, yn.real)\n recon = amp * np.cos(2. 
* np.pi * fp * x + phase)\n return amp, phase, recon\n else:\n ycp = y * np.exp(-1j * 2.* np.pi * fp * x)\n ycn = y * np.exp(1j * 2.* np.pi * fp * x)\n yp = butter_filter(x, ycp, cutoff, Btype = 'low', bl = bl, GP = GP, GS = GS)\n yn = butter_filter(x, ycn, cutoff, Btype = 'low', bl = bl, GP = GP, GS = GS)\n ampp = np.sqrt(yp.real**2 + yp.imag**2)\n ampn = np.sqrt(yn.real**2 + yn.imag**2)\n phasep = np.arctan2(yp.imag, yp.real)\n phasen = np.arctan2(yn.imag, yn.real)\n reconp = ampp * np.cos(2. * np.pi * fp * x + phasep)\n reconn = ampn * np.cos(2. * np.pi * fp * x + phasen)\n return ampp, ampn, phasep, phasen, reconp, reconn", "def get_loop_bandwidth(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_loop_bandwidth(self)", "def calc_bsfc(datadict, W=85., LD=25., Pe=100., rhofuel=750.):\n\n W *= lbs2N\n V = np.average(datadict[\"speed\"][\"values\"])*kts2ms\n totalfuel = datadict[\"totalfuel\"][\"values\"]\n fuel = (totalfuel[-1] - totalfuel[0])*ml2m3\n fuelrate = datadict[\"fuelflow\"][\"values\"]*min2sec\n time = datadict[\"timeelapsed\"][\"values\"]\n totaltime = time[-1] - time[0]\n fuelt = np.trapz(fuelrate, time, np.diff(time)[0])*ml2m3\n assert abs(fuel - fuelt) < 0.001, (\"Fuel volume doesn't match; \" +\n \"could be bad data\")\n\n bsfc = fuel*rhofuel/(W/LD*V)/totaltime\n bsfc *= sm2kgkWhr\n return bsfc", "def gyroLowPassFilter( bandwidth=None ):\n if bandwidth and bandwidth in [0,1,2,3,4,5,6,7]:\n i2c.writeto_mem(0x68, 0x1A, pack('b',\n (i2c.readfrom_mem(0x68, 0x1A, 1)[0] & ~7 ) | bandwidth\n ))\n return i2c.readfrom_mem(0x68, 0x1A, 1)[0] & 7", "def fir_filter(x, y, cutoff, win = 'blackman', ftype = 'low', ntaps = 1001, ax = 0, mode = 'same'):\n d = np.diff(x).mean(axis = ax)\n nyq = 1. / (2*d)\n\n # ideally you would pick a band and the pass/stop gain/loss and a function would give ntaps, etc\n # N, beta = kaiserord(ripple_db, width)\n # taps = firwin(ntaps, cutoff/nyq, window=('kaiser', beta))\n\n if ftype == 'band' or ftype == 'high':\n f = firwin(ntaps, cutoff/nyq, window = win, pass_zero = False)\n elif ftype == 'low':\n f = firwin(ntaps, cutoff/nyq, window = win)\n else:\n raise ValueError, \"Pick filter type as low, high or band.\"\n delay = 0.5 * (ntaps-1) / nyq\n #yn2 = lfilter(f, 1., y, axis = ax)\n #yn3 = filtfilt(f, [1.], y, axis = ax, padtype = None)\n f.resize((ntaps,) + tuple(np.int8(np.ones(y.ndim - 1))))\n if ax != 0 and ax != -1:\n f = np.rollaxis(f, 0, start = ax + 1)\n elif ax != 0 and ax == -1:\n f = np.rollaxis(f, 0, start = y.ndim)\n elif ax == 0:\n f = f\n else:\n raise ValueError, \"Pick your axis better.\"\n \n yn = sci_fftconvolve(y, f, mode = mode)\n return yn", "def butter_bandpass(lowcut, highcut, fs, order=5):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n return butter(order, [low, high], btype='band')", "def compCMBForeground(band,debug=False,verb=False):\n # MS - Because we treat the CIB as a grey-body, this code is\n # formally identical to that which computes the emission of the \n # telescope mirrors.\n # This is also due to the assumption that we consider that the \n # transmission factor due to the telescope mirrors is negligible\n # with respect to that due to the instrument, so we have applied\n # the same value to a light source on the telescope or beyond.\n # 02/06/2020 Danger of copy/paste. 
in the plot the emissivity was that\n # of the telescope..\n\n # First perform a input check\n if (checkBandForError(band) == False):\n # the band is not within the accepted band labels\n return\n \n # We will need the pixel size\n pixSizeBand = getPixSizeBand(over=useDef)\n # if over=True it will return the default pixel size, useDef is set in the\n # preamble.\n\n # Now define the function that we will need to integrate\n # first define the grid. We will use a single frequency grid (overkill)\n # we cover the range 30mic->500mic, corresponding to 600 GHz to 10 THz\n numEltsGrid = 10000\n waveGrid = np.linspace(6e11,1e13,num=numEltsGrid)\n # same but normalized by 10^11\n waveGridNorm = np.linspace(6.,100.,num=numEltsGrid)\n\n # Get the filter transmission for this grid\n filtBandpass = filterTrans(band,waveGrid,worf='f',debug=debug)\n \n # now let's build the function under the integral\n twoHNu3C2 = 1.47e-17 * waveGridNorm**3\n hNuKT = hoverk * (waveGrid / cmbTemp)\n \n # as math.exp only takes a scalar as an argument I need a loop here\n powSpecDens = []\n for i in range(numEltsGrid):\n # avoid divergence in computation\n if (hNuKT[i]<1e-3):\n powSpecDens.append(filtBandpass[i] * cmbEmissivity * twoHNu3C2[i] * (1./hNuKT[i]))\n elif (hNuKT[i]<100.):\n powSpecDens.append(filtBandpass[i] * cmbEmissivity * twoHNu3C2[i] * (1. / (math.exp(hNuKT[i])-1)))\n else:\n powSpecDens.append(filtBandpass[i] * cmbEmissivity * twoHNu3C2[i] * math.exp(-1*hNuKT[i]))\n \n # convert it as an array\n powSpecDens = np.asarray(powSpecDens)\n \n if (verb):\n idx = np.abs(waveGrid - 3e8/bandWave[band-1]).argmin()\n fg = powSpecDens[idx] / 1e-20\n print('Foreground at {0:6.2f} micron is {1:5.2g} MJy/sr'.format(bandWave[band-1]*1e6,fg))\n\n if (debug):\n plt.figure(figsize=(12,6))\n plt.xscale('log')\n plt.yscale('log')\n plt.grid(True,which='both')\n plt.plot(1e6*lightSpeed/waveGrid,powSpecDens)\n plt.xlabel('Wavelength in $\\mu$m')\n plt.ylabel('Brightness in W.m$^{-2}$.sr$^{-1}$.Hz$^{-1}$')\n \n # now integrate that function\n power = np.trapz(powSpecDens,x=waveGrid)\n # multiply by other instrumental terms in the formula that had no dependency on frequency\n power *= instTrans\n power *= etaColdStop\n power *= telEffArea\n pixSolidAngle = ((np.pi*pixSizeBand[band-1])/(3600.*180.))**2\n power *= pixSolidAngle\n \n return power" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_framer_sink_1_sptr __init__(self, p) -> digital_framer_sink_1_sptr
def __init__(self, *args): this = _digital_swig.new_digital_framer_sink_1_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, sink):\n\n self.sink = sink", "def __init__(self, *args):\n this = _digital_swig.new_digital_simple_framer_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, stream):\n self.send = stream.send", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n _ida_pro.channel_redir_t_swiginit(self, _ida_pro.new_channel_redir_t(*args))", "def __init__(self, *args):\n this = _digital_swig.new_digital_probe_density_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, src):\n self.src = src", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __deref__(self):\n return _wmbus_swig.wmbus_packet_sink_sptr___deref__(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
framer_sink_1(gr_msg_queue_sptr target_queue) -> digital_framer_sink_1_sptr Given a stream of bits and access_code flags, assemble packets.
def framer_sink_1(*args, **kwargs): return _digital_swig.framer_sink_1(*args, **kwargs)
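The record above pairs naturally with an access-code correlator, as in GNU Radio's packet utilities. A hedged flowgraph sketch, assuming the GNU Radio 3.6-era gr/digital APIs used elsewhere in this file; the access code and threshold here are placeholder values, not a real sync word:

```python
from gnuradio import gr, digital

ACCESS_CODE = '1010110011011101'   # placeholder sync pattern (assumed)
THRESHOLD = 0                      # bit errors tolerated when matching the code

tb = gr.top_block()
rcvd_pktq = gr.msg_queue()

# correlate_access_code_bb flags the bit stream where an access code is
# detected; framer_sink_1 uses those flags to assemble payloads onto the queue.
correlator = digital.correlate_access_code_bb(ACCESS_CODE, THRESHOLD)
framer = digital.framer_sink_1(rcvd_pktq)
tb.connect(correlator, framer)     # an unpacked-bit source would feed the correlator

# Downstream, packets are popped off the queue as messages:
#   msg = rcvd_pktq.delete_head()
#   payload = msg.to_string()
```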
[ "def __init__(self, gain=None, samp_rate=None, ppm=None, arfcn=None, capture_id=None, udp_ports=[], max_timeslot=0, store_capture=True, verbose=False, band=None, rec_length=None, test=False, args=\"\"):\n\n gr.top_block.__init__(self, \"Gr-gsm Capture\")\n\n ##################################################\n # Parameters\n ##################################################\n\n self.arfcn = arfcn\n for band in grgsm.arfcn.get_bands():\n if grgsm.arfcn.is_valid_arfcn(self.arfcn, band):\n self.fc = grgsm.arfcn.arfcn2downlink(arfcn, band)\n break\n\n self.gain = gain\n self.samp_rate = samp_rate\n self.ppm = ppm\n self.arfcn = arfcn\n self.band = band\n self.shiftoff = shiftoff = 400e3\n self.rec_length = rec_length\n self.store_capture = store_capture\n self.capture_id = capture_id\n self.udp_ports = udp_ports\n self.verbose = verbose\n\n ##################################################\n # Processing Blocks\n ##################################################\n\n self.rtlsdr_source = osmosdr.source( args=\"numchan=\" + str(1) + \" \" + \"\" )\n self.rtlsdr_source.set_sample_rate(samp_rate)\n self.rtlsdr_source.set_center_freq(self.fc - shiftoff, 0)\n self.rtlsdr_source.set_freq_corr(ppm, 0)\n self.rtlsdr_source.set_dc_offset_mode(2, 0)\n self.rtlsdr_source.set_iq_balance_mode(2, 0)\n self.rtlsdr_source.set_gain_mode(True, 0)\n self.rtlsdr_source.set_gain(gain, 0)\n self.rtlsdr_source.set_if_gain(20, 0)\n self.rtlsdr_source.set_bb_gain(20, 0)\n self.rtlsdr_source.set_antenna(\"\", 0)\n self.rtlsdr_source.set_bandwidth(250e3+abs(shiftoff), 0)\n self.blocks_rotator = blocks.rotator_cc(-2*pi*shiftoff/samp_rate)\n\n #RUn for the specified amount of seconds or indefenitely\n if self.rec_length is not None:\n self.blocks_head_0 = blocks.head(gr.sizeof_gr_complex, int(samp_rate*rec_length))\n\n self.gsm_receiver = grgsm.receiver(4, ([self.arfcn]), ([]))\n self.gsm_input = grgsm.gsm_input(\n ppm=0,\n osr=4,\n fc=self.fc,\n samp_rate_in=samp_rate,\n )\n self.gsm_clock_offset_control = grgsm.clock_offset_control(self.fc-shiftoff)\n\n #Control channel demapper for timeslot 0\n #self.gsm_bcch_ccch_demapper_0 = grgsm.universal_ctrl_chans_demapper(0, ([2,6,12,16,22,26,32,36,42,46]), ([1,2,2,2,2,2,2,2,2,2]))\n self.gsm_bcch_ccch_demapper_0 = grgsm.gsm_bcch_ccch_demapper(0)\n #For all other timeslots are assumed to contain sdcch8 logical channels, this demapping may be incorrect\n if max_timeslot >= 1 and max_timeslot <= 8:\n self.gsm_sdcch8_demappers = []\n for i in range(1,max_timeslot + 1):\n #self.gsm_sdcch8_demappers.append(grgsm.universal_ctrl_chans_demapper(i, ([0,4,8,12,16,20,24,28,32,36,40,44]), ([8,8,8,8,8,8,8,8,136,136,136,136])))\n self.gsm_sdcch8_demappers.append(grgsm.gsm_sdcch8_demapper(i))\n #Control channel decoder (extracts the packets), one for each timeslot\n self.gsm_control_channels_decoders = []\n for i in range(0,max_timeslot + 1):\n self.gsm_control_channels_decoders.append(grgsm.control_channels_decoder())\n# self.blocks_socket_pdu_0 = blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", \"4729\", 10000, False)# self.blocks_socket_pdu_0 = blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", \"4729\", 10000, False)\n\n #UDP client that sends all decoded C0T0 packets to the specified port on localhost if requested\n self.client_sockets = []\n self.server_sockets = []\n for udp_port in self.udp_ports:\n #The server is for testing only\n #WARNING remove the server if you want connect to a different one\n if test:\n self.server_sockets.append(blocks.socket_pdu(\"UDP_SERVER\", 
\"127.0.0.1\", str(udp_port), 10000))\n self.client_sockets.append(blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", str(udp_port), 10000))\n\n #Sinks to store the capture file if requested\n if self.store_capture:\n self.gsm_burst_file_sink = grgsm.burst_file_sink(str(self.capture_id) + \".burstfile\")\n self.blocks_file_sink = blocks.file_sink(gr.sizeof_gr_complex*1, str(self.capture_id) + \".cfile\", False)\n self.blocks_file_sink.set_unbuffered(False)\n\n #Printer for printing messages when verbose flag is True\n if self.verbose:\n self.gsm_message_printer = grgsm.message_printer(pmt.intern(\"\"), False)\n\n \"\"\"\n if self.verbose:\n self.gsm_bursts_printer_0 = grgsm.bursts_printer(pmt.intern(\"\"),\n False, False, False, False)\n \"\"\"\n ##################################################\n # Connections\n ##################################################\n\n if self.rec_length is not None: #if recording length is defined connect head block after the source\n self.connect((self.rtlsdr_source, 0), (self.blocks_head_0, 0))\n self.connect((self.blocks_head_0, 0), (self.blocks_rotator, 0))\n else:\n self.connect((self.rtlsdr_source, 0), (self.blocks_rotator, 0))\n\n #Connect the file sinks\n if self.store_capture:\n self.connect((self.blocks_rotator, 0), (self.blocks_file_sink, 0))\n self.msg_connect(self.gsm_receiver, \"C0\", self.gsm_burst_file_sink, \"in\")\n\n #Connect the GSM receiver\n self.connect((self.gsm_input, 0), (self.gsm_receiver, 0))\n self.connect((self.blocks_rotator, 0), (self.gsm_input, 0))\n self.msg_connect(self.gsm_clock_offset_control, \"ppm\", self.gsm_input, \"ppm_in\")\n self.msg_connect(self.gsm_receiver, \"measurements\", self.gsm_clock_offset_control, \"measurements\")\n\n #Connect the demapper and decoder for timeslot 0\n self.msg_connect((self.gsm_receiver, 'C0'), (self.gsm_bcch_ccch_demapper_0, 'bursts'))\n self.msg_connect((self.gsm_bcch_ccch_demapper_0, 'bursts'), (self.gsm_control_channels_decoders[0], 'bursts'))\n\n #Connect the demapper and decoders for the other timeslots\n for i in range(1,max_timeslot +1):\n self.msg_connect((self.gsm_receiver, 'C0'), (self.gsm_sdcch8_demappers[i-1], 'bursts'))\n self.msg_connect((self.gsm_sdcch8_demappers[i-1], 'bursts'), (self.gsm_control_channels_decoders[i], 'bursts'))\n\n\n #Connect the UDP clients if requested\n for client_socket in self.client_sockets:\n for i in range(0,max_timeslot + 1):\n self.msg_connect((self.gsm_control_channels_decoders[i], 'msgs'), (client_socket, 'pdus'))\n\n #Connect the printer is self.verbose is True\n if self.verbose:\n for i in range(0,max_timeslot + 1):\n self.msg_connect((self.gsm_control_channels_decoders[i], 'msgs'), (self.gsm_message_printer, 'msgs'))\n\n \"\"\"\n if self.verbose:\n self.msg_connect(self.gsm_receiver, \"C0\", self.gsm_bursts_printer_0, \"bursts\")\n \"\"\"", "def read_sample_binary_datagram0(stream, dgheader):\n\n # TODO: Find some test data with Mode = 0\n\n channel = read_short(stream) # Channel number\n mode = read_short(stream) # Datatype\n transducerdepth = read_float(stream) # [m]\n frequency = read_float(stream) # [Hz]\n transmitpower = read_float(stream) # [W]\n pulselength = read_float(stream) # [s]\n bandwidth = read_float(stream) # [Hz]\n sampleinterval = read_float(stream) # [s]\n soundvelocity = read_float(stream) # [m/s]\n absorptioncoefficient = read_float(stream) # [dB/m]\n heave = read_float(stream) # [m]\n txroll = read_float(stream) # [deg]\n txpitch = read_float(stream) # [deg]\n temperature = read_float(stream) # [C]\n 
read_short(stream) # spare\n read_short(stream) # spare\n rxroll = read_float(stream) # [Deg]\n rxpitch = read_float(stream) # [Deg]\n offset = read_long(stream) # First sample\n count = read_long(stream) # Number of samples\n power = read_shorts(stream, count) # Compressed format - See Remark 1!\n c = (10 * math.log10(2) / 256)\n powerdb = [x * c for x in power]\n angle = read_shorts(stream, count) # See Remark 2 below!\n c = 180 /128\n\n athwartship = [twos_complement(x & 0xff) * c for x in angle]\n alongship = [twos_complement((x & 0xff00) >>8) * c for x in angle]\n\n return SampleDatagram0(dgheader, channel, mode, transducerdepth,\n frequency, transmitpower, pulselength, bandwidth,\n sampleinterval, soundvelocity,\n absorptioncoefficient, heave, txroll, txpitch,\n temperature, rxroll, rxpitch, offset, count,\n power, powerdb, angle, alongship, athwartship)", "def decode(self):\n\n self.src_port = int(data_to_hex_str(self.message[0:2]), 16)\n self.dst_port = int(data_to_hex_str(self.message[2:4]), 16)\n self.sequence_num = int(data_to_hex_str(self.message[4:8]), 16)\n self.ack_num = int(data_to_hex_str(self.message[8:12]), 16)\n self.data_offset = int(data_to_hex_str(self.message[12])[0:3], 16) * 4\n\n #parse the flags: bit operation\n flags = ord(self.message[13])\n if ((flags & (1 << 5)) != 0):\n self.flag_urg = 1\n else:\n self.flag_urg = 0\n\n if ((flags & (1 << 4)) != 0):\n self.flag_ack = 1\n else:\n self.flag_ack = 0\n\n if ((flags & (1 << 3)) != 0):\n self.flag_psh = 1\n else:\n self.flag_psh = 0\n\n if ((flags & (1 << 2)) != 0):\n self.flag_rst = 1\n else:\n self.flag_rst = 0\n\n if ((flags & (1 << 1)) != 0):\n self.flag_syn = 1\n else:\n self.flag_syn = 0\n\n if ((flags & 1) != 0):\n self.flag_fin = 1\n else:\n self.flag_fin = 0\n\n self.window_size = int(data_to_hex_str(self.message[14 : 16]), 16)\n self.checksum = data_to_hex_str(self.message[16 : 18])\n self.urgent_pointer = data_to_hex_str(self.message[18 : 20])\n\n header_len = self.data_offset\n if (header_len > 20):\n self.opt_paddings = data_to_hex_str(self.message[20 : header_len])", "def __init__(self, *args):\n this = _digital_swig.new_digital_framer_sink_1_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def _setUDP2(self):\n\n stringPipeline = \"\"\"udpsrc uri=udp://localhost:5000 caps = \"application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96\" ! rtph264depay ! decodebin ! videoconvert ! 
queue name=convert\"\"\"\n # bin = Gst.parse_bin_from_description(stringPipeline, True)\n bin = Gst.parse_launch(stringPipeline)\n self._pipeline.add(bin)\n self._pipeline.add(self._gtksink)\n bin.link(self._gtksink)\n # videoconvert = self._pipeline.get_by_name(\"convert\")\n # self._pipeline.add(videoconvert)\n # videoconvert.link(self._gtksink)\n\n \"\"\"source = Gst.ElementFactory.make(\"souphttpsrc\", \"youtube-source\") # - fast, but singleton\n if not source:\n print('source error')\n return\n source.set_property(\"is-live\", True)\n source.set_property(\"location\", tst)\n\n decode = Gst.ElementFactory.make(\"decodebin3\", \"youtube-decode\")\n\n convert = Gst.ElementFactory.make(\"videoconvert\", \"youtube-convert\")\n sink = Gst.ElementFactory.make(\"autovideosink\", \"youtube-sink\")\n # sink = Gst.ElementFactory.make(\"fakesink\", \"youtube-sink\")\n\n def decodebin_pad_added(element, pad):\n string = pad.query_caps(None).to_string()\n print('Found stream: %s' % string)\n if string.startswith('video/x-raw'):\n print(\"LINKING!!!!!!!!!!!!!!!!!!!!!!!!!\")\n pad.link(sink.get_static_pad('sink'))\n # decode.link(convert)\n decode.connect(\"pad-added\", decodebin_pad_added)\n # scale = Gst.ElementFactory.make(\"videoscale\", \"youtube-scale\")\n\n # caps = Gst.Caps.from_string(\"video/x-raw, width=200,height=200\")\n # filter = Gst.ElementFactory.make(\"capsfilter\", \"filter\")\n # filter.set_property(\"caps\", caps)\n \n # src = self._pipeline.get_by_name(\"src\")\n # print('again')\n # nl = output.decode('ascii')#[:-1]\n # print(nl)\n # src.set_property('location', nl)\n self._pipeline.add(source)\n self._pipeline.add(decode)\n self._pipeline.add(convert)\n self._pipeline.add(sink)\n # self._pipeline.add(scale)\n # self._pipeline.add(filter)\n # self._pipeline.add(self._gtksink)\n source.link(decode)\n decode.link(convert)\n convert.link(sink)\n # scale.link(filter)\n # decode.link(self._gtksink)\n # filter.link(self._gtksink)\n \n #self._pipeline.add(self._gtksink)\n \n # Link the pipeline to the sink that will display the video.\n # self._bin.link(self._gtksink)\n \"\"\"", "def make(param_mode, debug):\n return _wmbus_swig.wmbus_packet_sink_make(param_mode, debug)", "def build_other(flags, body: Any = None) -> bytes:\n if flags == 0x00 or flags == 0x05:\n return DTXPayloadHeader.build({\n \"flags\": flags,\n \"aux_length\": 0,\n \"total_length\": 0,\n })\n elif flags == 0x03 or flags == 0x04:\n body = bplist.objc_encode(body)\n pheader = DTXPayloadHeader.build({\n \"flags\": flags,\n \"aux_length\": 0,\n \"total_length\": len(body)\n })\n return pheader + body\n else:\n raise MuxError(\"Unknown flags\", flags)", "def simple_gre_packet(\n pktlen=300,\n eth_dst=\"00:01:02:03:04:05\",\n eth_src=\"00:06:07:08:09:0a\",\n dl_vlan_enable=False,\n vlan_vid=0,\n vlan_pcp=0,\n dl_vlan_cfi=0,\n ip_src=\"192.168.0.1\",\n ip_dst=\"192.168.0.2\",\n ip_tos=0,\n ip_ecn=None,\n ip_dscp=None,\n ip_ttl=64,\n ip_id=0x0001,\n ip_flags=0x0,\n ip_ihl=None,\n ip_options=False,\n gre_chksum_present=0,\n gre_routing_present=0, # begin reserved0\n gre_key_present=0,\n gre_seqnum_present=0,\n gre_strict_route_source=0,\n gre_flags=0, # end reserved0\n gre_version=0,\n gre_offset=None, # reserved1\n gre_key=None,\n gre_sequence_number=None,\n inner_frame=None,\n):\n\n if MINSIZE > pktlen:\n pktlen = MINSIZE\n\n # proto (ethertype) is set by Scapy based on the payload\n gre_hdr = packet.GRE(\n chksum_present=gre_chksum_present,\n routing_present=gre_routing_present,\n key_present=gre_key_present,\n 
seqnum_present=gre_seqnum_present,\n strict_route_source=gre_strict_route_source,\n flags=gre_flags,\n version=gre_version,\n offset=gre_offset,\n key=gre_key,\n sequence_number=gre_sequence_number,\n )\n\n ip_tos = ip_make_tos(ip_tos, ip_ecn, ip_dscp)\n\n # Note Dot1Q.id is really CFI\n if dl_vlan_enable:\n pkt = (\n packet.Ether(dst=eth_dst, src=eth_src)\n / packet.Dot1Q(prio=vlan_pcp, id=dl_vlan_cfi, vlan=vlan_vid)\n / packet.IP(\n src=ip_src,\n dst=ip_dst,\n tos=ip_tos,\n ttl=ip_ttl,\n id=ip_id,\n flags=ip_flags,\n ihl=ip_ihl,\n )\n / gre_hdr\n )\n else:\n if not ip_options:\n pkt = (\n packet.Ether(dst=eth_dst, src=eth_src)\n / packet.IP(\n src=ip_src,\n dst=ip_dst,\n tos=ip_tos,\n ttl=ip_ttl,\n id=ip_id,\n flags=ip_flags,\n ihl=ip_ihl,\n )\n / gre_hdr\n )\n else:\n pkt = (\n packet.Ether(dst=eth_dst, src=eth_src)\n / packet.IP(\n src=ip_src,\n dst=ip_dst,\n tos=ip_tos,\n ttl=ip_ttl,\n id=ip_id,\n flags=ip_flags,\n ihl=ip_ihl,\n options=ip_options,\n )\n / gre_hdr\n )\n\n if inner_frame:\n pkt = pkt / inner_frame\n inner_frame_bytes = bytearray(bytes(inner_frame))\n if (inner_frame_bytes[0] & 0xF0) == 0x60:\n pkt[\"GRE\"].proto = 0x86DD\n else:\n pkt = pkt / packet.IP()\n pkt = pkt / (\"D\" * (pktlen - len(pkt)))\n\n return pkt", "def prepare_layer1():\n g.feed(rate['print'])\n g.write('G1 Z0.1')", "def wmbus_packet_sink_make(param_mode, debug):\n return _wmbus_swig.wmbus_packet_sink_make(param_mode, debug)", "def fast_forward(self,removed_instructions):\n for instruction in removed_instructions: \n for group in instruction[\"groups\"]: \n if group.get(\"transfer\"):\n fromLocs = []\n toLocs = []\n volumes = []\n changeSettings = []\n for transfer in group[\"transfer\"]:\n pp.pprint(transfer)\n fromLocs.append(transfer[\"from\"].pop(\"locName\"))\n toLocs.append(transfer[\"to\"].pop(\"locName\"))\n volumes.append(transfer.pop(\"volume\"))\n changeSettings.append(transfer)\n self.protocol.add_transfer_to_stream(fromLocs,toLocs,volumes,changeSettings) \n elif group.get(\"mix\"):\n mixLocs = []\n volumes = []\n changeSettings = []\n for mix in group[\"mix\"]:\n pp.pprint(mix)\n mixLocs.append(mix.pop(\"locName\"))\n volumes.append(mix.pop(\"volume\"))\n changeSettings.append(mix)\n self.protocol.add_mix_to_stream(mixLocs,volumes,changeSettings)\n elif group.get(\"run\"):\n # cycler\n name = group[\"run\"].pop(\"name\")\n changeSettings = group[\"run\"] \n self.protocol.add_cycler_group(name,changeSettings)\n if self.protocol.instruction_stream[\"cmds\"]:\n self.protocol.end_stream()", "def __init__(self, header_bytes: bytes) -> None:\n tcp_header_first_word = unpack('!HH', header_bytes[:4])\n self.source_port = tcp_header_first_word[0]\n self.destination_port = tcp_header_first_word[1]\n\n self.sequence_number = header_bytes[4:8]\n self.acknowledgement_number = header_bytes[8:12]\n\n tcp_header_fourth_word = unpack('!HH', header_bytes[12:16])\n self.data_offset = tcp_header_fourth_word[0] >> 12\n self.reserved = (tcp_header_fourth_word[0] >> 9) & 0x7\n\n self.ns = bool(tcp_header_fourth_word[0] & 0x100) # pylint:disable=invalid-name\n self.cwr = bool(tcp_header_fourth_word[0] & 0x80)\n self.ece = bool(tcp_header_fourth_word[0] & 0x40)\n self.urg = bool(tcp_header_fourth_word[0] & 0x20)\n self.ack = bool(tcp_header_fourth_word[0] & 0x10)\n self.psh = bool(tcp_header_fourth_word[0] & 0x8)\n self.rst = bool(tcp_header_fourth_word[0] & 0x4)\n self.syn = bool(tcp_header_fourth_word[0] & 0x2)\n self.fin = bool(tcp_header_fourth_word[0] & 0x1)\n\n self.window = tcp_header_fourth_word[1]\n\n 
tcp_header_fifth_word = unpack('!HH', header_bytes[16:20])\n self.checksum = tcp_header_fifth_word[0]\n self.urgent_pointer = tcp_header_fifth_word[1]\n\n self.options = None\n option_word_count = self.data_offset - 5\n if option_word_count:\n self.options = header_bytes[20:(20 + option_word_count * 4)]", "def pull(self):\n\n # For each packet in the pcap process the contents\n for item in self.input_stream:\n\n # Print out the timestamp in UTC\n print('Timestamp: %s' % item['timestamp'])\n\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n print('Ethernet Frame: %s --> %s (type: %d)' % \\\n (net_utils.mac_to_str(item['eth']['src']), net_utils.mac_to_str(item['eth']['dst']), item['eth']['type']))\n\n # Print out the Packet info\n packet_type = item['packet']['type']\n print('Packet: %s ' % packet_type, end='')\n packet = item['packet']\n if packet_type in ['IP', 'IP6']:\n print('%s --> %s (len:%d ttl:%d)' % (net_utils.inet_to_str(packet['src']), net_utils.inet_to_str(packet['dst']),\n packet['len'], packet['ttl']), end='')\n if packet_type == 'IP':\n print('-- Frag(df:%d mf:%d offset:%d)' % (packet['df'], packet['mf'], packet['offset']))\n else:\n print()\n else:\n print(str(packet))\n\n # Print out transport and application layers\n if item['transport']:\n transport_info = item['transport']\n print('Transport: %s ' % transport_info['type'], end='')\n for key, value in compat.iteritems(transport_info):\n if key != 'data':\n print(key+':'+repr(value), end=' ')\n\n # Give summary info about data\n data = transport_info['data']\n print('\\nData: %d bytes' % len(data), end='')\n if data:\n print('(%s...)' % repr(data)[:30])\n else:\n print()\n\n # Application data\n if item['application']:\n print('Application: %s' % item['application']['type'], end='')\n print(str(item['application']))\n\n # Is there domain info?\n if 'src_domain' in packet:\n print('Domains: %s --> %s' % (packet['src_domain'], packet['dst_domain']))\n\n # Tags\n if 'tags' in item:\n print(list(item['tags']))\n print()", "def DD_idelay(self,channel,tap0,tap1):\n\n concattaps = tap0 | (tap1 << 5)\n self.DDs[channel] = concattaps\n allconcat0 = self.DDs[4] | (self.DDs[0] << 10) | (self.DDs[1] << 20)\n allconcat1 = self.DDs[2] | (self.DDs[3] << 10) | (self.DDs[5] << 20)\n self.DELAY_TAPS.write(0x0,allconcat0)\n self.DELAY_TAPS.write(0x8, allconcat1)\n # if(channel <=2):\n # dp0 = self.DELAY_TAPS.read(0x0) | (0b1111111111 << (channel*10))\n # dp1 = dp0 & (concattaps << (channel*10))\n # self.DELAY_TAPS.write(0x0,dp1)\n # else:\n # dp0 = self.DELAY_TAPS.read(0x8) | (0b1111111111 << ((channel-3) * 10))\n # dp1 = dp0 & (concattaps << ((channel-3) * 10))\n # self.DELAY_TAPS.write(0x8, dp1)\n plog.info(\"Setting input delay on channel \"+str(channel)+\" dline taps T0:\"+str(tap0)+\" T1:\"+str(tap1))\n self.IDELAY_DEBUG.write(0x8,0b1)\n sleep(0.1)\n self.IDELAY_DEBUG.write(0x8,0b0)\n # plog.debug(\"DP0: \"+bin(dp0))\n # plog.debug(\"DP1: \" + bin(dp1))\n self.IDELAY_DEBUG.write(0x0,0x1)\n sleep(0.05)\n plog.debug(\"OBS0: \"+bin(self.IDELAY_DEBUG.read(0x0)))", "def _emit_queued_packets(self):\n\n # If we don't have any conglomerated packets, there's nothing to do!\n if not self._packets:\n return\n\n\n # Otherwise, create a new collection wrapping all of our captured SOFs.\n fields_to_copy = self._packets[0].__dict__.copy()\n #fields_to_copy['subordinate_packets'] = self._packets\n self.emit_packet(USBStartOfFrameCollection(**fields_to_copy))\n\n # And start a new collection of queued packets.\n self._packets.clear()", "def 
graph(packets, layer=1):\n sources = {}\n destinations = {}\n \n # srcname = pcs.source(packets[0])\n # dstname = pcs.destination(packets[0])\n srcname = \"src\"\n dstname = \"dst\"\n index = 0\n prevsrc = \"\"\n prevdst = \"\"\n graph = Dot(simplify=True, prog = 'dot', type='digraph')\n graph.add_node(Node(\"Start\"))\n first = True\n for packet in packets:\n layer_index = layer\n while layer_index > 1:\n packet = packet.data\n if packet == None:\n break\n layer_index -= 1\n if packet == None:\n continue\n if not hasattr(packet, srcname):\n print \"cannot determine packet source, skipping\"\n continue\n if not hasattr(packet, dstname):\n print \"cannot determine packet destination, skipping\"\n continue\n \n src = packet.pretty(srcname)\n dst = packet.pretty(dstname)\n \n src = src.replace(\":\", \"&#58;\")\n dst = dst.replace(\":\", \"&#58;\")\n\n snode = repr(index) + \" \" + src\n dnode = repr(index) + \" \" + dst\n\n subgraph = Subgraph(repr(index))\n subgraph.rank = \"same\"\n edge = Edge(snode, dnode)\n edge.label = packet.println()\n subgraph.add_edge(edge)\n \n graph.add_subgraph(subgraph)\n\n if first == True:\n graph.add_edge(Edge(\"Start\", snode))\n graph.add_edge(Edge(\"Start\", dnode))\n first = False\n \n if prevsrc != \"\":\n graph.add_edge(Edge(prevsrc, snode))\n if prevdst != \"\":\n graph.add_edge(Edge(prevdst, dnode))\n \n prevsrc = snode\n prevdst = dnode\n\n index += 1\n\n# for subgraph in sources.values():\n# graph.add_subgraph(subgraph)\n# for subgraph in destinations.values():\n# graph.add_subgraph(subgraph)\n\n graph.write_gif(\"graph.gif\", prog=\"dot\")\n graph.write_raw(\"graph.dot\", prog=\"dot\")", "def packageData(self, badThingsQueue, stateQueue, pipe):\n def package(state):\n \"\"\"Helper function that packages the current state.\n\n Parses through the state dictionary in key value pairs, creates a new message in the proto\n for each sensor, and adds corresponding data to each field. Currently only supports a single limit_switch\n switch as the rest of the state is just test fields.\n \"\"\"\n try:\n proto_message = runtime_pb2.RuntimeData()\n for devID, devVal in state.items():\n if (devID == 'studentCodeState'):\n proto_message.robot_state = devVal[0] #check if we are dealing with sensor data or student code state\n elif devID == 'limit_switch':\n test_sensor = proto_message.sensor_data.add() \n test_sensor.device_name = devID\n test_sensor.device_type = devVal[0][0]\n test_sensor.value = devVal[0][1]\n test_sensor.uid = devVal[0][2]\n return proto_message.SerializeToString() \n except Exception as e:\n badThingsQueue.put(BadThing(sys.exc_info(),\n \"UDP packager thread has crashed with error:\" + str(e),\n event = BAD_EVENTS.UDP_SEND_ERROR,\n printStackTrace = True))\n while True:\n try:\n nextCall = time.time()\n stateQueue.put([SM_COMMANDS.SEND_ANSIBLE, []])\n rawState = pipe.recv()\n packState = package(rawState)\n self.sendBuffer.replace(packState)\n nextCall += 1.0/self.packagerHZ\n time.sleep(max(nextCall - time.time(), 0))\n except Exception as e:\n badThingsQueue.put(BadThing(sys.exc_info(), \n \"UDP packager thread has crashed with error:\" + str(e), \n event = BAD_EVENTS.UDP_SEND_ERROR, \n printStackTrace = True))", "def message_sink(itemsize, num_symbol, msgq, dont_block):\n return _raw_util.message_sink(itemsize, num_symbol, msgq, dont_block)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_glfsr_source_b_sptr __init__(self, p) > digital_glfsr_source_b_sptr
def __init__(self, *args): this = _digital_swig.new_digital_glfsr_source_b_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_f_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, gain=None, samp_rate=None, ppm=None, arfcn=None, capture_id=None, udp_ports=[], max_timeslot=0, store_capture=True, verbose=False, band=None, rec_length=None, test=False, args=\"\"):\n\n gr.top_block.__init__(self, \"Gr-gsm Capture\")\n\n ##################################################\n # Parameters\n ##################################################\n\n self.arfcn = arfcn\n for band in grgsm.arfcn.get_bands():\n if grgsm.arfcn.is_valid_arfcn(self.arfcn, band):\n self.fc = grgsm.arfcn.arfcn2downlink(arfcn, band)\n break\n\n self.gain = gain\n self.samp_rate = samp_rate\n self.ppm = ppm\n self.arfcn = arfcn\n self.band = band\n self.shiftoff = shiftoff = 400e3\n self.rec_length = rec_length\n self.store_capture = store_capture\n self.capture_id = capture_id\n self.udp_ports = udp_ports\n self.verbose = verbose\n\n ##################################################\n # Processing Blocks\n ##################################################\n\n self.rtlsdr_source = osmosdr.source( args=\"numchan=\" + str(1) + \" \" + \"\" )\n self.rtlsdr_source.set_sample_rate(samp_rate)\n self.rtlsdr_source.set_center_freq(self.fc - shiftoff, 0)\n self.rtlsdr_source.set_freq_corr(ppm, 0)\n self.rtlsdr_source.set_dc_offset_mode(2, 0)\n self.rtlsdr_source.set_iq_balance_mode(2, 0)\n self.rtlsdr_source.set_gain_mode(True, 0)\n self.rtlsdr_source.set_gain(gain, 0)\n self.rtlsdr_source.set_if_gain(20, 0)\n self.rtlsdr_source.set_bb_gain(20, 0)\n self.rtlsdr_source.set_antenna(\"\", 0)\n self.rtlsdr_source.set_bandwidth(250e3+abs(shiftoff), 0)\n self.blocks_rotator = blocks.rotator_cc(-2*pi*shiftoff/samp_rate)\n\n #RUn for the specified amount of seconds or indefenitely\n if self.rec_length is not None:\n self.blocks_head_0 = blocks.head(gr.sizeof_gr_complex, int(samp_rate*rec_length))\n\n self.gsm_receiver = grgsm.receiver(4, ([self.arfcn]), ([]))\n self.gsm_input = grgsm.gsm_input(\n ppm=0,\n osr=4,\n fc=self.fc,\n samp_rate_in=samp_rate,\n )\n self.gsm_clock_offset_control = grgsm.clock_offset_control(self.fc-shiftoff)\n\n #Control channel demapper for timeslot 0\n #self.gsm_bcch_ccch_demapper_0 = grgsm.universal_ctrl_chans_demapper(0, ([2,6,12,16,22,26,32,36,42,46]), ([1,2,2,2,2,2,2,2,2,2]))\n self.gsm_bcch_ccch_demapper_0 = grgsm.gsm_bcch_ccch_demapper(0)\n #For all other timeslots are assumed to contain sdcch8 logical channels, this demapping may be incorrect\n if max_timeslot >= 1 and max_timeslot <= 8:\n self.gsm_sdcch8_demappers = []\n for i in range(1,max_timeslot + 1):\n #self.gsm_sdcch8_demappers.append(grgsm.universal_ctrl_chans_demapper(i, ([0,4,8,12,16,20,24,28,32,36,40,44]), ([8,8,8,8,8,8,8,8,136,136,136,136])))\n self.gsm_sdcch8_demappers.append(grgsm.gsm_sdcch8_demapper(i))\n #Control channel decoder (extracts the packets), one for 
each timeslot\n self.gsm_control_channels_decoders = []\n for i in range(0,max_timeslot + 1):\n self.gsm_control_channels_decoders.append(grgsm.control_channels_decoder())\n# self.blocks_socket_pdu_0 = blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", \"4729\", 10000, False)# self.blocks_socket_pdu_0 = blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", \"4729\", 10000, False)\n\n #UDP client that sends all decoded C0T0 packets to the specified port on localhost if requested\n self.client_sockets = []\n self.server_sockets = []\n for udp_port in self.udp_ports:\n #The server is for testing only\n #WARNING remove the server if you want connect to a different one\n if test:\n self.server_sockets.append(blocks.socket_pdu(\"UDP_SERVER\", \"127.0.0.1\", str(udp_port), 10000))\n self.client_sockets.append(blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", str(udp_port), 10000))\n\n #Sinks to store the capture file if requested\n if self.store_capture:\n self.gsm_burst_file_sink = grgsm.burst_file_sink(str(self.capture_id) + \".burstfile\")\n self.blocks_file_sink = blocks.file_sink(gr.sizeof_gr_complex*1, str(self.capture_id) + \".cfile\", False)\n self.blocks_file_sink.set_unbuffered(False)\n\n #Printer for printing messages when verbose flag is True\n if self.verbose:\n self.gsm_message_printer = grgsm.message_printer(pmt.intern(\"\"), False)\n\n \"\"\"\n if self.verbose:\n self.gsm_bursts_printer_0 = grgsm.bursts_printer(pmt.intern(\"\"),\n False, False, False, False)\n \"\"\"\n ##################################################\n # Connections\n ##################################################\n\n if self.rec_length is not None: #if recording length is defined connect head block after the source\n self.connect((self.rtlsdr_source, 0), (self.blocks_head_0, 0))\n self.connect((self.blocks_head_0, 0), (self.blocks_rotator, 0))\n else:\n self.connect((self.rtlsdr_source, 0), (self.blocks_rotator, 0))\n\n #Connect the file sinks\n if self.store_capture:\n self.connect((self.blocks_rotator, 0), (self.blocks_file_sink, 0))\n self.msg_connect(self.gsm_receiver, \"C0\", self.gsm_burst_file_sink, \"in\")\n\n #Connect the GSM receiver\n self.connect((self.gsm_input, 0), (self.gsm_receiver, 0))\n self.connect((self.blocks_rotator, 0), (self.gsm_input, 0))\n self.msg_connect(self.gsm_clock_offset_control, \"ppm\", self.gsm_input, \"ppm_in\")\n self.msg_connect(self.gsm_receiver, \"measurements\", self.gsm_clock_offset_control, \"measurements\")\n\n #Connect the demapper and decoder for timeslot 0\n self.msg_connect((self.gsm_receiver, 'C0'), (self.gsm_bcch_ccch_demapper_0, 'bursts'))\n self.msg_connect((self.gsm_bcch_ccch_demapper_0, 'bursts'), (self.gsm_control_channels_decoders[0], 'bursts'))\n\n #Connect the demapper and decoders for the other timeslots\n for i in range(1,max_timeslot +1):\n self.msg_connect((self.gsm_receiver, 'C0'), (self.gsm_sdcch8_demappers[i-1], 'bursts'))\n self.msg_connect((self.gsm_sdcch8_demappers[i-1], 'bursts'), (self.gsm_control_channels_decoders[i], 'bursts'))\n\n\n #Connect the UDP clients if requested\n for client_socket in self.client_sockets:\n for i in range(0,max_timeslot + 1):\n self.msg_connect((self.gsm_control_channels_decoders[i], 'msgs'), (client_socket, 'pdus'))\n\n #Connect the printer is self.verbose is True\n if self.verbose:\n for i in range(0,max_timeslot + 1):\n self.msg_connect((self.gsm_control_channels_decoders[i], 'msgs'), (self.gsm_message_printer, 'msgs'))\n\n \"\"\"\n if self.verbose:\n self.msg_connect(self.gsm_receiver, \"C0\", 
self.gsm_bursts_printer_0, \"bursts\")\n \"\"\"", "def __init__(self, *args):\n this = _digital_swig.new_digital_framer_sink_1_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, src):\n self.src = src", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_probe_density_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def glfsr_source_b(*args, **kwargs):\n return _digital_swig.glfsr_source_b(*args, **kwargs)", "def __init__(self, a, b):\n self.a = make_generator(a)\n self.b = make_generator(b)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
glfsr_source_b(int degree, bool repeat = True, int mask = 0, int seed = 1) > digital_glfsr_source_b_sptr Galois LFSR pseudorandom source.
def glfsr_source_b(*args, **kwargs): return _digital_swig.glfsr_source_b(*args, **kwargs)
[ "def glfsr_source_f(*args, **kwargs):\n return _digital_swig.glfsr_source_f(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_f_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def bpsk(input_bits, noise):\n modulator = Modulator()\n demodulator = Demodulator()\n channel = Channel()\n signal = modulator.make_bpsk_mod(input_bits)\n\n signal = channel.send_signal(signal, noise)\n\n result_bits = demodulator.make_bpsk_demod(signal, channel)\n return result_bits", "def gslb(self) :\n try :\n return self._gslb\n except Exception as e:\n raise e", "def LRST_RX_B(self, value):\n if value not in [0, 1]:\n raise ValueError(\"Value must be [0,1]\")\n self._writeReg('CHIPCFG', 'LRST_RX_B', value)", "def cifar100_rnn_gate_110(pretrained=False, **kwargs):\n model = ResNetRecurrentGateSP(BasicBlock, [18, 18, 18], num_classes=100, **kwargs)\n return model", "def cifar100_rnn_gate_74(pretrained=False, **kwargs):\n model = ResNetRecurrentGateSP(BasicBlock, [12, 12, 12], num_classes=100, **kwargs)\n return model", "def LRST_RX_B(self):\n return self._readReg('CHIPCFG', 'LRST_RX_B')", "def cifar100_rnn_gate_38(pretrained=False, **kwargs):\n model = ResNetRecurrentGateSP(BasicBlock, [6, 6, 6], num_classes=100, **kwargs)\n return model", "def scrambler_bb(*args, **kwargs):\n return _digital_swig.scrambler_bb(*args, **kwargs)", "def LRST_TX_B(self, value):\n if value not in [0, 1]:\n raise ValueError(\"Value must be [0,1]\")\n self._writeReg('CHIPCFG', 'LRST_TX_B', value)", "def __init__(self, gain=None, samp_rate=None, ppm=None, arfcn=None, capture_id=None, udp_ports=[], max_timeslot=0, store_capture=True, verbose=False, band=None, rec_length=None, test=False, args=\"\"):\n\n gr.top_block.__init__(self, \"Gr-gsm Capture\")\n\n ##################################################\n # Parameters\n ##################################################\n\n self.arfcn = arfcn\n for band in grgsm.arfcn.get_bands():\n if grgsm.arfcn.is_valid_arfcn(self.arfcn, band):\n self.fc = grgsm.arfcn.arfcn2downlink(arfcn, band)\n break\n\n self.gain = gain\n self.samp_rate = samp_rate\n self.ppm = ppm\n self.arfcn = arfcn\n self.band = band\n self.shiftoff = shiftoff = 400e3\n self.rec_length = rec_length\n self.store_capture = store_capture\n self.capture_id = capture_id\n self.udp_ports = udp_ports\n self.verbose = verbose\n\n ##################################################\n # Processing Blocks\n ##################################################\n\n self.rtlsdr_source = osmosdr.source( args=\"numchan=\" + str(1) + \" \" + \"\" )\n self.rtlsdr_source.set_sample_rate(samp_rate)\n self.rtlsdr_source.set_center_freq(self.fc - shiftoff, 0)\n self.rtlsdr_source.set_freq_corr(ppm, 0)\n self.rtlsdr_source.set_dc_offset_mode(2, 0)\n self.rtlsdr_source.set_iq_balance_mode(2, 0)\n self.rtlsdr_source.set_gain_mode(True, 0)\n self.rtlsdr_source.set_gain(gain, 0)\n self.rtlsdr_source.set_if_gain(20, 0)\n self.rtlsdr_source.set_bb_gain(20, 0)\n self.rtlsdr_source.set_antenna(\"\", 0)\n self.rtlsdr_source.set_bandwidth(250e3+abs(shiftoff), 0)\n self.blocks_rotator = blocks.rotator_cc(-2*pi*shiftoff/samp_rate)\n\n #RUn for the specified amount of seconds or indefenitely\n if self.rec_length is not None:\n self.blocks_head_0 = blocks.head(gr.sizeof_gr_complex, int(samp_rate*rec_length))\n\n self.gsm_receiver = grgsm.receiver(4, 
([self.arfcn]), ([]))\n self.gsm_input = grgsm.gsm_input(\n ppm=0,\n osr=4,\n fc=self.fc,\n samp_rate_in=samp_rate,\n )\n self.gsm_clock_offset_control = grgsm.clock_offset_control(self.fc-shiftoff)\n\n #Control channel demapper for timeslot 0\n #self.gsm_bcch_ccch_demapper_0 = grgsm.universal_ctrl_chans_demapper(0, ([2,6,12,16,22,26,32,36,42,46]), ([1,2,2,2,2,2,2,2,2,2]))\n self.gsm_bcch_ccch_demapper_0 = grgsm.gsm_bcch_ccch_demapper(0)\n #For all other timeslots are assumed to contain sdcch8 logical channels, this demapping may be incorrect\n if max_timeslot >= 1 and max_timeslot <= 8:\n self.gsm_sdcch8_demappers = []\n for i in range(1,max_timeslot + 1):\n #self.gsm_sdcch8_demappers.append(grgsm.universal_ctrl_chans_demapper(i, ([0,4,8,12,16,20,24,28,32,36,40,44]), ([8,8,8,8,8,8,8,8,136,136,136,136])))\n self.gsm_sdcch8_demappers.append(grgsm.gsm_sdcch8_demapper(i))\n #Control channel decoder (extracts the packets), one for each timeslot\n self.gsm_control_channels_decoders = []\n for i in range(0,max_timeslot + 1):\n self.gsm_control_channels_decoders.append(grgsm.control_channels_decoder())\n# self.blocks_socket_pdu_0 = blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", \"4729\", 10000, False)# self.blocks_socket_pdu_0 = blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", \"4729\", 10000, False)\n\n #UDP client that sends all decoded C0T0 packets to the specified port on localhost if requested\n self.client_sockets = []\n self.server_sockets = []\n for udp_port in self.udp_ports:\n #The server is for testing only\n #WARNING remove the server if you want connect to a different one\n if test:\n self.server_sockets.append(blocks.socket_pdu(\"UDP_SERVER\", \"127.0.0.1\", str(udp_port), 10000))\n self.client_sockets.append(blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", str(udp_port), 10000))\n\n #Sinks to store the capture file if requested\n if self.store_capture:\n self.gsm_burst_file_sink = grgsm.burst_file_sink(str(self.capture_id) + \".burstfile\")\n self.blocks_file_sink = blocks.file_sink(gr.sizeof_gr_complex*1, str(self.capture_id) + \".cfile\", False)\n self.blocks_file_sink.set_unbuffered(False)\n\n #Printer for printing messages when verbose flag is True\n if self.verbose:\n self.gsm_message_printer = grgsm.message_printer(pmt.intern(\"\"), False)\n\n \"\"\"\n if self.verbose:\n self.gsm_bursts_printer_0 = grgsm.bursts_printer(pmt.intern(\"\"),\n False, False, False, False)\n \"\"\"\n ##################################################\n # Connections\n ##################################################\n\n if self.rec_length is not None: #if recording length is defined connect head block after the source\n self.connect((self.rtlsdr_source, 0), (self.blocks_head_0, 0))\n self.connect((self.blocks_head_0, 0), (self.blocks_rotator, 0))\n else:\n self.connect((self.rtlsdr_source, 0), (self.blocks_rotator, 0))\n\n #Connect the file sinks\n if self.store_capture:\n self.connect((self.blocks_rotator, 0), (self.blocks_file_sink, 0))\n self.msg_connect(self.gsm_receiver, \"C0\", self.gsm_burst_file_sink, \"in\")\n\n #Connect the GSM receiver\n self.connect((self.gsm_input, 0), (self.gsm_receiver, 0))\n self.connect((self.blocks_rotator, 0), (self.gsm_input, 0))\n self.msg_connect(self.gsm_clock_offset_control, \"ppm\", self.gsm_input, \"ppm_in\")\n self.msg_connect(self.gsm_receiver, \"measurements\", self.gsm_clock_offset_control, \"measurements\")\n\n #Connect the demapper and decoder for timeslot 0\n self.msg_connect((self.gsm_receiver, 'C0'), (self.gsm_bcch_ccch_demapper_0, 'bursts'))\n 
self.msg_connect((self.gsm_bcch_ccch_demapper_0, 'bursts'), (self.gsm_control_channels_decoders[0], 'bursts'))\n\n #Connect the demapper and decoders for the other timeslots\n for i in range(1,max_timeslot +1):\n self.msg_connect((self.gsm_receiver, 'C0'), (self.gsm_sdcch8_demappers[i-1], 'bursts'))\n self.msg_connect((self.gsm_sdcch8_demappers[i-1], 'bursts'), (self.gsm_control_channels_decoders[i], 'bursts'))\n\n\n #Connect the UDP clients if requested\n for client_socket in self.client_sockets:\n for i in range(0,max_timeslot + 1):\n self.msg_connect((self.gsm_control_channels_decoders[i], 'msgs'), (client_socket, 'pdus'))\n\n #Connect the printer is self.verbose is True\n if self.verbose:\n for i in range(0,max_timeslot + 1):\n self.msg_connect((self.gsm_control_channels_decoders[i], 'msgs'), (self.gsm_message_printer, 'msgs'))\n\n \"\"\"\n if self.verbose:\n self.msg_connect(self.gsm_receiver, \"C0\", self.gsm_bursts_printer_0, \"bursts\")\n \"\"\"", "def LRST_TX_B(self):\n return self._readReg('CHIPCFG', 'LRST_TX_B')", "def cifar10_rnn_gate_110(pretrained=False, **kwargs):\n model = ResNetRecurrentGateSP(BasicBlock, [18, 18, 18], num_classes=10, **kwargs)\n return model", "def gghg(sample, rname):\n\n selector = gghgBase(sample, rname)\n\n setupPhotonSelection(selector.findOperator('PhotonSelection'))\n\n if not sample.data:\n addIDSFWeight(sample, selector)\n\n return selector", "def lbcresnet18(**kwargs):\n return LBCResNet(LBCBasicBlock, [2, 2, 2, 2], **kwargs)", "def cifar100_rnn_gate_152(pretrained=False, **kwargs):\n model = ResNetRecurrentGateSP(BasicBlock, [25, 25, 25], num_classes=100, **kwargs)\n return model", "def sample_graph(g_type=None, n=100, g_seed=0):\n\tG = load_graph(g_type)\n\tsampler = Graph_Sampling.SRW_RWF_ISRW()\n\tprng = random.Random(g_seed)\n\tG_sampled = sampler.random_walk_sampling_with_fly_back(G, n, 0.15, prng)\n\treturn G_sampled" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_glfsr_source_f_sptr __init__(self, p) > digital_glfsr_source_f_sptr
def __init__(self, *args): this = _digital_swig.new_digital_glfsr_source_f_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, src):\n self.src = src", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_framer_sink_1_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, gain=None, samp_rate=None, ppm=None, arfcn=None, capture_id=None, udp_ports=[], max_timeslot=0, store_capture=True, verbose=False, band=None, rec_length=None, test=False, args=\"\"):\n\n gr.top_block.__init__(self, \"Gr-gsm Capture\")\n\n ##################################################\n # Parameters\n ##################################################\n\n self.arfcn = arfcn\n for band in grgsm.arfcn.get_bands():\n if grgsm.arfcn.is_valid_arfcn(self.arfcn, band):\n self.fc = grgsm.arfcn.arfcn2downlink(arfcn, band)\n break\n\n self.gain = gain\n self.samp_rate = samp_rate\n self.ppm = ppm\n self.arfcn = arfcn\n self.band = band\n self.shiftoff = shiftoff = 400e3\n self.rec_length = rec_length\n self.store_capture = store_capture\n self.capture_id = capture_id\n self.udp_ports = udp_ports\n self.verbose = verbose\n\n ##################################################\n # Processing Blocks\n ##################################################\n\n self.rtlsdr_source = osmosdr.source( args=\"numchan=\" + str(1) + \" \" + \"\" )\n self.rtlsdr_source.set_sample_rate(samp_rate)\n self.rtlsdr_source.set_center_freq(self.fc - shiftoff, 0)\n self.rtlsdr_source.set_freq_corr(ppm, 0)\n self.rtlsdr_source.set_dc_offset_mode(2, 0)\n self.rtlsdr_source.set_iq_balance_mode(2, 0)\n self.rtlsdr_source.set_gain_mode(True, 0)\n self.rtlsdr_source.set_gain(gain, 0)\n self.rtlsdr_source.set_if_gain(20, 0)\n self.rtlsdr_source.set_bb_gain(20, 0)\n self.rtlsdr_source.set_antenna(\"\", 0)\n self.rtlsdr_source.set_bandwidth(250e3+abs(shiftoff), 0)\n self.blocks_rotator = blocks.rotator_cc(-2*pi*shiftoff/samp_rate)\n\n #RUn for the specified amount of seconds or indefenitely\n if self.rec_length is not None:\n self.blocks_head_0 = blocks.head(gr.sizeof_gr_complex, int(samp_rate*rec_length))\n\n self.gsm_receiver = grgsm.receiver(4, ([self.arfcn]), ([]))\n self.gsm_input = grgsm.gsm_input(\n ppm=0,\n osr=4,\n fc=self.fc,\n samp_rate_in=samp_rate,\n )\n self.gsm_clock_offset_control = grgsm.clock_offset_control(self.fc-shiftoff)\n\n #Control channel demapper for timeslot 0\n #self.gsm_bcch_ccch_demapper_0 = grgsm.universal_ctrl_chans_demapper(0, ([2,6,12,16,22,26,32,36,42,46]), ([1,2,2,2,2,2,2,2,2,2]))\n self.gsm_bcch_ccch_demapper_0 = grgsm.gsm_bcch_ccch_demapper(0)\n #For all other timeslots are assumed to contain sdcch8 logical channels, this demapping may be incorrect\n if max_timeslot >= 1 and max_timeslot <= 8:\n self.gsm_sdcch8_demappers = []\n for i in range(1,max_timeslot + 1):\n #self.gsm_sdcch8_demappers.append(grgsm.universal_ctrl_chans_demapper(i, ([0,4,8,12,16,20,24,28,32,36,40,44]), ([8,8,8,8,8,8,8,8,136,136,136,136])))\n self.gsm_sdcch8_demappers.append(grgsm.gsm_sdcch8_demapper(i))\n #Control channel decoder (extracts the packets), one for each timeslot\n self.gsm_control_channels_decoders = []\n for i in range(0,max_timeslot + 1):\n self.gsm_control_channels_decoders.append(grgsm.control_channels_decoder())\n# self.blocks_socket_pdu_0 = blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", \"4729\", 
10000, False)# self.blocks_socket_pdu_0 = blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", \"4729\", 10000, False)\n\n #UDP client that sends all decoded C0T0 packets to the specified port on localhost if requested\n self.client_sockets = []\n self.server_sockets = []\n for udp_port in self.udp_ports:\n #The server is for testing only\n #WARNING remove the server if you want connect to a different one\n if test:\n self.server_sockets.append(blocks.socket_pdu(\"UDP_SERVER\", \"127.0.0.1\", str(udp_port), 10000))\n self.client_sockets.append(blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", str(udp_port), 10000))\n\n #Sinks to store the capture file if requested\n if self.store_capture:\n self.gsm_burst_file_sink = grgsm.burst_file_sink(str(self.capture_id) + \".burstfile\")\n self.blocks_file_sink = blocks.file_sink(gr.sizeof_gr_complex*1, str(self.capture_id) + \".cfile\", False)\n self.blocks_file_sink.set_unbuffered(False)\n\n #Printer for printing messages when verbose flag is True\n if self.verbose:\n self.gsm_message_printer = grgsm.message_printer(pmt.intern(\"\"), False)\n\n \"\"\"\n if self.verbose:\n self.gsm_bursts_printer_0 = grgsm.bursts_printer(pmt.intern(\"\"),\n False, False, False, False)\n \"\"\"\n ##################################################\n # Connections\n ##################################################\n\n if self.rec_length is not None: #if recording length is defined connect head block after the source\n self.connect((self.rtlsdr_source, 0), (self.blocks_head_0, 0))\n self.connect((self.blocks_head_0, 0), (self.blocks_rotator, 0))\n else:\n self.connect((self.rtlsdr_source, 0), (self.blocks_rotator, 0))\n\n #Connect the file sinks\n if self.store_capture:\n self.connect((self.blocks_rotator, 0), (self.blocks_file_sink, 0))\n self.msg_connect(self.gsm_receiver, \"C0\", self.gsm_burst_file_sink, \"in\")\n\n #Connect the GSM receiver\n self.connect((self.gsm_input, 0), (self.gsm_receiver, 0))\n self.connect((self.blocks_rotator, 0), (self.gsm_input, 0))\n self.msg_connect(self.gsm_clock_offset_control, \"ppm\", self.gsm_input, \"ppm_in\")\n self.msg_connect(self.gsm_receiver, \"measurements\", self.gsm_clock_offset_control, \"measurements\")\n\n #Connect the demapper and decoder for timeslot 0\n self.msg_connect((self.gsm_receiver, 'C0'), (self.gsm_bcch_ccch_demapper_0, 'bursts'))\n self.msg_connect((self.gsm_bcch_ccch_demapper_0, 'bursts'), (self.gsm_control_channels_decoders[0], 'bursts'))\n\n #Connect the demapper and decoders for the other timeslots\n for i in range(1,max_timeslot +1):\n self.msg_connect((self.gsm_receiver, 'C0'), (self.gsm_sdcch8_demappers[i-1], 'bursts'))\n self.msg_connect((self.gsm_sdcch8_demappers[i-1], 'bursts'), (self.gsm_control_channels_decoders[i], 'bursts'))\n\n\n #Connect the UDP clients if requested\n for client_socket in self.client_sockets:\n for i in range(0,max_timeslot + 1):\n self.msg_connect((self.gsm_control_channels_decoders[i], 'msgs'), (client_socket, 'pdus'))\n\n #Connect the printer is self.verbose is True\n if self.verbose:\n for i in range(0,max_timeslot + 1):\n self.msg_connect((self.gsm_control_channels_decoders[i], 'msgs'), (self.gsm_message_printer, 'msgs'))\n\n \"\"\"\n if self.verbose:\n self.msg_connect(self.gsm_receiver, \"C0\", self.gsm_bursts_printer_0, \"bursts\")\n \"\"\"", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = 
_digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n _itkOptimizerParametersPython.itkOptimizerParametersF_swiginit(self, _itkOptimizerParametersPython.new_itkOptimizerParametersF(*args))", "def __init__(self, *args):\n this = _digital_swig.new_digital_simple_framer_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def glfsr_source_f(*args, **kwargs):\n return _digital_swig.glfsr_source_f(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_probe_density_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, spi_rack, module, frequency=100e6):\n #def __init__(self, module, frequency=100e6):\n self.spi_rack = spi_rack\n self.module = module\n\n self.rf_frequency = frequency\n self.stepsize = 1e6\n self.ref_frequency = 10e6\n self.use_external = 0\n self.outputPower = None\n\n # These are the 6 registers present in the ADF4351\n self.registers = 6*[0]\n # In REG3: set ABP=1 (3 ns, INT-N) and CHARGE CANCEL=1\n self.registers[3] = (1<<22) | (1<<21) | 3\n # In REG5: set LD PIN MODE to 1 -> digital lock detect\n self.registers[5] = (1<<22) | (3<<19) | 5\n\n self.set_frequency(frequency)", "def __init__(self, *args):\n _ida_pro.sval_pointer_swiginit(self, _ida_pro.new_sval_pointer(*args))", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
glfsr_source_f(int degree, bool repeat = True, int mask = 0, int seed = 1) > digital_glfsr_source_f_sptr Galois LFSR pseudorandom source generating float outputs -1.0 and 1.0.
def glfsr_source_f(*args, **kwargs): return _digital_swig.glfsr_source_f(*args, **kwargs)
[ "def glfsr_source_b(*args, **kwargs):\n return _digital_swig.glfsr_source_b(*args, **kwargs)", "def sample_exponential(lambd: float) -> float:\n return -log(random.random()) / lambd", "def gen_samples(f, duration, fs=44100):\n\n samples = (np.sin(2*np.pi*np.arange(fs*duration)*f/fs)).astype(np.float32)\n\n return samples", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_f_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def generate_ts(cls, fs: int = 100, nsamples: int = 1000, **kwargs) -> np.ndarray:\n # For unit test\n if \"seed\" in kwargs:\n seed = int(kwargs[\"seed\"])\n else:\n seed = np.random.uniform(1, 100)\n\n # Generate some pink noise\n t = np.arange(nsamples) # timesteps\n f = 2 * np.pi * t / fs # frequency (in radians)\n\n # generate random complex series\n n = np.zeros((nsamples,), dtype=complex)\n np.random.seed = seed\n n = np.exp(1j * (2 * np.pi * np.random.rand(nsamples,)))\n n[0] = 0\n n *= 100 # make the spectrum stronger\n\n # Add some LFP-like components (:TODO:)\n # mix = lambda x, mean, var: 5 * math.exp(-((x - mean) ** 2) / (2 * var ** 2))\n # n = n - min(np.real(n))\n # mean = np.random.randint(10, len(f))\n # var = 3 * len(f) / mean\n # n_new = n + [mix(i, mean, var) for i in range(len(n))]\n # n_new[1:] = np.array(n_new[1:]) / np.arange(len(n))[1:]\n\n # Take a random part of the signal and amplify it\n peak = np.random.random()\n idx_enhanced = int(len(f) * peak)\n n[idx_enhanced] *= 100\n\n # generate the timeseries\n s = np.real(np.fft.ifft(n))\n return s", "def get_float_40bit(sequence, return_string=False):\n if sequence[0]:\n exponent = sequence[0] - 0x80\n\n mantissa_bytes = bytes((sequence[1] & 0x7f,)) + bytes(sequence[2:5])\n mantissa = int.from_bytes(mantissa_bytes, 'big') / 2**32\n\n result = 2**exponent * (0.5 + mantissa)\n\n else:\n result = 0.0\n\n if return_string:\n return f'{result:.0f}' if result.is_integer() else f'{result:f}'\n\n else:\n return result", "def test_random_fast_gradient_sign_method():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n input_np = np.random.random((1, 28)).astype(np.float32)\n label = np.asarray([2], np.int32)\n label = np.eye(28)[label].astype(np.float32)\n\n attack = RandomFastGradientSignMethod(Net())\n ms_adv_x = attack.generate(input_np, label)\n\n assert np.any(ms_adv_x != input_np), 'Random fast gradient sign method: ' \\\n 'generate value must not be equal to' \\\n ' original value.'", "def AddPointSource(self,n=-1, E=-1, T='rand', l='rand',b='rand'):\n # Check energies.\n if (type(E)!=int) and (min(E)<self.eMin or max(E)>self.eMax):\n # otherwise load the updated psf between the given energies\n eMin,eMax = min(E)-1e-5,max(E)+1e-5\n nSteps = int(np.ceil((np.log10(eMax)-np.log10(eMin))/0.25))\n psfbins = np.logspace(np.log10(eMin),np.log10(eMax),nSteps+1)\n psf = [FermiPSF.GetPSF(psfbins[i],psfbins[i+1],convType=self.convType)[1] for i in range(len(psfbins)-1)]\n if len(psfbins)==0:\n psf = (FermiPSF.GetPSF(eMin,eMax,convType=self.convType)[1],)\n theta = FermiPSF.GetPSF(eMin,eMax,convType=self.convType)[0]\n else:\n eMin,eMax = self.eMin,self.eMax\n # if using the preset energy range, don't need to reload psf\n theta, psf,psfbins = self.theta, self.psf,self.psfbins\n \n # Sample the energy spectrum if not provided\n if type(E)==int:\n if E ==-1: E = self.SampleE(eMin,eMax,n)\n if n==-1: \n try: n=len(E)\n except: raise ValueError('Need to specify n or a vector E.')\n\n if str(T) =='rand': T = 
np.random.randint(0,high=self.time,size=n)\n if str(l) =='rand': l=np.random.ranf()*360.\n if str(b) =='rand': b=np.rad2deg(np.arccos(2*np.random.ranf()-1))-90\n #=============================================================\n # Here we apply the fermi point spread function.\n # Inverse monte carlo sampling of psf to obtain r\n # Get the energy averaged psf (with weighting ~ E^-2.5)\n #=============================================================\n dY,dZ = np.zeros(n),np.zeros(n)\n # bin the energies\n e = np.digitize(E,psfbins)\n # make a list of bins containing samples\n ue = np.unique(e)-1\n # for each bin\n for i in range(len(ue)):\n # find which points are in this energy bin\n idx = np.where(e-1==ue[i])[0] \n psfcum = np.cumsum(psf[ue[i]]) # Obtain CDF\n # Invert histogram and sample\n r = theta[np.argmin(np.abs(np.transpose(np.ones((n,len(psfcum)))*psfcum)-np.random.ranf(n)),axis=0)]\n phi = 2*np.pi*np.random.ranf(n) # Random Angle \n # Find X and Y displacements\n dY[idx],dZ[idx] = np.deg2rad(r*np.cos(phi)),np.deg2rad(r*np.sin(phi))\n # normalize the vectors. Now (dx,dy,dz) can be rotated to correct galactic coords.\n dX = np.sqrt(1-dZ*dZ-dY*dY)\n # First rotate about y-axis to the correct lat.\n ny = np.array([0.,1.,0.])\n nz = np.array([0.,0.,1.])\n theta2,theta1 = np.deg2rad((l,b)) \n R1 = self.__rotation_matrix(axis=ny,theta=theta1) # construct the rotation matrix\n # The second rotation will move to the correct longitude\n #R2 = self.__rotation_matrix(axis=nz,theta = theta2)\n R2 = self.__rotation_matrix(axis=nz,theta =-theta2)\n R = np.dot(R2,R1) # construct full rotation matrix \n def rotate(n):\n #n = n/np.sqrt(np.dot(n,n))\n return np.dot(R,n)\n \n # rotate all the vectors (Y component should be zero for all)\n X,Y,Z = np.transpose([rotate(np.transpose((dX,dY,dZ))[i]) for i in range(len(dX))])\n\n # Convert Centroids back to lat/long in radians\n Y = (np.rad2deg(np.arctan2(Y, X)) + 360.)%360 # longitude\n X = np.rad2deg(np.arcsin(Z)) # latitude\n # recondition points which have lat<-90 or lat>90 by meridian flipping.\n idx = np.where(X<-90)[0]\n X[idx] = -(X[idx]%90)\n Y[idx] = (Y[idx] + 180)%360\n idx = np.where(X>90)[0]\n X[idx] = 90-(X[idx]%90)\n Y[idx] = (Y[idx] + 180)%360\n \n if self.sim == []: self.sim = np.array((X,Y,T,E))\n else: self.sim = np.append(self.sim,(X,Y,T,E),axis=1)\n return self.sim", "def test_gaussian_state(self, tol):\n V = np.array([[0.5, 0], [0, 2]])\n r = np.array([0, 0])\n\n wires = [0]\n\n gate_name = \"GaussianState\"\n operation = qml.GaussianState\n\n cutoff_dim = 10\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n sf_operation = dev._operation_map[gate_name]\n\n assert dev.supports_operation(gate_name)\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n operation(*args, wires=wires)\n return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))\n\n res = circuit(V, r)\n sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, V, r)\n assert np.allclose(res, sf_res, atol=tol, rtol=0)", "def next_float(self, min, max):\n\t\treturn min + (max * self.__rand.random())", "def applyLowPass(x, fs, fc=30, N=4):\n wc = fc / (fs / 2)\n b, a = scipy.signal.butter(N, wc)\n return scipy.signal.filtfilt(b, a, x, method='gust')", "def _get_gyre():\n function = LegacyFunctionSpecification()\n function.name = 'get_gyre'\n function.addParameter('index_of_the_star', dtype='int32',\n direction=function.IN, description=\"The index for the star. 
\")\n function.addParameter('mode_l', dtype='int32',\n direction=function.IN, description=\"L mode to find (must match that in gyre.in) \")\n function.addParameter('add_center_point', dtype='bool', direction=function.IN,\n description=\"Whether to add center point\")\n function.addParameter('keep_surface_pointt', dtype='bool', direction=function.IN,\n description=\"Whether to keep surface point\")\n function.addParameter('add_atmosphere', dtype='bool', direction=function.IN,\n description=\"Whether to add atmosphere\")\n function.addParameter('fileout', dtype='string', direction=function.IN,\n description=\"Filename to store data at each radial point\")\n function.result_type = 'int32'\n return function", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def gRD(RD):\r\n q = 0.0057565\r\n pi = math.pi\r\n return 1 / math.sqrt(1 + 3 * q**2 * (RD**2)/(pi**2))", "def bits_float(bits: int) -> float:\n if bits < 0:\n significand = bits % (1 << 63)\n bits = ~significand\n result = struct.unpack('=d', struct.pack('=q', bits))[0]\n return cast(float, result)", "def gen_Greens_function_real(img_size, ps, psz, lambda_in):\n\n N, M, L = img_size\n\n x_r = (np.r_[:M] - M // 2) * ps\n y_r = (np.r_[:N] - N // 2) * ps\n z_r = (np.r_[:L] - L // 2) * psz\n\n xx_r, yy_r, zz_r = np.meshgrid(x_r, y_r, z_r)\n\n # radial coordinate\n rho = (xx_r**2 + yy_r**2 + zz_r**2) ** (0.5)\n\n # average radius of integration around r=0\n epsilon = (ps * ps * psz / np.pi / 4 * 3) ** (1 / 3)\n\n # wavenumber\n k = 2 * np.pi / lambda_in\n\n # average value for Green's function at r=0\n V_epsilon = (\n 1\n / 1j\n / k\n * (\n epsilon * np.exp(1j * k * epsilon)\n - 1 / 1j / k * (np.exp(1j * k * epsilon) - 1)\n )\n / ps\n / ps\n / psz\n )\n\n G_real = np.exp(1j * k * rho) / (rho + 1e-7) / 4 / np.pi\n G_real[rho == 0] = V_epsilon\n\n return G_real", "def target_sampler(seed_f, domain=[-1.0, 1.0], count=1000, noise_dev=0.01):\n \n res = []\n\n for i in xrange(count):\n x = np.random.random()*(domain[1]-domain[0])+domain[0]\n y = seed_f(x) + gauss_distribution(0.0, noise_dev)\n res.append([x,y])\n\n return np.array(res)", "def logg(a,caldir='cal/') :\n #a=fits.open('allStar-r12-l33.fits')[1].data\n\n aspcapmask=bitmask.AspcapBitMask()\n parammask=bitmask.ParamBitMask()\n starmask=bitmask.StarBitMask()\n gd=np.where( ((a['ASPCAPFLAG']&aspcapmask.badval()) == 0) )[0]\n\n cal=fits.open(caldir+'/giant_loggcal.fits')[1].data\n rgbsep=cal['rgbsep'][0]\n cnsep=cal['cnsep'][0]\n rclim=cal['rclim'][0]\n rcfit2=cal['rcfit2'][0]\n rgbfit2=cal['rgbfit2'][0]\n calloggmin=cal['calloggmin']\n calloggmax=cal['calloggmax']\n calteffmin=cal['calteffmin']\n calteffmax=cal['calteffmax']\n\n # for stars that aren't bad, get cn and dt\n cn=a['FPARAM'][gd,4]-a['FPARAM'][gd,5]\n dt=a['FPARAM'][gd,0] - (rgbsep[0] + rgbsep[1]*(a['FPARAM'][gd,1]-2.5) +rgbsep[2]*a['FPARAM'][gd,3])\n snr=clip(a['SNREV'][gd],0,200.)\n\n new=np.zeros(len(a))-9999.99\n\n # select RC\n rc=np.where((a['FPARAM'][gd,1]<rclim[1])&(a['FPARAM'][gd,1]>rclim[0])&\n (cn>cnsep[0]+cnsep[1]*a['FPARAM'][gd,3] + cnsep[2]*dt)&\n (a['FPARAM'][gd,1]<calloggmax)&(a['FPARAM'][gd,1]>calloggmin) &\n (a['FPARAM'][gd,0]<calteffmax)&(a['FPARAM'][gd,0]>calteffmin))[0]\n rccorr=rcfit2[0] + rcfit2[1]*a['FPARAM'][gd,1] + rcfit2[2]*a['FPARAM'][gd,1]**2\n new[gd[rc]]=a['FPARAM'][gd[rc],1]-rccorr[rc]\n a['PARAM'][gd[rc],1]=a['FPARAM'][gd[rc],1]-rccorr[rc]\n 
a['PARAM_COV'][gd[rc],1,1]=err.elemerr(cal['rcerrpar'][0],a['FPARAM'][gd[rc],0]-4500,snr[rc]-100,a['FPARAM'][gd[rc],3])**2\n #rcidl=np.where( (a['PARAMFLAG'][gd,1]&parammask.getval('LOGG_CAL_RC')) >0)[0]\n\n # select RGB\n rgb=np.where(((a['FPARAM'][gd,1]>rclim[1])|(a['FPARAM'][gd,1]<rclim[0])|\n (cn<cnsep[0]+cnsep[1]*a['FPARAM'][gd,3] + cnsep[2]*dt)) &\n (a['FPARAM'][gd,1]<calloggmax)&(a['FPARAM'][gd,1]>calloggmin) &\n (a['FPARAM'][gd,0]<calteffmax)&(a['FPARAM'][gd,0]>calteffmin))[0]\n #clip logg at loggmin and loggmax\n logg=clip(a['FPARAM'][gd,1],cal['loggmin'],cal['loggmax'])\n mh=clip(a['FPARAM'][gd,3],cal['mhmin'],cal['mhmax'])\n # get correction\n rgbcorr=(rgbfit2[0] + rgbfit2[1]*logg + rgbfit2[2]*logg**2 +\n rgbfit2[3]*logg**3 + rgbfit2[4]*mh )\n new[gd[rgb]]=a['FPARAM'][gd[rgb],1]-rgbcorr[rgb]\n a['PARAM'][gd[rgb],1]=a['FPARAM'][gd[rgb],1]-rgbcorr[rgb]\n a['PARAM_COV'][gd[rgb],1,1]=err.elemerr(cal['rgberrpar'][0],a['FPARAM'][gd[rgb],0]-4500,snr[rgb]-100,a['FPARAM'][gd[rgb],3])**2\n #rgbidl=np.where( (a['PARAMFLAG'][gd,1]&parammask.getval('LOGG_CAL_RGB')) >0)[0]\n\n cal=fits.open(caldir+'/dwarf_loggcal.fits')[1].data\n teff=clip(a['FPARAM'][gd,0],cal['temin'],cal['temax'])\n logg=clip(a['FPARAM'][gd,1],cal['loggmin'],cal['loggmax'])\n mh=clip(a['FPARAM'][gd,3],cal['mhmin'],cal['mhmax'])\n msfit=cal['msfit'][0]\n mscorr=msfit[0]+msfit[1]*teff+msfit[2]*mh\n ms=np.where(a['FPARAM'][gd,1] > cal['calloggmin'])[0]\n new[gd[ms]]=a['FPARAM'][gd[ms],1]-mscorr[ms]\n a['PARAM'][gd[ms],1]=a['FPARAM'][gd[ms],1]-mscorr[ms]\n a['PARAM_COV'][gd[ms],1,1]=err.elemerr(cal['errpar'][0],a['FPARAM'][gd[ms],0]-4500,snr[ms]-100,a['FPARAM'][gd[ms],3])**2\n #msidl=np.where( (a['PARAMFLAG'][gd,1]&parammask.getval('LOGG_CAL_MS')) >0)[0]\n\n trans=np.where((a['FPARAM'][gd,1] < 4) & (a['FPARAM'][gd,1] > 3.5) &\n (a['FPARAM'][gd,0] < calteffmax) )[0]\n ms_weight=(a['FPARAM'][gd[trans],1]-3.5)/0.5\n new[gd[trans]] = a['FPARAM'][gd[trans],1]-(mscorr[trans]*ms_weight+rgbcorr[trans]*(1-ms_weight))\n a['PARAM'][gd[trans],1] = a['FPARAM'][gd[trans],1]-(mscorr[trans]*ms_weight+rgbcorr[trans]*(1-ms_weight))\n\n diff =a['PARAM'][:,1]-new\n bd = np.where (np.isclose(diff,0.,1.e-6,0.01) == False)[0]\n return new", "def fading():\n fdng=np.random.rayleigh()\n fdng_db=20*np.log10(fdng)\n return fdng_db" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_gmskmod_bc_sptr __init__(self, p) > digital_gmskmod_bc_sptr
def __init__(self, *args): this = _digital_swig.new_digital_gmskmod_bc_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _coin.new_SbDPMatrix(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoMFVec2b()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoMFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_fff_sptr(*args)\n try: self.this.append(this)\n except: self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
primitive_connect(self, gr_basic_block_sptr block) primitive_connect(self, gr_basic_block_sptr src, int src_port, gr_basic_block_sptr dst, int dst_port)
def primitive_connect(self, *args): return _digital_swig.digital_gmskmod_bc_sptr_primitive_connect(self, *args)
[ "def connect_wire( self, dest=None, src=None ):\n\n self._connect_signal( src, dest ) # expects the src first", "def _connect_signal( self, left_port, right_port ):\n\n # Can't connect a port to itself!\n assert left_port != right_port\n # Create the connection\n connection_edge = ConnectionEdge( left_port, right_port )\n\n # Add the connection to the Model's connection list\n if not connection_edge:\n raise Exception( \"Invalid Connection!\")\n self._connections.add( connection_edge )", "def _connect_bundle( self, left_bundle, right_bundle ):\n\n # Can't connect a port to itself!\n assert left_bundle != right_bundle\n\n ports = zip( left_bundle.get_ports(), right_bundle.get_ports() )\n\n for left, right in ports:\n self._connect_signal( left, right )", "def connect_inline(target, source):\n dependents = source.dependent(nuke.INPUTS | nuke.HIDDEN_INPUTS)\n target.setInput(0, source)\n\n if target.maxOutputs():\n for node in dependents:\n print node.fullName()\n for i in xrange(node.inputs()):\n print \"setting input {0}\".format(i)\n print node.input(i)\n if node.input(i) == source:\n print \"setting that input\"\n node.setInput(i, target)", "def connect(self, layer1, layer2):\n return NotImplemented", "def _install_flow (self, p, c, n, port_src, port_dst = None,\n **kw):\n\n node_p = core.Outband.t.name(p)\n node_c = core.Outband.t.name(c)\n node_n = core.Outband.t.name(n)\n inport = node_c.port(node_p)\n outport = node_c.port(node_n)\n if not inport:\n log.error('%s->%s: not found' % (node_c.name, node_p.name))\n return\n if not outport:\n log.error('%s->%s: not found' % (node_c.name, node_n.name))\n return\n\n nw_src = nw_dst = None\n info_src = info_dst = \"\"\n if port_src:\n nw_src = port_src.ip\n info_src = \"%s(%s) => \" % (port_src.parent.name, nw_src)\n if port_dst:\n nw_dst = port_dst.ip\n info_dst = \" => %s(%s)\" % (port_dst.parent.name, nw_dst)\n\n backport = node_n.port(node_c)\n if backport:\n mac = backport.mac\n else:\n log.error('%s->%s: link not found' % (node_n.name, node_c.name))\n return\n\n str_from = \"%s.%s\" % (dpid_to_str(node_c.dpid), c)\n str_out = \"%s.%s\" % (dpid_to_str(node_n.dpid), n)\n eth_in, eth_out = '', ''\n if mac:\n eth_out = '!'\n\n if not outport or outport < 0 or not inport.num or inport.num < 0:\n log.error('unknown port: %s %s->%s %s' %\n (str_from, inport.num, outport.num, str_out))\n return\n\n actions = []\n if not mac and 'add_eth_label' in kw:\n mac = ETH_LABEL\n eth_out = '+'\n if not mac and 'del_eth_label' in kw:\n mac = ETHER_BROADCAST\n eth_out = '-'\n if mac:\n actions.append(of.ofp_action_dl_addr.set_dst(mac))\n actions.append(of.ofp_action_output(port = outport.num))\n\n match = of.ofp_match(in_port = inport.num,\n #nw_proto = ipv4.TCP_PROTOCOL,\n #dl_vlan = 1301,\n #dl_type = ethernet.VLAN_TYPE,\n dl_type = ethernet.IP_TYPE,\n nw_src = nw_src, #None,\n nw_dst = nw_dst )\n match.adjust_wildcards = False\n if 'with_eth_label' in kw or 'del_eth_label' in kw:\n match.dl_dst = ETH_LABEL\n eth_in = '*'\n\n if port_src and port_src.mac:\n match.dl_src = port_src.mac\n else:\n #log.error('unknown port_src.mac')\n return\n\n priority = of.OFP_DEFAULT_PRIORITY\n if 'add_eth_label' in kw:\n priority = FAILOVER_PRIORITY\n if mac:\n priority = of.OFP_DEFAULT_PRIORITY + 1 + outport.num\n if 'priority' in kw:\n priority = kw['priority']\n\n if 'failover_entry' in kw:\n mark = '=>'\n else:\n mark = '->'\n\n if 'udp' in kw:\n match.nw_proto = ipv4.UDP_PROTOCOL\n\n log.info('%s%s %i%s%s%s%i %s%s',\n info_src, str_from, \n inport.num, eth_in, 
mark, eth_out, outport.num,\n str_out, info_dst)\n\n msg = of.ofp_flow_mod(command=of.OFPFC_ADD,\n idle_timeout=of.OFP_FLOW_PERMANENT,\n hard_timeout=of.OFP_FLOW_PERMANENT,\n actions=actions,\n match=match,\n priority=priority\n )\n if 'failover_entry' in kw:\n self._add_failover_entry(c, msg)\n else:\n core.openflow.sendToDPID(node_c.dpid, msg.pack())\n\n if (not ('udp' in kw)) and outport.mac:\n #sending to destination, separte udp traffic \n self._install_flow(p, c, n, port_src, port_dst,\n udp = True, priority = of.OFP_DEFAULT_PRIORITY + 99,\n **kw)\n\n return", "def src_sink(self) -> SrcSink:\n pass", "def _connect(self, v1, v2):\n v1.neighbours.append(v2)\n v2.neighbours.append(v1)", "def connectVector(cls, plug, src, *args, **kwargs):\r\n pm.mel.cgfxShader_connectVector(plug, src)", "def add_link (self, src, dst):\n raise NotImplementedError(\"Not implemented yet!\")", "def add_sglink (self, src, dst):\n raise NotImplementedError(\"Not implemented yet!\")", "def _connect(self, start, end):\n if self._game.is_resolving():\n return\n if not self._playing:\n return\n self._grid_view.draw_connection(start, end,\n self._game.grid[start].get_dot().get_kind())", "def onConnectAttr(self, srcattr, dstattr, opts):\n pass", "def copy_(self, src, non_blocking=False): # real signature unknown; restored from __doc__\n pass", "def vnnConnect(disconnect=bool):\n pass", "def change_edge_src(graph: dace.graph.graph.OrderedDiGraph,\n node_a: Union[dace.graph.nodes.Node, dace.graph.graph.\n OrderedMultiDiConnectorGraph],\n node_b: Union[dace.graph.nodes.Node, dace.graph.graph.\n OrderedMultiDiConnectorGraph]):\n\n # Create new outgoing edges from node B, by copying the outgoing edges from\n # node A and setting their source to node B.\n edges = list(graph.out_edges(node_a))\n for e in edges:\n # Delete the outgoing edges from node A from the graph.\n graph.remove_edge(e)\n # Insert the new edges to the graph.\n if isinstance(e, gr.MultiConnectorEdge):\n # src_conn = e.src_conn\n # if e.src_conn is not None:\n # # Remove connector from node A.\n # node_a.remove_out_connector(e.src_conn)\n # # Insert connector to node B.\n # if (not node_b.add_out_connector(src_conn) and isinstance(\n # node_b, (dace.graph.nodes.CodeNode,\n # dace.graph.nodes.MapExit))):\n # while not node_b.add_out_connector(src_conn):\n # src_conn = src_conn + '_'\n # graph.add_edge(node_b, src_conn, e.dst, e.dst_conn, e.data)\n graph.add_edge(node_b, e.src_conn, e.dst, e.dst_conn, e.data)\n else:\n graph.add_edge(node_b, e.dst, e.data)", "def connect(connect_from, connect_to):\r\n connect_to_name = connect_to.layer_name\r\n for node_from in range(len(connect_from.nodes)):\r\n for node_to in range(len(connect_to.nodes)):\r\n connect_from.nodes[node_from].connections.append(Connection(connect_to_name, node_to, random.random()*2 - 1))", "def vnnCopy(sourceNode=\"string\"):\n pass", "def connectInput(self, input, output, other):\n\n if not input.name in self.inputs:\n raise ValueError(\"Input is not part of this block\")\n\n if not output.name in other.outputs:\n raise ValueError(\"Output is not part of target block\")\n\n if input.maxConnections > -1 and (not len(input.targets) < input.maxConnections):\n raise ValueError(\"Too many connections to input '%s'\" % input.name)\n\n if output.maxConnections > -1 and (not len(output.targets) < output.maxConnections):\n raise ValueError(\"Too many connections from output '%s'\" % output.name)\n\n input.targets.append(output)\n output.targets.append(input)", "def copyConnection(self, fromfield: 
'SoField') -> \"void\":\n return _coin.SoField_copyConnection(self, fromfield)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
primitive_disconnect(self, gr_basic_block_sptr block)
primitive_disconnect(self, gr_basic_block_sptr src, int src_port, gr_basic_block_sptr dst, int dst_port)
def primitive_disconnect(self, *args):
    return _digital_swig.digital_gmskmod_bc_sptr_primitive_disconnect(self, *args)
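Because gmskmod_bc is a hierarchical block, the two overloads above read as GNU Radio's hier-block disconnect semantics: by its signature, the one-argument form detaches a whole child block, while the five-argument form removes a single edge between two ports. A minimal sketch of driving the wrapper follows; the child blocks src and dst and the port indices are hypothetical stand-ins for illustration, not part of this record.

mod = gmskmod_bc(samples_per_sym=2, bt=0.3, L=4)
mod.primitive_disconnect(src)               # block form: detach the child block src entirely (assumed semantics)
mod.primitive_disconnect(src, 0, dst, 0)    # port form: remove only the edge src:0 -> dst:0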
[ "def disconnect(self, device):", "def disconnect(self, *args) -> \"void\":\n return _coin.SoField_disconnect(self, *args)", "def l2cap_disconnect(self, conn_handle: memoryview, cid: memoryview, /) -> None:", "def disconnect(self, endpoint):\n raise NotImplementedError", "def test_DisconnectNode(self):\n graph = mGraph.Graph()\n sumNode_1 = graph.createNode(mNode.SumNode)\n sumNode_2 = graph.createNode(mNode.SumNode)\n sumNode_3 = graph.createNode(mNode.SumNode)\n\n sumNode_1.getOutputPort(\"result\").connect(sumNode_3.getInputPort(\"value1\"))\n sumNode_2.getOutputPort(\"result\").connect(sumNode_3.getInputPort(\"value2\"))\n\n sumNode_1.portsIn[0].value = 1.0\n sumNode_1.portsIn[1].value = 1.5\n sumNode_2.portsIn[1].value = 2.25\n sumNode_2.portsIn[0].value = 3.0\n negNode = graph.createNode(mNode.NegateNode)\n sumNode_3.getOutputPort(\"result\").connect(negNode.getInputPort(\"value\"))\n negNode.evaluate()\n self.assertEqual(negNode.portsOut[0].value, -7.75, \"Output from Negate Node incorrect\")\n\n heads = graph.getNetworkHeads()\n self.assertEqual(len(heads), 1)\n tails = graph.getNetworkTails()\n self.assertEqual(len(tails), 2)\n\n sumNode_2.getOutputPort(\"result\").disconnect(sumNode_3.getInputPort(\"value2\"))\n self.assertFalse(sumNode_2.getOutputPort(\"result\").isConnected())\n self.assertFalse(sumNode_3.getInputPort(\"value2\").isConnected())\n self.assertEqual(sumNode_3.getInputPort(\"value2\").value, 5.25, \"Disconnected port values should be equal to there last connected input\")\n\n negNode.evaluate()\n self.assertEqual(negNode.portsOut[0].value, -7.75, \"Output from Negate Node incorrect\")\n\n heads = graph.getNetworkHeads()\n self.assertEqual(len(heads), 2)", "def onDisconnectAttr(self, srcattr, dstattr, opts):\n pass", "def user32_DdeDisconnect(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hConv\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def disconnect(self, reason: str = ''):\r\n reason_b = reason.encode(ENCODING)\r\n self.send(self.Enum.INF_DISCONNECT, reason_b) # Send inform_disconnect message with 'reason'\r\n self.transport.close() # Close the connection. Waits to send all data first. 
No data received hereon.\r", "def disconnect_nodes(parent_obj, parent_plug, child_obj, child_plug):\n\n parent_plug = get_plug(parent_obj, parent_plug)\n child_plug = get_plug(child_obj, child_plug)\n mdg_mod = maya.api.OpenMaya.MDGModifier()\n mdg_mod.disconnect(parent_plug, child_plug)\n mdg_mod.doIt()", "def __disconnect_field_signal(self, node):\n field = node.elem\n if field != None:\n if field.id != None:\n field.view.disconnect(field.id)", "def disconnectUnits(self, a, b):\n if self.verbose >= 1:\n print \"Remove edge:\", a.vectorStr(), b.vectorStr()\n a.edges.remove(a.getEdgeTo(b))\n b.edges.remove(b.getEdgeTo(a))", "def did_disconnect(self, target: \"SoCTarget\", resume: bool) -> None:\n pass", "def disconnectOutputs(self, node):\r\n node = self.convertToPyNode(node)\r\n if not node.isReferenced():\r\n output = node.outputs(c=1, p=1)\r\n for o in output:\r\n disconnectAttr(o[0], o[1])", "def disconnect(self):\n self.blnkt_dev.disconnect()", "def will_disconnect(self, target: \"SoCTarget\", resume: bool) -> None:\n pass", "def _disconnect_input(self, step_arg_name):\n self._connected_inputs[step_arg_name] = False", "def disconnect_node(G,node1,node2,text_id,idx):\n\tedge = G[node1][node2]\n\tedge_idx_list = edge['paths'][text_id]['word_positions']\n\tidx_pos = edge_idx_list.index(idx)\n\tedge_idx_list.pop(idx_pos)\n\tedge['weight'] -=1\n\tif not len(edge_idx_list):\n\t\tdel edge['paths'][text_id]\n\tif not len(edge['paths']):\n\t\tG.remove_edge(node1,node2)", "def detach_port(self, instance_obj, network_obj):\n raise NotImplementedError()", "def disconnect_connector(self): \n if self.itemA is not None:\n if self in self.itemA.connectorList:\n self.itemA.connectorList.remove(self)\n if self.itemB is not None:\n if self in self.itemB.connectorList:\n self.itemB.connectorList.remove(self)", "def disconnectJoint(attachHandleMode=bool, deleteHandleMode=bool):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
gmskmod_bc(unsigned int samples_per_sym = 2, double bt = 0.3, unsigned int L = 4) -> digital_gmskmod_bc_sptr
GMSK modulator. The inputs of this block are symbols from an M-ary alphabet +/-1, +/-3, ..., +/-(M-1). Usually, M = 2 and therefore, the valid inputs are +/-1. The modulator will silently accept any other inputs, though. The output is the phase-modulated signal.
def gmskmod_bc(samples_per_sym = 2, bt = 0.3, L = 4):
    return _digital_swig.gmskmod_bc(samples_per_sym, bt, L)
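For intuition, here is a small NumPy sketch of the operation this hierarchical block performs: the +/-1 symbol stream is shaped by a Gaussian pulse with the given BT product and span L, and the filtered frequency pulses are integrated into a phase trajectory. The pi/2 phase sensitivity and the unit-area pulse normalization are MSK-style assumptions for illustration, not values read out of the wrapped C++ block.

import numpy as np

def gmsk_modulate(symbols, sps=2, bt=0.3, L=4):
    # Gaussian frequency pulse spanning L symbol periods with BT product bt.
    t = np.arange(-L * sps // 2, L * sps // 2 + 1) / sps
    alpha = np.sqrt(np.log(2) / 2) / bt
    h = np.exp(-(np.pi * t / alpha) ** 2)
    h /= h.sum()                            # each symbol contributes unit phase area (assumption)
    pulses = np.zeros(len(symbols) * sps)   # impulse train carrying the +/-1 symbols
    pulses[::sps] = symbols
    freq = np.convolve(pulses, h)           # Gaussian-shaped instantaneous frequency
    phase = (np.pi / 2) * np.cumsum(freq)   # integrate: +/- pi/2 net phase shift per symbol
    return np.exp(1j * phase)               # constant-envelope, phase-modulated output

# e.g. baseband = gmsk_modulate(np.array([1, -1, 1, 1, -1]))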
[ "def scrambler_bb(*args, **kwargs):\n return _digital_swig.scrambler_bb(*args, **kwargs)", "def bpsk(input_bits, noise):\n modulator = Modulator()\n demodulator = Demodulator()\n channel = Channel()\n signal = modulator.make_bpsk_mod(input_bits)\n\n signal = channel.send_signal(signal, noise)\n\n result_bits = demodulator.make_bpsk_demod(signal, channel)\n return result_bits", "def adcGain(PdBm, nBits, adcVfs, thresOpt = OPTIMUM_THRES):\n# Vrms = np.sqrt(10**(PdBm/10.)*.001*50.) #rms of input\n# PdBmThresOpt = thresOpt * Vrms #Optimum threshold for the input\n# adcThres = adcVfs/(2.**(nBits-1))\n# Vratio = PdBmThresOpt/adcThres\n# GdB = 20.*np.log10(Vratio)\n\n Lev = adcVfs/2**(nBits-1)\n Vrms_i = np.sqrt(10**(PdBm/10.)*.001*50.) #rms of input\n G = (Lev/Vrms_i/thresOpt)**2\n GdB = 10*np.log10(G)\n\n return(GdB)", "def chunks_to_symbols_bc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bc(*args, **kwargs)", "def qammod(b, mod):\n if b.size != mod:\n print('number of bits do not match the modulation scheme')\n return -1\n elif mod not in [2, 4, 6, 8, 10]:\n print('Currently supporting only QPSK, 16QAM, 64QAM, 256QAM, 1024QAM')\n return -1\n else:\n dims = np.power(2,mod//2) # one side of the square\n #coord = qamcoord[0:dims]\n xdim = 0\n ydim = 0\n for i in range(0, mod//2):\n xdim = xdim+b[i]*np.power(2,i)\n ydim = ydim+b[i+mod//2]*np.power(2,i)\n return np.complex(dims-(2*xdim+1), dims-(2*ydim+1))", "def build_bkg(self):\n try:\n self.param_bphi.x\n print(\"Bphi already built!\")\n except:\n self.calc_field()\n\n print(\"Build bkg\")\n\n R_temp = np.linspace(self.eqdsk.rboxleft, self.eqdsk.rboxleft+self.eqdsk.rboxlength+self.extend_psi_R, self.nR)\n z_temp = np.linspace(-self.eqdsk.zboxlength/2., self.eqdsk.zboxlength/2., self.nz)\n #R_temp = np.linspace(float(np.around(np.min(self.R_w), decimals=2)), float(np.around(np.max(self.R_w), decimals=2)), self.nR)\n #z_temp = np.linspace(float(np.around(np.min(self.z_w), decimals=2)), float(np.around(np.max(self.z_w), decimals=2)), self.nz)\n\n psitemp = self.psi_coeff(R_temp, z_temp)\n\n bphitemp = self.param_bphi(R_temp, z_temp)\n\n self.bkg={'type':'magn_bkg', 'phi0':0, 'nsector':0, 'nphi_per_sector':1,\\\n 'ncoil':0, 'zero_at_coil':1,\\\n 'R':R_temp,'z':z_temp, \\\n 'phimap_toroidal':0, 'phimap_poloidal':0, \\\n 'psi':[],\\\n 'Bphi':bphitemp, 'BR':self.Br, 'Bz':self.Bz, \\\n 'Bphi_pert':self.Bphi_pert, 'BR_pert':self.BR_pert, 'Bz_pert':self.Bz_pert} \n\n self.bkg['psi'] = psitemp*2*np.pi #in ASCOT Bfield, the psi is divided by 2*pi and reverses sign. 
This prevents it from happening \n print(\"remember: I am multiplying psi times 2pi since in ascot it divides by it!\")", "def _get_ghash_clmul():\n\n if not _cpu_features.have_clmul():\n return None\n try:\n api = _ghash_api_template.replace(\"%imp%\", \"clmul\")\n lib = load_pycryptodome_raw_lib(\"Crypto.Hash._ghash_clmul\", api)\n result = _build_impl(lib, \"clmul\")\n except OSError:\n result = None\n return result", "def modulate(self, input_bits):\n mapfunc = vectorize(lambda i:\n self._constellation[bitarray2dec(input_bits[i:i + self.num_bits_symbol])])\n\n baseband_symbols = mapfunc(arange(0, len(input_bits), self.num_bits_symbol))\n\n return baseband_symbols", "def DblPwr(Mvec, MLpar, z):\n \n A = MLpar['A']\n b1 = MLpar['b1']\n b2 = MLpar['b2']\n b3 = MLpar['b3']\n Mstar = MLpar['Mstar']\n \n L = A * 10.**(b1*z) * (Mvec/(1.e8*u.Msun))**b2 * (1.+Mvec/Mstar)**b3\n L = L*u.Lsun\n \n return L", "def split_Bregman(sig, mask, initial_d, initial_b, mu, lamda, ninnner,nouter, max_cg):\n sigT=sig[np.newaxis].transpose()\n\n maskT = mask.transpose()\n\n uk=np.dot(maskT, sigT)\n\n dk_x=initial_d[np.newaxis].transpose()\n\n bk_x=initial_b[np.newaxis].transpose()\n fk = sigT\n for jouter in xrange (nouter):\n for jinner in xrange(ninnner):\n ukp=uk\n ifkt=np.dot(maskT, sigT)\n rhs=mu*ifkt+lamda*(dk_x-bk_x)\n\n ruk = np.dot(mask, uk)\n iukt = np.dot(maskT,ruk)\n r = rhs - mu * iukt -lamda *uk\n p = r\n rsold = np.dot(r.transpose(), r)\n\n for i in xrange(max_cg):\n rp=np.dot(mask,p)\n irpt = np.dot(maskT ,rp)\n Ap = mu * irpt + lamda *p\n\n alpha = rsold / np.dot(p.transpose(),Ap)\n uk = uk + alpha * p\n r = r - alpha * Ap\n rsnew = np.dot(r.transpose(),r)\n if rsnew < 1e-32:\n break\n\n p = r + rsnew / rsold * p;\n rsold = rsnew\n\n sk_x = uk + bk_x\n dk_x = np.maximum(np.abs(sk_x)-1/lamda,0)*np.sign(sk_x)\n bk_x = sk_x-dk_x\n\n fk = fk + sigT - np.dot(mask, uk)\n rec_tv = uk\n\n return (uk)", "def test_band_structure_bc(ph_nacl):\n ph_nacl.run_band_structure(\n _get_band_qpoints(), with_group_velocities=False, is_band_connection=True\n )\n ph_nacl.get_band_structure_dict()", "def gghmm(sample, rname):\n\n selector = gghlBase(sample, rname, ROOT.lMuon)\n selector.findOperator('LeptonSelection').setN(0, 2)\n\n dimuMass = ROOT.Mass()\n dimuMass.setPrefix('dimu')\n dimuMass.setMin(60.)\n dimuMass.setMax(120.)\n dimuMass.setCollection1(ROOT.cMuons)\n dimuMass.setCollection2(ROOT.cMuons)\n dimuMass.setIgnoreDecision(True)\n selector.addOperator(dimuMass)\n\n dimuSign = ROOT.OppositeSign()\n dimuSign.setPrefix('dimu')\n dimuSign.setCollection1(ROOT.cMuons)\n dimuSign.setCollection2(ROOT.cMuons)\n dimuSign.setIgnoreDecision(True)\n selector.addOperator(dimuSign)\n\n if not sample.data:\n muonLooseSF = getFromFile(datadir + '/muo_muon_looseid_2016.root', 'Loose_ScaleFactor') # x: abs eta, y: pt\n muonTrackSF = getFromFile(datadir + '/muonpog_muon_tracking_SF_ichep.root', 'htrack2') # x: npv\n\n idsf = selector.findOperator('MuonSF')\n idsf.addFactor(muonLooseSF)\n idsf.setNParticles(2)\n\n track = selector.findOperator('MuonTrackSF')\n track.addFactor(muonTrackSF)\n track.setNParticles(2)\n\n return selector", "def descrambler_bb(*args, **kwargs):\n return _digital_swig.descrambler_bb(*args, **kwargs)", "def basis_to_module(B, K):\n V, from_V, to_V = K.absolute_vector_space()\n M = ZZ**(V.dimension())\n C = [to_V(K(b)) for b in B]\n return M.span_of_basis(C)", "def LamC2pKK ( self ) : \n from GaudiConfUtils.ConfigurableGenerators import DaVinci__N3BodyDecays\n #\n return self.make_selection (\n 
'LambdaCpKK' ,\n DaVinci__N3BodyDecays ,\n ## inputs \n [ self.protons() , self.kaons() ] ,\n ##\n DecayDescriptor = \" [ Lambda_c+ -> p+ K- K+ ]cc\" ,\n ##\n Combination12Cut = \"\"\"\n ( AM < 2.5 * GeV ) &\n ( ACHI2DOCA(1,2) < 16 ) \n \"\"\" ,\n ## \n CombinationCut = \"\"\"\n ( ( ADAMASS ( 'Lambda_c+' ) < 65 * MeV ) \n | ( ADAMASS ( 'Xi_c+' ) < 65 * MeV ) ) &\n ( APT > %s ) & \n ( ACHI2DOCA(1,3) < 16 ) &\n ( ACHI2DOCA(2,2) < 16 ) \n \"\"\" % ( 0.95 * self[ 'pT(Lc+)' ] ) ,\n ##\n MotherCut = \"\"\"\n ( chi2vx < 25 ) &\n ( PT > %s ) &\n ( ( ADMASS ( 'Lambda_c+' ) < 55 * MeV ) \n | ( ADMASS ( 'Xi_c+' ) < 55 * MeV ) ) &\n ( ctau > 100 * micrometer ) \n \"\"\" % self [ 'pT(Lc+)']\n )", "def additive_scrambler_bb(*args, **kwargs):\n return _digital_swig.additive_scrambler_bb(*args, **kwargs)", "def m_b(mbmb, scale, f, alphasMZ=0.1185, loop=3):\n if scale == mbmb and f == 5:\n return mbmb # nothing to do\n _sane(scale, f)\n alphas_mb = alpha_s(mbmb, 5, alphasMZ=alphasMZ, loop=loop)\n crd = rundec.CRunDec()\n if f == 5:\n alphas_scale = alpha_s(scale, f, alphasMZ=alphasMZ, loop=loop)\n return crd.mMS2mMS(mbmb, alphas_mb, alphas_scale, f, loop)\n elif f == 4:\n crd.nfMmu.Mth = 4.8\n crd.nfMmu.muth = 4.8\n crd.nfMmu.nf = 5\n return crd.mH2mL(mbmb, alphas_mb, mbmb, crd.nfMmu, scale, loop)\n elif f == 3:\n mc = 1.3\n crd.nfMmu.Mth = 4.8\n crd.nfMmu.muth = 4.8\n crd.nfMmu.nf = 5\n mbmc = crd.mH2mL(mbmb, alphas_mb, mbmb, crd.nfMmu, mc, loop)\n crd.nfMmu.Mth = mc\n crd.nfMmu.muth = mc\n crd.nfMmu.nf = 4\n alphas_mc = alpha_s(mc, 4, alphasMZ=alphasMZ, loop=loop)\n return crd.mH2mL(mbmc, alphas_mc, mc, crd.nfMmu, scale, loop)\n elif f == 6:\n crd.nfMmu.Mth = 170\n crd.nfMmu.muth = 170\n crd.nfMmu.nf = 6\n return crd.mL2mH(mbmb, alphas_mb, mbmb, crd.nfMmu, scale, loop)\n else:\n raise ValueError(f\"Invalid input: f={f}, scale={scale}\")", "def __init__(self,L,M,l1,l2):\n self.L=L\n self.M=M\n self.l1=l1\n self.l2=l2\n self.CG=[]\n self.m1=[]\n self.m2=[]\n for m in range(-l1,l1+1):\n cg=ClebschGordan(L,M,l1,m,l2,M-m)\n if cg!=0: \n self.CG.append(cg)\n self.m1.append(m)\n self.m2.append(M-m)\n self.brad=[]", "def bond_B(k):\n return (4-k) * 300000", "def gabor(sigma, theta=0., lam=None, psi=0.0, gamma=1.0, ksize=None, removeDC=True):\n if ksize == None:\n ksize = sigma * 6\n ksize = int(ksize + 1 - ksize % 2)\n \n if lam == None:\n lam = sigma * 2.\n \n y, x = numpy.lib.index_tricks.nd_grid()[:ksize,:ksize] - ksize/2\n xp = x * cos(theta) + y * sin(theta)\n yp = -x * sin(theta) + y * cos(theta)\n gb = exp(- (xp**2 + gamma**2 * yp**2) / (2 * sigma**2)) * cos(2. * pi * xp / lam + psi)\n if removeDC:\n return gb - average(gb)\n else:\n return gb" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_kurtotic_equalizer_cc_sptr
__init__(self, p) -> digital_kurtotic_equalizer_cc_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_kurtotic_equalizer_cc_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, coeff):\n self.coeff = coeff", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, coefs_tup, setpoint_value, range_tup, integration_samples = 5, diff_filter_samples = 4):\r\n \r\n self.started = False\r\n self.Kp, self.Ki, self.Kd = coefs_tup\r\n \r\n if integration_samples < 3:\r\n integration_samples = 3\r\n print('Integration samples number is set default 3')\r\n \r\n self.integr_deque = collections.deque([(0,0)]* integration_samples, maxlen = integration_samples)\r\n \r\n if diff_filter_samples < 2:\r\n diff_filter_samples = 2\r\n print('Diff filter samples number is set to default 2')\r\n\r\n self.diff_deque = collections.deque([(0,0)]* diff_filter_samples, maxlen = diff_filter_samples)\r\n \r\n self.setpoint_value = setpoint_value\r\n \r\n self.min_value, self.max_value = range_tup\r\n if self.min_value >= self.max_value:\r\n self.min_value = 0\r\n self.max_value = 1\r\n print('Values range is set to default (0,1)')", "def __init__(self, encut, spinaxis, ldaul, Uparam, Jparam, nupdown=None, name='DFTCL_settings'):\n ncl_settings = {\"ISPIN\": 2, \"MAGMOM\": None, \"SAXIS\": spinaxis, \"LSORBIT\": \".TRUE.\", \"LNONCOLLINEAR\": \".TRUE.\", \"NUPDOWN\":nupdown}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=ncl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"ENCUT\", encut)", "def __init__(self):\n super(CorrelogramPooling3D, self).__init__()", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, traj, dyn, pg, Kp=1.0, dt=0.005):\n self.Kp = Kp\n self.trajfunction = traj\n self.pg = pg\n self.rate = 200\n self.dyn = dyn\n self.dt = dt", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n _itkOptimizerParametersPython.itkOptimizerParametersD_swiginit(self, _itkOptimizerParametersPython.new_itkOptimizerParametersD(*args))", "def __init__(self, curve):\n self._curve = curve", "def __init__(self, *args):\n _itkOptimizerParametersPython.itkOptimizerParametersHelperD_swiginit(self, _itkOptimizerParametersPython.new_itkOptimizerParametersHelperD(*args))", "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, Mcomp, Mhe, Apre, epre, Nkick=1000, Vkick=None, Mns=None, sys_flag=None, galphi=None, galcosth=None, omega=None, phi=None, costh=None,th_ma = None):\n \n # Convert inputs to SI\n\n\n self.sys_flag = sys_flag\n self.Nkick = Nkick\n\n if Vkick is not None: self.Vkick = Vkick*units.km.to(units.m)\n else: self.Vkick = np.random.uniform(0,1000,self.Nkick)*units.km.to(units.m)\n if phi is not None: self.phi = phi\n else: self.phi = np.random.uniform(0,2*np.pi,self.Nkick)\n\n if costh is not None: self.costh = costh\n else: self.costh = np.random.uniform(-1,1,self.Nkick)\n if Mns is not None: self.Mns = Mns*units.M_sun.to(units.kg)\n else: self.Mns = 
np.random.uniform(3.,Mhe,self.Nkick)*units.M_sun.to(units.kg)\n \n if th_ma is not None: self.th_ma = th_ma\n else: self.th_ma = np.random.uniform(0,2*np.pi,self.Nkick)\n self.E_ma =np.array([brentq(lambda x:ma -x + epre*np.sin(x),0,2*np.pi) for ma in self.th_ma])\n self.rpre = Apre*(1.-epre*np.cos(self.E_ma))*units.R_sun.to(units.m)\n self.Mhe = np.full((self.Nkick,), Mhe)*units.M_sun.to(units.kg)\n self.Mcomp = np.full((self.Nkick,), Mcomp)*units.M_sun.to(units.kg)\n self.Apre = np.full((self.Nkick,),Apre)*units.R_sun.to(units.m)\n self.epre = np.full((self.Nkick,),epre)\n \n # Get projection of R in the x-y plane to save later into output file", "def __init__(self,n,k,d,es=1e-3,ee=1e-3):\n self.q = 4\n self.n = n\n self.k = k\n self.d = d \n self.t = int((d-1)/2)\n self.symbol_err_rate = es\n self.erasure_err_rate = ee\n self.result = mpfr(\"0\")\n self.has_result = False\n #print (n,k,d,es,ee)", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self,order, xData, yData, uncertainties, deltaChiSqToStop = 0.01,dampingFactor = 1,useDampedGaussNeutonLineSearch = False, recordHistory = False):\n self.order = order \n super().__init__(deltaChiSqToStop = deltaChiSqToStop,dampingFactor = dampingFactor,useDampedGaussNeutonLineSearch = useDampedGaussNeutonLineSearch,recordHistory=recordHistory)\n self.xData = xData\n self.yData = yData\n self.uncert = uncertainties", "def __init__(self, conditionValue):", "def __init__(self, kernel_size, p_dim, in_channels, out_channels, n_LRF, KP_extent, radius,\r\n fixed_kernel_points='center', KP_influence='linear', aggregation_mode='sum'):\r\n super(EKPConv_v1, self).__init__()\r\n\r\n # Save parameters\r\n self.K = kernel_size\r\n self.p_dim = p_dim\r\n self.in_channels = in_channels\r\n self.out_channels = out_channels\r\n self.radius = radius\r\n self.n_LRF = n_LRF\r\n self.KP_extent = KP_extent\r\n self.fixed_kernel_points = fixed_kernel_points\r\n self.KP_influence = KP_influence\r\n self.aggregation_mode = aggregation_mode\r\n self.diff_op = torch.nn.MSELoss(reduction='none')\r\n\r\n # Number of feature per lrf\r\n self.lrf_channels = in_channels // n_LRF\r\n if in_channels % n_LRF != 0:\r\n raise ValueError('Input feature dimension of an equivariant convolution '\r\n 'is not divisible by the number of lrf')\r\n\r\n # Initialize weights\r\n self.weights = Parameter(torch.zeros((self.K, in_channels, out_channels), dtype=torch.float32),\r\n requires_grad=True)\r\n\r\n # Reset parameters\r\n self.reset_parameters()\r\n\r\n # Initialize kernel points\r\n self.kernel_points = self.init_KP()\r\n\r\n return", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
kurtotic_equalizer_cc(int num_taps, float mu) -> digital_kurtotic_equalizer_cc_sptr
Implements a kurtosis-based adaptive equalizer on complex stream.
Y. Guo, J. Zhao, Y. Sun, "Sign kurtosis maximization based blind equalization algorithm," IEEE Conf. on Control, Automation, Robotics and Vision, Vol. 3, Dec. 2004, pp. 2052-2057.
def kurtotic_equalizer_cc(*args, **kwargs):
    return _digital_swig.kurtotic_equalizer_cc(*args, **kwargs)
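The cited algorithm adapts the taps by a stochastic gradient that ascends sign(K) * K, where for a complex output the kurtosis is K(y) = E|y|^4 - 2(E|y|^2)^2 - |E[y^2]|^2. The sketch below derives one plausible sample-wise step from that cost; the exponential moment tracking and this exact error form are assumptions for illustration and need not match the wrapped C++ recursion.

import numpy as np

def kurtotic_step(w, u, stats, mu=0.01, alpha=0.01):
    # stats = [E|y|^2 estimate, E[y^2] estimate], tracked with an exponential
    # window and updated in place; start it as [0.0, 0j].
    y = np.vdot(w, u)                                    # y = conj(w) . u
    stats[0] = (1 - alpha) * stats[0] + alpha * abs(y) ** 2
    stats[1] = (1 - alpha) * stats[1] + alpha * y ** 2
    kurt = abs(y) ** 4 - 2 * stats[0] ** 2 - abs(stats[1]) ** 2  # crude K estimate
    # Sample-wise Wirtinger gradient of K with respect to conj(w):
    grad = ((abs(y) ** 2 - 2 * stats[0]) * np.conj(y) - np.conj(stats[1]) * y) * u
    return w + mu * np.sign(kurt) * grad                 # ascend sign(K) * K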
[ "def kcdetect(data, sf, proba_thr, amp_thr, hypno, nrem_only, tmin, tmax,\n kc_min_amp, kc_max_amp, fmin=.5, fmax=4., delta_thr=.75,\n smoothing_s=20, spindles_thresh=2., range_spin_sec=20,\n min_distance_ms=500.):\n # Find if hypnogram is loaded :\n hyploaded = True if np.unique(hypno).size > 1 and nrem_only else False\n\n # PRE DETECTION\n # Compute delta band power using wavelet\n freqs = np.array([0.1, 4., 8., 12., 16., 30.])\n delta_npow = morlet_power(data, freqs, sf, norm=True)[0]\n delta_nfpow = smoothing(delta_npow, smoothing_s * sf)\n idx_no_delta = np.where(delta_nfpow < delta_thr)[0]\n idx_loc_delta = np.where(delta_npow > np.median(delta_npow))[0]\n\n # MAIN DETECTION\n # Bandpass filtering\n sig_filt = filt(sf, np.array([fmin, fmax]), data)\n # Taiger-Keaser energy operator\n sig_tkeo = tkeo(sig_filt)\n # Define hard and soft thresholds\n hard_thr = np.nanmean(sig_tkeo) + amp_thr * np.nanstd(sig_tkeo)\n soft_thr = 0.8 * hard_thr\n\n with np.errstate(divide='ignore', invalid='ignore'):\n idx_hard = np.where(sig_tkeo > hard_thr)[0]\n idx_soft = np.where(sig_tkeo > soft_thr)[0]\n\n # Find threshold-crossing indices of soft threshold\n idx_zc_soft = _events_to_index(idx_soft).flatten()\n\n if idx_hard.size == 0:\n return np.array([], dtype=int)\n\n # Initialize K-complexes index vector\n idx_kc = np.array([], dtype=int)\n # Fill gap between events separated by less than min_distance_ms\n idx_hard = _events_distance_fill(idx_hard, min_distance_ms, sf)\n # Get where K-complex start / end :\n idx_start, idx_stop = _events_to_index(idx_hard).T\n\n # Find true beginning / end using soft threshold\n for s in idx_start:\n d = s - idx_zc_soft\n soft_beg = d[d > 0].min()\n soft_end = np.abs(d[d < 0]).min()\n idx_kc = np.append(idx_kc, np.arange(s - soft_beg, s + soft_end))\n\n # Check if spindles are present in range_spin_sec\n idx_spin = spindlesdetect(data, sf, spindles_thresh, hypno, False)[0]\n idx_start, idx_stop = _events_to_index(idx_kc).T\n spin_bool = np.array([], dtype=np.bool)\n\n for idx, val in enumerate(idx_start):\n step = 0.5 * range_spin_sec * sf\n is_spin = np.in1d(np.arange(val - step, val + step, 1),\n idx_spin, assume_unique=True)\n spin_bool = np.append(spin_bool, any(is_spin))\n\n kc_spin = np.where(spin_bool)[0]\n idx_kc_spin = _index_to_events(np.c_[idx_start, idx_stop][kc_spin])\n\n # Compute probability\n proba = np.zeros(shape=data.shape)\n proba[idx_kc] += 0.1\n proba[idx_no_delta] += 0.1\n proba[idx_loc_delta] += 0.1\n proba[idx_kc_spin] += 0.1\n\n if hyploaded:\n proba[hypno == -1] += -0.1\n proba[hypno == 0] += -0.2\n proba[hypno == 1] += 0\n proba[hypno == 2] += 0.1\n proba[hypno == 3] += -0.1\n proba[hypno == 4] += -0.2\n\n # Smooth and normalize probability vector\n proba = proba / 0.5 if hyploaded else proba / 0.4\n proba = smoothing(proba, sf)\n # Keep only proba >= proba_thr (user defined threshold)\n idx_kc = np.intersect1d(idx_kc, np.where(proba >= proba_thr)[0], True)\n\n if idx_kc.size == 0:\n return np.array([], dtype=int)\n\n # Morphological criteria\n idx_start, idx_stop = _events_to_index(idx_kc).T\n duration_ms = (idx_stop - idx_start) * (1000 / sf)\n\n # Remove events with bad duration\n good_dur = np.where(np.logical_and(duration_ms > tmin,\n duration_ms < tmax))[0]\n idx_kc = _index_to_events(np.c_[idx_start, idx_stop][good_dur])\n\n # Remove events with bad amplitude\n idx_start, idx_stop = _events_to_index(idx_kc).T\n amp = np.zeros(shape=idx_start.size)\n for i, (start, stop) in enumerate(zip(idx_start, idx_stop)):\n amp[i] = 
np.ptp(data[start:stop])\n good_amp = np.where(np.logical_and(amp > kc_min_amp,\n amp < kc_max_amp))[0]\n\n return np.c_[idx_start, idx_stop][good_amp]", "def kurtosis(x):\n\treturn stats.kurtosis(x, bias=False)", "def get_opt_func(rate_matrices, distributions):\n\n n = len(rate_matrices[0]) # number of states\n\n def get_dS(W, p):\n \"\"\" Rate of Shannon entropy change, for rate matrix W and distribution p \"\"\"\n\n \"\"\" We rewrite -sum_{i,j} p_i W_ji ln p_j as the \"KL-like\" expression\n 1/tau sum_{i,j} p_i T_ji ln (p_i T_ji/p_j T_ji)\n where tau = -min_i W_ii is the fastest time scale in R and\n T_ji = delta_{ji} + tau W_ji is a conditional probability distribuiton. This \n lets us indicate to cvxpy that -sum_{i,j} p_i W_ji ln p_j is convex in p.\n \"\"\"\n\n tau = -1/np.min(np.diag(W))\n T = np.eye(n) + tau*W\n assert(np.all(T>=0))\n\n dS = 0.\n for i in range(n):\n for j in range(n):\n if i == j: \n continue\n if np.isclose(T[i,j],0):\n continue\n dS += cp.kl_div( T[i,j] * p[j], T[i,j] * p[i]) + T[i,j] * p[j] - T[i,j] * p[i]\n return dS / tau\n\n\n def get_EF(W, p):\n \"\"\" EF rate, for rate matrix W and distribution p, defined as \n sum_{i,j} p_i W_ji ln (W_ji/W_ji)\n \"\"\"\n\n EF = 0.\n for i in range(n):\n for j in range(n):\n if i == j:\n continue\n if np.isclose(W[i,j],0) and np.isclose(W[j,i],0):\n continue\n EF += W[i,j] * p[j] * np.log( W[i,j] / W[j,i] )\n return EF\n\n \n def f(eta):\n p = cp.Variable( n, name='p')\n logQ_param = cp.Parameter(n, name='logQ')\n\n\n min_val = None\n\n print('-'*len(rate_matrices))\n\n for W in rate_matrices:\n sys.stdout.write('.')\n sys.stdout.flush()\n\n cons = [ p >= 0, sum(p) == 1 ]\n for q2 in distributions:\n assert(np.all(q2 > 0))\n cons.append( p @ (logQ_param-np.log(q2)) >= 0 )\n\n obj = (1-eta)*get_dS(W, p) + get_EF(W, p) - eta*(W @ p)@logQ_param\n cons.append( obj <= -1e-6)\n prob = cp.Problem(cp.Minimize(0), cons)\n\n for q in distributions:\n logQ_param.value = np.log(q)\n\n prob.solve(solver=cp.ECOS, reltol=1e-12)\n if prob.status == 'infeasible':\n continue\n\n else:\n print('')\n return False\n\n\n print('')\n return True\n\n return f", "def kurtosis(self, fisher: bool = True, bias: bool = True) -> float | None:\n return self._s.kurtosis(fisher, bias)", "def _thermlc(tautom, theta, deltal, x, jmax, dphdot, bet, c2):\n dphesc = np.zeros(900) # Initialise the output\n a = np.zeros(900); b = np.zeros(900); c = np.zeros(900)\n d = np.zeros(900); alp = np.zeros(900); u = np.zeros(900)\n g = np.zeros(900); gam = np.zeros(900)\n\n #c u(x) is the dimensionless photon occupation number\n c20 = tautom / deltal\n\n #c determine u\n #c define coefficients going into equation\n #c a(j) * u(j + 1) + b(j) * u(j) + c(j) * u(j - 1) = d(j)\n for j in range(1, jmax - 1):\n w1 = np.sqrt( x[j] * x[j + 1] )\n w2 = np.sqrt( x[j - 1] * x[j] )\n #c w1 is x(j + 1 / 2)\n #c w2 is x(j - 1 / 2)\n a[j] = -c20 * c2[j] * (theta / deltal / w1 + 0.5)\n t1 = -c20 * c2[j] * (0.5 - theta / deltal / w1)\n t2 = c20 * c2[j - 1] * (theta / deltal / w2 + 0.5)\n t3 = x[j]**3 * (tautom * bet[j])\n b[j] = t1 + t2 + t3\n c[j] = c20 * c2[j - 1] * (0.5 - theta / deltal / w2)\n d[j] = x[j] * dphdot[j]\n\n #c define constants going into boundary terms\n #c u(1) = aa * u(2) (zero flux at lowest energy)\n #c u(jx2) given from region 2 above\n x32 = np.sqrt(x[0] * x[1])\n aa = (theta / deltal / x32 + 0.5) / (theta / deltal / x32 - 0.5)\n\n #c zero flux at the highest energy\n u[jmax - 1] = 0.0\n\n #c invert tridiagonal matrix\n alp[1] = b[1] + c[1] * aa\n gam[1] = a[1] / 
alp[1]\n for j in range(2, jmax - 1):\n alp[j] = b[j] - c[j] * gam[j - 1]\n gam[j] = a[j] / alp[j]\n g[1] = d[1] / alp[1]\n for j in range(2, jmax - 2):\n g[j] = (d[j] - c[j] * g[j - 1]) / alp[j]\n g[jmax - 2] = (d[jmax - 2] - a[jmax - 2] * u[jmax - 1] \n - c[jmax - 2] * g[jmax - 3]) / alp[jmax - 2]\n u[jmax - 2] = g[jmax - 2]\n for j in range(2, jmax + 1):\n jj = jmax - j\n u[jj] = g[jj] - gam[jj] * u[jj + 1]\n u[0] = aa * u[1]\n #c compute new value of dph(x) and new value of dphesc(x)\n dphesc[:jmax] = x[:jmax] * x[:jmax] * u[:jmax] * bet[:jmax] * tautom\n\n return dphesc", "def kurtosis(dist):\n\n\t\treturn stats.kurtosis(dist.values())", "def test_apparent_kurtosis_coef():\n\n sph = Sphere(xyz=gtab.bvecs[gtab.bvals > 0])\n AKC = dki.apparent_kurtosis_coef(params_sph, sph)\n\n # check all direction\n for d in range(len(gtab.bvecs[gtab.bvals > 0])):\n assert_array_almost_equal(AKC[d], Kref_sphere)", "def key_rate_calculation(vA, t, h, e, v, bt):\r\n\r\n # Calculate the thermal noise\r\n omega = (t * e - t + 1) / (1 - t)\r\n\r\n # Calculate the mutual information and the reconciliation efficiency\r\n s_y = t * h * (vA - 1 + e) + 1 + v # Variance of y\r\n s_xy = h * t * e + 1 + v # Conditional variance between x and y\r\n i = 0.5 * np.log2(s_y / s_xy)\r\n\r\n # Define the global output state parameters\r\n # c = np.sqrt(t * hd * (vA ** 2 - 1))\r\n b = t * h * (vA + e) + 1 - (t * h) + v\r\n gamma = np.sqrt(h * (1 - t) * (omega ** 2 - 1))\r\n # delta = -np.sqrt((1 - t) * (vA ** 2 - 1))\r\n theta = np.sqrt(h * t * (1 - t)) * (omega - vA)\r\n psi = np.sqrt(t * (omega ** 2 - 1))\r\n phi = t * omega + (1 - t) * vA\r\n\r\n # The global output state ρA′BeE′ of Alice, Bob and Eve is zero-mean Gaussian with CM V_A'BeE'\r\n # To compute the Holevo bound, we need to derive the von Neumann entropies S(ρeE′) and S(ρeE′|y) which can be\r\n # computed from the symplectic spectra of the reduced CM VeE′ and the conditional CM VeE′|y\r\n v_eE = np.array([[omega, 0, psi, 0], [0, omega, 0, -psi], [psi, 0, phi, 0], [0, -psi, 0, phi]])\r\n v_eEy = v_eE - (b ** -1) * np.array([[gamma ** 2, 0, gamma * theta, 0], [0, 0, 0, 0],\r\n [gamma * theta, 0, theta ** 2, 0], [0, 0, 0, 0]])\r\n v_1, v_2 = continuous.symplectic_eigenvalue_calculation(v_eE)\r\n v_3, v_4 = continuous.symplectic_eigenvalue_calculation(v_eEy)\r\n\r\n x = continuous.h_f(v_1) + continuous.h_f(v_2) - continuous.h_f(v_3) - continuous.h_f(v_4) # Holevo bound\r\n r = bt * i - x # Secret key rate\r\n return i, x, r", "def kurtosis(track):\n\n dframe = track\n assert isinstance(dframe, pd.core.frame.DataFrame), \"track must be a pandas\\\n dataframe.\"\n assert isinstance(dframe['X'], pd.core.series.Series), \"track must contain\\\n X column.\"\n assert isinstance(dframe['Y'], pd.core.series.Series), \"track must contain\\\n Y column.\"\n assert dframe.shape[0] > 0, \"track must not be empty.\"\n\n eig1, eig2, eigv1, eigv2 = gyration_tensor(dframe)\n projection = dframe['X']*eigv1[0] + dframe['Y']*eigv1[1]\n\n kurt = np.mean((projection - np.mean(\n projection))**4/(np.std(projection)**4))\n\n return kurt", "def VoigtTc(nu,sigmaD,gammaL):\n \n sfac=1.0/(jnp.sqrt(2)*sigmaD)\n v=sfac*Tc(sfac*gammaL,sfac*nu)/jnp.sqrt(jnp.pi)\n return v", "def weibull_model( k, c, a, b):\n prev_k = np.exp(-1 * np.power(k / a, b))\n curr_k = np.exp(-1 * np.power((k + 1) / a, b))\n return c * (prev_k - curr_k)", "def test_weighted_chisq():\n num_points = 1000\n num_experiments = 100\n num_bins = 20\n bins = np.linspace(-800, 800, num=num_bins + 1)\n\n pi_mass = 139.570\n 
k_mass = 493.677\n d_mass = 1864.84\n generator = phasespace.nbody_decay(\n d_mass, (k_mass, pi_mass, pi_mass, pi_mass), names=(\"K\", \"pi1\", \"pi2\", \"pi3\")\n )\n\n def gen():\n \"\"\"\n Return K_px arrays a and b for a D->K3pi event, and weights for a\n\n Returns a, b, wt_a\n\n \"\"\"\n # Find k_px with weights\n a_wt, a = generator.generate(num_points)\n a = a[\"K\"].numpy()[:, 0]\n\n # Normalise weights to have an average of 1\n a_wt /= np.mean(a_wt)\n\n # Find k_px using accept-reject\n b = script_util.flat_phsp_points(num_points)[0][0]\n\n return a, b, a_wt\n\n chisqs, p_vals = _find_chisqs(num_experiments, bins, gen)\n _plot(*gen(), bins, chisqs, p_vals)", "def k_mu_krie(data):\n tdata = dc(data)\n\n try:\n k_s = tdata['k_s']\n mu_s = tdata['mu_s']\n por = tdata['por']\n except NameError:\n raise\n a_k = tdata.get('a_k', np.array(3.))\n\n tpor = np.array(por, dtype=float, copy=True, ndmin=1)\n\n k_m = np.zeros(tpor.shape)\n mu_m = np.zeros(tpor.shape)\n a_exp = np.zeros(tpor.shape)\n b_i = (tpor != 1.)\n\n if a_k >= 0:\n a_exp[b_i] = np.array(a_k/(1. - tpor[b_i]))\n else:\n a_exp[b_i] = np.array(1. + -1*a_k/(1. - tpor[b_i]))\n\n k_m[tpor == 1.] = 0.\n mu_m[tpor == 1.] = 0.\n k_m[tpor == 0.] = k_s\n mu_m[tpor == 0.] = mu_s\n b_i = np.logical_and(tpor != 1., tpor != 0.)\n k_m[b_i] = k_s * (1. - tpor[b_i])**a_exp[b_i]\n mu_m[b_i] = (mu_s / k_s) * k_m[b_i]\n\n return k_m, mu_m", "def lifetime_kurtosis(layer_act_all_neur, neuron):\n layer_act_all_neur[neuron]\n M = len(layer_act_all_neur[neuron]) #nb stimuli(nb img)\n KL= 0\n for i in range(M):\n KL += ((layer_act_all_neur[neuron][i] - np.mean(layer_act_all_neur[neuron]))/np.std(layer_act_all_neur[neuron]))**4\n KL = KL/M - 3\n return(KL)", "def test_make_kurucz_tlusty_spectral_grid(self):\n # read in the cached isochrones\n oiso = ezIsoch(self.iso_fname_cache)\n\n # calculate the redshift\n redshift = (self.settings.velocity / const.c).decompose().value\n\n # make the spectral grid\n spec_fname = tempfile.NamedTemporaryFile(suffix=\".hd5\").name\n (spec_fname, g) = make_spectral_grid(\n \"test\",\n oiso,\n osl=self.settings.osl,\n redshift=redshift,\n distance=self.settings.distances,\n distance_unit=self.settings.distance_unit,\n spec_fname=spec_fname,\n # filterLib=filter_fname,\n extLaw=self.settings.extLaw,\n add_spectral_properties_kwargs=self.settings.add_spectral_properties_kwargs,\n )\n\n # compare the new to the cached version\n compare_hdf5(self.spec_fname_cache, spec_fname)", "def maccormack(U_init,numt,numx,numy,delx,dely,Tw,Tfs,rho_fs,ufs,c_v,c_p,viscfs,Prt,lmbda,R,gamma):\n Un = numpy.zeros((numt+1,4,numx,numy))\n Un[0,:,:,:] = U_init.copy()\n #\n U = U_init.copy()\n #\n Us = U_init.copy()\n #\n for t in range(1,numt+1):\n \t#get properties to calculate fluxes:\n \tT = get_Temperature(U, numx, numy, Tw, Tfs, c_v)\n \tmu = get_visc(T, viscfs, Tfs)\n \tk = get_k(mu, c_p, Prt)\n \t#get shear:\n \tt_xyE = get_tau_xy_Epredict(U, mu, numx, numy, delx, dely )\n \tt_xyF = get_tau_xy_Fpredict(U, mu, numx, numy, delx, dely )\n \tt_xx = get_tau_xx_Epredict(U, mu, numx, numy, delx, dely, lmbda)\n \tt_yy = get_tau_yy_Fpredict(U, mu, numx, numy, delx, dely, lmbda)\n \t#calculate fluxes E, F:\n \tE = get_E_flux_predictor(U, numx, numy, delx, mu, T, k, t_xx, t_xyE, R)\n \tF = get_F_flux_predictor(U, numx, numy, dely, mu, T, k, t_xyF, t_yy, R)\n \t#dt:\n \tdt = get_dt(U, numx, numy, delx, dely, mu, T, gamma, R, Prt)\n \t#Predictor Step:\n \tUs[:,1:-1,1:-1] = U[:,1:-1,1:-1] -\\\n \t\t\t\t\t\t\t(dt/delx)*(E[:,2:,1:-1] - E[:,1:-1,1:-1]) 
-\\\n \t\t\t\t\t\t\t(dt/dely)*(F[:,1:-1,2:] - F[:,1:-1,1:-1])\n \tUstar = get_BC(Us, T, numy, rho_fs, Tw, ufs, c_v, Tfs, R)\n \t#update properties:\n \tT2 = get_Temperature(Ustar, numx, numy, Tw, Tfs, c_v)\n \tmu2 = get_visc(T2, viscfs, Tfs)\n \tk2 = get_k(mu2, c_p, Prt)\n \t#update shear:\n \tt_xyE2 = get_tau_xy_Ecorrect(Ustar,mu2,numx, numy, delx, dely)\n \tt_xyF2 = get_tau_xy_Fcorrect(Ustar,mu2,numx, numy, delx, dely)\n \tt_xx2 = get_tau_xx_Ecorrect(Ustar, mu2, numx, numy, delx, dely, lmbda)\n \tt_yy2 = get_tau_yy_Fcorrect(Ustar, mu2, numx, numy, delx, dely, lmbda)\n \t#update fluxes:\n \tE2 = get_E_flux_correct(Ustar, numx, numy, delx, mu2, T2, k2, t_xx2, t_xyE2, R)\n \tF2 = get_F_flux_correct(Ustar, numx, numy, dely, mu2, T2, k2, t_xyF2, t_yy2, R)\n \t#corrector step:\n \tUn[t,:,1:-1,1:-1] = 0.5*( U[:,1:-1,1:-1] + Ustar[:,1:-1,1:-1] -\\\n \t\t\t\t\t\t\t(dt/delx)*(E2[:,1:-1,1:-1]-E2[:,:-2,1:-1]) -\\\n \t\t\t\t\t\t\t(dt/dely)*(F2[:,1:-1,1:-1]-F2[:,1:-1,:-2] ))\n \t#\n \tUn[t,:,:,:] = get_BC(Un[t,:,:,:], T2, numy, rho_fs, Tw, ufs, c_v, Tfs, R)\n \tU = Un[t,:,:,:].copy()\n \t#print(t)\n \tif( numpy.all(numpy.abs(Un[t,0,:,:]-Un[t-1,0,:,:]) < 1e-8) == True ):\n \t\ttt=t+1\n \t\tUn = Un[:tt,:,:,:].copy()\n \t\tmscn = (numpy.trapz(Un[t,1,0,:])/numpy.trapz(Un[t,1,-1,:]))*100\n \t\tprint('Mass is conserved by %.2f percent' % mscn)\n \t\tbreak\n \n return Un", "def seebeck_thermometry(T_Kelvin):\n\n\tcoeff_E_below_270K = np.array([\n\t\t0,\n\t\t5.8665508708E1,\n\t\t4.5410977124E-2,\n\t\t-7.7998048686E-4,\n\t\t-2.5800160843E-5,\n\t\t-5.9452583057E-7,\n\t\t-9.3214058667E-9,\n\t\t-1.0287605534E-10,\n\t\t-8.0370123621E-13,\n\t\t-4.3979497391E-15,\n\t\t-1.6414776355E-17,\n\t\t-3.9673619516E-20,\n\t\t-5.5827328721E-23,\n\t\t-3.4657842013E-26\n\t])[::-1] # Reverse for poly1d\n\n\n\tcoeff_E_above_270K = np.array([\n\t\t0,\n\t\t5.8665508710E1,\n\t\t4.5032275582E-2,\n\t\t2.8908407212E-5,\n\t\t-3.3056896652E-7,\n\t\t6.5024403270E-10,\n\t\t-1.9197495504E-13,\n\t\t-1.2536600497E-15,\n\t\t2.1489217569E-18,\n\t\t-1.4388041782E-21,\n\t\t3.5960899481E-25\n\t])[::-1] # Reverse for poly1d\n\n\tT_Celsius = T_Kelvin - 273.15\n\n\t## Selection of coefficients for temperature regime\n\n\tindex_below = np.where(T_Celsius <= 0)\n\tindex_above = np.where(T_Celsius > 0)\n\n\tS_values = np.zeros(np.size(T_Kelvin))\n\n\tE_below = np.poly1d(coeff_E_below_270K) # is a poly1d object in microVolt\n\tS_below = np.polyder(E_below) # is a poly1d object in microVolt / Celsius\n\tS_values[index_below] = S_below(T_Celsius[index_below])*1e-6 # is in Volt / K\n\n\tE_above = np.poly1d(coeff_E_above_270K) # is a poly1d object in microVolt\n\tS_above = np.polyder(E_above) # is a poly1d object in microVolt / Celsius\n\tS_values[index_above] = S_above(T_Celsius[index_above])*1e-6 # is in Volt / K\n\n\treturn S_values", "def expK(wavefunction, t):\n wavefunction *= np.exp(-1j * dt * k(p, t + 0.5 * dt))", "def generateBKSets(qubits):\n dic={0:1,1:2,2:4,3:8,4:16,5:32,6:64,7:128,8:256,9:512,10:1024}\n for n in range(0,11):\n if dic[n]>=qubits:\n power2=n\n break\n beta=1.\n invbeta=1.\n size=2**power2\n pi=np.zeros((size,size))\n for n in range(0,size):\n pi[n,0:n]=1\n I = np.array([[1, 0], [0, 1]])\n for n in range(0,power2):\n l1=2**n\n beta=np.kron(I,beta)\n invbeta=np.kron(I,invbeta)\n for m in range(0,l1):\n beta[2*l1-1,m]=1.0\n invbeta[2*l1-1,l1-1]=1.0\n #print np.linalg.inv(beta)\n parity=np.remainder(np.dot(pi,invbeta),2)\n P={}\n for n in range(0,qubits):\n s=[]\n for m in range(0,n):\n if parity[n,m]==1:\n s.append(m)\n P[n]=s\n 
R={}\n for n in range(0,qubits):\n s=[]\n for m in range(0,n):\n if parity[n,m]==1:\n s.append(m)\n R[n]=s\n F={}\n for n in range(0,qubits):\n set=[]\n for m in range(0,n):\n if invbeta[n,m]==1:\n set.append(m)\n F[n]=set\n U={}\n for n in range(0,qubits):\n set=[]\n for m in range(n+1,qubits):\n if beta[m,n]==1:\n set.append(m)\n U[n]=set\n for n in range(0,qubits):\n for x in F[n]:\n if x in R[n]:\n R[n].remove(x)\n return beta,P,U,F,R", "def compare_timefreqs(s, sample_rate, win_sizes=[0.050, 0.100, 0.250, 0.500, 1.25]):\n\n #construct different types of estimators\n gaussian_est = GaussianSpectrumEstimator(nstd=6)\n mt_est_lowbw = MultiTaperSpectrumEstimator(bandwidth=10.0, adaptive=False)\n mt_est_lowbw_adapt = MultiTaperSpectrumEstimator(bandwidth=10.0, adaptive=True, max_adaptive_iter=150)\n mt_est_lowbw_jn = MultiTaperSpectrumEstimator(bandwidth=10.0, adaptive=False, jackknife=True)\n mt_est_highbw = MultiTaperSpectrumEstimator(bandwidth=30.0, adaptive=False)\n mt_est_highbw_adapt = MultiTaperSpectrumEstimator(bandwidth=30.0, adaptive=True, max_adaptive_iter=150)\n mt_est_highbw_jn = MultiTaperSpectrumEstimator(bandwidth=30.0, adaptive=False, jackknife=True)\n wavelet = WaveletSpectrumEstimator(num_cycles_per_window=10, min_freq=1, max_freq=sample_rate/2, num_freqs=50, nstd=6)\n #estimators = [gaussian_est, mt_est_lowbw, mt_est_lowbw_adapt, mt_est_highbw, mt_est_highbw_adapt]\n estimators = [wavelet]\n #enames = ['gauss', 'lowbw', 'lowbw_a', 'highbw', 'highbw_a']\n enames = ['wavelet']\n\n #run each estimator for each window size and plot the amplitude of the time frequency representation\n plt.figure()\n spnum = 1\n for k,win_size in enumerate(win_sizes):\n increment = 1.0 / sample_rate\n for j,est in enumerate(estimators):\n t,freq,tf = timefreq(s, sample_rate, win_size, increment, est)\n print('freq=',freq)\n ax = plt.subplot(len(win_sizes), len(estimators), spnum)\n plot_spectrogram(t, freq, np.abs(tf), ax=ax, colorbar=True, ticks=True)\n if k == 0:\n plt.title(enames[j])\n #if j == 0:\n #plt.ylabel('%d ms' % (win_size*1000))\n spnum += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_lms_dd_equalizer_cc_sptr
__init__(self, p) -> digital_lms_dd_equalizer_cc_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_lms_dd_equalizer_cc_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n super(CorrelogramPooling3D, self).__init__()", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n _itkOptimizerParametersPython.itkOptimizerParametersHelperD_swiginit(self, _itkOptimizerParametersPython.new_itkOptimizerParametersHelperD(*args))", "def __init__(self, *args):\n _itkOptimizerParametersPython.itkOptimizerParametersD_swiginit(self, _itkOptimizerParametersPython.new_itkOptimizerParametersD(*args))", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, coeff):\n self.coeff = coeff", "def __init__(self, encut, spinaxis, ldaul, Uparam, Jparam, nupdown=None, name='DFTCL_settings'):\n ncl_settings = {\"ISPIN\": 2, \"MAGMOM\": None, \"SAXIS\": spinaxis, \"LSORBIT\": \".TRUE.\", \"LNONCOLLINEAR\": \".TRUE.\", \"NUPDOWN\":nupdown}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=ncl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"ENCUT\", encut)", "def __init__(self, *args):\n _itkImagePython.vectoritkImageCD2_swiginit(self, _itkImagePython.new_vectoritkImageCD2(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageCVD44_swiginit(self, _itkImagePython.new_vectoritkImageCVD44(*args))", "def __init__(self, *args):\n this = _digital_swig.new_digital_probe_density_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, tensor_rep):\n super(ComponentPlotCPD, self).__init__(tensor_rep=tensor_rep)", "def __init__(self, coefs_tup, setpoint_value, range_tup, integration_samples = 5, diff_filter_samples = 4):\r\n \r\n self.started = False\r\n self.Kp, self.Ki, self.Kd = coefs_tup\r\n \r\n if integration_samples < 3:\r\n integration_samples = 3\r\n print('Integration samples number is set default 3')\r\n \r\n self.integr_deque = collections.deque([(0,0)]* integration_samples, maxlen = integration_samples)\r\n \r\n if diff_filter_samples < 2:\r\n diff_filter_samples = 2\r\n print('Diff filter samples number is set to default 2')\r\n\r\n self.diff_deque = collections.deque([(0,0)]* diff_filter_samples, maxlen = diff_filter_samples)\r\n \r\n self.setpoint_value = 
setpoint_value\r\n \r\n self.min_value, self.max_value = range_tup\r\n if self.min_value >= self.max_value:\r\n self.min_value = 0\r\n self.max_value = 1\r\n print('Values range is set to default (0,1)')", "def __init__(self):\r\n\r\n super(Panel, self).__init__()\r\n\r\n # Define private dictionary attributes.\r\n\r\n # Define private list attributes.\r\n self._lambdab_count = []\r\n\r\n # Define private scalar attributes.\r\n\r\n # Define public dictionary attributes.\r\n\r\n # Define public list attributes.\r\n\r\n # Define public scalar attributes.\r\n self.quality = 0\r\n self.q_override = 0.0\r\n self.function = 0\r\n self.piA = 0.0\r\n self.piF = 0.0\r\n self.piQ = 0.0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
lms_dd_equalizer_cc(int num_taps, float mu, int sps, digital_constellation_sptr cnst) -> digital_lms_dd_equalizer_cc_sptr
Least-Mean-Square Decision-Directed Equalizer (complex in/out). This block implements an LMS-based decision-directed equalizer. It uses a set of weights, w, to correlate against the inputs, u, and a decision is then made from this output. The error in the decision is used to update the weight vector.

y[n] = conj(w[n]) u[n]
d[n] = decision(y[n])
e[n] = d[n] - y[n]
w[n+1] = w[n] + mu u[n] conj(e[n])

Where mu is a gain value (between 0 and 1 and usually small, around 0.001-0.01).
def lms_dd_equalizer_cc(*args, **kwargs):
    return _digital_swig.lms_dd_equalizer_cc(*args, **kwargs)
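The recursion above maps directly onto code. A minimal NumPy sketch follows: the update lines implement the quoted equations verbatim, while the unit-energy QPSK slicer stands in for the digital_constellation decision object and is an assumption. num_taps fixes len(w), and sps would control how far the input window u advances between successive updates.

import numpy as np

def lms_dd_step(w, u, mu=0.005):
    y = np.vdot(w, u)                                             # y[n] = conj(w[n]) u[n]
    d = (np.copysign(1, y.real) + 1j * np.copysign(1, y.imag)) / np.sqrt(2)  # decision(y[n]), QPSK slicer (assumed)
    e = d - y                                                     # e[n] = d[n] - y[n]
    return w + mu * u * np.conj(e)                                # w[n+1] = w[n] + mu u[n] conj(e[n])

# e.g. w = np.zeros(num_taps, complex); w[0] = 1.0  # center-spike init, then call per input window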
[ "def expected_feedback_effort(controller: PenaltyController, sensory_noise_magnitude: cas.DM) -> cas.MX:\n n_tau = controller.controls[\"tau\"].cx_start.shape[0]\n n_q = controller.states[\"q\"].cx_start.shape[0]\n n_qdot = controller.states[\"qdot\"].cx_start.shape[0]\n n_stochastic = controller.stochastic_variables.shape\n\n states_sym = cas.MX.sym(\"states_sym\", n_q + n_qdot, 1)\n stochastic_sym = cas.MX.sym(\"stochastic_sym\", n_stochastic, 1)\n sensory_noise_matrix = sensory_noise_magnitude * cas.MX_eye(4)\n\n # create the casadi function to be evaluated\n # Get the symbolic variables\n ref = stochastic_sym[controller.stochastic_variables[\"ref\"].index]\n stochastic_sym_dict = {\n key: stochastic_sym[controller.stochastic_variables[key].index]\n for key in controller.stochastic_variables.keys()\n }\n for key in controller.stochastic_variables.keys():\n stochastic_sym_dict[key].cx_start = stochastic_sym_dict[key]\n if \"cholesky_cov\" in controller.stochastic_variables.keys():\n l_cov_matrix = controller.stochastic_variables[\"cholesky_cov\"].reshape_to_cholesky_matrix(\n stochastic_sym_dict,\n n_q + n_qdot,\n Node.START,\n \"cholesky_cov\",\n )\n cov_matrix = l_cov_matrix @ l_cov_matrix.T\n else:\n cov_matrix = controller.stochastic_variables[\"cov\"].reshape_to_matrix(\n stochastic_sym_dict,\n n_q + n_qdot,\n n_q + n_qdot,\n Node.START,\n \"cov\",\n )\n\n k = stochastic_sym_dict[\"k\"].cx_start\n k_matrix = cas.MX(n_q + n_qdot, n_tau)\n for s0 in range(n_q + n_qdot):\n for s1 in range(n_tau):\n k_matrix[s0, s1] = k[s0 * n_tau + s1]\n k_matrix = k_matrix.T\n\n # Compute the expected effort\n hand_pos = controller.model.markers(states_sym[:n_q])[2][:2]\n hand_vel = controller.model.marker_velocities(states_sym[:n_q], states_sym[n_q:])[2][:2]\n\n trace_k_sensor_k = cas.trace(k_matrix @ sensory_noise_matrix @ k_matrix.T)\n ee = cas.vertcat(hand_pos, hand_vel)\n e_fb = k_matrix @ ((ee - ref) + sensory_noise_magnitude)\n jac_e_fb_x = cas.jacobian(e_fb, states_sym)\n trace_jac_p_jack = cas.trace(jac_e_fb_x @ cov_matrix @ jac_e_fb_x.T)\n expectedEffort_fb_mx = trace_jac_p_jack + trace_k_sensor_k\n func = cas.Function(\n \"f_expectedEffort_fb\",\n [states_sym, stochastic_sym],\n [expectedEffort_fb_mx],\n )\n\n out = func(controller.states.cx_start, controller.stochastic_variables.cx_start)\n\n return out", "def twoterm_connection_coefficients( a, d ):\n \n \"\"\"\n Eigen-value problem due to the scaling equations using the auto-correlation\n of the wavelet filter (Fukuda, 2013):\n \"\"\"\n a_c = np.correlate( a, a, mode = \"full\")\n N_c = len(a_c)\n N = N_c - 2\n T = np.zeros((N,N))\n for i,j in itertools.product(range(N), repeat=2): \n if -1 < j - 2*i + N < N_c:\n T[i,j] = a_c[ j - 2*i + N ]\n \n T -= 2**(1-d)*np.eye(N)\n b = np.zeros([N]) \n \n \"\"\"\n Since the eigenvector is determined up to a constant, we alse need a \n normalization equation ( Goedecker, 2009):\n \"\"\"\n M = np.zeros([1, N])\n for i in range(0,N):\n M[0,i] += moment(a, i, d) \n A = np.vstack([T,M])\n b = np.hstack([b, [factorial(d)]])\n \n \"\"\"\n A least squares algorithm is used to solve the over-determined system.\n One can also use np.linalg.lstsq with rcond = None. 
In my experience \n however, np.linalg.lstsq does not always return residuals correctly.\n \"\"\"\n CC, residuals, rank, singular_values = scipy.linalg.lstsq(A, b)\n \n if abs( residuals ) >= 10**-30:\n msg = 'Residue of lstsq algorithm is {:.2e}!'.format(residuals)\n warnings.warn(msg)\n \n return CC", "def lda_loss(n_components, margin,method='raleigh_coeff'):\n\n def inner_lda_objective(y_true, y_pred):\n \"\"\"\n It is the loss function of LDA as introduced in the original paper.\n It is adopted from the the original implementation in the following link:\n https://github.com/CPJKU/deep_lda\n Note: it is implemented by Theano tensor operations, and does not work on Tensorflow backend\n \"\"\"\n r = 1e-4\n locations = tf.where(tf.equal(y_true, 1))\n indices = locations[:, 1]\n y, idx = tf.unique(indices)\n\n\n def fn(unique, indexes, preds):\n u_indexes = tf.where(tf.equal(unique, indexes))\n u_indexes = tf.reshape(u_indexes, (1, -1))\n X = tf.gather(preds, u_indexes)\n X_mean = X - tf.reduce_mean(X, axis=0)\n m = tf.cast(tf.shape(X_mean)[1], tf.float32)\n return (1 / (m - 1)) * tf.matmul(tf.transpose(X_mean[0]), X_mean[0])\n\n # scan over groups\n covs_t = tf.map_fn(lambda x: fn(x, indices, y_pred), y, dtype=tf.float32)\n\n # compute average covariance matrix (within scatter)\n Sw_t = tf.reduce_mean(covs_t, axis=0)\n\n # compute total scatter\n Xt_bar = y_pred - tf.reduce_mean(y_pred, axis=0)\n m = tf.cast(tf.shape(Xt_bar)[1], tf.float32)\n St_t = (1 / (m - 1)) * tf.matmul(tf.transpose(Xt_bar), Xt_bar)\n\n # compute between scatter\n dim = tf.shape(y)[0]\n Sb_t = St_t - Sw_t\n\n # cope for numerical instability (regularize)\n Sw_t += tf.eye(dim) * r\n\n ''' START : COMPLICATED PART WHERE TENSORFLOW HAS TROUBLE'''\n #cho = tf.eye(dim)\n # look at page 383\n # http://perso.ens-lyon.fr/patrick.flandrin/LedoitWolf_JMA2004.pdf\n\n if method == 'raleigh_coeff':\n # minimize the -ve of Raleigh coefficient\n r = 1e-4\n cho = tf.cholesky(St_t + tf.eye(dim) * r)\n inv_cho = tf.matrix_inverse(cho)\n evals_t = tf.linalg.eigvalsh(tf.transpose(inv_cho) * Sb_t * inv_cho) # Sb_t, St_t # SIMPLIFICATION OF THE EQP USING cholesky \n top_k_evals = evals_t[-n_components:]\n\n index_min = tf.argmin(top_k_evals, 0)\n thresh_min = top_k_evals[index_min] + margin\n mask_min = top_k_evals < thresh_min\n cost_min = tf.boolean_mask(top_k_evals, mask_min)\n cost = -tf.reduce_mean(cost_min)\n\n elif method == 'trace_ratio':\n # minimize the -ve of ratio of trace of betwwen to witin scatter\n cost = -tf.math.divide(tf.linalg.trace(Sb_t),tf.linalg.trace(Sw_t))\n elif method == 'trace_diff':\n # minimize with variation, maximze between variation\n cost = tf.linalg.trace(Sw_t)-tf.linalg.trace(Sb_t)\n else:\n # minimize within variation\n cost = tf.linalg.trace(Sw_t)\n\n return cost\n\n\n return inner_lda_objective", "def test_sdca_sparse_and_dense_consistency(self):\n\n def create_solver():\n return SDCA(max_iter=1, verbose=False, l_l2sq=1e-3,\n seed=TestSolver.sto_seed)\n\n self._test_solver_sparse_and_dense_consistency(create_solver)", "def cca_loss(outdim_size, use_all_singular_values):\n def inner_cca_objective(y_true, y_pred):\n \"\"\"\n It is the loss function of CCA as introduced in the original paper. 
There can be other formulations.\n It is implemented by Theano tensor operations, and does not work on Tensorflow backend\n y_true is just ignored\n \"\"\"\n\n r1 = 1e-4\n r2 = 1e-4\n eps = 1e-12\n o1 = o2 = y_pred.shape[1]//2\n\n # unpack (separate) the output of networks for view 1 and view 2\n H1 = y_pred[:, 0:o1].T\n H2 = y_pred[:, o1:o1+o2].T\n\n m = H1.shape[1]\n\n H1bar = H1 - (1.0 / m) * T.dot(H1, T.ones([m, m]))\n H2bar = H2 - (1.0 / m) * T.dot(H2, T.ones([m, m]))\n\n SigmaHat12 = (1.0 / (m - 1)) * T.dot(H1bar, H2bar.T)\n SigmaHat11 = (1.0 / (m - 1)) * T.dot(H1bar, H1bar.T) + r1 * T.eye(o1)\n SigmaHat22 = (1.0 / (m - 1)) * T.dot(H2bar, H2bar.T) + r2 * T.eye(o2)\n\n # Calculating the root inverse of covariance matrices by using eigen decomposition\n [D1, V1] = T.nlinalg.eigh(SigmaHat11)\n [D2, V2] = T.nlinalg.eigh(SigmaHat22)\n\n # Added to increase stability\n posInd1 = T.gt(D1, eps).nonzero()[0]\n D1 = D1[posInd1]\n V1 = V1[:, posInd1]\n posInd2 = T.gt(D2, eps).nonzero()[0]\n D2 = D2[posInd2]\n V2 = V2[:, posInd2]\n\n SigmaHat11RootInv = T.dot(T.dot(V1, T.nlinalg.diag(D1 ** -0.5)), V1.T)\n SigmaHat22RootInv = T.dot(T.dot(V2, T.nlinalg.diag(D2 ** -0.5)), V2.T)\n\n Tval = T.dot(T.dot(SigmaHat11RootInv, SigmaHat12), SigmaHat22RootInv)\n\n if use_all_singular_values:\n # all singular values are used to calculate the correlation\n corr = T.sqrt(T.nlinalg.trace(T.dot(Tval.T, Tval)))\n else:\n # just the top outdim_size singular values are used\n [U, V] = T.nlinalg.eigh(T.dot(Tval.T, Tval))\n U = U[T.gt(U, eps).nonzero()[0]]\n U = U.sort()\n corr = T.sum(T.sqrt(U[0:outdim_size]))\n\n return -corr\n\n return inner_cca_objective", "def decode_syndrome_minLLR(y, s, s_y_joins, y_s_joins, qber_est, s_pos, p_pos, k_pos, r_start=None, max_iter=300,\n x=None, show=1, discl_n=20, n_iter_avg_window=5):\n if not qber_est < 0.5: # Adequate QBER check\n raise ValueError('Aprior error probability must be less than 1/2')\n\n m = len(s_y_joins)\n n = len(y_s_joins)\n p_n = len(p_pos)\n s_n = len(s_pos)\n v_pos = list(set(p_pos) | set(k_pos))\n\n # Zeroing\n M = np.zeros((m, n)) # Array of messages from symbol nodes to check nodes\n sum_E_abs_mean_hist = [] # Array for mean values of LLRs\n n_iter = 0 # Iteration counter\n\n # Setting initial LLRs:\n if r_start is None:\n r = zeros(n)\n if s_n > 0:\n r[s_pos] = (1 - 2 * y[s_pos]) * 1000\n if p_n > 0:\n r[p_pos] = 0\n r[k_pos] = (1 - 2 * y[k_pos]) * np.log((1 - qber_est) / qber_est)\n else:\n r = r_start\n if s_n > 0:\n r[s_pos] = (1 - 2 * y[s_pos]) * 1000\n\n for j in xrange(m): # Setting initial messages from symbol nodes to check nodes\n M[j, :] = r\n\n while n_iter < max_iter: # Main cycle\n # Part 1: from check nodes to symbol nodes\n E = np.zeros((m, n)) # Array of messages from check nodes to symbol nodes\n for j in xrange(m): # For all check nodes\n M_cur = M[j][s_y_joins[j]]\n M_cur_n = len(M_cur) # All symbol nodes that are connected to current check node and their number\n n_zeros = list(M_cur).count(0.0) # number of zero LLRs\n if n_zeros > 1: # If check node is dead\n E[j, s_y_joins[j]] = np.zeros(M_cur_n) # No messages\n elif n_zeros == 1: # If current check node has one punctured symbol\n E_cur = np.zeros(M_cur_n) # All messages are initializrd with zeros\n M_cur = list(M_cur)\n zero_ind = M_cur.index(0.0)\n M_cur.pop(zero_ind) # Excluding zero message\n LS = M_cur[0]\n for k in range(1, M_cur_n - 1): # Accumulation of the message\n LS = core_func(LS, M_cur[k])\n E_cur[zero_ind] = LS\n E[j, s_y_joins[j]] = E_cur # Filling with nonzero 
message\n elif n_zeros == 0: # all messages are non zero\n LS = M_cur[0]\n for k in range(1, M_cur_n):\n LS = core_func(LS, M_cur[k])\n E_cur = zeros(M_cur_n)\n for i1 in range(0, M_cur_n):\n E[j][s_y_joins[j][i1]] = (1 - 2 * s[j]) * (\n h_func(M_cur[i1] + LS) - h_func(M_cur[i1] - LS) - LS) # Computation of messages\n\n # Part 2: from symbol nodes to check nodes\n sum_E = E.sum(axis=0) + r # Array of sums of messages to symbol nodes (LLRs)\n z = (1 - np.sign(sum_E)) / 2 # Current decoded message\n\n if (s == encode_syndrome(z, s_y_joins)).all(): # If syndrome is correct\n if np.count_nonzero(z == x) != n and show > 1:\n print \"Convergence error, error positions:\"\n print '\\n', np.nonzero((z + x) % 2)\n if show > 1:\n print 'Done in ', n_iter, 'iters, matched bits:', np.count_nonzero(z == x), '/', n\n return z, None, sum_E, n_iter\n if show > 2:\n print 'Matched bits:', np.count_nonzero(z == x), '/', n, 'Mean LLR magnitude:', mean(abs(sum_E[v_pos])), \\\n 'Averaged mean LLR magnitude:', sum(sum_E_abs_mean_hist[max(0, n_iter - n_iter_avg_window):n_iter]) / (\n min(n_iter, n_iter_avg_window) + 10 ** (-10))\n\n # Check for procedure stop\n\n sum_E_abs = list(abs(sum_E))\n sum_E_abs_mean_hist.append(mean(list(abs(sum_E[v_pos]))))\n\n if n_iter == n_iter_avg_window - 1:\n sum_E_mean_avg_old = mean(sum_E_abs_mean_hist)\n if n_iter >= n_iter_avg_window:\n sum_E_mean_avg_cur = sum_E_mean_avg_old + (sum_E_abs_mean_hist[n_iter] - sum_E_abs_mean_hist[\n n_iter - n_iter_avg_window]) / n_iter_avg_window\n if sum_E_mean_avg_cur <= sum_E_mean_avg_old:\n minLLR_inds = []\n maxLLR = max(sum_E_abs)\n for cnt in range(discl_n):\n ind = sum_E_abs.index(min(sum_E_abs))\n minLLR_inds.append(ind)\n sum_E_abs[ind] += maxLLR\n return None, minLLR_inds, sum_E, n_iter\n else:\n sum_E_mean_avg_old = sum_E_mean_avg_cur\n\n # Calculating messages from symbol nodes to check nodes\n M = -E + sum_E\n\n n_iter += 1\n\n minLLR_inds = []\n maxLLR = max(sum_E_abs)\n for cnt in range(discl_n):\n ind = sum_E_abs.index(min(sum_E_abs))\n minLLR_inds.append(ind)\n sum_E_abs[ind] += maxLLR\n return None, minLLR_inds, sum_E, n_iter", "def check_stability_pwa(self, eps=1e-3):\n \n if not self.is_state_feedback:\n raise ETCError('Output feedback not yet implemented.')\n \n n = self.plant.nx\n A = {}\n A[1] = np.block([\n [self.Ad + self.Bd, np.zeros((n,n))],\n [np.eye(n), np.zeros((n,n))]\n ])\n A[2] = np.block([\n [self.Ad, self.Bd],\n [np.zeros((n,n)), np.eye(n)]\n ])\n Q = self.Qbar\n \n # CVX variables\n alpha = {(i,j): cvx.Variable(pos=True) for i in range(1,3) \n for j in range(1,3)}\n beta = {(i,j): cvx.Variable(pos=True) for i in range(1,3) \n for j in range(1,3)}\n kappa = {i: cvx.Variable(pos=True) for i in range(1,3)}\n P = {i: cvx.Variable((2*n, 2*n), PSD=True) for i in range(1,3)}\n \n # CVX constraints : make a function of the externally defined lbd\n def make_constraints(lbd):\n con = []\n for i in range(1,3):\n for j in range(1,3):\n con.append(lbd*P[i] - A[i].T @ P[j] @ A[i]\n + ((-1)**i)*alpha[(i,j)]*Q\n + ((-1)**j)*beta[(i,j)]*(A[i].T @ Q @ A[i])\n >> 0) # Eq. (1))\n con.append(P[i] + (-1)**i * kappa[i]* Q # Eq. 
(2)\n >> _LMIS_SMALL_IDENTITY_FACTOR*np.eye(2*n))\n return con\n \n # Start bisection algorithm: get extreme points\n a = 0\n b = 1\n \n # For b = 1, if GES then it must be feasible\n con = make_constraints(b)\n prob = cvx.Problem(cvx.Minimize(0), con)\n prob.solve()\n if 'infeasible' in prob.status:\n return 1, None\n Pout = (p.value for p in P)\n \n # For a = 0, if it is feasible then this is a deadbeat controller.\n # Can't be better then this\n con = make_constraints(a)\n prob = cvx.Problem(cvx.Minimize(0), con)\n prob.solve()\n if 'optimal' in prob.status:\n return 0, (p.value for p in P)\n \n # Now we should have b = 1 feasible and a = 0 infeasible. Start\n # bisection algorithm\n while b-a > eps:\n c = (a+b)/2\n con = make_constraints(c)\n prob = cvx.Problem(cvx.Minimize(0), con)\n prob.solve()\n if 'optimal' in prob.status:\n b = c\n Pout = (p.value for p in P) # Store output P matrices\n elif 'infeasible' in prob.status:\n a = c\n else:\n warnings.warn(f'{prob.status}: TOL is {b-a}')\n break\n \n return -np.log(b)/2/self.h, Pout", "def lddt_ca_torch(true_coords, pred_coords, cloud_mask, r_0=15.):\n device, dtype = true_coords.device, true_coords.type()\n thresholds = torch.tensor([0.5, 1, 2, 4], device=device).type(dtype)\n # adapt masks\n cloud_mask = cloud_mask.bool().cpu()\n c_alpha_mask = torch.zeros(cloud_mask.shape[1:], device=device).bool() # doesn't have batch dim\n c_alpha_mask[..., 1] = True\n # container for c_alpha scores (between 0,1)\n wrapper = torch.zeros(true_coords.shape[:2], device=device).type(dtype)\n\n for bi, seq in enumerate(true_coords):\n # select atoms for study\n c_alphas = cloud_mask[bi]*c_alpha_mask # only pick c_alpha positions\n selected_pred = pred_coords[bi, c_alphas, :] \n selected_target = true_coords[bi, c_alphas, :]\n # get number under distance\n dist_mat_pred = torch.cdist(selected_pred, selected_pred, p=2)\n dist_mat_target = torch.cdist(selected_target, selected_target, p=2) \n under_r0_target = dist_mat_target < r_0\n compare_dists = torch.abs(dist_mat_pred - dist_mat_target)[under_r0_target]\n # measure diff below threshold\n score = torch.zeros_like(under_r0_target).float()\n max_score = torch.zeros_like(under_r0_target).float()\n max_score[under_r0_target] = 4.\n # measure under how many thresholds\n score[under_r0_target] = thresholds.shape[0] - \\\n torch.bucketize( compare_dists, boundaries=thresholds ).float()\n # dont include diagonal\n l_mask = c_alphas.float().sum(dim=-1).bool()\n wrapper[bi, l_mask] = ( score.sum(dim=-1) - thresholds.shape[0] ) / \\\n ( max_score.sum(dim=-1) - thresholds.shape[0] )\n\n return wrapper", "def y_dense_correlator(xpcs_data, mask):\n ind = np.where(mask > 0) # unused pixels are 0\n xpcs_data = xpcs_data[:, ind[0], ind[1]] # (n_tau, n_pix)\n del ind\n ltimes, lenmatr = np.shape(xpcs_data) # n_tau, n_pix\n meanmatr = np.array(np.mean(xpcs_data, axis=1), np.float32) # xpcs_data.sum(axis=-1).sum(axis=-1)/n_pix\n meanmatr.shape = 1, ltimes\n\n if ltimes * lenmatr > 1000 * 512 * 512:\n nn = 16\n newlen = lenmatr // nn\n num = np.dot(np.array(xpcs_data[:,:newlen], np.float32), np.array(xpcs_data[:,:newlen], np.float32).T)\n xpcs_data = xpcs_data[:, newlen:] + 0\n for i in range(1, nn - 1, 1):\n num += np.dot(np.array(xpcs_data[:,:newlen], np.float32), np.array(xpcs_data[:,:newlen], np.float32).T)\n xpcs_data = xpcs_data[:, newlen:] + 0\n num += np.dot(np.array(xpcs_data, np.float32), np.array(xpcs_data, np.float32).T)\n else:\n num = np.dot(np.array(xpcs_data, np.float32), np.array(xpcs_data, 
np.float32).T)\n\n num /= lenmatr\n denom = np.dot(meanmatr.T, meanmatr)\n del meanmatr\n res = np.zeros((ltimes - 1, 3)) # was ones()\n for i in range(1, ltimes, 1): # was ltimes-1, so res[-1] was always 1 !\n dia_n = np.diag(num, k=i)\n sdia_d = np.diag(denom, k=i)\n res[i - 1, 0] = i\n res[i - 1, 1] = np.sum(dia_n) / np.sum(sdia_d)\n res[i - 1, 2] = np.std(dia_n / sdia_d) / len(sdia_d) ** 0.5\n return res", "def sn2xy(ds, xs, cl, verbose=True, finite=False):\n\n # ** INPUTS **\n # make a DF from downstream and crossstream coordinates\n \n sn = pd.DataFrame(np.array([ds,xs]).T,\n columns=['ds','xs'])\n \n # extract data point with null ds/xs coords\n sn_nan = sn[sn.isna().any(axis=1)]\n sn.drop(sn[np.isnan(sn.ds)].index, inplace=True)\n \n # - add columns for X and Y transofrmed coords\n # - if finite distance option - add finite columns\n sn['xout'] = np.nan \n sn['yout'] = np.nan \n if finite == True:\n sn['xout_fd'] = np.nan \n sn['yout_fd'] = np.nan \n \n \n if verbose:\n print('SN: %i | SN_nan: %i'%(sn.shape[0],sn_nan.shape[0]))\n\n for i,row in sn.iterrows():\n \n # Calculate the downstream distance difference between the target \n # point (row) and the centerline points. (Signed and Absolute)\n # Sort by the centeline DF by downstream distance to the target point (row) \n \n cl_sort = cl.copy(deep=True)\n cl_sort['ds_diff'] = cl_sort['DS'] - row['ds']\n cl_sort['ds_diffABS'] = np.abs(cl_sort['DS'] - row['ds'])\n cl_sort.sort_values(by='ds_diffABS',inplace=True)\n \n # Centerline Angle Method\n # This method used the original centerline angles calculated by xy2sn\n # conversion\n # Fine the closest 2 upstream points (top two rows of the sorted DF)\n # Sort those based on the signed diffference (neg = upstream, pos = downstream)\n # get the alongstream angle value from the closest upstream point (phi)\n cl_top = cl_sort.iloc[0:2].sort_values(by='ds_diff')\n cl_pt_phi = cl_top.phi.iloc[0]\n \n # get the abs. 
downstream distance difference between the target and CL point\n # calculate a point rotation (sn,xs) > (x,y) around a \n # central axis (CL point)\n # x' = x cos(phi) - y sin(phi)\n # y' = x sin(phi) + y cos(phi)\n # x',y' = cartesian offset coordinates\n # x = downstream distance difference, y = cross-stream (xs) distance \n dds = cl_top.ds_diffABS.iloc[0]\n dx = dds * np.cos(cl_pt_phi) - row['xs'] * np.sin(cl_pt_phi)\n dy = dds * np.sin(cl_pt_phi) + row['xs'] * np.cos(cl_pt_phi)\n \n # add the rotated target point coordinates to the reference CL point to\n # get the final output x,y coordinates\n sn['xout'][i] = cl_top.X.iloc[0] + dx\n sn['yout'][i] = cl_top.Y.iloc[0] + dy\n \n # FINITE DIFFERECE\n # ** Original Method from Legleiter and Kyriakidis **\n # We want the segment bracketed by the two nodes closest to the point to \n # be transformed\n # Compute finite differences dx0/ds and dy0/ds \n # The basic idea is to calculate dx0/ds and dy0/ds using a finite\n # difference approach and then to calculate the x,y coordinates of a\n # pair of s,n coordinates using the equations given in the caption of\n # Figure 1 of Smith and McLean (1984)\n if finite == True:\n dxy = cl_sort.iloc[0] - cl_sort.iloc[1]\n dy_ds = dxy.Y/dxy.ds_diff\n dx_ds = dxy.X/dxy.ds_diff\n sn['xout_fd'][i] = cl_sort.iloc[0]['X'] - row['xs'] * dy_ds\n sn['yout_fd'][i] = cl_sort.iloc[0]['Y'] + row['xs'] * dx_ds\n \n xy_out = pd.concat([sn,sn_nan])\n xy_out.sort_index(inplace=True)\n \n return xy_out", "def test_uccsd_operations(self, s_wires, d_wires, weights, ref_gates):\n\n sqg = 10 * len(s_wires) + 72 * len(d_wires)\n\n cnots = 0\n for s_wires_ in s_wires:\n cnots += 4 * (len(s_wires_) - 1)\n\n for d_wires_ in d_wires:\n cnots += 16 * (len(d_wires_[0]) - 1 + len(d_wires_[1]) - 1 + 1)\n N = 6\n wires = range(N)\n\n ref_state = np.array([1, 1, 0, 0, 0, 0])\n\n with qml.tape.OperationRecorder() as rec:\n UCCSD(weights, wires, s_wires=s_wires, d_wires=d_wires, init_state=ref_state)\n\n assert len(rec.queue) == sqg + cnots + 1\n\n for gate in ref_gates:\n idx = gate[0]\n\n exp_gate = gate[1]\n res_gate = rec.queue[idx]\n assert isinstance(res_gate, exp_gate)\n\n exp_wires = gate[2]\n res_wires = rec.queue[idx]._wires\n assert res_wires == Wires(exp_wires)\n\n exp_weight = gate[3]\n res_weight = rec.queue[idx].parameters\n if exp_gate != qml.BasisState:\n assert res_weight == exp_weight\n else:\n assert np.allclose(res_weight, exp_weight)", "def test_Leauthaud11Cens():\n\n\tmodel = Leauthaud11Cens()\n\tncen1 = model.mean_occupation(prim_haloprop = 1.e12)\n\n\tmcocc = model.mc_occupation(prim_haloprop = np.ones(1e4)*1e12, seed=43)\n\tassert 0.5590 < np.mean(mcocc) < 0.5592\n\n\tmodel.param_dict['scatter_model_param1'] *= 1.5\n\tncen2 = model.mean_occupation(prim_haloprop = 1.e12)\n\tassert ncen2 < ncen1\n\n\tmodel.param_dict['m10'] *= 1.1\n\tncen3 = model.mean_occupation(prim_haloprop = 1.e12)\n\tassert ncen3 < ncen2\n\n\tmodel.param_dict['m11'] *= 1.1\n\tncen4 = model.mean_occupation(prim_haloprop = 1.e12)\n\tassert ncen4 == ncen3\n\n\n\tmodel2 = Leauthaud11Cens(threshold = 10.75)\n\tncen5 = model2.mean_occupation(prim_haloprop = 1.e12)\n\tassert ncen5 < ncen1", "def _sparse_beads(y, freq_cutoff=0.005, lam_0=1.0, lam_1=1.0, lam_2=1.0, asymmetry=6,\n filter_type=1, use_v2_loss=True, max_iter=50, tol=1e-2, eps_0=1e-6,\n eps_1=1e-6, smooth_half_window=0):\n num_y = y.shape[0]\n d1_diags = np.zeros((5, num_y))\n d2_diags = np.zeros((5, num_y))\n offsets = np.arange(2, -3, -1)\n A, B = _high_pass_filter(num_y, freq_cutoff, 
filter_type, True)\n # factorize A since A is unchanged in the function and its factorization\n # is used repeatedly; much faster than calling spsolve each time\n A_factor = splu(A.tocsc(), permc_spec='NATURAL')\n BTB = B * B\n\n x = y\n d1_x, d2_x = _abs_diff(x, smooth_half_window)\n # line 2 of Table 3 in beads paper\n d = BTB.dot(A_factor.solve(y)) - A.dot(np.full(num_y, lam_0 * (1 - asymmetry) / 2))\n gamma = np.empty(num_y)\n gamma_factor = lam_0 * (1 + asymmetry) / 2 # 2 * lam_0 * (1 + asymmetry) / 4\n cost_old = 0\n abs_x = np.abs(x)\n big_x = abs_x > eps_0\n tol_history = np.empty(max_iter + 1)\n for i in range(max_iter + 1):\n # calculate line 6 of Table 3 in beads paper using banded matrices rather\n # than sparse matrices since it is much faster; Gamma + D.T * Lambda * D\n\n # row 1 and 3 instead of 0 and 2 to account for zeros on top and bottom\n d1_diags[1][1:] = d1_diags[3][:-1] = -_beads_weighting(d1_x, use_v2_loss, eps_1)\n d1_diags[2] = -(d1_diags[1] + d1_diags[3])\n\n d2_diags[0][2:] = d2_diags[-1][:-2] = _beads_weighting(d2_x, use_v2_loss, eps_1)\n d2_diags[1] = 2 * (d2_diags[0] - np.roll(d2_diags[0], -1, 0)) - 4 * d2_diags[0]\n d2_diags[-2][:-1] = d2_diags[1][1:]\n d2_diags[2] = -(d2_diags[0] + d2_diags[1] + d2_diags[-1] + d2_diags[-2])\n\n d_diags = lam_1 * d1_diags + lam_2 * d2_diags\n gamma[~big_x] = gamma_factor / eps_0\n gamma[big_x] = gamma_factor / abs_x[big_x]\n d_diags[2] += gamma\n\n x = A.dot(\n spsolve(\n BTB + A.dot(spdiags(d_diags, offsets, num_y, num_y, 'csr').dot(A)),\n d, 'NATURAL'\n )\n )\n h = B.dot(A_factor.solve(y - x))\n d1_x, d2_x = _abs_diff(x, smooth_half_window)\n abs_x, big_x, theta = _beads_theta(x, asymmetry, eps_0)\n cost = (\n 0.5 * h.dot(h)\n + lam_0 * theta\n + lam_1 * _beads_loss(d1_x, use_v2_loss, eps_1).sum()\n + lam_2 * _beads_loss(d2_x, use_v2_loss, eps_1).sum()\n )\n cost_difference = relative_difference(cost_old, cost)\n tol_history[i] = cost_difference\n if cost_difference < tol:\n break\n cost_old = cost\n\n diff = y - x\n baseline = diff - B.dot(A_factor.solve(diff))\n\n return baseline, {'signal': x, 'tol_history': tol_history[:i + 1]}", "def getSDM_complex(SDM_v):\n\tcount_calls('getSDM_complex')\n\tnWaves = int(len(SDM_v)**.5)\n\tSDM_c = init_array(nWaves,0.+0.j)\n\tfor i in range(nWaves**2):\n\t\tii = int(i/nWaves)\n\t\tjj = i - ii*nWaves\n\t\tSDM_c[ii][jj] += SDM_v[i]\t# This gets only the j < i entries right\n\t\tSDM_c[jj][ii] += SDM_v[i]*1.j\t#\n\tfor i in range(nWaves):\n\t\tfor j in range(nWaves):\n\t\t\tif i==j: # The diagonal elements are wrong, fis this here\n\t\t\t\tSDM_c[i][j] = SDM_c[i][j].real+0.j\n\t\t\tif j > i: # the j > i entries are set wrong before, fix this here. 
\n\t\t\t\tSDM_c[i][j] = SDM_c[j][i].conjugate()\n\treturn SDM_c", "def wasserstein_mdp_distance(m1, m2, d=None, threshold=0.1):\n assert m1.nS == m2.nS, \"Error: environments have different number of states: m1.nS={}, m2.nS={}\".format(\n m1.nS, m2.nS\n )\n if d is None:\n d = bi_simulation_distance(m1, m2, threshold)\n ns = m1.nS\n uniform_distribution = (1.0 / float(ns)) * np.ones(shape=ns, dtype=float)\n distance, matching_matrix = distribution.wass_primal(uniform_distribution, uniform_distribution, d)\n matching_matrix = np.reshape(matching_matrix, newshape=(ns, ns))\n return distance, matching_matrix", "def mcdEdxNEW( xzyeT ):\n\n xzyeTStrip = []\n for i in range(len(xzyeT)):\n if xzyeT[i][1] >= -0.49375:\n xzyeTStrip.append(xzyeT[i])\n\n \n if len(xzyeTStrip) < 2:\n return (0,1)\n \n xzyeTStrip = np.asarray(xzyeTStrip)\n upsPointIndex = np.argmin(xzyeTStrip[:,1])\n\n xzyeT5cm = []\n for i in range(len(xzyeTStrip)):\n if np.linalg.norm( xzyeTStrip[i][:3] - xzyeTStrip[upsPointIndex][:3] ) <= 5:\n xzyeT5cm.append( xzyeTStrip[i] )\n\n xzyeT5cm = np.asarray(xzyeT5cm)\n \n end = np.argmax(xzyeT5cm[:,1])\n start = np.argmin(xzyeT5cm[:,1])\n\n dE = (xzyeT5cm[start][3] - xzyeT5cm[end][3])*1000\n dx = np.linalg.norm(xzyeT5cm[start][:3] - xzyeT5cm[end][:3])\n\n return (dE,dx)", "def test_double_sparse(self):\n dname = os.path.dirname(os.path.abspath(__file__))\n mf = mf_c(label='water', cd=dname)\n pb = mf.pb\n v_dab_array = pb.get_dp_vertex_array()\n nnn = v_dab_array.size\n vds = pb.get_dp_vertex_doubly_sparse(axis=0)\n self.assertEqual(vds.shape, v_dab_array.shape)\n self.assertTrue(abs(vds.toarray()-v_dab_array).sum()/nnn<1e-14)\n vds = pb.get_dp_vertex_doubly_sparse(axis=1)\n self.assertTrue(abs(vds.toarray()-v_dab_array).sum()/nnn<1e-14)\n vds = pb.get_dp_vertex_doubly_sparse(axis=2)\n self.assertTrue(abs(vds.toarray()-v_dab_array).sum()/nnn<1e-14)", "def dgesl(a, lda, n, ipvt, b, job):\r\n \r\n \r\n #integer lda,n,ipvt(1),job\r\n #double precision a(lda,1),b(1)\r\n \r\n #double precision ddot,t\r\n #integer k,kb,l,nm1\r\n \r\n #c\r\n nm1 = n - 1\r\n if (job == 0):\r\n #c\r\n #c job = 0 , solve a * x = b\r\n #c first solve l*y = b\r\n #c\r\n if (nm1 >= 1):\r\n \r\n for k in range(nm1):\r\n l = ipvt[k]\r\n t = b[l]\r\n if (l != k):\r\n #print(\"DGESL if triggered\")\r\n b[l] = b[k]\r\n b[k] = t\r\n #print(\"DGESL 1: l \", l, \" k, \", k, \" b \", b[k])\r\n\r\n #FORTRAN call call daxpy(n-k, t, a[k+1][k], 1, b[k+1], 1)\r\n #5th parameter is in/out:\r\n #b[k+1] = daxpy(n-k, t, a[k+1][k], 1, b[k+1], 1)\r\n #[b[kk+1] for kk in range(k, n)] = daxpy(n-k, t,\\\r\n # [a[k+1][kk] for kk in range(k, n)], 1, [b[kk+1] for kk in range(k, n)], 1)\r\n daxpyOut =\\\r\n Daxpy.daxpy(n-k-1, t, [a[kk][k] for kk in range(k+1, n)], 1, [b[kk] for kk in range(k+1, n)], 1)\r\n daxpyCount = 0\r\n for kk in range(k+1, n):\r\n b[kk] = daxpyOut[daxpyCount]\r\n daxpyCount+=1\r\n #print(\"DGESL 2: k \", k, \" b \", b[k])\r\n #scipy: b[k+1] = daxpy(t, a[k+1][k], n-k, 1, 1)\r\n \r\n #c\r\n #c now solve u*x = y\r\n #c\r\n #print(\"DGESL: Before 2nd DAXPY call n \", n)\r\n for kb in range(n):\r\n #k = n + 1 - kb\r\n k = (n-1) - kb\r\n #print(\"DGESL: kb \", kb, \" k \", k, \" b \", b[k], \" a \", a[k][k])\r\n b[k] = b[k]/a[k][k]\r\n t = -b[k]\r\n #FORTRAN call: call daxpy(k-1, t, a[1][k], 1, b[1], 1)\r\n #b[1] = daxpy(k-1, t, a[1][k], 1, b[1], 1)\r\n #[b[kk] for kk in range(1, k)] = daxpy(k-1, t,\\\r\n # [a[1][kk] for kk in range(1, k)], 1, [b[kk] for kk in range(1, k)], 1)\r\n #print(\"DGESL: Before DAPXPY 2:\")\r\n 
#print(\"a \", [a[kk][k] for kk in range(0, k+1)])\r\n #print(\"b \", [b[kk] for kk in range(0, k+1)])\r\n daxpyOut =\\\r\n Daxpy.daxpy(k, t, [a[kk][k] for kk in range(0, k+1)], 1, [b[kk] for kk in range(0, k+1)], 1)\r\n daxpyCount = 0\r\n for kk in range(0, k+1):\r\n b[kk] = daxpyOut[daxpyCount]\r\n daxpyCount+=1 \r\n #print(\"DGESL: After DAPXPY 2:\")\r\n #print(\"b \", [b[kk] for kk in range(0, k+1)]) \r\n #scipy: b[0] = daxpy(t, a[0][k], k-1, 1, 1)\r\n \r\n # **** goto 100 !!! Oh-oh!!\r\n \r\n #c\r\n #c job = nonzero, solve trans(a) * x = b\r\n #c first solve trans(u)*y = b\r\n #c\r\n \r\n if (job != 0):\r\n \r\n for k in range(n):\r\n #t = ddot(k-1, a[1][k], 1, b[1], 1)\r\n t = Ddot.ddot(k, [a[kk][k] for kk in range(0, k)],\\\r\n 1, [b[kk] for kk in range(0, k)], 1)\r\n b[k] = (b[k] - t)/a[k][k]\r\n #print(\"DDOT 1: t \", t)\r\n \r\n #c\r\n #c now solve trans(l)*x = y\r\n #c\r\n if (nm1 >= 1):\r\n for kb in range(nm1):\r\n #k = n - kb\r\n k = n - kb - 1\r\n #b[k] = b[k] + ddot(n-k, a[k+1][k], 1, b[k+1], 1)\r\n b[k] = b[k] + Ddot.ddot(n-k, [a[kk][k] for kk in range(k, n)],\\\r\n 1, [b[kk] for kk in range(k, n)], 1)\r\n #print(\"DDOT 2: t \", t)\r\n l = ipvt[k]\r\n if (l != k):\r\n t = b[l]\r\n b[l] = b[k]\r\n b[k] = t\r\n\r\n return b", "def fit(direct):\n path = direct+ \"/msd.txt\"\n #making the dataframe with the msd from the txt file\n df = pd.DataFrame(pd.read_csv(path,sep = \" \"))\n msd = np.mean(df)\n #fit the msd\n popt = an.fit(msd)\n D = popt[0]\n alpha = popt[1]\n\n return D, alpha", "def GradSimplex3DP(a, b, c, id, jd, kd):\n\n fa = JacobiP(a, 0, 0, id).reshape(len(a),1)\n dfa = GradJacobiP(a, 0, 0, id)\n gb = JacobiP(b, 2*id+1,0, jd).reshape(len(b),1)\n dgb = GradJacobiP(b, 2*id+1,0, jd)\n hc = JacobiP(c, 2*(id+jd)+2,0, kd).reshape(len(c),1)\n dhc = GradJacobiP(c, 2*(id+jd)+2,0, kd)\n\n # r-derivative\n # d/dr = da/dr d/da + db/dr d/db + dc/dr d/dx\n dmodedr = dfa*gb*hc\n if(id>0):\n dmodedr = dmodedr*((0.5*(1-b))**(id-1))\n if(id+jd>0):\n dmodedr = dmodedr*((0.5*(1-c))**(id+jd-1))\n\n # s-derivative\n dmodeds = 0.5*(1+a)*dmodedr\n tmp = dgb*((0.5*(1-b))**id)\n if(id>0):\n tmp = tmp+(-0.5*id)*(gb*(0.5*(1-b))**(id-1))\n\n if(id+jd>0):\n tmp = tmp*((0.5*(1-c))**(id+jd-1))\n\n tmp = fa*tmp*hc\n dmodeds = dmodeds + tmp\n\n # t-derivative\n dmodedt = 0.5*(1+a)*dmodedr+0.5*(1+b)*tmp\n tmp = dhc*((0.5*(1-c))**(id+jd))\n if(id+jd>0):\n tmp = tmp-0.5*(id+jd)*(hc*((0.5*(1-c))**(id+jd-1)));\n\n tmp = fa*(gb*tmp)\n tmp = tmp*((0.5*(1-b))**id);\n dmodedt = dmodedt+tmp;\n\n # Normalize\n dmodedr = 2**(2*id+jd+1.5)*dmodedr\n dmodeds = 2**(2*id+jd+1.5)*dmodeds\n dmodedt = 2**(2*id+jd+1.5)*dmodedt\n\n return dmodedr[:,0], dmodeds[:,0], dmodedt[:,0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_map_bb_sptr __init__(self, p) > digital_map_bb_sptr
def __init__(self, *args): this = _digital_swig.new_digital_map_bb_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self._map = dict()", "def __init__(self):\n this = _coin.new_SoBumpMap()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoBumpMapCoordinate()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def map_bb(*args, **kwargs):\n return _digital_swig.map_bb(*args, **kwargs)", "def __init__(self):\n this = _coin.new_SoTextureCoordinateReflectionMap()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoTextureCoordinateNormalMap()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoBumpMapTransform()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoHeightMapToNormalMap()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, addrSet: ghidra.program.model.address.AddressSetView, dataType: ghidra.program.model.data.DataType, stackPointers: bool):\n ...", "def __init__(self):\n this = _coin.new_SoTextureCubeMap()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
map_bb(__dummy_3__ map) > digital_map_bb_sptr output[i] = map[input[i]] This block maps an incoming signal to the value in the map. The block expects that the incoming signal has a maximum value of len(map)-1. > output[i] = map[input[i]]
def map_bb(*args, **kwargs): return _digital_swig.map_bb(*args, **kwargs)
[ "def process_output(output_data, label_mapping):\n idx = np.argmax(output_data[0])\n\n return label_mapping[idx]", "def after_map(self, map):", "def calcMap(self, p):\n\n mapping = dict()\n for i in range(2 ** p):\n if isUniform(i, p):\n mapping[i] = i\n else:\n mapping[i] = 5\n\n return mapping", "def init_costmap(map):\n ### START: 1a\n cost_map = np.full_like(map, np.inf)\n return cost_map\n ### END: 1a", "def smoothmap(min, max, x):\n\n pass", "def before_map(self, map):", "def build_fixmaps(in_ann_wo_fixmap, out_ann_w_fixmap):\n # TODO\n pass", "def mapping(self, source):", "def _relabel_tile(\n inp_tile: numpy.ndarray,\n mapping: dict[int, int],\n) -> numpy.ndarray:\n out_tile = numpy.copy(inp_tile)\n for k, v in mapping.items():\n mask = inp_tile == k\n out_tile[mask] = v\n return out_tile", "def callback(self, action: 'SoCallbackAction') -> \"void\":\n return _coin.SoBumpMap_callback(self, action)", "def map(inputs, U_sequences, e0,e1,k):\r\n U_sequences = tf.cast(U_sequences, tf.float32)\r\n codebook = tf.cast(inputs[0][0:2 ** k], tf.float32)\r\n codebook = K.round(tf.cast(codebook, tf.float32))\r\n u_k_hat = tf.TensorArray(tf.float32, size=0, dynamic_size=True)\r\n for y in inputs[1]:\r\n y_map = max_pyx(y, codebook, e0, e1)\r\n u_k_hat = u_k_hat.write(u_k_hat.size(), tf.gather(U_sequences, y_map))\r\n u_k_hat = u_k_hat.stack()\r\n return u_k_hat", "def callback(self, action: 'SoCallbackAction') -> \"void\":\n return _coin.SoBumpMapTransform_callback(self, action)", "def test_mapping_w_delays(self):\n qc = QuantumCircuit(2, 2)\n qc.measure(0, 1)\n qc.delay(10, 0)\n qc.measure(1, 0)\n qc.barrier()\n\n maps = final_measurement_mapping(qc)\n self.assertDictEqual(maps, {1: 0, 0: 1})", "def parse_map_bits(self, map_name, start, end):\n\n map_index = self.map_indices[map_name]\n return self.get_bits_values(map_index, start, end)", "def postmap(self, value, mapper, arg):\n if self.MAP and value is not None:\n value = self.doPostmap(value, mapper, arg)\n\n return value", "def _remap_default(self, remove_input_map=True, remove_output_map=True):\n if not remove_input_map and not remove_output_map:\n return\n\n # Compute inside and outside tensor\n inputs, outputs, _ = select.compute_boundary_ts(self._ops)\n if remove_input_map:\n self._input_ts = list(inputs) + self._passthrough_ts\n if remove_output_map:\n self._output_ts = list(outputs) + self._passthrough_ts", "def computeBinaryMap(\n self, _saliencyMap, _binaryMap=...\n ) -> Tuple[retval, _binaryMap]:\n ...", "def set_input_map(self, device_name, input_map_name):\n settings = ConfigManager().get_settings(input_map_name)\n if settings:\n self._springy_throttle = settings[\"springythrottle\"]\n self._input_map = ConfigManager().get_config(input_map_name)\n if self._input_device:\n self._input_device.input_map = self._input_map\n Config().get(\"device_config_mapping\")[device_name] = input_map_name", "def map(self, p_int, p_int_1, QFile_MemoryMapFlags): # real signature unknown; restored from __doc__\r\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_mpsk_receiver_cc_sptr __init__(self, p) > digital_mpsk_receiver_cc_sptr
def __init__(self, *args): this = _digital_swig.new_digital_mpsk_receiver_cc_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, protocol):\r\n\r\n self.protocol = protocol\r\n self.protocol.protocol_flags['MCCP'] = False\r\n # ask if client will mccp, connect callbacks to handle answer\r\n self.protocol.will(MCCP).addCallbacks(self.do_mccp, self.no_mccp)", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoMFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, name=None):\n self._mng = pn_messenger(name)", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_framer_sink_1_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, protocol):\r\n self.protocol = protocol\r\n self.protocol.will(MSSP).addCallbacks(self.do_mssp, self.no_mssp)", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoSFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_fff_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n\t\tself.communicator_list = []\n\t\tself.NETWORK_TIMER = 500", "def __init__(self, ip, receive_from_port, quit_event, address_list=[\"/clock*\"], address_handler_list=[None]):\n\n # we want the OscReceiver to run in a separate concurrent thread\n # hence it is a child instance of the threading.Thread class\n super(OscReceiver, self).__init__()\n\n # connection parameters\n self.ip = ip\n self.receiving_from_port = receive_from_port\n\n # dispatcher is used to assign a callback to a received osc message\n self.dispatcher = Dispatcher()\n\n # assign each handler to it's corresponding message\n for ix, address in enumerate(address_list):\n self.dispatcher.map(address, address_handler_list[ix])\n\n # you can have a default_handler for messages that don't have dedicated handlers\n self.dispatcher.set_default_handler(self.default_handler)\n\n # python-osc method for establishing the UDP communication with pd\n self.server = BlockingOSCUDPServer((self.ip, self.receiving_from_port), 
self.dispatcher)\n\n # used from outside the class/thread to signal finishing the process\n self.quit_event = quit_event", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
modulation_order(self) > float Returns the modulation order (M) currently set.
def modulation_order(self): return _digital_swig.digital_mpsk_receiver_cc_sptr_modulation_order(self)
[ "def calc_mod(self):\n if self.proficiency:\n self.mod = self.attribute.mod + 2\n else:\n self.mod = self.attribute.mod", "def get_partialOrdering(self):\n return self.partialOrdering", "def maximal_order(self):\n if self._maximal_order is None:\n self._do_round_two()\n return self._maximal_order", "def layerGroupOrder( self ):\r\n\t\treturn self.metaData().value( 'groupOrder' )", "def evaluation_order(self):\n return self.container['evaluation_order']", "def EstimateModelOrder(self):\n # Normalize the singular values by the maximum and cut out modes\n # corresponding to singular values below a specified tolerance\n tol1 = 1.0e-2\n snorm = self.s/self.s.max()\n n_above_tol = len(self.s[snorm > tol1])\n\n # Approximate second derivative singular values using convolve as a\n # central difference operator\n w = [1.0, -1.0]\n diff = sig.convolve(snorm, w, 'valid')\n diffdiff = sig.convolve(diff, w, 'valid')\n\n # Cut out more modes depending on the approximated second derivative\n # The idea is sort of to cut at an inflection point in the singular\n # value curve or maybe where they start to bottom out\n tol2 = 1.0e-3\n n_bottom_out = 2 + len(diffdiff[diffdiff > tol2])\n\n # Estimate the number of modes (model order) to have at least two but\n # otherwise informed by the cuts made above\n self.M = min(max(2, min(n_above_tol, n_bottom_out)), self.L)\n\n # Report the model order\n if self.output_level[-1] == \"1\":\n print(\"Model order, M = \", self.M)\n np.savetxt('singular_values.dat',snorm)\n\n # Plotting to help diagnose what the cuts are doing\n if self.output_level[-2] == \"1\":\n # Plot normalized singular values and first cut-off\n plt.figure(figsize=(8, 10))\n ax = plt.gca()\n ax.scatter(np.arange(len(self.s)), snorm, s=40, c='blue', alpha=0.3)\n ax.plot(np.array([-0.2*len(self.s), 1.2*len(self.s)]),\n tol1*np.ones(2),\n color='orange', linestyle='dashed', linewidth=2,\n label='cut-off')\n ax.set_yscale('log')\n ax.set_ylim(1e-20, 1e1)\n plt.legend()\n plt.title('Normalized Singular Values', fontsize=16)\n\n # Plot approximate second derivative and second cut-off\n plt.figure(figsize=(8, 10))\n ax = plt.gca()\n ax.scatter(np.arange(len(diffdiff)), diffdiff,\n s=40, c='blue', alpha=0.3)\n ax.plot(np.array([-0.2*len(self.s), 1.2*len(self.s)]),\n tol2*np.ones(2),\n color='orange', linestyle='dashed', linewidth=2,\n label='2nd cut-off')\n ax.set_yscale('log')\n ax.set_ylim(1e-20, 1e1)\n plt.legend()\n plt.title('Approx. 
2nd Derivative of Singular Values', fontsize=16)\n plt.show()\n\n return", "def _order_spec(self):\n return self._columns_dimension.order_spec", "def panel_order(self, force_update=False):\n if 'panel_order' not in self.pattern or force_update:\n self.pattern['panel_order'] = self.define_panel_order()\n return self.pattern['panel_order']", "def rotationOrder(self):\n\t\treturn 0", "def get_order(self):\n # Minimum order for empty or_types is 1:\n if not self._or_types:\n return 1\n\n order_dict = {'~':1.,\n '-':1., ':': 1.5, '=':2., '#':3.,\n '!-':1.5, '!:':1., '!=':1., '!#':1.}\n order_list = [order_dict.get(base,1) for (base, decor) in self._or_types]\n return min(order_list)", "def render_order(self):\n ret_val = self._render_order()\n return ret_val", "def order(self):\n\n return xroms.order(self.da)", "def _get_modularity(self):\n modularity = 0\n # Update modularity of the graph and community weights (sigmas)\n community_set = self.community_dict\n for community in community_set:\n # Update modularity of the graph\n sig_c_in = community_set[community].weight_in\n sig_c_tot = community_set[community].weight_tot\n modularity = modularity + (sig_c_in / (2 * self.m)) - (sig_c_tot / (2 * self.m)) ** 2\n return modularity", "def column_ordering(self) -> Dict[str, bool]:\n return self._column_ordering", "def record_column_order( self ):\n\n\t\tpass", "def order_pressure(self):\n\n pn = self.order\n staggered = self.params.oper.elem.staggered\n\n problemtype_equation = self.params.nek.problemtype.equation.lower()\n\n if \"lin\" in problemtype_equation and staggered is False:\n logger.warning(\n \"Linear equation type and staggered == False leads to \"\n \"undefined behaviour in Nek5000. User should put \"\n 'params.oper.elem.staggered = True or \"auto\" to have evolution'\n \"of perturbation field.\"\n )\n\n if staggered == \"auto\":\n if \"lin\" in problemtype_equation:\n return pn - 2\n else:\n return pn\n elif staggered is True:\n return pn - 2\n elif staggered is False:\n return pn\n else:\n raise ValueError(\n 'params.nek have to be in [True, False, \"auto\"]. '\n f\"staggered = {staggered}\"\n )", "def modality_ids(self):\n return sorted(list(self._modalities.keys()))", "def GetInputOrder(self):\n return self._mapOrder", "def order_flags(self):\n return ['NOORDER', 'ORDERMISMATCH']", "def set_module_order(self, order):\n with self.order_lock:\n self.module_order.set(order)\n self._listeners.notify(\"order\")\n self._listeners.notify(\"dependency\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
gain_mu(self) > float Returns mu gain factor.
def gain_mu(self): return _digital_swig.digital_mpsk_receiver_cc_sptr_gain_mu(self)
[ "def get_mu(self):\n return self.mu", "def gain(self, g):\n return self.normalize(0, 1, scale=g)", "def process_gain(self):\n return 1", "def mag_gain(self, gain=0x20):\n self._mag_gain = gain\n self.i2c.writeto_mem(self.ADDRESS_MAG, self.REGISTER_MAG_CRB_REG_M, self._mag_gain)\n if self._mag_gain == MAGGAIN_1_3:\n self._lsm303mag_gauss_lsb_xy = 1100.0\n self._lsm303mag_gauss_lsb_z = 980.0\n elif self._mag_gain == MAGGAIN_1_9:\n self._lsm303mag_gauss_lsb_xy = 855.0\n self._lsm303mag_gauss_lsb_z = 760.0\n elif self._mag_gain == MAGGAIN_2_5:\n self._lsm303mag_gauss_lsb_xy = 670.0\n self._lsm303mag_gauss_lsb_z = 600.0\n elif self._mag_gain == MAGGAIN_4_0:\n self._lsm303mag_gauss_lsb_xy = 450.0\n self._lsm303mag_gauss_lsb_z = 400.0\n elif self._mag_gain == MAGGAIN_4_7:\n self._lsm303mag_gauss_lsb_xy = 400.0\n self._lsm303mag_gauss_lsb_z = 355.0\n elif self._mag_gain == MAGGAIN_5_6:\n self._lsm303mag_gauss_lsb_xy = 330.0\n self._lsm303mag_gauss_lsb_z = 295.0\n elif self._mag_gain == MAGGAIN_8_1:\n self._lsm303mag_gauss_lsb_xy = 230.0\n self._lsm303mag_gauss_lsb_z = 205.0", "def set_gain(self, gain):\n if gain is None:\n r = self.subdev.gain_range()\n gain = (r[0] + r[1])/2 # set gain to midpoint\n self.gain = gain\n return self.subdev.set_gain(gain)", "def _Gain(self, value):\n v = (((self.max-self.min))-float(value))\n v = int(v*10)/10.0\n return v", "def _set_gain(self, adjustment: int) -> int:\n return _lib.opus_decoder_ctl(self._state, CTL_SET_GAIN, adjustment)", "def setGain(self, gain: 'float') -> \"void\":\n return _coin.SoAudioDevice_setGain(self, gain)", "def ValidGain(self,dB):\n\n gain = int(dB/0.25)\n\n if gain<0:\n\n Gain = 0\n\n elif 0<=gain<=70:\n\n Gain = gain\n\n elif gain>70:\n\n Gain = 80\n\n else:\n\n Gain = 24\n\n return Gain", "def information_gain(self,feature):\n total_ent=self.entropy()\n feat_ent=self.feature_entropy(feature)\n info_gain=total_ent-feat_ent\n return info_gain", "def get_ml_gain_increment(self):\n frames = self.integration.frames\n valid_frames = frames.valid & frames.is_unflagged('MODELING_FLAGS')\n return snf.get_ml_gain_increment(\n frame_data=frames.data,\n signal_wc=frames.temp_wc,\n signal_wc2=frames.temp_wc2,\n sample_flags=frames.sample_flag,\n channel_indices=self.mode.channel_group.indices,\n valid_frames=valid_frames)", "def __gain_scale(self, param_n, src_group):\n\n spec_ = self._config.get(param_n)\n param_data = src_group.get(param_n)\n return param_data / spec_.gain", "def capitalGain(self):\n\t\tself.capital_gain = self.par_value - self.price\n\t\treturn self.capital_gain", "def info_gain(\n\t\tevent: Collection,\n\t\tevent_tests: Collection[Callable],\n\t\tgiven: Collection,\n\t\tgiven_tests: Collection[Callable]) -> float:\n\tcond_entropy = conditional_entropy(event, event_tests, given, given_tests)\n\tevent_entropy = sum(entropy(probability(event, e)) for e in event_tests)\n\tgain = event_entropy - cond_entropy\n\treturn 0 if math.isnan(gain) else gain", "def getEffGain(self):\n\n return self._effGain", "def total_unr_perc_gain(self):\n tbc = self.total_book_cost()\n if tbc == 0.0:\n return 0.0\n return (self.total_market_value() - tbc) / tbc * 100.0", "def promptUsrpGain(min_gain=0, max_gain=31.5):\n\t\twhile(gain := _getValidFloat(\"Enter USRP gain (in dB): \")):\n\t\t\tif(gain < min_gain):\n\t\t\t\tprint(\"Gain of '\" + gain + \"' is below the minimum of '\" + min_gain + \"'.\")\n\t\t\telif(gain > max_gain):\n\t\t\t\tprint(\"Gain of '\" + gain + \"' is above the maximum of '\" + max_gain + \"'.\")\n\t\t\treturn gain", "def 
get_gain_increment(self, robust=False):\n if self.configuration.get_bool('signal-response'):\n self.integration.comments.append(\n f'{{{self.get_covariance():.2f}}}')\n log.debug(f\"covariance = {self.get_covariance():.2f}\")\n\n # Precalculate the gain-weight products...\n # frames.temp_c = signal\n # frames.temp_wc = frame_weight * signal\n # frames.temp_wc2 = frame_weight * signal^2\n self.prepare_frame_temp_fields()\n\n if robust:\n return self.get_robust_gain_increment()\n else:\n return self.get_ml_gain_increment()", "def mean(self):\n\t\treturn 0.8", "def total_unr_gain(self):\n return sum(\n pos.unr_gain\n for asset, pos in self.positions.items()\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
gain_omega(self) > float Returns omega gain factor.
def gain_omega(self): return _digital_swig.digital_mpsk_receiver_cc_sptr_gain_omega(self)
[ "def get_omega(self):\n return self.omega_", "def angular_velocity(self):\r\n\r\n self.omega += self.angular_acceleration*self.dt\r\n return self.omega", "def _set_gain(self, adjustment: int) -> int:\n return _lib.opus_decoder_ctl(self._state, CTL_SET_GAIN, adjustment)", "def omega(self):\n\n return xroms.omega(self.ds.u, self.ds.v)", "def cosmo_Omega_gw(self):\n ## Cosmological Spectrum\n # cosmo_omega_gw = self.omega_gw ## units = Independent of frequency\n cosmo_omega_gw = self.omega_gw * (self.frequency / 10)**0\n return cosmo_omega_gw", "def mag_gain(self, gain=0x20):\n self._mag_gain = gain\n self.i2c.writeto_mem(self.ADDRESS_MAG, self.REGISTER_MAG_CRB_REG_M, self._mag_gain)\n if self._mag_gain == MAGGAIN_1_3:\n self._lsm303mag_gauss_lsb_xy = 1100.0\n self._lsm303mag_gauss_lsb_z = 980.0\n elif self._mag_gain == MAGGAIN_1_9:\n self._lsm303mag_gauss_lsb_xy = 855.0\n self._lsm303mag_gauss_lsb_z = 760.0\n elif self._mag_gain == MAGGAIN_2_5:\n self._lsm303mag_gauss_lsb_xy = 670.0\n self._lsm303mag_gauss_lsb_z = 600.0\n elif self._mag_gain == MAGGAIN_4_0:\n self._lsm303mag_gauss_lsb_xy = 450.0\n self._lsm303mag_gauss_lsb_z = 400.0\n elif self._mag_gain == MAGGAIN_4_7:\n self._lsm303mag_gauss_lsb_xy = 400.0\n self._lsm303mag_gauss_lsb_z = 355.0\n elif self._mag_gain == MAGGAIN_5_6:\n self._lsm303mag_gauss_lsb_xy = 330.0\n self._lsm303mag_gauss_lsb_z = 295.0\n elif self._mag_gain == MAGGAIN_8_1:\n self._lsm303mag_gauss_lsb_xy = 230.0\n self._lsm303mag_gauss_lsb_z = 205.0", "def gain(self, g):\n return self.normalize(0, 1, scale=g)", "def process_gain(self):\n return 1", "def get_omega(freq, deg=False):\r\n omega = 2 * np.pi * freq\r\n return np.rad2deg(omega) if deg else omega", "def set_gain(self, gain):\n if gain is None:\n r = self.subdev.gain_range()\n gain = (r[0] + r[1])/2 # set gain to midpoint\n self.gain = gain\n return self.subdev.set_gain(gain)", "def _Gain(self, value):\n v = (((self.max-self.min))-float(value))\n v = int(v*10)/10.0\n return v", "def getEffGain(self):\n\n return self._effGain", "def set_omega(self, omega):\n np.copyto(self.omega_, omega)", "def _calculate_omega(self, covariance, tau, pick_matrix, view_confidences, omega_method):\r\n\r\n if omega_method == 'prior_variance':\r\n omega = pick_matrix.dot((tau * covariance).dot(pick_matrix.T))\r\n else:\r\n omega = self._calculate_idzorek_omega(covariance, view_confidences, pick_matrix)\r\n omega = np.diag(np.diag(omega))\r\n return omega", "def cool_rate(gamma):\n # we need a synchrotron object to get U_rad\n syn = Synchrotron(self.model)\n U_rad = syn.U_rad(gamma, self.nu, N_e)\n prefactor = (4 / 3 * sigma_T / mec).value\n rate = (self.model.blob.U_B.value + U_rad.value) * gamma**2\n return prefactor * rate", "def get_gain(applied_volts):\n a, b, c = 2.432, 12.86, -237.5\n #a, b, c = 545.1, 13.65, 0\n gain = a*np.exp(b*applied_volts) + c\n return gain", "def gamma(self) -> float:\n return self.angles[2]", "def _calculate_omega(self, covariance, tau, pick_matrix, view_confidences, omega_method):\n\n if omega_method == 'prior_variance':\n omega = pick_matrix.dot((tau * covariance).dot(pick_matrix.T))\n else:\n omega = self._calculate_idzorek_omega(covariance, view_confidences, pick_matrix)\n omega = np.diag(np.diag(omega))\n return omega", "def set_gain(self, dB: float) -> int:\n\n dB_Q8 = max(-32768, min(32767, round(dB * 256))) # dB * 2^n where n is 8 (Q8)\n return self._set_gain(dB_Q8)", "def get_gain_increment(self, robust=False):\n if self.configuration.get_bool('signal-response'):\n 
self.integration.comments.append(\n f'{{{self.get_covariance():.2f}}}')\n log.debug(f\"covariance = {self.get_covariance():.2f}\")\n\n # Precalculate the gain-weight products...\n # frames.temp_c = signal\n # frames.temp_wc = frame_weight * signal\n # frames.temp_wc2 = frame_weight * signal^2\n self.prepare_frame_temp_fields()\n\n if robust:\n return self.get_robust_gain_increment()\n else:\n return self.get_ml_gain_increment()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
gain_omega_rel(self) > float Returns the relative omega limit.
def gain_omega_rel(self): return _digital_swig.digital_mpsk_receiver_cc_sptr_gain_omega_rel(self)
[ "def get_omega(self):\n return self.omega_", "def wavelength_rel(self) -> float:\n wavelength_rel = (\n sc.h\n / np.sqrt(\n 2 * sc.m_e * sc.e * 1000 * self.voltage * (1 + (sc.e * 1000 * self.voltage) / (2 * sc.m_e * sc.c**2))\n )\n * (10**10)\n )\n return wavelength_rel", "def getRelativeGain(self):\n if len(self.gainSettings) > 0 :\n return self.gainSettings\n\n xdim = len(self.antennaGrid)\n ydim = len(self.antennaGrid[0])\n self.gainSettings = [[self.beamStrength / self.beamStrength for y in range(ydim)] for x in range(xdim)]\n\n return self.gainSettings", "def angular_velocity(self):\r\n\r\n self.omega += self.angular_acceleration*self.dt\r\n return self.omega", "def omega(self):\n\n return xroms.omega(self.ds.u, self.ds.v)", "def relative_rate(self):\n return _raw_util.raw_pnc_frequency_modulator_fc_sptr_relative_rate(self)", "def relative_rate(self):\n return _wavelet_swig.wavelet_ff_sptr_relative_rate(self)", "def _Gain(self, value):\n v = (((self.max-self.min))-float(value))\n v = int(v*10)/10.0\n return v", "def relative_rate(self):\n return _raw_util.raw_divide_ff_sptr_relative_rate(self)", "def relative_rate(self):\n return _radio_astro_swig.detect_sptr_relative_rate(self)", "def angular_velocity(self):\n return 0.0", "def _set_gain(self, adjustment: int) -> int:\n return _lib.opus_decoder_ctl(self._state, CTL_SET_GAIN, adjustment)", "def relative_rate(self):\n return _radio_astro_swig.dedispersion_sptr_relative_rate(self)", "def accel(self):\n return self.force()/self.mass", "def relative_water_level(self): # Task 2B new method\n levelratio = None # defaults output to none\n if isinstance(self.latest_level, float) and (\n self.typical_range_consistent()):\n # ^checks if data exists and is consistent\n levelratio = ((self.latest_level - self.typical_range[0]) / (\n self.typical_range[1] - self.typical_range[0]))\n # sets level ratio as output\n return levelratio # returns output", "def angular_speed_set_point(self):\n return self.radians(self._motor.speed_sp / self._gear_ratio)", "def get_gain_increment(self, robust=False):\n if self.configuration.get_bool('signal-response'):\n self.integration.comments.append(\n f'{{{self.get_covariance():.2f}}}')\n log.debug(f\"covariance = {self.get_covariance():.2f}\")\n\n # Precalculate the gain-weight products...\n # frames.temp_c = signal\n # frames.temp_wc = frame_weight * signal\n # frames.temp_wc2 = frame_weight * signal^2\n self.prepare_frame_temp_fields()\n\n if robust:\n return self.get_robust_gain_increment()\n else:\n return self.get_ml_gain_increment()", "def relative_rate(self):\n return _radio_astro_swig.vmedian_sptr_relative_rate(self)", "def relative_rate(self):\n return _wavelet_swig.wvps_ff_sptr_relative_rate(self)", "def adcGain(PdBm, nBits, adcVfs, thresOpt = OPTIMUM_THRES):\n# Vrms = np.sqrt(10**(PdBm/10.)*.001*50.) #rms of input\n# PdBmThresOpt = thresOpt * Vrms #Optimum threshold for the input\n# adcThres = adcVfs/(2.**(nBits-1))\n# Vratio = PdBmThresOpt/adcThres\n# GdB = 20.*np.log10(Vratio)\n\n Lev = adcVfs/2**(nBits-1)\n Vrms_i = np.sqrt(10**(PdBm/10.)*.001*50.) #rms of input\n G = (Lev/Vrms_i/thresOpt)**2\n GdB = 10*np.log10(G)\n\n return(GdB)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_modulation_order(self, unsigned int M) Sets the modulation order (M) currently.
def set_modulation_order(self, *args, **kwargs): return _digital_swig.digital_mpsk_receiver_cc_sptr_set_modulation_order(self, *args, **kwargs)
[ "def setOrder(order):\n ierr = c_int()\n lib.gmshModelMeshSetOrder(\n c_int(order),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelMeshSetOrder returned non-zero error code: \",\n ierr.value)", "def set_module_order(self, order):\n with self.order_lock:\n self.module_order.set(order)\n self._listeners.notify(\"order\")\n self._listeners.notify(\"dependency\")", "def set_modifier(self, mod):\r\n arg_str = p2e._base._util._convert_args_to_string(\"set.node.modifier\", \r\n self._node._eco_id, mod)\r\n p2e._app.Exec(arg_str)", "def set_rank_order(order):\n global RANK_ORDER\n RANK_ORDER = order", "def set_render_order(self, order):\n self._set_render_order(order)", "def set_order(self, order_key: str) -> None:\n if order_key not in self.orders:\n raise exceptions.CommandError(\n \"Unknown flow order: %s\" % order_key\n )\n order_key = self.orders[order_key]\n self.order_key = order_key\n newview = sortedcontainers.SortedListWithKey(key=order_key)\n newview.update(self._view)\n self._view = newview", "def set_order_str(self, new_order):\n\n values = []\n for value in new_order:\n if value == 'S':\n values.append(1)\n elif value == 'D':\n values.append(2)\n elif value == 'T':\n values.append(3)\n elif value == 'Q':\n values.append(4)\n elif value == 'vdW':\n values.append(0)\n elif value == 'B':\n values.append(1.5)\n elif value == 'H':\n values.append(0.1)\n else:\n # try to see if an float disguised as a string was input by mistake\n try:\n values.append(float(value))\n except ValueError:\n raise TypeError('Bond order {} is not hardcoded into this method'.format(value))\n self.order = values", "def test_set_order_num(self):\n\n self.bond.set_order_num(3)\n self.assertEqual(self.bond.get_order_str(), 'T')", "def set_partialOrdering(self, partialOrder):\n self.partialOrdering = partialOrder", "def set_matrix(self, m):\n self.m = m", "def defineSlideOrder(self, slide_order):\n self.slide_order = slide_order", "def save(self):\n self.enabler.storeOrder()", "def setModeFromMODCOD(self, index):\n self.setMode(self.fromMODCOD(index))", "def module_order_insert(self, mod, index=-1):\n with self.order_lock:\n # Insert module\n self.module_order[index] = mod\n\n # Notify listeners\n self._listeners.notify(\"order\")\n self._listeners.notify(\"dependency\")", "def __setitem__(self, key, mod_id):\n key = self._check_key_valid(key)\n if len(key) > 1 and key[-1] == 0:\n raise IndexError(\"Bad index: cannot replace loop head.\")\n isLoop = self.modules[mod_id].is_loop\n with self.lock:\n o = self.order\n while key:\n i = key.pop(0)\n if i == -1:\n i = len(o)\n elif len(key) > 0:\n o = o[i]\n else:\n if i > len(o):\n raise IndexError(\"Cannot insert item: index too large\")\n if isLoop:\n o.insert(i, [mod_id])\n else:\n o.insert(i, mod_id)\n if self._len_cache is not None:\n self._len_cache += 1", "def set_reaction_order(\n self,\n phase: str,\n order: Union[List[Tuple[str, float]], Dict[str, float]],\n require_all: bool = False,\n ) -> None:\n if bool(order) is False:\n raise ValueError(\"No components provided for reaction order\")\n # schema validation should guarantee this structure\n\n # If 'reaction_order' key does not exist, then create one as a copy of stoich\n if self.NAMES.reaction_order in self.data[self.NAMES.param]:\n ro = self.data[self.NAMES.param][self.NAMES.reaction_order]\n else:\n self.data[self.NAMES.param][self.NAMES.reaction_order] = self.data[\n self.NAMES.stoich\n ].copy()\n ro = self.data[self.NAMES.param][self.NAMES.reaction_order]\n\n if phase not in 
self.PHASES:\n raise ValueError(\n f\"Invalid phase '{phase}'. Valid values: \" f\"{', '.join(self.PHASES)}\"\n )\n if phase not in ro:\n raise KeyError(f\"Phase '{phase}' not found\")\n ro = ro[phase]\n # normalize input to dict form\n if not hasattr(order, \"keys\"):\n order = dict(order)\n # additional checks for 'require_all' flag\n if require_all:\n if len(order) != len(ro):\n why = \"not enough\" if len(order) < len(ro) else \"too many\"\n raise ValueError(\n f\"{why.title()} components provided for new reaction \"\n f\"order, with 'require_all' flag set to True\"\n )\n if set(order.keys()) != set(ro.keys()):\n raise ValueError(\n \"Components in new reaction order do not match \"\n \"components in reaction, with 'require_all' flag \"\n \"set to True\"\n )\n # Replace one component at a time, raising a KeyError if unknown component\n # Ensure that the instance is not modified if there are any errors.\n ro_tmp = ro.copy()\n for key, value in order.items():\n if key not in ro:\n raise KeyError(f\"Component '{key}' not found in reaction\")\n ro_tmp[key] = value\n # Update reaction order in this object\n self.data[self.NAMES.param][self.NAMES.reaction_order][phase] = ro_tmp", "def set_to_modificar(self):\n self.write({'state': 'modificar'})", "def SetNewManualOrder(self):\n self.sortMode = \"manual\"\n self.isManuallySorted = True\n self.lastManuallySortedEntries = self.entries\n \n self.ManualSortingEnabled.emit()", "def test_set_order_str(self):\n\n self.bond.set_order_str(\"B\")\n self.assertEqual(self.bond.order, 1.5)", "def replace_order(self,\n order_specifier: OrderSpecifier = sentinel,\n order_request: OrderRequest = sentinel):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_omega(self, float omega) Sets value of omega and its min and max values.
def set_omega(self, *args, **kwargs): return _digital_swig.digital_mpsk_receiver_cc_sptr_set_omega(self, *args, **kwargs)
[ "def set_omega(self, omega):\n np.copyto(self.omega_, omega)", "def get_omega(self):\n return self.omega_", "def omega(self):\n\n return xroms.omega(self.ds.u, self.ds.v)", "def valuation(self, omega):\n raise NotImplementedError('Override in Place subtype')", "def servo_set_accel_limit(ch, accel):\n\n # Check to make sure speed is in range\n speed = max(accel, accel_limit_min)\n speed = min(accel, accel_limit_max)\n\n # Send command to servo controller\n servo_send_cmd(cmd_set_accel, ch, accel)", "def setAcceleration(self, a):\n a = float(a)\n if a < 0:\n raise Exception(\"Acceleration must be >= 0\")\n\n self.accel = float(a)\n\n if a > 0:\n self.accel_time = (self.max_vel - self.min_vel) / a\n else:\n self.accel_time = float('INF')\n\n self.__recalculate_acc_constants()", "def setRange(x='0.0', oldmin='0.0', oldmax='1.0', newmin='0.0', newmax='1.0'):\n\n pass", "def angular_velocity(self):\r\n\r\n self.omega += self.angular_acceleration*self.dt\r\n return self.omega", "def setRange(self, min, max, label=''):\n self.min=min\n self.max=max\n self.label=label\n self.drawAxis()", "def setMoleFractions(self, x):\n _cantera.bdry_setMoleFractions(self._hndl, x)", "def diffractometer_omega_reference_changed(self, omega_reference):\n self.graphics_omega_reference_item.set_reference(omega_reference)", "def velocity_limit(self, velocity_limit):\n\n self._velocity_limit = velocity_limit", "def control_range(self, value):\n self.__control_range = value", "def setThrottle(self, throttle):\n \n self._throttle = float(throttle) \n absThrottle = abs(self._throttle)\n \n #Fordwards or backwards movement\n #TODO: 20181114 DPM: This is not required to do if the throttle sign was not changed\n if self._throttle >= 0.0:\n SysfsWriter.writeOnce(\"0\", \"/sys/class/gpio/gpio{0}/value\".format(self._gpioId))\n else:\n SysfsWriter.writeOnce(\"1\", \"/sys/class/gpio/gpio{0}/value\".format(self._gpioId))\n \n\n #Throttle\n if absThrottle > 0.0 and absThrottle <= Motor.MAX_THROTTLE: \n \n self._duty = int((self._rangeDuty * absThrottle) + self._minDuty)\n \n elif absThrottle == 0.0:\n self._setNeutralThrottle()\n \n else: # absThrottle > Motor.MAX_THROTTLE\n self._duty = int((self._rangeDuty * Motor.MAX_THROTTLE) + self._minDuty)\n self._throttle = Motor.MAX_THROTTLE if self._throttle > 0.0 else -Motor.MAX_THROTTLE\n\n self._sysfsWriter.write(str(self._duty))", "def set_motor_velocity_limits(self, max_velocity, max_acceleration):\n self.sdk.SCC_SetMotorVelocityLimits(self._serial, max_velocity, max_acceleration)", "def set_params_range(self):\n pass", "def setAcceleration(self, a):\n pass", "def set_motor_limits(self, max_accel, max_deccel):\n if not isinstance(max_accel, int) or not isinstance(max_deccel, int):\n raise ValueError(\"Must use int set limits\")\n\n max_accel = clip(max_accel, 0, 3200)\n max_deccel = clip(max_deccel, 0, 3200)\n\n accel_b1 = max_accel % 128\n accel_b2 = max_accel // 128\n\n deccel_b1 = max_deccel % 128\n deccel_b2 = max_deccel // 128\n\n # max speed = self._send_command_three(CMD.MOTOR_LIMIT, 0, 0, 0)\n self._send_command_three(CMD.MOTOR_LIMIT, 1, accel_b1, accel_b2)\n self._send_command_three(CMD.MOTOR_LIMIT, 2, deccel_b1, deccel_b2)", "def update(self, value: float):\n self.maximum = max(self.maximum, value)\n self.minimum = min(self.minimum, value)", "def setVoltageRange(self, lower, upper):\n self.write(\"VOLT:LIM %f\" % float(lower))\n self.write(\"VOLT:RANG %f\" % float(upper))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_gain_mu(self, float gain_mu) Sets the value for the mu gain factor. (A usage sketch follows this row.)
def set_gain_mu(self, *args, **kwargs): return _digital_swig.digital_mpsk_receiver_cc_sptr_set_gain_mu(self, *args, **kwargs)
[ "def set_gain(self, gain):\n if gain is None:\n r = self.subdev.gain_range()\n gain = (r[0] + r[1])/2 # set gain to midpoint\n self.gain = gain\n return self.subdev.set_gain(gain)", "def setGain(self, gain: 'float') -> \"void\":\n return _coin.SoAudioDevice_setGain(self, gain)", "def mag_gain(self, gain=0x20):\n self._mag_gain = gain\n self.i2c.writeto_mem(self.ADDRESS_MAG, self.REGISTER_MAG_CRB_REG_M, self._mag_gain)\n if self._mag_gain == MAGGAIN_1_3:\n self._lsm303mag_gauss_lsb_xy = 1100.0\n self._lsm303mag_gauss_lsb_z = 980.0\n elif self._mag_gain == MAGGAIN_1_9:\n self._lsm303mag_gauss_lsb_xy = 855.0\n self._lsm303mag_gauss_lsb_z = 760.0\n elif self._mag_gain == MAGGAIN_2_5:\n self._lsm303mag_gauss_lsb_xy = 670.0\n self._lsm303mag_gauss_lsb_z = 600.0\n elif self._mag_gain == MAGGAIN_4_0:\n self._lsm303mag_gauss_lsb_xy = 450.0\n self._lsm303mag_gauss_lsb_z = 400.0\n elif self._mag_gain == MAGGAIN_4_7:\n self._lsm303mag_gauss_lsb_xy = 400.0\n self._lsm303mag_gauss_lsb_z = 355.0\n elif self._mag_gain == MAGGAIN_5_6:\n self._lsm303mag_gauss_lsb_xy = 330.0\n self._lsm303mag_gauss_lsb_z = 295.0\n elif self._mag_gain == MAGGAIN_8_1:\n self._lsm303mag_gauss_lsb_xy = 230.0\n self._lsm303mag_gauss_lsb_z = 205.0", "def set_analog_gain(self, gain):\n if gain < 0:\n raise ValueError('Gain register must be greater than 0.')\n self.i2c.mem_write(int(gain), self.bus_addr, 1)", "def _set_gain(self, adjustment: int) -> int:\n return _lib.opus_decoder_ctl(self._state, CTL_SET_GAIN, adjustment)", "def set(self, selfenergy, mu):\n if self.filling is None:\n assert type(mu) in [float, int,\n complex], \"Unexpected type or class of mu.\"\n self.calculate(selfenergy, self.make_matrix(mu))\n else:\n mu = self.find_and_set_mu(\n self.filling, selfenergy, mu, self.dmu_max)\n return mu", "def set_pga_gain(self, pga_num, gain):\n\t\treturn self.config_ads(pga_num, 2, gain)", "def set_sum_input_gain(self, input_channel: Channel, gain: float):\n assert Channel.INPUT_A <= input_channel <= Channel.INPUT_C\n return self._invoke(0x16 + input_channel - Channel.INPUT_A, Channel.SETUP, _15db_range(gain))", "def test_set_gain():\n _setup()\n\n as7262.set_gain(1)\n assert as7262._as7262.CONTROL.get_gain_x() == 1\n\n # Should snap to the highest gain value\n as7262.set_gain(999)\n assert as7262._as7262.CONTROL.get_gain_x() == 64\n\n # Should snap to the lowest gain value\n as7262.set_gain(-1)\n assert as7262._as7262.CONTROL.get_gain_x() == 1", "def Set_ALS_Gain(self,gain):\n\t\tif gain in self._Gain_LOOKUP:\n\t\t\tregval = self._read_reg(self._REG_ALS_CONTR)\n\t\t\tregval = (regval & self._Gain_CLEAR) | self._Gain_LOOKUP[gain][0]\n\t\t\tself._write_reg(self._REG_ALS_CONTR,regval)", "def setMasterVolumeMultiplier(self, volumeMul):\n from audio import GameSound\n GameSound().setVolumeAmplifier(volumeMul)", "def apply_gain(infile, gain):\n fs1, x = monoWavRead(filename=infile)\n\n x = np.copy(x)\n x = x * (10 ** (gain / 20.0))\n x = np.minimum(np.maximum(-1.0, x), 1.0)\n #Change the output file name to suit your requirements here\n outfile_name = os.path.basename(infile).split(\".\")[0] + (\"_gain%s.wav\" % str(gain))\n outfile = os.path.join(outfile_path, outfile_name)\n write(filename = outfile, rate = fs1, data = x)\n if (FILE_DELETION):\n extractFeaturesAndDelete(outfile)", "def set_gain_or_loss_formatted(self, gain_or_loss_formatted):\n self.gain_or_loss_formatted = gain_or_loss_formatted", "def buy_volume(self, buy_volume):\n if buy_volume is None:\n raise ValueError(\"Invalid value for `buy_volume`, must not be 
`None`\") # noqa: E501\n\n self._buy_volume = buy_volume", "def set_volume_level(self, volume: float) -> None:\n self.send_command([\"mixer\", \"volume\", volume * 100])", "def set_gain(self, dB: float) -> int:\n\n dB_Q8 = max(-32768, min(32767, round(dB * 256))) # dB * 2^n where n is 8 (Q8)\n return self._set_gain(dB_Q8)", "def gain(self, g):\n return self.normalize(0, 1, scale=g)", "def set_music_volume(cls, new_volume: float) -> None:\n new_volume = max(0.0, min(1.0, new_volume))\n cls.music_volume = new_volume", "def set_gain(self):\n DescStr = 'Setting Gain for AHF_Camera '\n if (self.AHFgainMode & 2):\n DescStr += 'from current illumination'\n else:\n DescStr += \"from ISO \" + str(self.iso)\n if (self.AHFgainMode & 1):\n DescStr += ' with white balancing'\n else:\n DescStr += \" with No white balancing\"\n print (DescStr)\n if (self.AHFgainMode & 1):\n self.awb_mode = 'auto'\n else:\n self.awb_mode = 'off'\n self.awb_gains = (1, 1)\n # if (self.AHFgainMode & 2):\n self.exposure_mode = 'auto'\n # else:\n # self.exposure_mode = 'off'\n super().start_preview(fullscreen=False, window=self.AHFpreview)\n sleep(2.0) # let gains settle, then fix values\n if (self.AHFgainMode & 1):\n savedGain = self.awb_gains\n self.awb_gains = savedGain\n self.awb_mode = \"off\"\n # if (self.AHFgainMode & 2):\n self.exposure_mode = 'off'\n super().stop_preview()\n print (\"Red Gain for white balance =\" + str(float(self.awb_gains[0])))\n print (\"Blue Gain for white balance =\" + str(float(self.awb_gains[1])))\n print (\"Analog Gain = \" + str(float(self.analog_gain)))\n print (\"Digital Gain = \" + str(float(self.digital_gain)))\n return", "def set_chan_unit(self, chan, unit):\n self._set_chan_unit(chan, unit.encode())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_gain_omega(self, float gain_omega) Sets the value for the omega gain factor. (A usage sketch follows this row.)
def set_gain_omega(self, *args, **kwargs): return _digital_swig.digital_mpsk_receiver_cc_sptr_set_gain_omega(self, *args, **kwargs)
[ "def set_gain(self, gain):\n if gain is None:\n r = self.subdev.gain_range()\n gain = (r[0] + r[1])/2 # set gain to midpoint\n self.gain = gain\n return self.subdev.set_gain(gain)", "def set_omega(self, omega):\n np.copyto(self.omega_, omega)", "def set_analog_gain(self, gain):\n if gain < 0:\n raise ValueError('Gain register must be greater than 0.')\n self.i2c.mem_write(int(gain), self.bus_addr, 1)", "def _set_gain(self, adjustment: int) -> int:\n return _lib.opus_decoder_ctl(self._state, CTL_SET_GAIN, adjustment)", "def setGain(self, gain: 'float') -> \"void\":\n return _coin.SoAudioDevice_setGain(self, gain)", "def set_pga_gain(self, pga_num, gain):\n\t\treturn self.config_ads(pga_num, 2, gain)", "def mag_gain(self, gain=0x20):\n self._mag_gain = gain\n self.i2c.writeto_mem(self.ADDRESS_MAG, self.REGISTER_MAG_CRB_REG_M, self._mag_gain)\n if self._mag_gain == MAGGAIN_1_3:\n self._lsm303mag_gauss_lsb_xy = 1100.0\n self._lsm303mag_gauss_lsb_z = 980.0\n elif self._mag_gain == MAGGAIN_1_9:\n self._lsm303mag_gauss_lsb_xy = 855.0\n self._lsm303mag_gauss_lsb_z = 760.0\n elif self._mag_gain == MAGGAIN_2_5:\n self._lsm303mag_gauss_lsb_xy = 670.0\n self._lsm303mag_gauss_lsb_z = 600.0\n elif self._mag_gain == MAGGAIN_4_0:\n self._lsm303mag_gauss_lsb_xy = 450.0\n self._lsm303mag_gauss_lsb_z = 400.0\n elif self._mag_gain == MAGGAIN_4_7:\n self._lsm303mag_gauss_lsb_xy = 400.0\n self._lsm303mag_gauss_lsb_z = 355.0\n elif self._mag_gain == MAGGAIN_5_6:\n self._lsm303mag_gauss_lsb_xy = 330.0\n self._lsm303mag_gauss_lsb_z = 295.0\n elif self._mag_gain == MAGGAIN_8_1:\n self._lsm303mag_gauss_lsb_xy = 230.0\n self._lsm303mag_gauss_lsb_z = 205.0", "def set_gain(self, dB: float) -> int:\n\n dB_Q8 = max(-32768, min(32767, round(dB * 256))) # dB * 2^n where n is 8 (Q8)\n return self._set_gain(dB_Q8)", "def test_set_gain():\n _setup()\n\n as7262.set_gain(1)\n assert as7262._as7262.CONTROL.get_gain_x() == 1\n\n # Should snap to the highest gain value\n as7262.set_gain(999)\n assert as7262._as7262.CONTROL.get_gain_x() == 64\n\n # Should snap to the lowest gain value\n as7262.set_gain(-1)\n assert as7262._as7262.CONTROL.get_gain_x() == 1", "def diffractometer_omega_reference_changed(self, omega_reference):\n self.graphics_omega_reference_item.set_reference(omega_reference)", "def set_gain(self):\n DescStr = 'Setting Gain for AHF_Camera '\n if (self.AHFgainMode & 2):\n DescStr += 'from current illumination'\n else:\n DescStr += \"from ISO \" + str(self.iso)\n if (self.AHFgainMode & 1):\n DescStr += ' with white balancing'\n else:\n DescStr += \" with No white balancing\"\n print (DescStr)\n if (self.AHFgainMode & 1):\n self.awb_mode = 'auto'\n else:\n self.awb_mode = 'off'\n self.awb_gains = (1, 1)\n # if (self.AHFgainMode & 2):\n self.exposure_mode = 'auto'\n # else:\n # self.exposure_mode = 'off'\n super().start_preview(fullscreen=False, window=self.AHFpreview)\n sleep(2.0) # let gains settle, then fix values\n if (self.AHFgainMode & 1):\n savedGain = self.awb_gains\n self.awb_gains = savedGain\n self.awb_mode = \"off\"\n # if (self.AHFgainMode & 2):\n self.exposure_mode = 'off'\n super().stop_preview()\n print (\"Red Gain for white balance =\" + str(float(self.awb_gains[0])))\n print (\"Blue Gain for white balance =\" + str(float(self.awb_gains[1])))\n print (\"Analog Gain = \" + str(float(self.analog_gain)))\n print (\"Digital Gain = \" + str(float(self.digital_gain)))\n return", "def get_omega(self):\n return self.omega_", "def setCharge(self, charge):\n\t\tself._charge = charge", "def OnSetGamma(self, 
evt=None):\n\t\t#self.gamma = self.gammaSlider.GetValue() * 2\n\t\t#print( 'new gamma: {}'.format( self.gamma ) )\n\t\t#if( self.SetGammaCallback ):\n\t\t\t#self.SetGammaCallback( self.gamma )\n\t\tself._OnSet( self.gamma, 'gamma', self.gammaSlider, self.SetGammaCallback )", "def set_sum_input_gain(self, input_channel: Channel, gain: float):\n assert Channel.INPUT_A <= input_channel <= Channel.INPUT_C\n return self._invoke(0x16 + input_channel - Channel.INPUT_A, Channel.SETUP, _15db_range(gain))", "def apply_gain(infile, gain):\n fs1, x = monoWavRead(filename=infile)\n\n x = np.copy(x)\n x = x * (10 ** (gain / 20.0))\n x = np.minimum(np.maximum(-1.0, x), 1.0)\n #Change the output file name to suit your requirements here\n outfile_name = os.path.basename(infile).split(\".\")[0] + (\"_gain%s.wav\" % str(gain))\n outfile = os.path.join(outfile_path, outfile_name)\n write(filename = outfile, rate = fs1, data = x)\n if (FILE_DELETION):\n extractFeaturesAndDelete(outfile)", "def set_calibration(self, channel: int, gain: float = 1.0,\n offset: float = 0.0):\n self.cal_functions.update({channel: lambda x: gain*x + offset})", "def set_manual_gain_enabled(self, enabled):\n result = librtlsdr.rtlsdr_set_tuner_gain_mode(self.dev_p, int(enabled))\n if result < 0:\n raise IOError('Error code %d when setting gain mode'\\\n % (result))\n\n return", "def Set_ALS_Gain(self,gain):\n\t\tif gain in self._Gain_LOOKUP:\n\t\t\tregval = self._read_reg(self._REG_ALS_CONTR)\n\t\t\tregval = (regval & self._Gain_CLEAR) | self._Gain_LOOKUP[gain][0]\n\t\t\tself._write_reg(self._REG_ALS_CONTR,regval)", "def setGravity( self, angle, accel ):\n self.xaccel = cos( angle ) * accel\n self.yaccel = sin( angle ) * accel\n #self.setGravAngle( angle )\n #self.setGravAccel( accel )\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_gain_omega_rel(self, float omega_rel) Sets the relative omega limit and resets the omega min/max values. (A usage sketch follows this row.)
def set_gain_omega_rel(self, *args, **kwargs): return _digital_swig.digital_mpsk_receiver_cc_sptr_set_gain_omega_rel(self, *args, **kwargs)
[ "def set_omega(self, omega):\n np.copyto(self.omega_, omega)", "def set_gain(self, gain):\n if gain is None:\n r = self.subdev.gain_range()\n gain = (r[0] + r[1])/2 # set gain to midpoint\n self.gain = gain\n return self.subdev.set_gain(gain)", "def _set_gain(self, adjustment: int) -> int:\n return _lib.opus_decoder_ctl(self._state, CTL_SET_GAIN, adjustment)", "def servo_set_accel_limit(ch, accel):\n\n # Check to make sure speed is in range\n speed = max(accel, accel_limit_min)\n speed = min(accel, accel_limit_max)\n\n # Send command to servo controller\n servo_send_cmd(cmd_set_accel, ch, accel)", "def diffractometer_omega_reference_changed(self, omega_reference):\n self.graphics_omega_reference_item.set_reference(omega_reference)", "def mag_gain(self, gain=0x20):\n self._mag_gain = gain\n self.i2c.writeto_mem(self.ADDRESS_MAG, self.REGISTER_MAG_CRB_REG_M, self._mag_gain)\n if self._mag_gain == MAGGAIN_1_3:\n self._lsm303mag_gauss_lsb_xy = 1100.0\n self._lsm303mag_gauss_lsb_z = 980.0\n elif self._mag_gain == MAGGAIN_1_9:\n self._lsm303mag_gauss_lsb_xy = 855.0\n self._lsm303mag_gauss_lsb_z = 760.0\n elif self._mag_gain == MAGGAIN_2_5:\n self._lsm303mag_gauss_lsb_xy = 670.0\n self._lsm303mag_gauss_lsb_z = 600.0\n elif self._mag_gain == MAGGAIN_4_0:\n self._lsm303mag_gauss_lsb_xy = 450.0\n self._lsm303mag_gauss_lsb_z = 400.0\n elif self._mag_gain == MAGGAIN_4_7:\n self._lsm303mag_gauss_lsb_xy = 400.0\n self._lsm303mag_gauss_lsb_z = 355.0\n elif self._mag_gain == MAGGAIN_5_6:\n self._lsm303mag_gauss_lsb_xy = 330.0\n self._lsm303mag_gauss_lsb_z = 295.0\n elif self._mag_gain == MAGGAIN_8_1:\n self._lsm303mag_gauss_lsb_xy = 230.0\n self._lsm303mag_gauss_lsb_z = 205.0", "def set_analog_gain(self, gain):\n if gain < 0:\n raise ValueError('Gain register must be greater than 0.')\n self.i2c.mem_write(int(gain), self.bus_addr, 1)", "def getRelativeGain(self):\n if len(self.gainSettings) > 0 :\n return self.gainSettings\n\n xdim = len(self.antennaGrid)\n ydim = len(self.antennaGrid[0])\n self.gainSettings = [[self.beamStrength / self.beamStrength for y in range(ydim)] for x in range(xdim)]\n\n return self.gainSettings", "def set_motor_params_ext(self, steps_per_rev, gear_box_ratio, pitch):\n self.sdk.SCC_SetMotorParamsExt(self._serial, steps_per_rev, gear_box_ratio, pitch)", "def set_manual_gain_enabled(self, enabled):\n result = librtlsdr.rtlsdr_set_tuner_gain_mode(self.dev_p, int(enabled))\n if result < 0:\n raise IOError('Error code %d when setting gain mode'\\\n % (result))\n\n return", "def set_gain(self, dB: float) -> int:\n\n dB_Q8 = max(-32768, min(32767, round(dB * 256))) # dB * 2^n where n is 8 (Q8)\n return self._set_gain(dB_Q8)", "def setOptBoundLower(self, optBoundLower):\n return _core.CGPopt_setOptBoundLower(self, optBoundLower)", "async def set_max_mod(call: ServiceCall) -> None:\n gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]\n level = call.data[ATTR_LEVEL]\n if level == -1:\n # Backend only clears setting on non-numeric values.\n level = \"-\"\n await gw_dev.gateway.set_max_relative_mod(level)", "def set_motor_limits(self, max_accel, max_deccel):\n if not isinstance(max_accel, int) or not isinstance(max_deccel, int):\n raise ValueError(\"Must use int set limits\")\n\n max_accel = clip(max_accel, 0, 3200)\n max_deccel = clip(max_deccel, 0, 3200)\n\n accel_b1 = max_accel % 128\n accel_b2 = max_accel // 128\n\n deccel_b1 = max_deccel % 128\n deccel_b2 = max_deccel // 128\n\n # max speed = self._send_command_three(CMD.MOTOR_LIMIT, 0, 0, 0)\n 
self._send_command_three(CMD.MOTOR_LIMIT, 1, accel_b1, accel_b2)\n self._send_command_three(CMD.MOTOR_LIMIT, 2, deccel_b1, deccel_b2)", "def setGravity( self, angle, accel ):\n self.xaccel = cos( angle ) * accel\n self.yaccel = sin( angle ) * accel\n #self.setGravAngle( angle )\n #self.setGravAccel( accel )\n return", "def set_pga_gain(self, pga_num, gain):\n\t\treturn self.config_ads(pga_num, 2, gain)", "def set_attenuation(self, attenuation):\n pass", "def test_set_gain():\n _setup()\n\n as7262.set_gain(1)\n assert as7262._as7262.CONTROL.get_gain_x() == 1\n\n # Should snap to the highest gain value\n as7262.set_gain(999)\n assert as7262._as7262.CONTROL.get_gain_x() == 64\n\n # Should snap to the lowest gain value\n as7262.set_gain(-1)\n assert as7262._as7262.CONTROL.get_gain_x() == 1", "def target_field_width_relative(self, target_field_width_relative):\n\n self._target_field_width_relative = target_field_width_relative", "def setReductionRatio(self, rRatio) -> None:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
mpsk_receiver_cc(unsigned int M, float theta, float loop_bw, float fmin, float fmax, float mu, float gain_mu, float omega, float gain_omega, float omega_rel) > digital_mpsk_receiver_cc_sptr This block takes care of receiving MPSK-modulated signals through phase, frequency, and symbol synchronization. It performs carrier frequency and phase locking as well as symbol timing recovery. It works with (D)BPSK, (D)QPSK, and (D)8PSK as currently tested, and should also work for OQPSK and PI/4 DQPSK. The phase and frequency synchronization are based on a Costas loop that finds the error of the incoming signal point relative to its nearest constellation point; the frequency and phase of the NCO are updated according to this error. There are optimized phase error detectors for BPSK and QPSK, but 8PSK is handled by a brute-force search over the constellation points to find the minimum error. (A parameter-by-parameter construction sketch follows the definition below.)
def mpsk_receiver_cc(*args, **kwargs): return _digital_swig.mpsk_receiver_cc(*args, **kwargs)
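To make the ten-argument constructor concrete, a hedged sketch mapping each positional argument to its named parameter; the argument order comes from the signature in this row, while every numeric value is a placeholder rather than a recommended setting:

    from gnuradio import digital

    rx = digital.mpsk_receiver_cc(
        4,            # M: constellation order (4 = QPSK)
        0.0,          # theta: constellation rotation in radians
        0.062,        # loop_bw: bandwidth of the Costas carrier loop
        -0.25, 0.25,  # fmin, fmax: NCO frequency bounds (rad/sample)
        0.5,          # mu: initial symbol-timing phase estimate
        0.175,        # gain_mu: timing-phase gain (see set_gain_mu)
        2.0,          # omega: nominal samples per symbol
        0.005,        # gain_omega: samples-per-symbol gain
        0.005)        # omega_rel: allowed relative deviation of omega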
[ "def _my_cNR(self, DM_mass, NLO=None):\n if NLO is None:\n NLO = False\n\n ### Input parameters ####\n\n mpi = self.ip['mpi0']\n mp = self.ip['mproton']\n mn = self.ip['mneutron']\n mN = (mp+mn)/2\n\n alpha = 1/self.ip['alowinv']\n GF = self.ip['GF']\n\n as_2GeV = self.ip['as_at_2GeV']\n\n gs2_2GeV = 4*np.pi*as_2GeV\n\n # Quark masses at 2GeV\n mu = self.ip['mu_at_2GeV']\n md = self.ip['md_at_2GeV']\n ms = self.ip['ms_at_2GeV']\n mtilde = 1/(1/mu + 1/md + 1/ms)\n \n # Lepton masses\n me = self.ip['me']\n mmu = self.ip['mmu']\n mtau = self.ip['mtau']\n\n # Z boson mass\n MZ = self.ip['Mz']\n\n ### Numerical constants\n mproton = self.ip['mproton']\n mneutron = self.ip['mneutron']\n\n F1up = F1('u', 'p', self.ip).value_zero_mom()\n F1dp = F1('d', 'p', self.ip).value_zero_mom()\n F1sp = F1('s', 'p', self.ip).value_zero_mom()\n\n F1un = F1('u', 'n', self.ip).value_zero_mom()\n F1dn = F1('d', 'n', self.ip).value_zero_mom()\n F1sn = F1('s', 'n', self.ip).value_zero_mom()\n\n F1spslope = F1('s', 'p', self.ip).first_deriv_zero_mom()\n F1snslope = F1('s', 'n', self.ip).first_deriv_zero_mom()\n\n F2up = F2('u', 'p', self.ip).value_zero_mom()\n F2dp = F2('d', 'p', self.ip).value_zero_mom()\n F2sp = F2('s', 'p', self.ip).value_zero_mom()\n\n F2un = F2('u', 'n', self.ip).value_zero_mom()\n F2dn = F2('d', 'n', self.ip).value_zero_mom()\n F2sn = F2('s', 'n', self.ip).value_zero_mom()\n\n FAup = FA('u', 'p', self.ip).value_zero_mom()\n FAdp = FA('d', 'p', self.ip).value_zero_mom()\n FAsp = FA('s', 'p', self.ip).value_zero_mom()\n\n FAun = FA('u', 'n', self.ip).value_zero_mom()\n FAdn = FA('d', 'n', self.ip).value_zero_mom()\n FAsn = FA('s', 'n', self.ip).value_zero_mom()\n\n FPpup_pion = FPprimed('u', 'p', self.ip).value_pion_pole()\n FPpdp_pion = FPprimed('d', 'p', self.ip).value_pion_pole()\n FPpsp_pion = FPprimed('s', 'p', self.ip).value_pion_pole()\n\n FPpun_pion = FPprimed('u', 'n', self.ip).value_pion_pole()\n FPpdn_pion = FPprimed('d', 'n', self.ip).value_pion_pole()\n FPpsn_pion = FPprimed('s', 'n', self.ip).value_pion_pole()\n\n FPpup_eta = FPprimed('u', 'p', self.ip).value_eta_pole()\n FPpdp_eta = FPprimed('d', 'p', self.ip).value_eta_pole()\n FPpsp_eta = FPprimed('s', 'p', self.ip).value_eta_pole()\n\n FPpun_eta = FPprimed('u', 'n', self.ip).value_eta_pole()\n FPpdn_eta = FPprimed('d', 'n', self.ip).value_eta_pole()\n FPpsn_eta = FPprimed('s', 'n', self.ip).value_eta_pole()\n\n FSup = FS('u', 'p', self.ip).value_zero_mom()\n FSdp = FS('d', 'p', self.ip).value_zero_mom()\n FSsp = FS('s', 'p', self.ip).value_zero_mom()\n\n FSun = FS('u', 'n', self.ip).value_zero_mom()\n FSdn = FS('d', 'n', self.ip).value_zero_mom()\n FSsn = FS('s', 'n', self.ip).value_zero_mom()\n\n FPup_pion = FP('u', 'p', self.ip).value_pion_pole()\n FPdp_pion = FP('d', 'p', self.ip).value_pion_pole()\n FPsp_pion = FP('s', 'p', self.ip).value_pion_pole()\n\n FPun_pion = FP('u', 'n', self.ip).value_pion_pole()\n FPdn_pion = FP('d', 'n', self.ip).value_pion_pole()\n FPsn_pion = FP('s', 'n', self.ip).value_pion_pole()\n\n FPup_eta = FP('u', 'p', self.ip).value_eta_pole()\n FPdp_eta = FP('d', 'p', self.ip).value_eta_pole()\n FPsp_eta = FP('s', 'p', self.ip).value_eta_pole()\n\n FPun_eta = FP('u', 'n', self.ip).value_eta_pole()\n FPdn_eta = FP('d', 'n', self.ip).value_eta_pole()\n FPsn_eta = FP('s', 'n', self.ip).value_eta_pole()\n\n FGp = FG('p', self.ip).value_zero_mom()\n FGn = FG('n', self.ip).value_zero_mom()\n\n FGtildep = FGtilde('p', self.ip).value_zero_mom()\n FGtilden = FGtilde('n', self.ip).value_zero_mom()\n\n 
FGtildep_pion = FGtilde('p', self.ip).value_pion_pole()\n FGtilden_pion = FGtilde('n', self.ip).value_pion_pole()\n\n FGtildep_eta = FGtilde('p', self.ip).value_eta_pole()\n FGtilden_eta = FGtilde('n', self.ip).value_eta_pole()\n\n FT0up = FT0('u', 'p', self.ip).value_zero_mom()\n FT0dp = FT0('d', 'p', self.ip).value_zero_mom()\n FT0sp = FT0('s', 'p', self.ip).value_zero_mom()\n\n FT0un = FT0('u', 'n', self.ip).value_zero_mom()\n FT0dn = FT0('d', 'n', self.ip).value_zero_mom()\n FT0sn = FT0('s', 'n', self.ip).value_zero_mom()\n\n FT1up = FT1('u', 'p', self.ip).value_zero_mom()\n FT1dp = FT1('d', 'p', self.ip).value_zero_mom()\n FT1sp = FT1('s', 'p', self.ip).value_zero_mom()\n\n FT1un = FT1('u', 'n', self.ip).value_zero_mom()\n FT1dn = FT1('d', 'n', self.ip).value_zero_mom()\n FT1sn = FT1('s', 'n', self.ip).value_zero_mom()\n\n FTW2up = FTwist2('u', 'p', self.ip).value_zero_mom()\n FTW2dp = FTwist2('d', 'p', self.ip).value_zero_mom()\n FTW2sp = FTwist2('s', 'p', self.ip).value_zero_mom()\n\n FTW2gp = FTwist2('g', 'p', self.ip).value_zero_mom()\n\n FTW2un = FTwist2('u', 'n', self.ip).value_zero_mom()\n FTW2dn = FTwist2('d', 'n', self.ip).value_zero_mom()\n FTW2sn = FTwist2('s', 'n', self.ip).value_zero_mom()\n\n FTW2gn = FTwist2('g', 'n', self.ip).value_zero_mom()\n\n ### The coefficients ###\n #\n # Note that all dependence on q^2, 1/q^2, 1/(m^2-q^2), q^2/(m^2-q^2) is taken care of\n # by defining spurious operators.\n #\n # Therefore, we need to split some of the coefficients\n # into the \"pion part\" etc. with the q-dependence factored out,\n # and introduce a few spurious \"long-distance\" operators.\n #\n # The coefficients cNR1 -- cNR14 correspond to the operators in 1611.00368 and 1308.6288\n #\n # Therefore, we define O6pi = O6/(mpi^2+q^2); \n # O6eta = O6/(meta^2+q^2);\n # O6q2pi = O6*q^2/(mpi^2+q^2);\n # O6q2eta = O6*q^2/(meta^2+q^2);\n # O10pi = O10/(mpi^2+q^2);\n # O10eta = O10/(meta^2+q^2);\n # O10q2pi = O10*q^2/(mpi^2+q^2);\n # O10q2eta = O10*q^2/(meta^2+q^2);\n #\n # For the dipole interactions, these are the ones that have c2p1, c1N2, c2p2 as coefficients. \n # Therefore, we define O5bq2 = O5/q^2; \n # O6bq2 = O6/q^2.\n # O11bq2 = O11/q^2.\n # \n # For the tensors, O4 * q^2 appears as a leading contribution.\n # Therefore, we define O4q2 = O4 * q^2\n #\n # For the tensors, O1 * q^2 appears as a subleading contribution.\n # Therefore, we define O1q2 = O1 * q^2\n #\n # q^2 is here always the spatial part!!! 
\n #\n\n c3mu_dict = self.coeff_dict\n\n if self.DM_type == \"D\":\n my_cNR_dict = {\n 'cNR1p' : F1up*(c3mu_dict['C61u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C81u'])\\\n + F1dp*(c3mu_dict['C61d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C81d'])\\\n + FGp*c3mu_dict['C71']\\\n + FSup*c3mu_dict['C75u'] + FSdp*c3mu_dict['C75d'] + FSsp*c3mu_dict['C75s']\\\n - alpha/(2*np.pi*DM_mass)*c3mu_dict['C51']\\\n + FTW2up*c3mu_dict['C723u']\\\n + FTW2dp*c3mu_dict['C723d']\\\n + FTW2sp*c3mu_dict['C723s']\\\n + FTW2gp*c3mu_dict['C725'],\n 'cNR2p' : 0,\n 'cNR3p' : F2sp*(c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4p' : - 4*( FAup*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FAdp*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FAsp*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s']))\\\n - 2*alpha/np.pi * self.ip['mup']/mN * c3mu_dict['C51']\\\n + 8*(FT0up*c3mu_dict['C79u'] + FT0dp*c3mu_dict['C79d'] + FT0sp*c3mu_dict['C79s']),\n 'cNR5p' : - 2*mN * ( F1up*c3mu_dict['C715u']\\\n + F1dp*c3mu_dict['C715d']\\\n + F1sp*c3mu_dict['C715s']),\n 'cNR6p' : mN/DM_mass * FGtildep * c3mu_dict['C74']\\\n -2*mN*( (F1up+F2up)*c3mu_dict['C715u']\\\n + (F1dp+F2dp)*c3mu_dict['C715d']\\\n + (F1sp+F2sp)*c3mu_dict['C715s'])\\\n + mN/DM_mass*F2sp*(c3mu_dict['C61s']\n - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR7p' : - 2*( FAup*(c3mu_dict['C63u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C83u'])\\\n + FAdp*(c3mu_dict['C63d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C83d'])\\\n + FAsp*(c3mu_dict['C63s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C83s'])),\n 'cNR8p' : 2*( F1up*(c3mu_dict['C62u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + F1dp*(c3mu_dict['C62d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])),\n 'cNR9p' : 2*( (F1up+F2up)*(c3mu_dict['C62u']\\\n - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + (F1dp+F2dp)*(c3mu_dict['C62d']\\\n - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])\\\n + (F1sp+F2sp)*(c3mu_dict['C62s']\\\n - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C82s']))\\\n + 2*mN*( FAup*(c3mu_dict['C63u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C83u'])\\\n + FAdp*(c3mu_dict['C63d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C83d'])\\\n + FAsp*(c3mu_dict['C63s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C83s']))\\\n /DM_mass\\\n - 4*mN * ( FAup*c3mu_dict['C717u']\\\n + FAdp*c3mu_dict['C717d']\\\n + FAsp*c3mu_dict['C717s']),\n 'cNR10p' : FGtildep * c3mu_dict['C73']\\\n -2*mN/DM_mass * (FT0up*c3mu_dict['C710u']\\\n + FT0dp*c3mu_dict['C710d']\\\n + FT0sp*c3mu_dict['C710s']),\n 'cNR11p' : - mN/DM_mass * (FSup*c3mu_dict['C76u']\\\n + FSdp*c3mu_dict['C76d']\\\n + FSsp*c3mu_dict['C76s'])\\\n - mN/DM_mass * FGp * c3mu_dict['C72']\\\n + 2*((FT0up-FT1up)*c3mu_dict['C710u']\\\n + (FT0dp-FT1dp)*c3mu_dict['C710d']\\\n + (FT0sp-FT1sp)*c3mu_dict['C710s'])\\\n - 2*mN * ( F1up*(c3mu_dict['C716u'])\\\n + F1dp*(c3mu_dict['C716d'])\\\n + F1sp*(c3mu_dict['C716s'])),\n 'cNR12p' : -8*(FT0up*c3mu_dict['C710u'] + FT0dp*c3mu_dict['C710d'] + FT0sp*c3mu_dict['C710s']),\n 'cNR13p' : 0.,\n 'cNR14p' : + 4*mN * ( FAup*(c3mu_dict['C718u'])\\\n + FAdp*(c3mu_dict['C718d'])\\\n + FAsp*(c3mu_dict['C718s'])),\n\n 'cNR6pip' : mN/DM_mass * (FPup_pion*c3mu_dict['C78u'] + FPdp_pion*c3mu_dict['C78d'])\\\n + FPpup_pion*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdp_pion*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / 
gs2_2GeV * c3mu_dict['C84d']),\n 'cNR6etap' : mN/DM_mass * (FPup_eta*c3mu_dict['C78u']\\\n + FPdp_eta*c3mu_dict['C78d']\\\n + FPsp_eta*c3mu_dict['C78s'])\\\n + FPpup_eta*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdp_eta*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FPpsp_eta*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s']),\n 'cNR6q2pip' : mN/DM_mass * FGtildep_pion * c3mu_dict['C74'],\n 'cNR6q2etap' : mN/DM_mass * FGtildep_eta * c3mu_dict['C74'],\n \n 'cNR10pip' : FPup_pion*c3mu_dict['C77u'] + FPdp_pion*c3mu_dict['C77d'],\n 'cNR10etap' : FPup_eta*c3mu_dict['C77u'] + FPdp_eta*c3mu_dict['C77d'] + FPsp_eta*c3mu_dict['C77s'],\n 'cNR10q2pip' : FGtildep_pion * c3mu_dict['C73'],\n 'cNR10q2etap' : FGtildep_eta * c3mu_dict['C73'],\n \n 'cNR5bq2p' : mN* (2*alpha/np.pi*c3mu_dict['C51']),\n 'cNR6bq2p' : -mN**2* (- 2*alpha/np.pi * self.ip['mup']/mN * c3mu_dict['C51']),\n 'cNR11bq2p' : mN* (2*alpha/np.pi*c3mu_dict['C52']),\n\n 'cNR1q2p' : ( F1up*c3mu_dict['C715u']\\\n + F1dp*c3mu_dict['C715d']\\\n + F1sp*c3mu_dict['C715s'])/(2*DM_mass)\\\n + (F1spslope - F2sp / mN**2/4)\n * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4q2p' : 2*( (F1up+F2up)*c3mu_dict['C715u']\\\n + (F1dp+F2dp)*c3mu_dict['C715d']\\\n + (F1sp+F2sp)*c3mu_dict['C715s'])/mN\\\n - 1/mN/DM_mass * F2sp\n * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n\n\n\n\n 'cNR1n' : F1un*(c3mu_dict['C61u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C81u'])\\\n + F1dn*(c3mu_dict['C61d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C81d'])\\\n + FGn*c3mu_dict['C71']\\\n + FSun*c3mu_dict['C75u'] + FSdn*c3mu_dict['C75d'] + FSsn*c3mu_dict['C75s']\\\n + FTW2un*c3mu_dict['C723u']\\\n + FTW2dn*c3mu_dict['C723d']\\\n + FTW2sn*c3mu_dict['C723s']\\\n + FTW2gn*c3mu_dict['C725'],\n 'cNR2n' : 0,\n 'cNR3n' : F2sn*(c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4n' : - 4*( FAun*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FAdn*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FAsn*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s']))\\\n - 2*alpha/np.pi * self.ip['mun']/mN * c3mu_dict['C51']\\\n + 8*(FT0un*c3mu_dict['C79u'] + FT0dn*c3mu_dict['C79d'] + FT0sn*c3mu_dict['C79s']),\n 'cNR5n' : - 2*mN * ( F1un*c3mu_dict['C715u']\\\n + F1dn*c3mu_dict['C715d']\\\n + F1sn*c3mu_dict['C715s']),\n 'cNR6n' : mN/DM_mass * FGtilden * c3mu_dict['C74']\\\n -2*mN*( (F1un+F2un)*c3mu_dict['C715u']\\\n + (F1dn+F2dn)*c3mu_dict['C715d']\\\n + (F1sn+F2sn)*c3mu_dict['C715s'])\\\n + mN/DM_mass * F2sn\n * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR7n' : - 2*( FAun*(c3mu_dict['C63u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C83u'])\\\n + FAdn*(c3mu_dict['C63d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C83d'])\\\n + FAsn*(c3mu_dict['C63s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C83s'])),\n 'cNR8n' : 2*( F1un*(c3mu_dict['C62u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + F1dn*(c3mu_dict['C62d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])),\n 'cNR9n' : 2*( (F1un+F2un)*(c3mu_dict['C62u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + (F1dn+F2dn)*(c3mu_dict['C62d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])\\\n + (F1sn+F2sn)*(c3mu_dict['C62s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C82s']))\\\n + 2*mN*( 
FAun*(c3mu_dict['C63u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C83u'])\\\n + FAdn*(c3mu_dict['C63d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C83d'])\\\n + FAsn*(c3mu_dict['C63s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C83s']))\\\n /DM_mass\\\n - 4*mN * ( FAun*c3mu_dict['C717u']\\\n + FAdn*c3mu_dict['C717d']\\\n + FAsn*c3mu_dict['C717s']),\n 'cNR10n' : FGtilden * c3mu_dict['C73']\\\n -2*mN/DM_mass * (FT0un*c3mu_dict['C710u']\\\n + FT0dn*c3mu_dict['C710d']\\\n + FT0sn*c3mu_dict['C710s']),\n 'cNR11n' : - mN/DM_mass * (FSun*c3mu_dict['C76u']\\\n + FSdn*c3mu_dict['C76d']\\\n + FSsn*c3mu_dict['C76s'])\\\n - mN/DM_mass * FGn * c3mu_dict['C72']\\\n + 2*((FT0un-FT1un)*c3mu_dict['C710u']\\\n + (FT0dn-FT1dn)*c3mu_dict['C710d']\\\n + (FT0sn-FT1sn)*c3mu_dict['C710s'])\\\n - 2*mN * ( F1un*(c3mu_dict['C716u'])\\\n + F1dn*(c3mu_dict['C716d'])\\\n + F1sn*(c3mu_dict['C716s'])),\n 'cNR12n' : -8*(FT0un*c3mu_dict['C710u'] + FT0dn*c3mu_dict['C710d'] + FT0sn*c3mu_dict['C710s']),\n 'cNR13n' : 0.,\n 'cNR14n' : + 4*mN * ( FAun*(c3mu_dict['C718u'])\\\n + FAdn*(c3mu_dict['C718d'])\\\n + FAsn*(c3mu_dict['C718s'])),\n \n 'cNR6pin' : mN/DM_mass * (FPun_pion*c3mu_dict['C78u'] + FPdn_pion*c3mu_dict['C78d'])\\\n + FPpun_pion*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdn_pion*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d']),\n 'cNR6etan' : mN/DM_mass * (FPun_eta*c3mu_dict['C78u']\\\n + FPdn_eta*c3mu_dict['C78d']\\\n + FPsn_eta*c3mu_dict['C78s'])\\\n + FPpun_eta*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdn_eta*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FPpsn_eta*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s']),\n 'cNR6q2pin' : mN/DM_mass * FGtilden_pion * c3mu_dict['C74'],\n 'cNR6q2etan' : mN/DM_mass * FGtilden_eta * c3mu_dict['C74'],\n \n 'cNR10pin' : FPun_pion*c3mu_dict['C77u'] + FPdn_pion*c3mu_dict['C77d'],\n 'cNR10etan' : FPun_eta*c3mu_dict['C77u'] + FPdn_eta*c3mu_dict['C77d'] + FPsn_eta*c3mu_dict['C77s'],\n 'cNR10q2pin' : FGtilden_pion * c3mu_dict['C73'],\n 'cNR10q2etan' : FGtilden_eta * c3mu_dict['C73'],\n \n 'cNR5bq2n' : 0,\n 'cNR6bq2n' : -mN**2 * (- 2*alpha/np.pi * self.ip['mun']/mN * c3mu_dict['C51']),\n 'cNR11bq2n' : 0,\n\n 'cNR1q2n' : ( F1un*c3mu_dict['C715u']\\\n + F1dn*c3mu_dict['C715d']\\\n + F1sn*c3mu_dict['C715s'])/(2*DM_mass)\\\n + (F1snslope - F2sn / mN**2/4)\n * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4q2n' : 2*( (F1un+F2un)*c3mu_dict['C715u']\\\n + (F1dn+F2dn)*c3mu_dict['C715d']\\\n + (F1sn+F2sn)*c3mu_dict['C715s'])/mN\\\n - 1/mN/DM_mass * F2sn\n * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s'])\n }\n\n if NLO:\n my_cNR_dict['cNR5p'] = my_cNR_dict['cNR5p']\\\n + 2*( (FT0up-FT1up)*c3mu_dict['C79u']\\\n + (FT0dp-FT1dp)*c3mu_dict['C79d']\\\n + (FT0sp-FT1sp)*c3mu_dict['C79s'])\n my_cNR_dict['cNR1q2p'] = my_cNR_dict['cNR1q2p']\\\n - ( (FT0up-FT1up)*c3mu_dict['C79u']\\\n + (FT0dp-FT1dp)*c3mu_dict['C79d']\\\n + (FT0sp-FT1sp)*c3mu_dict['C79s'])/(2*DM_mass*mN)\n my_cNR_dict['cNR5n'] = my_cNR_dict['cNR5n']\\\n + 2*( (FT0un-FT1un)*c3mu_dict['C79u']\\\n + (FT0dn-FT1dn)*c3mu_dict['C79d']\\\n + (FT0sn-FT1sn)*c3mu_dict['C79s'])\n my_cNR_dict['cNR1q2n'] = my_cNR_dict['cNR1q2n']\\\n - ( (FT0un-FT1un)*c3mu_dict['C79u']\\\n + (FT0dn-FT1dn)*c3mu_dict['C79d']\\\n + (FT0sn-FT1sn)*c3mu_dict['C79s'])/(2*DM_mass*mN)\n\n\n if self.DM_type == \"M\":\n my_cNR_dict = {\n 'cNR1p' : 
FGp*c3mu_dict['C71']\\\n + FSup*c3mu_dict['C75u'] + FSdp*c3mu_dict['C75d'] + FSsp*c3mu_dict['C75s'],\n 'cNR2p' : 0.,\n 'cNR3p' : 0.,\n 'cNR4p' : - 4*( FAup*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FAdp*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FAsp*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s'])),\n 'cNR5p' : 0,\n 'cNR6p' : mN/DM_mass * FGtildep * c3mu_dict['C74'],\n 'cNR7p' : 0,\n 'cNR8p' : 2*( F1up*(c3mu_dict['C62u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + F1dp*(c3mu_dict['C62d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])),\n 'cNR9p' : 2*( (F1up+F2up)*(c3mu_dict['C62u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + (F1dp+F2dp)*(c3mu_dict['C62d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])\\\n + (F1sp+F2sp)*(c3mu_dict['C62s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C82s'])),\n 'cNR10p' : FGtildep * c3mu_dict['C73'],\n 'cNR11p' : - mN/DM_mass * (FSup*c3mu_dict['C76u']\\\n + FSdp*c3mu_dict['C76d']\\\n + FSsp*c3mu_dict['C76s'])\\\n - mN/DM_mass * FGp * c3mu_dict['C72'],\n 'cNR12p' : 0.,\n 'cNR13p' : 0.,\n 'cNR14p' : 0.,\n \n 'cNR6pip' : mN/DM_mass * (FPup_pion*c3mu_dict['C78u'] + FPdp_pion*c3mu_dict['C78d'])\\\n + FPpup_pion*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdp_pion*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d']),\n 'cNR6etap' : mN/DM_mass * (FPup_eta*c3mu_dict['C78u']\\\n + FPdp_eta*c3mu_dict['C78d']\\\n + FPsp_eta*c3mu_dict['C78s'])\\\n + FPpup_eta*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdp_eta*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FPpsp_eta*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s']),\n 'cNR6q2pip' : mN/DM_mass * FGtildep_pion * c3mu_dict['C74'],\n 'cNR6q2etap' : mN/DM_mass * FGtildep_eta * c3mu_dict['C74'],\n \n 'cNR10pip' : FPup_pion*c3mu_dict['C77u'] + FPdp_pion*c3mu_dict['C77d'],\n 'cNR10etap' : FPup_eta*c3mu_dict['C77u'] + FPdp_eta*c3mu_dict['C77d'] + FPsp_eta*c3mu_dict['C77s'],\n 'cNR10q2pip' : FGtildep_pion * c3mu_dict['C73'],\n 'cNR10q2etap' : FGtildep_eta * c3mu_dict['C73'],\n \n 'cNR5bq2p' : 0.,\n 'cNR6bq2p' : 0.,\n 'cNR11bq2p' : 0.,\n\n 'cNR1q2p' : 0.,\n 'cNR4q2p' : 0.,\n\n\n\n\n 'cNR1n' : FGn*c3mu_dict['C71']\\\n + FSun*c3mu_dict['C75u'] + FSdn*c3mu_dict['C75d'] + FSsn*c3mu_dict['C75s'],\n 'cNR2n' : 0.,\n 'cNR3n' : 0.,\n 'cNR4n' : - 4*( FAun*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FAdn*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FAsn*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s'])),\n 'cNR5n' : 0.,\n 'cNR6n' : mN/DM_mass * FGtilden * c3mu_dict['C74'],\n 'cNR7n' : 0.,\n 'cNR8n' : 2*( F1un*(c3mu_dict['C62u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + F1dn*(c3mu_dict['C62d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])),\n 'cNR9n' : 2*( (F1un+F2un)*(c3mu_dict['C62u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + (F1dn+F2dn)*(c3mu_dict['C62d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])\\\n + (F1sn+F2sn)*(c3mu_dict['C62s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C82s'])),\n 'cNR10n' : FGtilden * c3mu_dict['C73'],\n 'cNR11n' : - mN/DM_mass * (FSun*c3mu_dict['C76u']\\\n + FSdn*c3mu_dict['C76d']\\\n + FSsn*c3mu_dict['C76s'])\\\n - mN/DM_mass * FGn * c3mu_dict['C72'],\n 'cNR12n' : 0.,\n 
'cNR13n' : 0.,\n 'cNR14n' : 0.,\n \n 'cNR6pin' : mN/DM_mass * (FPun_pion*c3mu_dict['C78u'] + FPdn_pion*c3mu_dict['C78d'])\\\n + FPpun_pion*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdn_pion*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d']),\n 'cNR6etan' : mN/DM_mass * (FPun_eta*c3mu_dict['C78u']\\\n + FPdn_eta*c3mu_dict['C78d']\\\n + FPsn_eta*c3mu_dict['C78s'])\\\n + FPpun_eta*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdn_eta*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FPpsn_eta*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s']),\n 'cNR6q2pin' : mN/DM_mass * FGtilden_pion * c3mu_dict['C74'],\n 'cNR6q2etan' : mN/DM_mass * FGtilden_eta * c3mu_dict['C74'],\n \n 'cNR10pin' : FPun_pion*c3mu_dict['C77u'] + FPdn_pion*c3mu_dict['C77d'],\n 'cNR10etan' : FPun_eta*c3mu_dict['C77u'] + FPdn_eta*c3mu_dict['C77d'] + FPsn_eta*c3mu_dict['C77s'],\n 'cNR10q2pin' : FGtilden_pion * c3mu_dict['C73'],\n 'cNR10q2etan' : FGtilden_eta * c3mu_dict['C73'],\n \n 'cNR5bq2n' : 0.,\n 'cNR6bq2n' : 0.,\n 'cNR11bq2n' : 0.,\n\n 'cNR1q2n' : 0.,\n 'cNR4q2n' : 0.\n }\n\n\n if self.DM_type == \"C\":\n my_cNR_dict = {\n 'cNR1p' : F1up * (c3mu_dict['C61u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C81u'])\\\n + F1dp * (c3mu_dict['C61d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C81d'])\\\n + FGp*c3mu_dict['C65']/2/DM_mass\\\n + (FSup*c3mu_dict['C63u'] + FSdp*c3mu_dict['C63d'] + FSsp*c3mu_dict['C63s'])/2/DM_mass,\n 'cNR2p' : 0,\n 'cNR3p' : F2sp * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4p' : 0,\n 'cNR5p' : 0,\n 'cNR6p' : 0,\n 'cNR7p' : -2*( FAup * (c3mu_dict['C62u']\\\n - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + FAdp * (c3mu_dict['C62d']\\\n - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])\\\n + FAsp * (c3mu_dict['C62s']\\\n - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C82s'])),\n 'cNR8p' : 0,\n 'cNR9p' : 0,\n 'cNR10p' : FGtildep * c3mu_dict['C66']/2/DM_mass,\n 'cNR11p' : 0,\n 'cNR12p' : 0,\n 'cNR13p' : 0.,\n 'cNR14p' : 0,\n\n 'cNR6pip' : 0,\n 'cNR6etap' : 0,\n 'cNR6q2pip' : 0,\n 'cNR6q2etap' : 0,\n \n 'cNR10pip' : (FPup_pion*c3mu_dict['C64u'] + FPdp_pion*c3mu_dict['C64d'])/2/DM_mass,\n 'cNR10etap' : ( FPup_eta*c3mu_dict['C64u']\\\n + FPdp_eta*c3mu_dict['C64d']\\\n + FPsp_eta*c3mu_dict['C64s'])/2/DM_mass,\n 'cNR10q2pip' : FGtildep_pion * c3mu_dict['C66']/2/DM_mass,\n 'cNR10q2etap' : FGtildep_eta * c3mu_dict['C66']/2/DM_mass,\n \n 'cNR5bq2p' : 0,\n 'cNR6bq2p' : 0,\n 'cNR11bq2p' : 0,\n\n 'cNR1q2p' : (F1spslope - 1/mN**2/4 * F2sp)\n * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4q2p' : 0,\n\n\n\n\n 'cNR1n' : F1un * (c3mu_dict['C61u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C81u'])\\\n + F1dn * (c3mu_dict['C61d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C81d'])\\\n + FGn*c3mu_dict['C65']/2/DM_mass\\\n + (FSun*c3mu_dict['C63u'] + FSdn*c3mu_dict['C63d'] + FSsn*c3mu_dict['C63s'])/2/DM_mass,\n 'cNR2n' : 0,\n 'cNR3n' : F2sp * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4n' : 0,\n 'cNR5n' : 0,\n 'cNR6n' : 0,\n 'cNR7n' : -2*( FAun * (c3mu_dict['C62u']\\\n - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + FAdn * (c3mu_dict['C62d']\\\n - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])\\\n + FAsn * (c3mu_dict['C62s']\\\n - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C82s'])),\n 'cNR8n' : 0,\n 'cNR9n' : 0,\n 'cNR10n' : 
FGtilden * c3mu_dict['C66']/2/DM_mass,\n 'cNR11n' : 0,\n 'cNR12n' : 0,\n 'cNR13p' : 0.,\n 'cNR14n' : 0,\n\n 'cNR6pin' : 0,\n 'cNR6etan' : 0,\n 'cNR6q2pin' : 0,\n 'cNR6q2etan' : 0,\n \n 'cNR10pin' : (FPun_pion*c3mu_dict['C64u'] + FPdn_pion*c3mu_dict['C64d'])/2/DM_mass,\n 'cNR10etan' : ( FPun_eta*c3mu_dict['C64u']\\\n + FPdn_eta*c3mu_dict['C64d']\\\n + FPsn_eta*c3mu_dict['C64s'])/2/DM_mass,\n 'cNR10q2pin' : FGtilden_pion * c3mu_dict['C66']/2/DM_mass,\n 'cNR10q2etan' : FGtilden_eta * c3mu_dict['C66']/2/DM_mass,\n \n 'cNR5bq2n' : 0,\n 'cNR6bq2n' : 0,\n 'cNR11bq2n' : 0,\n\n 'cNR1q2n' : (F1snslope - 1/mN**2/4 * F2sn)\n * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4q2n' : 0\n }\n\n\n if self.DM_type == \"R\":\n my_cNR_dict = {\n 'cNR1p' : FSup*c3mu_dict['C63u']/2/DM_mass\\\n + FSdp*c3mu_dict['C63d']/2/DM_mass\\\n + FSsp*c3mu_dict['C63s']/2/DM_mass\\\n + FGp*c3mu_dict['C65']/2/DM_mass,\n 'cNR2p' : 0,\n 'cNR3p' : 0,\n 'cNR4p' : 0,\n 'cNR5p' : 0,\n 'cNR6p' : 0,\n 'cNR7p' : 0,\n 'cNR8p' : 0,\n 'cNR9p' : 0,\n 'cNR10p' : FGtildep * c3mu_dict['C66']/2/DM_mass,\n 'cNR11p' : 0,\n 'cNR12p' : 0,\n 'cNR13p' : 0.,\n 'cNR14p' : 0,\n\n 'cNR6pip' : 0,\n 'cNR6etap' : 0,\n 'cNR6q2pip' : 0,\n 'cNR6q2etap' : 0,\n \n 'cNR10pip' : (FPup_pion*c3mu_dict['C64u'] + FPdp_pion*c3mu_dict['C64d'])/2/DM_mass,\n 'cNR10etap' : FPup_eta*c3mu_dict['C64u']/2/DM_mass\\\n + FPdp_eta*c3mu_dict['C64d']/2/DM_mass\\\n + FPsp_eta*c3mu_dict['C64s']/2/DM_mass,\n 'cNR10q2pip' : FGtildep_pion * c3mu_dict['C66']/2/DM_mass,\n 'cNR10q2etap' : FGtildep_eta * c3mu_dict['C66']/2/DM_mass,\n \n 'cNR5bq2p' : 0,\n 'cNR6bq2p' : 0,\n 'cNR11bq2p' : 0,\n\n 'cNR1q2p' : 0,\n 'cNR4q2p' : 0,\n\n\n\n\n 'cNR1n' : FSun*c3mu_dict['C63u']/2/DM_mass\\\n + FSdn*c3mu_dict['C63d']/2/DM_mass\\\n + FSsn*c3mu_dict['C63s']/2/DM_mass\\\n + FGn*c3mu_dict['C65']/2/DM_mass,\n 'cNR2n' : 0,\n 'cNR3n' : 0,\n 'cNR4n' : 0,\n 'cNR5n' : 0,\n 'cNR6n' : 0,\n 'cNR7n' : 0,\n 'cNR8n' : 0,\n 'cNR9n' : 0,\n 'cNR10n' : FGtilden * c3mu_dict['C66']/2/DM_mass,\n 'cNR11n' : 0,\n 'cNR12n' : 0,\n 'cNR13p' : 0.,\n 'cNR14n' : 0,\n\n 'cNR6pin' : 0,\n 'cNR6etan' : 0,\n 'cNR6q2pin' : 0,\n 'cNR6q2etan' : 0,\n \n 'cNR10pin' : (FPun_pion*c3mu_dict['C64u'] + FPdn_pion*c3mu_dict['C64d'])/2/DM_mass,\n 'cNR10etan' : FPun_eta*c3mu_dict['C64u']/2/DM_mass\\\n + FPdn_eta*c3mu_dict['C64d']/2/DM_mass\\\n + FPsn_eta*c3mu_dict['C64s']/2/DM_mass,\n 'cNR10q2pin' : FGtilden_pion * c3mu_dict['C66']/2/DM_mass,\n 'cNR10q2etan' : FGtilden_eta * c3mu_dict['C66']/2/DM_mass,\n \n 'cNR5bq2n' : 0,\n 'cNR6bq2n' : 0,\n 'cNR11bq2n' : 0,\n\n 'cNR1q2n' : 0,\n 'cNR4q2n' : 0\n }\n\n\n return my_cNR_dict", "def mpc_mta_client2(paillier_sk, cb):\n cb1, cb1_val = make_octet(None, cb)\n alpha1, alpha1_val = make_octet(EGS_SECP256K1) \n \n libamcl_mpc.MPC_MTA_CLIENT2(paillier_sk, cb1, alpha1)\n\n alpha2 = to_str(alpha1)\n \n return alpha2", "def message_subscribers(self, *args, **kwargs):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_sptr_message_subscribers(self, *args, **kwargs)", "def cSpmvh():\n \n R=\"\"\"\n \n KERNEL void pELL_spmvh_mCoil(\n const unsigned int Reps, // number of coils\n const unsigned int nRow, // number of rows\n const unsigned int prodJd, // product of Jd\n const unsigned int sumJd, // sum of Jd\n const unsigned int dim, // dimensionality\n GLOBAL_MEM const unsigned int *Jd, // Jd\n // GLOBAL_MEM const unsigned int *curr_sumJd, // \n GLOBAL_MEM const unsigned int *meshindex, // meshindex, prodJd * dim\n GLOBAL_MEM const unsigned int *kindx, // unmixed column indexes of all 
dimensions\n GLOBAL_MEM const float2 *udata, // interpolation data before Kronecker product\n GLOBAL_MEM float2 *k, \n //GLOBAL_MEM float2 *res,\n GLOBAL_MEM const float2 *input) // y\n { \n const unsigned int t = get_local_id(0);\n const unsigned int vecWidth=${LL};\n // Thread ID within wavefront\n const unsigned int id = t & (vecWidth-1);\n \n // One row per wavefront\n unsigned int vecsPerBlock=get_local_size(0)/vecWidth;\n unsigned int myRow=(get_group_id(0)*vecsPerBlock) + (t/ vecWidth); // the myRow-th non-Cartesian sample\n unsigned int m = myRow / Reps;\n unsigned int nc = myRow - m * Reps;\n \n float2 zero;\n zero.x = 0.0;\n zero.y = 0.0;\n \n \n if (myRow < nRow * Reps)\n {\n const unsigned int vecStart = 0; \n const unsigned int vecEnd =prodJd; \n float2 u=zero;\n \n for (unsigned int j = vecStart+id; j<vecEnd; j += vecWidth)\n { \n // now doing the first dimension\n unsigned int index_shift = m * sumJd;\n // unsigned int tmp_sumJd = 0;\n unsigned int J = Jd[0];\n unsigned int index = index_shift + meshindex[dim*j + 0];\n unsigned int col = kindx[index] ;\n float2 spdata = udata[index];\n index_shift += J; \n for (unsigned int dimid = 1; dimid < dim; dimid ++ )\n {\n J = Jd[dimid];\n index = index_shift + meshindex[dim*j + dimid]; // the index of the partial ELL arrays *kindx and *udata\n col += kindx[index];// + 1 ; // the column index of the current j\n float tmp_x = spdata.x;\n float2 tmp_udata = udata[index];\n spdata.x = tmp_x * tmp_udata.x - spdata.y * tmp_udata.y; // the spdata of the current j\n spdata.y = tmp_x * tmp_udata.y + spdata.y * tmp_udata.x; \n index_shift += J;\n }; // Iterate over dimensions 1 -> Nd - 1\n \n float2 ydata=input[myRow]; // kout[col];\n u.x = spdata.x*ydata.x + spdata.y*ydata.y;\n u.y = - spdata.y*ydata.x + spdata.x*ydata.y;\n \n atomic_add_float2(k + col*Reps + nc, u);//, res + col*Reps + nc);\n LOCAL_BARRIER;\n // atomic_add_float2(k + col*Reps + nc, u, res + col*Reps + nc);\n }; // Iterate for (unsigned int j = 0; j < prodJd; j ++)\n }; // if (m < nRow)\n \n }; // End of xELL_spmvh_mCoil \n \n \n KERNEL void pELL_spmvh_mCoil_new(\n const unsigned int Reps, // number of coils\n const unsigned int nRow, // number of rows\n const unsigned int prodJd, // product of Jd\n const unsigned int sumJd, // sum of Jd\n const unsigned int dim, // dimensionality\n GLOBAL_MEM const unsigned int *Jd, // Jd\n // GLOBAL_MEM const unsigned int *curr_sumJd, // \n GLOBAL_MEM const unsigned int *meshindex, // meshindex, prodJd * dim\n GLOBAL_MEM const unsigned int *kindx, // unmixed column indexes of all dimensions\n GLOBAL_MEM const float2 *udata, // interpolation data before Kronecker product\n GLOBAL_MEM float2 *k, \n GLOBAL_MEM float2 *res,\n GLOBAL_MEM const float2 *input) // y\n {\n unsigned int myRow0= get_global_id(0);\n unsigned int myRow= myRow0/(float)Reps;\n unsigned int nc = myRow0 - myRow*Reps;\n float2 zero;\n zero.x = 0.0;\n zero.y = 0.0;\n if (myRow < nRow){ \n for (unsigned int j = 0; j < prodJd; j ++){\n float2 u = zero;\n\n // now doing the first dimension\n unsigned int index_shift = myRow * sumJd;\n // unsigned int tmp_sumJd = 0;\n unsigned int J = Jd[0];\n unsigned int index = index_shift + meshindex[dim*j + 0];\n unsigned int col = kindx[index] ;\n float2 spdata = udata[index];\n index_shift += J; \n for (unsigned int dimid = 1; dimid < dim; dimid ++ ){\n J = Jd[dimid];\n index = index_shift + meshindex[dim*j + dimid]; // the index of the partial ELL arrays *kindx and *udata\n col += kindx[index];// + 1 ; // the column index of the current 
j\n float tmp_x = spdata.x;\n float2 tmp_udata = udata[index];\n spdata.x = tmp_x * tmp_udata.x - spdata.y * tmp_udata.y; // the spdata of the current j\n spdata.y = tmp_x * tmp_udata.y + spdata.y * tmp_udata.x; \n index_shift += J;\n }; // Iterate over dimensions 1 -> Nd - 1\n \n float2 ydata=input[myRow*Reps + nc]; // kout[col];\n u.x = spdata.x*ydata.x + spdata.y*ydata.y;\n u.y = - spdata.y*ydata.x + spdata.x*ydata.y;\n atomic_add_float2(k + col*Reps + nc, u);\n \n }; // Iterate for (unsigned int j = 0; j < prodJd; j ++)\n \n }; // if (m < nRow)\n \n }; // End of pELL_spmvh_mCoil \n \"\"\"\n return R", "def message_subscribers(self, *args, **kwargs):\n return _bs_swig.ec_descrambler_sync_sptr_message_subscribers(self, *args, **kwargs)", "def parameterize_cosmology_Mpc(pars,z_star=3,kp_Mpc=0.7):\n # get logarithmic growth rate at z_star, around k_p_hMpc\n k_p_hMpc=1.0\n f_star = get_f_star(pars,z_star=z_star,k_p_hMpc=k_p_hMpc)\n # compute deviation from EdS expansion\n g_star = get_g_star(pars,z_star=z_star)\n # compute linear power, in Mpc, at z_star\n # and fit a second order polynomial to the log power, around kp_Mpc\n linP_Mpc = fit_linP_Mpc(pars,z_star,kp_Mpc,deg=2)\n results={'f_star':f_star, 'g_star':g_star, 'linP_Mpc':linP_Mpc}\n return results", "def do_mccp(self, option):\r\n self.protocol.protocol_flags['MCCP'] = True\r\n self.protocol.requestNegotiation(MCCP, '')\r\n self.protocol.zlib = zlib.compressobj(9)", "def COM(z, M, **cosmo):\n # Check that z and M are arrays\n z = np.array(z, ndmin=1, dtype=float)\n M = np.array(M, ndmin=1, dtype=float)\n\n # Create array\n c_array = np.empty_like(z)\n sig_array = np.empty_like(z)\n nu_array = np.empty_like(z)\n zf_array = np.empty_like(z)\n \n for i_ind, (zval, Mval) in enumerate(zip(z, M)):\n # Evaluate the indices at each redshift and mass combination\n # that you want a concentration for, different to MAH which\n # uses one a_tilde and b_tilde at the starting redshift only\n a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)\n\n # Minimize equation to solve for 1 unknown, 'c'\n c = scipy.optimize.brentq(_minimize_c, 2, 1000,\n args=(zval, a_tilde, b_tilde,\n cosmo['A_scaling'], cosmo['omega_M_0'],\n cosmo['omega_lambda_0']))\n\n if np.isclose(c, 0):\n print(\"Error solving for concentration with given redshift and \"\n \"(probably) too small a mass\")\n c = -1\n sig = -1\n nu = -1\n zf = -1\n else:\n # Calculate formation redshift for this concentration,\n # redshift at which the scale radius = virial radius: z_-2\n zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],\n omega_M_0=cosmo['omega_M_0'],\n omega_lambda_0=cosmo['omega_lambda_0'])\n\n R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)\n\n sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)\n nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))\n\n c_array[i_ind] = c\n sig_array[i_ind] = sig\n nu_array[i_ind] = nu\n zf_array[i_ind] = zf\n\n return(c_array, sig_array, nu_array, zf_array)", "def reconstruct_pu(self, receivers):\n self.fpts = receivers\n # Initialize variables\n self.p_recon = np.zeros((self.fpts.coord.shape[0], len(self.controls.k0)), dtype=complex)\n self.uz_recon = np.zeros((self.fpts.coord.shape[0], len(self.controls.k0)), dtype=complex)\n # Initialize bar\n bar = tqdm(total = len(self.controls.k0), desc = 'Reconstructing sound field...')\n for jf, k0 in enumerate(self.controls.k0):\n # For smooth transition from continous to discrete k domain\n kappa = np.sqrt(self.delta_kx*self.delta_ky/(2*np.pi*k0**2))\n # compute kz\n kz_f = form_kz(k0, 
self.kx_f, self.ky_f)\n k_vec_ref = np.array([self.kx_f, self.ky_f, kz_f])\n # Reflected or radiating part\n fz_ref = self.f_ref * np.sqrt(k0/np.abs(kz_f))\n recs = np.array([self.fpts.coord[:,0], self.fpts.coord[:,1],\n self.fpts.coord[:,2]-self.zp]).T\n psi_ref = fz_ref * kappa * np.exp(-1j * recs @ k_vec_ref)\n # Incident part\n if self.f_inc != 0:\n k_vec_inc = np.array([self.kx_f, self.ky_f, -kz_f])\n fz_inc = self.f_inc * np.sqrt(k0/np.abs(kz_f))\n recs = np.array([self.fpts.coord[:,0], self.fpts.coord[:,1],\n self.fpts.coord[:,2]-self.zm]).T\n psi_inc = fz_inc * kappa * np.exp(-1j * recs @ k_vec_inc)\n # Forming the sensing matrix\n if self.f_inc == 0:\n h_mtx = psi_ref\n else:\n h_mtx = np.hstack((psi_inc, psi_ref))\n # Compute p and uz\n self.p_recon[:,jf] = h_mtx @ self.pk[:,jf]\n if self.f_inc == 0:\n self.uz_recon[:,jf] = -((np.divide(kz_f, k0)) * h_mtx) @ self.pk[:,jf]\n else:\n self.uz_recon[:,jf] = -((np.divide(np.concatenate((-kz_f, kz_f)), k0)) * h_mtx) @ self.pk[:,jf]\n bar.update(1)\n bar.close()", "def Keccak(self,M,r=1024,c=576,suffix=0x01,n=1024,verbose=False):\n\n #Check the inputs\n if (r<0) or (r%8!=0):\n raise KeccakError.KeccakError('r must be a multiple of 8 in this implementation')\n if (n%8!=0):\n raise KeccakError.KeccakError('outputLength must be a multiple of 8')\n self.setB(r+c)\n\n if verbose:\n print(\"Create a Keccak[r=%d, c=%d] function with '%s' suffix\" % (r,c,self.delimitedSuffixInBinary(suffix)))\n\n #Compute lane length (in bits)\n w=(r+c)//25\n\n # Initialisation of state\n S=[[0,0,0,0,0],\n [0,0,0,0,0],\n [0,0,0,0,0],\n [0,0,0,0,0],\n [0,0,0,0,0]]\n\n # Appending the suffix\n M = self.appendDelimitedSuffix(M, suffix)\n if verbose:\n print(\"After appending the suffix: \", M)\n\n #Padding of messages\n P = self.pad10star1(M, r)\n\n if verbose:\n print(\"String ready to be absorbed: %s (will be completed by %d x '00')\" % (P, c//8))\n\n #Absorbing phase\n for i in range((len(P)*8//2)//r):\n Pi=self.convertStrToTable(P[i*(2*r//8):(i+1)*(2*r//8)]+'00'*(c//8))\n\n for y in range(5):\n for x in range(5):\n S[x][y] = S[x][y]^Pi[x][y]\n S = self.KeccakF(S, verbose)\n\n if verbose:\n print(\"Value after absorption : %s\" % (self.convertTableToStr(S)))\n\n #Squeezing phase\n Z = ''\n outputLength = n\n while outputLength>0:\n string=self.convertTableToStr(S)\n Z = Z + string[:r*2//8]\n outputLength -= r\n if outputLength>0:\n S = self.KeccakF(S, verbose)\n\n # NB: done by block of length r, could have to be cut if outputLength\n # is not a multiple of r\n\n if verbose:\n print(\"Value after squeezing : %s\" % (self.convertTableToStr(S)))\n\n return Z[:2*n//8]", "def get_MFCC(\n waveform: torch.Tensor,\n sample_rate: int,\n n_mfcc: int = 256,\n n_fft: int = 2048,\n log: bool = True,\n) -> torch.Tensor:\n\n n_fft = 2048\n win_length = None\n hop_length = 512\n n_mels = 256\n\n # NOTE: MFCC Source\n # https://pytorch.org/audio/stable/transforms.html#torchaudio.transforms.MFCC\n # https://pytorch.org/audio/stable/_modules/torchaudio/transforms.html#MFCC\n mfcc_transform = T.MFCC(\n sample_rate=sample_rate,\n n_mfcc=n_mfcc,\n log_mels=log,\n melkwargs={\n \"n_fft\": n_fft,\n \"win_length\": win_length,\n \"n_mels\": n_mels,\n \"hop_length\": hop_length,\n \"mel_scale\": \"htk\",\n },\n )\n mfcc = mfcc_transform(waveform) # (B, n_mfcc, time')\n return mfcc", "def DMFNeuFluxMCDet(ch,DMm,DMsig,param):\n import os\n # FIX SCALING\n ## include years\n DM_annihilation_rate_Sun = DMSunAnnihilationRate(DMm,DMsig,param) # [eV]\n #DM_annihilation_rate_Sun = 
1.6e21/param.sec\n normalization = np.sum((DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))) # [eV^3]\n \n ## BEGIN CREATING BINS ##\n # assuming neutrino binnum = 30\n nu_bin_num = 30\n point_num = 1000.0\n Emin = 1.0\n Emax = 1000.0\n \n E_nu_list = gt.LogSpaceEnergies(Emin,Emax,binnum = nu_bin_num)\n E_bin_width = [E_nu_list[i+1]-E_nu_list[i] for i in range(len(E_nu_list)-1)]\n E_nu_hpl = gt.MidPoint(gt.LogSpaceEnergies(Emin,Emax,binnum = nu_bin_num)) \n E_nu_bin = [0.0]*nu_bin_num # neutrino bins\n E_anu_bin = [0.0]*nu_bin_num # antineutrino bins\n E_bin_ratio = E_nu_list[1]/E_nu_list[0]\n ## END CREATING BINS ##\n \n for ineu in range(3):\n ## BEGIN READING DATA FROM MC ## \n \n MCdatapath = \"../data/myMC/trials/legion_ineu_\"+str(ineu)+\"_\"+param.name+\"/\"\n rparam = PC.PhysicsConstants()\n \n files = []\n for filename in os.listdir(MCdatapath):\n files.append(filename)\n \n # load all events\n evt = []\n for filename in files :\n file = open(MCdatapath+filename,'r')\n data = []\n gt.hreadfilev4(file,data,rparam)\n if gt.Compareparams(param,rparam):\n print \"Using : \"+filename\n for e in data :\n for ee in e:\n evt.append(ee)\n \n #del e,ee,data\n \n ## END READING DATA FROM MC ##\n \n # GET DARK MATTER DISTRIBUTION \n DM_pdf = DM_distribution(ch,DMm/param.GeV,ineu)\n \n for i,e in enumerate(evt):\n if len(e) > 4:\n neutrino = True\n \n family = e[0]\n try:\n next_family = evt[i+1]\n if family == next_family and e[1] != 2 :\n neutrino = False\n except:\n pass\n \n E_nu_in = e[2]\n E_nu_out = e[3]\n i = int(np.log(E_nu_out/E_nu_list[0])/np.log(E_bin_ratio))\n j = int(np.log(E_nu_in/E_nu_list[0])/np.log(E_bin_ratio))\n if neutrino:\n E_nu_bin[i] = E_nu_bin[i] + e[5]*(float(DM_pdf.PDF(E_nu_in)/DM_pdf.DMm)*E_bin_width[j]/(np.log(E_nu_list[i])-np.log(E_nu_list[i-1]))) # change to initial neutrino bin width\n #E_nu_bin[i] = E_nu_bin[i] + e[5]*(float(DM_pdf.PDF(E_nu_in)/DM_pdf.DMm))\n else :\n E_anu_bin[i] = E_anu_bin[i] + e[5]*(float(DM_pdf.PDF(E_nu_in)/DM_pdf.DMm)*E_bin_width[i])\n #E_anu_bin[i] = E_anu_bin[i] + e[5]*(float(DM_pdf.PDF(E_nu_in)/DM_pdf.DMm))\n \n #int_weight = integrate.quad(lambda E: PDF.PDF(E)/PDF.DMm,Emin,Emax)[0]\n # rescale\n E_nu_bin = [normalization*x/(point_num) for x in E_nu_bin]\n E_anu_bin = [normalization*x/(point_num) for x in E_anu_bin] \n \n inter_neu = interpolate.InterpolatedUnivariateSpline(E_nu_hpl,E_nu_bin)\n inter_aneu = interpolate.InterpolatedUnivariateSpline(E_nu_hpl,E_anu_bin)\n \n return [inter_neu, inter_aneu]", "def make(self, *args, **kwargs):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_sptr_make(self, *args, **kwargs)", "def MCStracking(\n pr_data,\n bt_data,\n times,\n Lon,\n Lat,\n nc_file,\n DataOutDir,\n DataName):\n\n import mcs_config as cfg\n from skimage.measure import regionprops\n start_time = time.time()\n #Reading tracking parameters\n\n DT = cfg.DT\n\n #Precipitation tracking setup\n smooth_sigma_pr = cfg.smooth_sigma_pr # [0] Gaussion std for precipitation smoothing\n thres_pr = cfg.thres_pr # [2] precipitation threshold [mm/h]\n min_time_pr = cfg.min_time_pr # [3] minum lifetime of PR feature in hours\n min_area_pr = cfg.min_area_pr # [5000] minimum area of precipitation feature in km2\n # Brightness temperature (Tb) tracking setup\n smooth_sigma_bt = cfg.smooth_sigma_bt # [0] Gaussion std for Tb smoothing\n thres_bt = cfg.thres_bt # [241] minimum Tb of cloud shield\n min_time_bt = cfg.min_time_bt # [9] minium lifetime of cloud shield in hours\n min_area_bt = cfg.min_area_bt # [40000] minimum area of cloud shield in 
km2\n # MCs detection\n MCS_min_pr_MajorAxLen = cfg.MCS_min_pr_MajorAxLen # [100] km | minimum length of major axis of precipitation object\n MCS_thres_pr = cfg.MCS_thres_pr # [10] minimum max precipitation in mm/h\n MCS_thres_peak_pr = cfg.MCS_thres_peak_pr # [10] Minimum lifetime peak of MCS precipitation\n MCS_thres_bt = cfg.MCS_thres_bt # [225] minimum brightness temperature\n MCS_min_area_bt = cfg.MCS_min_area_bt # [40000] min cloud area size in km2\n MCS_min_time = cfg.MCS_min_time # [4] minimum time step\n\n\n # DT = 1 # temporal resolution of data for tracking in hours\n\n # # MINIMUM REQUIREMENTS FOR FEATURE DETECTION\n # # precipitation tracking options\n # smooth_sigma_pr = 0 # Gaussion std for precipitation smoothing\n # thres_pr = 2 # precipitation threshold [mm/h]\n # min_time_pr = 3 # minum lifetime of PR feature in hours\n # min_area_pr = 5000 # minimum area of precipitation feature in km2\n\n # # Brightness temperature (Tb) tracking setup\n # smooth_sigma_bt = 0 # Gaussion std for Tb smoothing\n # thres_bt = 241 # minimum Tb of cloud shield\n # min_time_bt = 9 # minium lifetime of cloud shield in hours\n # min_area_bt = 40000 # minimum area of cloud shield in km2\n\n # # MCs detection\n # MCS_min_area = min_area_pr # minimum area of MCS precipitation object in km2\n # MCS_thres_pr = 10 # minimum max precipitation in mm/h\n # MCS_thres_peak_pr = 10 # Minimum lifetime peak of MCS precipitation\n # MCS_thres_bt = 225 # minimum brightness temperature\n # MCS_min_area_bt = MinAreaC # min cloud area size in km2\n # MCS_min_time = 4 # minimum lifetime of MCS\n\n #Calculating grid distances and areas\n\n _,_,grid_cell_area,grid_spacing = calc_grid_distance_area(Lat,Lon)\n grid_cell_area[grid_cell_area < 0] = 0\n\n obj_structure_3D = np.ones((3,3,3))\n\n start_day = times[0]\n\n\n # connect over date line?\n crosses_dateline = False\n if (Lon[0, 0] < -176) & (Lon[0, -1] > 176):\n crosses_dateline = True\n\n end_time = time.time()\n print(f\"======> 'Initialize MCS tracking function: {(end_time-start_time):.2f} seconds \\n\")\n start_time = time.time()\n # --------------------------------------------------------\n # TRACKING PRECIP OBJECTS\n # --------------------------------------------------------\n print(\" track precipitation\")\n\n pr_smooth= filters.gaussian_filter(\n pr_data, sigma=(0, smooth_sigma_pr, smooth_sigma_pr)\n )\n pr_mask = pr_smooth >= thres_pr * DT\n objects_id_pr, num_objects = ndimage.label(pr_mask, structure=obj_structure_3D)\n print(\" \" + str(num_objects) + \" precipitation object found\")\n\n # connect objects over date line\n if crosses_dateline:\n objects_id_pr = ConnectLon(objects_id_pr)\n\n # get indices of object to reduce memory requirements during manipulation\n object_indices = ndimage.find_objects(objects_id_pr)\n\n\n #Calcualte area of objects\n area_objects = calculate_area_objects(objects_id_pr,object_indices,grid_cell_area)\n\n # Keep only large and long enough objects\n # Remove objects that are too small or short lived\n pr_objects = remove_small_short_objects(objects_id_pr,area_objects,min_area_pr,min_time_pr,DT)\n\n grPRs = calc_object_characteristics(\n pr_objects, # feature object file\n pr_data, # original file used for feature detection\n DataOutDir+DataName+\"_PR_\"+str(start_day.year)+str(start_day.month).zfill(2)+'.pkl',\n times, # timesteps of the data\n Lat, # 2D latidudes\n Lon, # 2D Longitudes\n grid_spacing,\n grid_cell_area,\n min_tsteps=int(min_time_pr/ DT), # minimum lifetime in data timesteps\n )\n\n end_time = 
time.time()\n print(f\"======> 'Tracking precip: {(end_time-start_time):.2f} seconds \\n\")\n start_time = time.time()\n # --------------------------------------------------------\n # TRACKING CLOUD (BT) OBJECTS\n # --------------------------------------------------------\n print(\" track clouds\")\n bt_smooth = filters.gaussian_filter(\n bt_data, sigma=(0, smooth_sigma_bt, smooth_sigma_bt)\n )\n bt_mask = bt_smooth <= thres_bt\n objects_id_bt, num_objects = ndimage.label(bt_mask, structure=obj_structure_3D)\n print(\" \" + str(num_objects) + \" cloud object found\")\n\n # connect objects over date line\n if crosses_dateline:\n print(\" connect cloud objects over date line\")\n objects_id_bt = ConnectLon(objects_id_bt)\n\n # get indices of object to reduce memory requirements during manipulation\n object_indices = ndimage.find_objects(objects_id_bt)\n\n #Calcualte area of objects\n area_objects = calculate_area_objects(objects_id_bt,object_indices,grid_cell_area)\n\n # Keep only large and long enough objects\n # Remove objects that are too small or short lived\n objects_id_bt = remove_small_short_objects(objects_id_bt,area_objects,min_area_bt,min_time_bt,DT)\n\n end_time = time.time()\n print(f\"======> 'Tracking clouds: {(end_time-start_time):.2f} seconds \\n\")\n start_time = time.time()\n\n print(\" break up long living cloud shield objects that have many elements\")\n objects_id_bt = BreakupObjects(objects_id_bt, int(min_time_bt / DT), DT)\n\n end_time = time.time()\n print(f\"======> 'Breaking up cloud objects: {(end_time-start_time):.2f} seconds \\n\")\n start_time = time.time()\n\n grCs = calc_object_characteristics(\n objects_id_bt, # feature object file\n bt_data, # original file used for feature detection\n DataOutDir+DataName+\"_BT_\"+str(start_day.year)+str(start_day.month).zfill(2)+'.pkl',\n times, # timesteps of the data\n Lat, # 2D latidudes\n Lon, # 2D Longitudes\n grid_spacing,\n grid_cell_area,\n min_tsteps=int(min_time_bt / DT), # minimum lifetime in data timesteps\n )\n end_time = time.time()\n print(f\"======> 'Calculate cloud characteristics: {(end_time-start_time):.2f} seconds \\n\")\n start_time = time.time()\n # --------------------------------------------------------\n # CHECK IF PR OBJECTS QUALIFY AS MCS\n # (or selected strom type according to msc_config.py)\n # --------------------------------------------------------\n print(\" check if pr objects quallify as MCS (or selected storm type)\")\n # check if precipitation object is from an MCS\n object_indices = ndimage.find_objects(pr_objects)\n MCS_objects = np.zeros(pr_objects.shape,dtype=int)\n\n for iobj,_ in enumerate(object_indices):\n if object_indices[iobj] is None:\n continue\n\n time_slice = object_indices[iobj][0]\n lat_slice = object_indices[iobj][1]\n lon_slice = object_indices[iobj][2]\n\n\n pr_object_slice= pr_objects[object_indices[iobj]]\n pr_object_act = np.where(pr_object_slice==iobj+1,True,False)\n\n if len(pr_object_act) < 2:\n continue\n\n pr_slice = pr_data[object_indices[iobj]]\n pr_act = np.copy(pr_slice)\n pr_act[~pr_object_act] = 0\n\n bt_slice = bt_data[object_indices[iobj]]\n bt_act = np.copy(bt_slice)\n bt_act[~pr_object_act] = 0\n\n bt_object_slice = objects_id_bt[object_indices[iobj]]\n bt_object_act = np.copy(bt_object_slice)\n bt_object_act[~pr_object_act] = 0\n\n area_act = np.tile(grid_cell_area[lat_slice, lon_slice], (pr_act.shape[0], 1, 1))\n area_act[~pr_object_act] = 0\n\n # pr_size = np.array(np.sum(area_act,axis=(1,2)))\n pr_max = np.array(np.max(pr_act,axis=(1,2)))\n\n # 
calculate major axis length of PR object\n pr_object_majoraxislen = np.array([\n regionprops(pr_object_act[tt,:,:].astype(int))[0].major_axis_length*np.mean(area_act[tt,(pr_object_act[tt,:,:] == 1)]/1000**2)**0.5 \n for tt in range(pr_object_act.shape[0])\n ])\n\n #Check overlaps between clouds (bt) and precip objects\n objects_overlap = np.delete(np.unique(bt_object_act[pr_object_act]),0)\n\n if len(objects_overlap) == 0:\n # no deep cloud shield is over the precipitation\n continue\n\n ## Keep bt objects (entire) that partially overlap with pr object\n\n bt_object_overlap = np.in1d(objects_id_bt[time_slice].flatten(), objects_overlap).reshape(objects_id_bt[time_slice].shape)\n\n # Get size of all cloud (bt) objects together\n # We get size of all cloud objects that overlap partially with pr object\n # DO WE REALLY NEED THIS?\n\n bt_size = np.array(\n [\n np.sum(grid_cell_area[bt_object_overlap[tt, :, :] > 0])\n for tt in range(bt_object_overlap.shape[0])\n ]\n )\n\n #Check if BT is below threshold over precip areas\n bt_min_temp = np.nanmin(np.where(bt_object_slice>0,bt_slice,999),axis=(1,2))\n\n # minimum lifetime peak precipitation\n is_pr_peak_intense = np.max(pr_max) >= MCS_thres_peak_pr * DT\n MCS_test = (\n (bt_size / 1000**2 >= MCS_min_area_bt)\n & (np.sum(bt_min_temp <= MCS_thres_bt ) > 0)\n & (pr_object_majoraxislen >= MCS_min_pr_MajorAxLen )\n & (pr_max >= MCS_thres_pr * DT)\n & (is_pr_peak_intense)\n )\n\n # assign unique object numbers\n\n pr_object_act = np.array(pr_object_act).astype(int)\n pr_object_act[pr_object_act == 1] = iobj + 1\n\n window_length = int(MCS_min_time / DT)\n moving_averages = np.convolve(MCS_test, np.ones(window_length), 'valid') / window_length\n\n # if iobj+1 == 19:\n # stop()\n\n if (len(moving_averages) > 0) & (np.max(moving_averages) == 1):\n TMP = np.copy(MCS_objects[object_indices[iobj]])\n TMP = TMP + pr_object_act\n MCS_objects[object_indices[iobj]] = TMP\n\n else:\n continue\n\n #if len(objects_overlap)>1: import pdb; pdb.set_trace()\n # objects_id_MCS, num_objects = ndimage.label(MCS_objects, structure=obj_structure_3D)\n grMCSs = calc_object_characteristics(\n MCS_objects, # feature object file\n pr_data, # original file used for feature detection\n DataOutDir+DataName+\"_MCS_\"+str(start_day.year)+str(start_day.month).zfill(2)+'.pkl',\n times, # timesteps of the data\n Lat, # 2D latidudes\n Lon, # 2D Longitudes\n grid_spacing,\n grid_cell_area,\n min_tsteps=int(MCS_min_time / DT), # minimum lifetime in data timesteps\n )\n\n end_time = time.time()\n print(f\"======> 'MCS tracking: {(end_time-start_time):.2f} seconds \\n\")\n start_time = time.time()\n \n\n ###########################################################\n ###########################################################\n ## WRite netCDF with xarray\n if nc_file is not None:\n print ('Save objects into a netCDF')\n\n fino=xr.Dataset({'MCS_objects':(['time','y','x'],MCS_objects),\n 'PR':(['time','y','x'],pr_data),\n 'PR_objects':(['time','y','x'],objects_id_pr),\n 'BT':(['time','y','x'],bt_data),\n 'BT_objects':(['time','y','x'],objects_id_bt),\n 'lat':(['y','x'],Lat),\n 'lon':(['y','x'],Lon)},\n coords={'time':times.values})\n\n fino.to_netcdf(nc_file,mode='w',encoding={'PR':{'zlib': True,'complevel': 5},\n 'PR_objects':{'zlib': True,'complevel': 5},\n 'BT':{'zlib': True,'complevel': 5},\n 'BT_objects':{'zlib': True,'complevel': 5},\n 'MCS_objects':{'zlib': True,'complevel': 5}})\n\n\n # fino = xr.Dataset({\n # 'MCS_objects': xr.DataArray(\n # data = objects_id_MCS, # enter data 
here\n # dims = ['time','y','x'],\n # attrs = {\n # '_FillValue': const.missingval,\n # 'long_name': 'Mesoscale Convective System objects',\n # 'units' : '',\n # }\n # ),\n # 'PR_objects': xr.DataArray(\n # data = objects_id_pr, # enter data here\n # dims = ['time','y','x'],\n # attrs = {\n # '_FillValue': const.missingval,\n # 'long_name': 'Precipitation objects',\n # 'units' : '',\n # }\n # ),\n # 'BT_objects': xr.DataArray(\n # data = objects_id_bt, # enter data here\n # dims = ['time','y','x'],\n # attrs = {\n # '_FillValue': const.missingval,\n # 'long_name': 'Cloud (brightness temperature) objects',\n # 'units' : '',\n # }\n # ),\n # 'PR': xr.DataArray(\n # data = pr_data, # enter data here\n # dims = ['time','y','x'],\n # attrs = {\n # '_FillValue': const.missingval,\n # 'long_name': 'Precipitation',\n # 'standard_name': 'precipitation',\n # 'units' : 'mm h-1',\n # }\n # ),\n # 'BT': xr.DataArray(\n # data = bt_data, # enter data here\n # dims = ['time','y','x'],\n # attrs = {\n # '_FillValue': const.missingval,\n # 'long_name': 'Brightness temperature',\n # 'standard_name': 'brightness_temperature',\n # 'units' : 'K',\n # }\n # ),\n # 'lat': xr.DataArray(\n # data = Lat, # enter data here\n # dims = ['y','x'],\n # attrs = {\n # '_FillValue': const.missingval,\n # 'long_name': \"latitude\",\n # 'standard_name': \"latitude\",\n # 'units' : \"degrees_north\",\n # }\n # ),\n # 'lon': xr.DataArray(\n # data = Lon, # enter data here\n # dims = ['y','x'],\n # attrs = {\n # '_FillValue': const.missingval,\n # 'long_name': \"longitude\",\n # 'standard_name': \"longitude\",\n # 'units' : \"degrees_east\",\n # }\n # ),\n # },\n # attrs = {'date':datetime.date.today().strftime('%Y-%m-%d'),\n # \"comments\": \"File created with MCS_tracking\"},\n # coords={'time':times.values}\n # )\n\n\n # fino.to_netcdf(nc_file,mode='w',format = \"NETCDF4\",\n # encoding={'PR':{'zlib': True,'complevel': 5},\n # 'PR_objects':{'zlib': True,'complevel': 5},\n # 'BT':{'zlib': True,'complevel': 5},\n # 'BT_objects':{'zlib': True,'complevel': 5}})\n\n\n end_time = time.time()\n print(f\"======> 'Writing files: {(end_time-start_time):.2f} seconds \\n\")\n start_time = time.time()\n else:\n print(f\"No writing files required, output file name is empty\")\n ###########################################################\n ###########################################################\n # ============================\n # Write NetCDF\n return grMCSs, MCS_objects", "def test_reset_protocol_work(self):\n try:\n from openmm import app\n except ImportError: # OpenMM < 7.6\n from simtk.openmm import app\n\n parameter_name = 'lambda_electrostatics'\n temperature = 298.0 * unit.kelvin\n parameter_initial = 1.0\n parameter_final = 0.0\n platform_name = 'CPU'\n nonbonded_method = 'CutoffPeriodic'\n\n # Creating the test system with a high frequency barostat.\n testsystem = testsystems.AlchemicalAlanineDipeptide(nonbondedMethod=getattr(app, nonbonded_method))\n context, integrator = self.create_system(testsystem, parameter_name, parameter_initial, temperature, platform_name)\n\n # Number of NCMC steps\n nsteps = 20\n niterations = 3\n\n # Running several rounds of configuration updates and NCMC\n for i in range(niterations):\n integrator.step(5)\n # Reseting the protocol work inside the integrator\n integrator.reset_protocol_work()\n integrator.reset()\n external_protocol_work, integrator_protocol_work = self.run_ncmc(context, integrator, temperature, nsteps, parameter_name, parameter_initial, parameter_final)\n assert 
abs(external_protocol_work - integrator_protocol_work) < 1.E-5", "def __init__(self, Mcomp, Mhe, Apre, epre, Nkick=1000, Vkick=None, Mns=None, sys_flag=None, galphi=None, galcosth=None, omega=None, phi=None, costh=None,th_ma = None):\n \n # Convert inputs to SI\n\n\n self.sys_flag = sys_flag\n self.Nkick = Nkick\n\n if Vkick is not None: self.Vkick = Vkick*units.km.to(units.m)\n else: self.Vkick = np.random.uniform(0,1000,self.Nkick)*units.km.to(units.m)\n if phi is not None: self.phi = phi\n else: self.phi = np.random.uniform(0,2*np.pi,self.Nkick)\n\n if costh is not None: self.costh = costh\n else: self.costh = np.random.uniform(-1,1,self.Nkick)\n if Mns is not None: self.Mns = Mns*units.M_sun.to(units.kg)\n else: self.Mns = np.random.uniform(3.,Mhe,self.Nkick)*units.M_sun.to(units.kg)\n \n if th_ma is not None: self.th_ma = th_ma\n else: self.th_ma = np.random.uniform(0,2*np.pi,self.Nkick)\n self.E_ma =np.array([brentq(lambda x:ma -x + epre*np.sin(x),0,2*np.pi) for ma in self.th_ma])\n self.rpre = Apre*(1.-epre*np.cos(self.E_ma))*units.R_sun.to(units.m)\n self.Mhe = np.full((self.Nkick,), Mhe)*units.M_sun.to(units.kg)\n self.Mcomp = np.full((self.Nkick,), Mcomp)*units.M_sun.to(units.kg)\n self.Apre = np.full((self.Nkick,),Apre)*units.R_sun.to(units.m)\n self.epre = np.full((self.Nkick,),epre)\n \n # Get projection of R in the x-y plane to save later into output file", "def getTransmissionCoefficients(self, skipFission=True, method='weakCoupling'):\n allowedMethods=[\"weakCoupling\", '1stOrder', '2ndOrder', 'sumRule', 'opticalModel', 'SPRT']\n if method not in allowedMethods:\n raise ValueError('Transmission coefficient calculation method must be one of '+', '.join(allowedMethods))\n # Initialize the reduced width factors for the elastic channel\n redWidthFactor={}\n if not self.averageWidths: self.getWidthsAndSpacings()\n for lj in self.averageWidths:\n if lj[0] not in redWidthFactor:\n redWidthFactor[lj[0]]=XYs1dModule.XYs1d.createFromFunction(\n XYs1dModule.XYs1d.defaultAxes(\n labelsUnits={\n XYs1dModule.yAxisIndex : ( 'gamma' , '' ),\n XYs1dModule.xAxisIndex : ( 'Ex', 'eV' ) }),\n self.averageWidths[lj]['elastic'].domain(),\n lambda E,nope: math.sqrt(E)*self.penetrationFactor( lj[0], self.rho(E) )/self.rho(E),\n {},\n 1e-6,\n 100)\n\n # Now compute the Tc's\n Tc={}\n channelClass={'capture':GAMMACHANNEL, 'elastic':NEUTRONCHANNEL, 'fission':FISSIONCHANNEL}\n for rxn in channelClass.keys():\n for lj in self.averageWidths:\n if rxn=='elastic':\n tau=math.pi*redWidthFactor[lj[0]]*self.averageWidths[lj][rxn]/self.levelSpacings[lj]\n elif rxn=='fission':\n if skipFission: continue\n if rxn not in self.averageWidths[lj]: continue\n tau=math.pi*self.averageWidths[lj][rxn]/self.levelSpacings[lj]\n else:\n tau=math.pi*self.averageWidths[lj][rxn]/self.levelSpacings[lj]\n c=ChannelDesignator(lj[0], lj[1], rxn, len(Tc), int(2.0*abs(lj[0]-lj[1])), gfact=None,\n particleA=None, particleB=None, isElastic=(rxn=='elastic'),\n channelClass=channelClass[rxn], useRelativistic=False, eliminated=False)\n if method in [\"weakCoupling\", '1stOrder']:\n Tc[c] = 2.0 * tau\n elif method=='2ndOrder':\n Tc[c] = 2.0 * tau * (1.0 - tau)\n elif method==\"opticalModel\":\n Tc[c] = tau.applyFunction(lambda x, par: 1.0 - math.exp(-2.0 * tau.evaluate(x)), None) #FIXME: \"tau.evaluate(x)\" should only be \"x\", but applyFunction() is broken as is exp()\n elif method == 'sumRule':\n Tc[c] = tau.applyFunction(lambda x, par: 2.0 * tau.evaluate(x) * (math.sqrt(1.0 + tau.evaluate(x) * tau.evaluate(x)) - tau.evaluate(x)), 
None)\n else: #method==\"SPRT\"\n Tc[c] = 2.0*tau/(1.0+tau/2.0)/(1.0+tau/2.0)\n\n # Fix axis label, still says \"Gamma\"\n Tc[c].axes[0].label='Tc(rxn=%s, L=%i, J=%s)'%(c.reaction, c.l, str(c.J))\n return Tc", "def test_wrong_ref_power_mfcc():\n MFCC(file_struct, FeatureTypes.framesync, ref_power=\"caca\")", "def cNR(self, DM_mass, q, NLO=None):\n if NLO is None:\n NLO = False\n\n meta = self.ip['meta']\n mpi = self.ip['mpi0']\n\n qsq = q**2\n\n # The traditional coefficients, where different from above\n cNR_dict = {}\n my_cNR = self._my_cNR(DM_mass, NLO)\n\n # Add meson- / photon-pole contributions\n cNR_dict['cNR1p'] = my_cNR['cNR1p'] + qsq * my_cNR['cNR1q2p']\n cNR_dict['cNR2p'] = my_cNR['cNR2p']\n cNR_dict['cNR3p'] = my_cNR['cNR3p']\n cNR_dict['cNR4p'] = my_cNR['cNR4p'] + qsq * my_cNR['cNR4q2p']\n cNR_dict['cNR5p'] = my_cNR['cNR5p'] + 1/qsq * my_cNR['cNR5bq2p']\n cNR_dict['cNR6p'] = my_cNR['cNR6p']\\\n + 1/(mpi**2 + qsq) * my_cNR['cNR6pip']\\\n + 1/(meta**2 + qsq) * my_cNR['cNR6etap']\\\n + qsq/(mpi**2 + qsq) * my_cNR['cNR6q2pip']\\\n + qsq/(meta**2 + qsq) * my_cNR['cNR6q2etap']\\\n + 1/qsq * my_cNR['cNR6bq2p']\n cNR_dict['cNR7p'] = my_cNR['cNR7p']\n cNR_dict['cNR8p'] = my_cNR['cNR8p']\n cNR_dict['cNR9p'] = my_cNR['cNR9p']\n cNR_dict['cNR10p'] = my_cNR['cNR10p']\\\n + 1/(mpi**2 + qsq) * my_cNR['cNR10pip']\\\n + 1/(meta**2 + qsq) * my_cNR['cNR10etap']\\\n + qsq/(mpi**2 + qsq) * my_cNR['cNR10q2pip']\\\n + qsq/(meta**2 + qsq) * my_cNR['cNR10q2etap']\n cNR_dict['cNR11p'] = my_cNR['cNR11p'] + 1/qsq * my_cNR['cNR11bq2p']\n cNR_dict['cNR12p'] = my_cNR['cNR12p']\n cNR_dict['cNR13p'] = my_cNR['cNR13p']\n cNR_dict['cNR14p'] = my_cNR['cNR14p']\n\n cNR_dict['cNR1n'] = my_cNR['cNR1n'] + qsq * my_cNR['cNR1q2n']\n cNR_dict['cNR2n'] = my_cNR['cNR2n']\n cNR_dict['cNR3n'] = my_cNR['cNR3n']\n cNR_dict['cNR4n'] = my_cNR['cNR4n'] + qsq * my_cNR['cNR4q2n']\n cNR_dict['cNR5n'] = my_cNR['cNR5n'] + 1/qsq * my_cNR['cNR5bq2n']\n cNR_dict['cNR6n'] = my_cNR['cNR6n']\\\n + 1/(mpi**2 + qsq) * my_cNR['cNR6pin']\\\n + 1/(meta**2 + qsq) * my_cNR['cNR6etan']\\\n + qsq/(mpi**2 + qsq) * my_cNR['cNR6q2pin']\\\n + qsq/(meta**2 + qsq) * my_cNR['cNR6q2etan']\\\n + 1/qsq * my_cNR['cNR6bq2n']\n cNR_dict['cNR7n'] = my_cNR['cNR7n']\n cNR_dict['cNR8n'] = my_cNR['cNR8n']\n cNR_dict['cNR9n'] = my_cNR['cNR9n']\n cNR_dict['cNR10n'] = my_cNR['cNR10n']\\\n + 1/(mpi**2 + qsq) * my_cNR['cNR10pin']\\\n + 1/(meta**2 + qsq) * my_cNR['cNR10etan']\\\n + qsq/(mpi**2 + qsq) * my_cNR['cNR10q2pin']\\\n + qsq/(meta**2 + qsq) * my_cNR['cNR10q2etan']\n cNR_dict['cNR11n'] = my_cNR['cNR11n'] + 1/qsq * my_cNR['cNR11bq2n']\n cNR_dict['cNR12n'] = my_cNR['cNR12n']\n cNR_dict['cNR13n'] = my_cNR['cNR13n']\n cNR_dict['cNR14n'] = my_cNR['cNR14n']\n\n return cNR_dict", "def RJMCMC(Model_parameters, midpoint_age, delta_age, intensity, delta_intensity, stratification, Return_info ):\n \n# Seed the generator so we get the same values every time\n np.random.seed(seed = 1) \n \n# set the best K to -10, in order that it is obvious if it is never updated. 
\n k_best = -10\n \n k_max_array_bound = Model_parameters['K_max'] + 1;\n# Num_samples_to_store = int(np.ceil((Model_parameters['Nsamples']-Model_parameters['Burn_in'])/Model_parameters['thinning']))\n \n \n# Calculate number of collected samples for credible intervals -- if we are collecting.\n if Model_parameters['Calc_credible']:\n Num_samples_credible=int(np.ceil((Model_parameters['Nsamples']-Model_parameters['Burn_in'])*((100 - Model_parameters['credible'])/200.0)/Model_parameters['thinning'])) \n print('Collecting credible interval data' )\n# Define an equally-spaced grid to define the model:\n X = np.linspace(Model_parameters['X_min'], Model_parameters['X_max'],Model_parameters['discretise_size'])\n\n# predefine arrays to keep track of the credible intervals\n val_min, val_max = np.zeros(Model_parameters['discretise_size']), np.zeros(Model_parameters['discretise_size'])\n ind_min, ind_max = np.zeros(Model_parameters['discretise_size'],dtype=int), np.zeros(Model_parameters['discretise_size'],dtype=int)\n MINI, MAXI = np.zeros((Model_parameters['discretise_size'], Num_samples_credible)), np.zeros((Model_parameters['discretise_size'], Num_samples_credible))\n \n# predefine other arrays \n age, age_prop = np.zeros(len(midpoint_age)), np.zeros(len(midpoint_age))\n pt, pt_prop, pt_best = np.zeros( (k_max_array_bound, 2)), np.zeros( (k_max_array_bound, 2)), np.zeros( (k_max_array_bound, 2))\n endpt = np.zeros(2)\n\n# initialise working variables\n b = bb = AB = AD = PB = PD = ACV = PCV = AP = PP = PA = AA = 0\n\n# Initialize - Define randomly the first model of the chain\n k = np.random.randint(Model_parameters['K_min'],high=Model_parameters['K_max']+1)\n\n# set the data ages to be the given nominal age (i.e. discount any age error). \n# This is so datasets with stratification are valid for the initial model.\n# If we randomised the ages, we'd have to check that stratification was satifisfied, \n# and it could take a while before we find a valid model.\n\n age = midpoint_age.copy() #make a copy of the midpoint age.\n\n# Check to ensure that the stratification constraints (if any) are satisifed\n if not check_stratification(age, stratification):\n print( 'INITIAL DATA SET IS NOT CONSISTENT WITH GIVEN STRATIFICATION CONSTRAINTS')\n sys.exit(0)\n\n# Check to make sure that the ages do not extend past the model ends. 
For then we can't compute the likelihood.\n# This only happens with normally distributed ages, for which the age can be any value with prob > 0.\n# age = np.array( [ max( Model_parameters['X_min'], min(a, Model_parameters['X_max'])) for a in age] )\n for i in range(len(age)):\n age[i] = max( Model_parameters['X_min'], min(age[i], Model_parameters['X_max']))\n\n for i in range(k):\n pt[i,0] = Model_parameters['X_min'] + np.random.rand() * (Model_parameters['X_max'] - Model_parameters['X_min']) #position of internal vertex\n pt[i,1] = Model_parameters['I_min'] + np.random.rand() * (Model_parameters['I_max'] - Model_parameters['I_min']) #magnitude of internal vertex\n \n endpt[0] = Model_parameters['I_min'] + np.random.rand() * (Model_parameters['I_max'] - Model_parameters['I_min'])\n endpt[1] = Model_parameters['I_min'] + np.random.rand() * (Model_parameters['I_max'] - Model_parameters['I_min'])\n \n# make sure the positions are sorted in ascending order based on age.\n #print(pt)\n #print('*')\n pt[0:k] = pt[pt[0:k,0].argsort()]\n #np.ndarray.sort(pt, axis = 0)\n #print(pt)\n \n# COMPUTE INITIAL MISFIT\n# suppress exp overflow warnings - this can happen at the early stages of the algorithm\n trash = np.seterr(over = 'ignore')\n \n like=0;\n interpolated_signal = Find_linear_interpolated_values( Model_parameters['X_min'], Model_parameters['X_max'], pt[0:k,:], endpt, age )\n #print(delta_intensity)\n \n #print( len(age))\n #print( intensity[81] )\n #print( interpolated_signal[81] )\n #print( delta_intensity[81] )\n #q = (intensity - interpolated_signal)**2 / (2.0 * delta_intensity**2)\n #print(q[81])\n if Model_parameters['running_mode'] == 1:\n like = np.sum( (intensity - interpolated_signal)**2 / (2.0 * delta_intensity**2) )\n else:\n like = 1.0\n \n\n like_best=like\n like_init=like\n print('Initial likelihood is %s' % like)\n\n# setup output for model data\n if Model_parameters['output_model_freq'] > 0:\n output_models = open(os.path.join(Model_parameters['outputs_directory'],Model_parameters['output_model_name']), 'w')\n output_models.write('%d\\n' % Model_parameters['discretise_size'])\n for i in range(Model_parameters['discretise_size']):\n output_models.write('%10.3f ' % X[i] )\n output_models.write('\\n')\n\n# setup output for joint distribution data\n if Model_parameters['output_joint_distribution_freq'] > 0:\n joint_distribution_directory = os.path.join(Model_parameters['outputs_directory'],'Joint_distribution_data')\n if not os.path.exists(joint_distribution_directory): os.makedirs(joint_distribution_directory)\n\n joint_dist = [0] * len(age)\n for i in range(len(age)):\n joint_dist[i] = open(os.path.join(joint_distribution_directory,'Sample_%04d.dat'% (i+1)),'w')\n \n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n#%%%%%%%%%%%%%%%%% START RJ-MCMC SAMPLING %%%%%%%%%%%%%%%%%\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n for s in range(1,Model_parameters['Nsamples']+1):\n \n# Print statistics of the chain. 
\n if np.mod(s,Model_parameters['show'])==0 and s > Model_parameters['Burn_in']:\n print( 'Samples %d, Vertices %d, Acceptance: change F %7.2f, change age %7.2f, birth %7.2f, death %7.2f resample ages %7.2f, likelihood %8.2f' % (s,k,100.0*ACV/PCV if PCV != 0 else np.NaN,100.0*AP/PP if PP != 0 else np.NaN, 100.0*AB/PB if PB != 0 else np.NaN, 100.0*AD/PD if PD != 0 else np.NaN, 100.0*AA/PA if PA != 0 else np.NaN, like) )\n \n birth=move=death=change_age = change_value = 0\n \n# initialise the proposed model with the current \n age_prop = age.copy()\n pt_prop=pt.copy()\n endpt_prop = endpt.copy()\n like_prop = like\n k_prop = k\n prob = 1.0\n out = 1\n \n#----------------------------------------------------------------------\n# Every 3rd iteration, propose a new value\n if np.mod(s,3)==0:\n if s>Model_parameters['Burn_in']: PCV +=1\n change_value = 1\n k_prop = k\n ind = np.random.randint(0,high=k+2) #generate a random integer between 0 and k+1\n \n# choose which interior point to change, and check bounds to see if outside prior\n\n if ind == k: # change left end point\n endpt_prop[0] = endpt[0] + np.random.randn() * Model_parameters['sigma_change']\n if endpt_prop[0] < Model_parameters['I_min'] or endpt_prop[0] > Model_parameters['I_max']: out = 0\n \n elif ind == k+1: # change right end point\n endpt_prop[1] = endpt[1] + np.random.randn()*Model_parameters['sigma_change']\n if endpt_prop[1] < Model_parameters['I_min'] or endpt_prop[1] > Model_parameters['I_max']: out = 0\n \n else: # change interior point\n #print(pt_prop[ind,1], pt[0,1])\n pt_prop[ind,1] += np.random.randn(1)*Model_parameters['sigma_change']\n #pt_prop[ind,1] = pt_prop[ind,1] + np.random.randn(1)*Model_parameters['sigma_change']\n #print(pt_prop[ind,1], pt[0,1])\n if pt_prop[ind,1] < Model_parameters['I_min'] or pt_prop[ind,1] > Model_parameters['I_max']: out = 0\n\n# Every 3rd iteration iteration change the vertex positions\n elif np.mod(s,3)==1: # Change age position\n u = np.random.randint(0,high=3) #choose randomly between 3 operations:\n \n if u == 0: # BIRTH ++++++++++++++++++++++++++++++++++++++\n birth=1\n if s> Model_parameters['Burn_in']: PB += 1\n k_prop = k+1\n #print(np.size(pt_prop), k_prop)\n pt_prop[k_prop-1,0]=Model_parameters['X_min'] + np.random.rand()*(Model_parameters['X_max']-Model_parameters['X_min'])\n # Ensure that the new age is different to all the others - if it is, set out = 0 and abandon this model\n if pt_prop[k_prop-1,0] in pt_prop[0:k_prop-1,0]: out = 0 \n if k_prop > Model_parameters['K_max']: out=0\n\n# interpolate to find magnitude as inferred by current state\n if out == 1:\n interpolated_signal = Find_linear_interpolated_values( Model_parameters['X_min'], \n Model_parameters['X_max'], pt[0:k,:], endpt, pt_prop[k_prop-1,0] )\n \n pt_prop[k_prop-1,1]=interpolated_signal+np.random.randn()*Model_parameters['sigma_birth']\n \n# Get probability\n prob=(1.0/(Model_parameters['sigma_birth']*np.sqrt( 2.0 * np.pi )) *\n np.exp(-(interpolated_signal-pt_prop[k_prop-1,1])**2/(2.0*Model_parameters['sigma_birth']**2)) )\n \n# Check BOUNDS to see if outside prior\n \n if pt_prop[k_prop-1,1] > Model_parameters['I_max'] or pt_prop[k_prop-1,1] < Model_parameters['I_min']: out=0\n if pt_prop[k_prop-1,0] > Model_parameters['X_max'] or pt_prop[k_prop-1,0] < Model_parameters['X_min']: out=0\n\n# make sure the positions are sorted in ascending order.\n pt_prop[0:k_prop] = pt_prop[pt_prop[0:k_prop,0].argsort()]\n \n elif u == 1: # ! 
DEATH +++++++++++++++++++++++++++++++++++++++++\n death=1\n if s> Model_parameters['Burn_in']: PD += 1\n \n k_prop = k-1\n if k_prop < Model_parameters['K_min']: out=0\n \n if out == 1:\n ind = np.random.randint(0,high=k) # choose a vertex between 0 and k-1\n pt_death = pt[ind,:]\n pt_prop = pt.copy()\n pt_prop = np.delete(pt_prop,ind,axis=0) # remove point to be deleted\n pt_prop = np.append( pt_prop, [[0,0]],axis=0) #add row of zeros to end to make sure the shape doesn't change.\n \n# Get prob - interpolate \n interpolated_signal = Find_linear_interpolated_values( Model_parameters['X_min'], \n Model_parameters['X_max'], pt_prop[0:k_prop,:], endpt_prop, pt_death[0] )\n prob=( 1.0/(Model_parameters['sigma_birth']*np.sqrt(2.0*np.pi)) * \n np.exp(-(interpolated_signal -pt_death[1])**2/(2.0*Model_parameters['sigma_birth']**2)) )\n \n\n else: # MOVE +++++++++++++++++++++++++++++++++++++++++++++++++++++++\n if s> Model_parameters['Burn_in']: PP += 1\n move=1\n k_prop = k\n if k == 0: out = 0 #If there are no points to move, then we can't move any\n \n if out == 1: \n ind = np.random.randint(0,high=k) # choose a vertex between 0 and k-1\n pt_prop[ind,0] = pt[ind,0]+np.random.randn()*Model_parameters['sigma_move'] #Normal distribution of move destination\n if pt_prop[ind,0] < Model_parameters['X_min'] or pt_prop[ind,0] > Model_parameters['X_max']: out = 0 \n \n# Ensure that the new age is different to all the others - if it is, set out = 0 and abandon this model\n if pt_prop[ind,0] in np.delete(pt[0:k],ind,axis=0): out = 0 \n\n\n# make sure the positions are sorted in ascending order.\n pt_prop[0:k_prop] = pt_prop[pt_prop[0:k_prop,0].argsort()]\n\n else: # every 3rd iteration change the ages\n# select ages at random\n\n if s> Model_parameters['Burn_in']: PA += 1\n change_age = 1 \n num_age_changes = int(np.floor(len(age)/float(Model_parameters['age_frac'])))\n random_indices = np.random.randint(0,len(age),num_age_changes)\n for i in random_indices: #choose num_age_changes from the set of ages and perturb\n if Model_parameters['Age_distribution'] == 'U':\n age_prop[i] = midpoint_age[i] + 2.0 * (np.random.rand(1)-0.5) * delta_age[i]\n else:\n age_prop[i] = midpoint_age[i] + np.random.randn() * delta_age[i]\n if age_prop[i] < Model_parameters['X_min'] or age_prop[i] > Model_parameters['X_max']: out = 0\n \n\n# Check to ensure that the stratification constraints (if any) are satisifed\n if not check_stratification(age_prop, stratification): out = 0\n \n# end: decide on what proposal to make\n\n# COMPUTE MISFIT OF THE PROPOSED MODEL \n\n if out==1:\n like_prop=0;\n interpolated_signal = Find_linear_interpolated_values( Model_parameters['X_min'], Model_parameters['X_max'], \n pt_prop[0:k_prop,:], endpt_prop, age_prop )\n if Model_parameters['running_mode'] == 1:\n like_prop = np.sum( (intensity - interpolated_signal)**2 / (2.0 * delta_intensity**2) )\n else:\n like_prop = 1.0\n \n \n \n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# SEE WHETHER MODEL IS ACCEPTED\n \n accept=0\n alpha = 0\n# The acceptance term takes different values according the the proposal that has been made.\n if out == 1:\n if birth==1:\n \n alpha = ((1.0/((Model_parameters['I_max']-Model_parameters['I_min'])*prob))*np.exp(-like_prop+like))\n if np.random.rand() <alpha:\n accept=1\n if s>Model_parameters['Burn_in']: AB += 1\n \n elif death==1:\n alpha = ((Model_parameters['I_max']-Model_parameters['I_min'])*prob)*np.exp(-like_prop+like)\n if np.random.rand() <alpha:\n accept=1\n if s>Model_parameters['Burn_in']: AD+=1\n \n 
else: # NO JUMP, i.e no change in dimension\n alpha = np.exp(-like_prop+like)\n if np.random.rand() <alpha:\n accept=1\n if s>Model_parameters['Burn_in']: \n if change_value == 1:\n ACV += 1\n elif move == 1:\n AP += 1\n elif change_age ==1:\n AA += 1\n else:\n print('FATAL ERROR 1'); sys.exit(0)\n \n#If accept, update the values\n if accept==1:\n k=k_prop\n pt=pt_prop.copy()\n like=like_prop\n endpt = endpt_prop.copy()\n age = age_prop.copy()\n \n\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# Collect samples for the ensemble solution\n\n if s>Model_parameters['Burn_in'] and np.mod( s-Model_parameters['Burn_in'],Model_parameters['thinning'])==0:\n b+=1\n\n # if Model_parameters['joint_distribution_freq'] > 0:\n # write joint distribution data\n \n\n # CALL Find_linear_interpolated_values( k, x_min, x_max, pt, endpt, discretise_size, x(1:discretise_size), interpolated_signal)\n \n #IF( FREQ_WRITE_MODELS > 0) then\n #if( s>burn_in .AND. mod(s-burn_in,thin * FREQ_WRITE_MODELS) == 0) WRITE(15,format_descriptor) interpolated_signal(1:discretise_size)\n #ENDIF\n\n\n# CALL Find_linear_interpolated_values( k, x_min, x_max, pt, endpt, discretise_size, x(1:discretise_size), interpolated_signal)\n interpolated_signal = Find_linear_interpolated_values( Model_parameters['X_min'], Model_parameters['X_max'], \n pt[0:k,:], endpt, X )\n# DO THE AVERAGE\n Return_info['Av'] += interpolated_signal[:]\n\n \n# build marginal distribution for ages:\n for i in range(len(age)):\n if Model_parameters['Age_distribution'] == 'U':\n bin_index = int( np.floor( (age[i]-(midpoint_age[i]-delta_age[i])) / (delta_age[i] * 2.0) * Model_parameters['Nbins_age_marginal']))\n else:\n bin_index = int( np.floor( (age[i]-(midpoint_age[i]-2.0 * delta_age[i])) / (delta_age[i] * 4.0) * Model_parameters['Nbins_age_marginal']))\n# For normally distributed ages, bin centred on mean with a 2*standard deviation range each side.\n# Should a value fall outside this range, then simply add to either the 1st or last bin.\n bin_index = max(bin_index, 0)\n bin_index = min(bin_index, Model_parameters['Nbins_age_marginal']-1)\n\n Return_info['Marginal_ages'][i,bin_index] += 1\n\n# write model data to disk\n\n if Model_parameters['output_model_freq'] > 0:\n if np.mod( s-Model_parameters['Burn_in'], Model_parameters['thinning'] * Model_parameters['output_model_freq']) == 0:\n for i in range(Model_parameters['discretise_size']):\n output_models.write('%10.3f \\n' % interpolated_signal[i] )\n\n# collect joint distribution data\n if Model_parameters['output_joint_distribution_freq'] > 0 and np.mod( s-Model_parameters['Burn_in'], Model_parameters['thinning'] * Model_parameters['output_joint_distribution_freq']) == 0:\n interpolated_samples = Find_linear_interpolated_values( Model_parameters['X_min'], Model_parameters['X_max'], pt[0:k,:], endpt, age )\n for i in range(len(age)):\n joint_dist[i].write('%15.3f %15.3f\\n' % (age[i],interpolated_samples[i]) )\n \n# build marginal intensity density\n for i in range(len(X)):\n bin_index = int(np.floor( (interpolated_signal[i]-Model_parameters['I_min'])/ (Model_parameters['I_max']-Model_parameters['I_min']) * Model_parameters['Nbins']))\n if bin_index <0 or bin_index > Model_parameters['Nbins']-1:\n print('FATAL ERROR, BIN_INDEX IS OUT OF RANGE')\n print('MODEL POINT %s VALUE %s' %(i,interpolated_signal[i]) )\n print('INTENSITY MIN/MAX %s %s ' %(Model_parameters['I_min'], Model_parameters['I_max'] ))\n print('Model is %s %s %s' % (k,endpt,pt[0:k,:]))\n print(age); print(''); print(interpolated_signal)\n 
sys.exit(0)\n Return_info['Intensity_density'][i,bin_index] += 1\n\n \n# Do (e.g.) the 95% credible interval by keeping the lowest and greatest 2.5% of\n# all models at each sample point. We could either keep ALL the data and at\n# the end determine these regions (but this is very costly in terms of memory), or keep a running list of the\n# number of data points we need. At the end of the algorithm, simply take the\n# maximum of the smallest points, and the min of the largest, to get the\n# bounds on the credible intervals.\n# Method:\n# Num_samples_credible is the number of data points corresponding to 2.5% of the total number\n# of samples (after thinning).\n# Collect Num_samples_credible datapoints from the first Num_samples_credible samples.\n# For each subsequent sample, see if the value should actually be inside\n# the 2.5% tail. If it is, replace an existing value by the current value.\n# Repeat.\n\n if Model_parameters['Calc_credible']:\n for i in range(Model_parameters['discretise_size']):\n if b <= Num_samples_credible: \n #print(b-1, Num_samples_credible)\n MINI[i,b-1]=interpolated_signal[i]\n MAXI[i,b-1]=interpolated_signal[i]\n if b == Num_samples_credible:\n val_min[i] = MAXI[i,:].min(); ind_min[i] = MAXI[i,:].argmin()\n val_max[i] = MINI[i,:].max(); ind_max[i] = MINI[i,:].argmax()\n\n else: #we've already filled the tails, now compare each data point to see whether it should be included or not.\n if interpolated_signal[i] > val_min[i]:\n MAXI[i,ind_min[i]] = interpolated_signal[i]\n val_min[i] = MAXI[i,:].min(); ind_min[i] = MAXI[i,:].argmin()\n \n if interpolated_signal[i] < val_max[i]:\n MINI[i,ind_max[i]] = interpolated_signal[i]\n val_max[i] = MINI[i,:].max(); ind_max[i] = MINI[i,:].argmax()\n \n \n# Build histogram of number of changepoints: k\n Return_info['Changepoint_hist'][k] += 1\n\n# k can be zero here - I think there is mistake in the fortran: k can never be zero.\n\n# Do the histogram on change points\n for i in range(k):\n Return_info['Change_points'][bb]=pt[i,0]\n bb += 1\n\n# process ALL models now...\n Return_info['Misfit'][s-1] = like\n \n# Get the best model\n if like<like_best and accept == 1:\n pt_best = pt.copy()\n k_best = k\n endpt_best = endpt.copy()\n like_best = like\n age_best = age.copy()\n \n# ----------------------------- \n# end: the Sampling of the mcmc\n# ----------------------------\n\n Return_info['Change_points'] = Return_info['Change_points'][0:bb] #only return non-zero values.\n Return_info['Av'] = Return_info['Av']/b\n # print( Return_info['Intensity_density'][0,:], Return_info['Intensity_density'][10,:])\n\n# Compute the credible intervals:\n Return_info['Credible_Sup'] = np.min ( MAXI[:,:], axis = 1)\n Return_info['Credible_Inf'] = np.max ( MINI[:,:], axis = 1)\n\n# normalise marginal distributions\n Return_info['Intensity_density'][:,:] = np.array(Return_info['Intensity_density'][:,:]) / np.sum( Return_info['Intensity_density'][0,:] )\n \n# Compute the mode\n Return_info['Mode'] = (0.5 + np.argmax(Return_info['Intensity_density'], axis=1)) / Model_parameters['Nbins'] * (Model_parameters['I_max'] - Model_parameters['I_min']) + Model_parameters['I_min']\n\n# Compute the median. 
Get the first instance of the count from the left being greater than half the total:\n for i in range(Model_parameters['discretise_size']):\n for j in range(Model_parameters['Nbins']):\n if np.sum ( Return_info['Intensity_density'][i,0:j]) >= np.sum( Return_info['Intensity_density'][i,:] )/2.0:\n #print(j, np.sum ( Return_info['Intensity_density'][i,0:j]), np.sum( Return_info['Intensity_density'][i,:] )/2.0); \n Return_info['Median'][i] = (0.5 + j) / Model_parameters['Nbins'] * (Model_parameters['I_max'] - Model_parameters['I_min']) + Model_parameters['I_min']\n break\n\n\n# Calculate the \"best\" solution\n if k_best < 0:\n print('NO MINIMUM LIKELIHOOD SOLUTION FOUND')\n Return_info['Best'] = np.zeros(Model_parameters['discretise_size'])\n else:\n Return_info['Best'] = Find_linear_interpolated_values( Model_parameters['X_min'], Model_parameters['X_max'], \n pt_best[0:k_best,:], endpt_best, X )\n# close file of model data\n if Model_parameters['output_model_freq'] > 0:\n output_models.close()\n \n# close file for joint distributions\n if Model_parameters['output_joint_distribution_freq'] > 0:\n for i in range(len(age)):\n joint_dist[i].close()\n \n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_mpsk_snr_est_cc_sptr __init__(self, p) > digital_mpsk_snr_est_cc_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_mpsk_snr_est_cc_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, coeff):\n self.coeff = coeff", "def __init__(self, name, smarts, score) -> None:\n ...", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args, **kwargs):\n self.npumps = kwargs.pop('npumps', 1)\n self.nsetups = kwargs.pop('nsetups', 4)\n IPSerial.__init__(self, *args, **kwargs)", "def __init__(self, s, t, mask):\n super(RealNVPLayer, self).__init__()\n self.mask = mask\n self.t = t\n self.s = s", "def __init__(self, atoms, contraints=None, label=\"SpikeSourcePoisson\",\n rate = 1, start = 0, duration = 10000, seed=None):\n super( SpikeSourcePoisson, self ).__init__(\n n_neurons = atoms,\n constraints = contraints,\n label = label\n )\n \n self.rate = rate\n self.start = start\n self.duration = duration\n self.seed = seed", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, params=None):\n super(NetPositionsMe, self).__init__()\n self.params = params", "def __init__(self, mem, inp, outp):\n self.pc = 0\n self.mem = mem\n self.inp = inp\n self.outp = outp", "def __init__(self,n,k,d,es=1e-3,ee=1e-3):\n self.q = 4\n self.n = n\n self.k = k\n self.d = d \n self.t = int((d-1)/2)\n self.symbol_err_rate = es\n self.erasure_err_rate = ee\n self.result = mpfr(\"0\")\n self.has_result = False\n #print (n,k,d,es,ee)", "def __init__(self):\n super(CorrelogramPooling3D, self).__init__()", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoVectorizePSAction()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoPSVectorOutput()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _coin.new_SbDPMatrix(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, model, sd, sn, md, mn, contactNames, swingIds=None, swingPosNoise=None, swingVelNoise=None):\n self.model = model\n self.pin_model = model.state.pinocchio\n self.pin_data = self.pin_model.createData()\n self.nx, self.ndx, self.nu = model.state.nx, model.state.ndx, model.nu\n self.nq = self.pin_model.nq \n self.nv = self.pin_model.nv \n self.ny = self.ndx\n self.sd = sd\n self.sn = sn\n self.md = md\n self.mn = mn\n self.np = self.sd.shape[0]\n self.nm = self.md.shape[0]\n \n self.measurement = np.zeros(self.nx)\n self.MeasurementDataType = MeasurementDataFullState\n self.contact_names = contactNames\n self.contact_ids = [self.pin_model.getFrameId(name) for name in self.contact_names]\n self.nc = len(contactNames)\n self.state_names = []\n self.control_names = []\n self.branch_names = []\n self.branch_joints = []\n self.branch_ids = []\n self.parse_model()\n self.njoints = self.nv - 6 \n self.nq_base = 7 \n 
self.nv_base = 6\n self.swingIds = swingIds\n self.swingPosNoise = swingPosNoise\n self.swingVelNoise = swingVelNoise\n if self.swingIds is not None: \n assert len(self.swingIds) == len(self.swingPosNoise), \"swingPosNoise Dimension Missmatch\"\n assert len(self.swingIds) == len(self.swingVelNoise), \"swingVelNoise Dimension Missmatch\"\n # find active branches\n self.active_branches = []\n self.q_indices = []\n self.dq_indices = []\n\n if self.swingIds is not None:\n for fid in self.swingIds:\n for i, branch in enumerate(self.branch_ids):\n if fid in branch:\n self.active_branches += [i]\n # now collect state indeces \n \n for i in self.active_branches:\n q_inds = [self.state_names.index(jn) - 1 for jn in self.branch_joints[i]]\n dq_inds = [self.nv-1+self.state_names.index(jn) for jn in self.branch_joints[i]]\n self.q_indices += [q_inds]\n self.dq_indices += [dq_inds]", "def __init__(self, c, n_simulations):\n self.root = None\n self.action = None\n self.dist_probability = None\n self.c = c\n self.n_simulations = n_simulations\n self.samples = []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
tag_nsample(self) > int Return how many samples between SNR tags.
def tag_nsample(self):
    return _digital_swig.digital_mpsk_snr_est_cc_sptr_tag_nsample(self)
[ "def n_samples(self):\n return len(self.sampler)", "def num_samples(self):\r\n return self.snapshots[0].num_samples", "def n_profile_samples(self):\n return self.__n_profile_samples", "def n_samples(self):\n if self.isempty:\n return 0\n return utils.PrettyInt(len(self._abscissa_vals))", "def getNumSamples(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return SliceSamplerBase.getNumSamples(self)", "def get_num_samples(self, split_name):", "def getNumSamples(sound):\n return getLength(sound)", "def num_samples(self, split: str) -> int:\n raise NotImplementedError", "def sample_size(self):\n\t\treturn _get_sample_size(self._device)", "def oversample(self):\n return self._oversample", "def count_samples(\n self,\n samples: List,\n ) -> int:\n num_samples = len(samples)\n with utils.format_text(\"yellow\", [\"underline\"]) as fmt:\n self.log.info(fmt(f\"number of data: {num_samples}\"))\n\n return num_samples", "def packet_get_samples_per_frame(cls, data: bytes) -> int:\n return _lib.opus_packet_get_samples_per_frame(data, cls.SAMPLING_RATE)", "def nr_tags(self):\n return self._nr_tags", "def total_sample_count(self):\n return np.sum(self.sample_counts)", "def num_sequences_sampled(self) -> int:\n return self._num_sequences_sampled", "def total_samples(self, dt_per_sample):\n return self.duration // dt_per_sample", "def num_samplets(self):\n if self._data is not None:\n return len(self._data)\n else:\n return 0", "def set_sample_number(self):\r\n self.n_samples = self.exprs.shape[0]", "def multi_sampling(self) -> int:\n return self._frame_sdf[self._im_node.index].multi_sampling", "def nr_tags(self, nr_tags):\n\n self._nr_tags = nr_tags" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_tag_nsample(self, int n) Set the number of samples between SNR tags.
def set_tag_nsample(self, *args, **kwargs):
    return _digital_swig.digital_mpsk_snr_est_cc_sptr_set_tag_nsample(self, *args, **kwargs)
[ "def nr_tags(self, nr_tags):\n\n self._nr_tags = nr_tags", "def set_sample_number(self):\r\n self.n_samples = self.exprs.shape[0]", "def set_sample(self, data, nid):\n self.sample = Sample(self, data, nid)", "def resample(self, n):\n if n==len(self.times):\n return\n self.times = np.linspace(self.times[0], self.times[-1], n)", "def set_nfeatures(cls, n):\n if not isinstance(n, int):\n raise ValueError(\"Attribute 'nfeatures' must be of <type 'int'>, got %s\" % str(type(n)))\n cls.nfeatures = n", "def _set_snr(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"snr\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"snr must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"snr\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__snr = t\n if hasattr(self, '_set'):\n self._set()", "def set_n(self, value):\n self._n = value", "def set_n_rejection_samples(self, rejection_samples=200):\n if rejection_samples < 0:\n raise ValueError('Must have non-negative rejection samples.')\n self._n_rejection_samples = rejection_samples", "def truncate_samples(self, n_samples):\n if not self.active_region_is_default:\n raise AudioSignalException('Cannot truncate while active region is not set as default!')\n\n n_samples = int(n_samples)\n if n_samples > self.signal_length:\n n_samples = self.signal_length\n\n self.audio_data = self.audio_data[:, 0: n_samples]", "def set_n_files(self, n_files):\n self._n_files = n_files", "def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricIUS3IUS3_SetNumberOfSpatialSamples(self, num)", "def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricIF2IF2_SetNumberOfSpatialSamples(self, num)", "def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricISS3ISS3_SetNumberOfSpatialSamples(self, num)", "def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricIF3IF3_SetNumberOfSpatialSamples(self, num)", "def _sample_gaussian_noise(self, n):\n check_positive_integer(n)\n delta_t = 1.0 * self.t / n\n\n noise = self.rng.normal(scale=np.sqrt(delta_t), size=n)\n\n return noise", "def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricIUS2IUS2_SetNumberOfSpatialSamples(self, num)", "def set_rep_years(self, n: int, ref_var: str) -> None:\n\n self.rep_years_n = n\n self.feature_var = ref_var", 
"def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricIUC2IUC2_SetNumberOfSpatialSamples(self, num)", "def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricISS2ISS2_SetNumberOfSpatialSamples(self, num)", "def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricIUC3IUC3_SetNumberOfSpatialSamples(self, num)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
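The two accessor rows above pair naturally. As a quick illustration (an editorial sketch, not a dataset row): this assumes a GNU Radio 3.6-style Python session in which the block is obtained from the mpsk_snr_est_cc factory documented in the next row, and assumes an SNR_EST_SIMPLE enum is exported by gnuradio.digital; both names are assumptions, not confirmed here. The defaults shown come from the factory signature in the next row (tag_nsamples = 10000, alpha = 0.001).

from gnuradio import digital

# Sketch only: factory and enum names assumed from the surrounding rows.
est = digital.mpsk_snr_est_cc(digital.SNR_EST_SIMPLE)  # defaults: 10000, 0.001
print(est.tag_nsample())   # -> 10000, samples between SNR tags
est.set_tag_nsample(5000)  # tag the stream twice as often
print(est.tag_nsample())   # -> 5000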
mpsk_snr_est_cc(snr_est_type_t type, int tag_nsamples = 10000, double alpha = 0.001) > digital_mpsk_snr_est_cc_sptr A block for computing SNR of a signal. This block can be used to monitor and retrieve estimations of the signal SNR. It is designed to work in a flowgraph and passes all incoming data along to its output. The block is designed for use with MPSK signals especially. The type of estimator is specified as the parameter in the constructor. The estimators tend to trade off performance for accuracy, although experimentation should be done to figure out the right approach for a given implementation. Further, the current set of estimators are designed and proven theoretically under AWGN conditions; some amount of error should be assumed and/or estimated for real channel conditions. Factory function returning shared pointer of this class
def mpsk_snr_est_cc(*args, **kwargs):
    return _digital_swig.mpsk_snr_est_cc(*args, **kwargs)
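Since the query describes a pass-through block meant to sit inside a flowgraph, a usage sketch may help (editorial, not a dataset row). It assumes a GNU Radio 3.6-era Python environment in which gr.vector_source_c, gr.null_sink, a digital.SNR_EST_M2M4 enum, and an snr() accessor on the block all exist under these names; treat every such name as an assumption rather than confirmed API.

from gnuradio import gr, digital

# Sketch only: block and enum names assumed, per the note above.
tb = gr.top_block()
src = gr.vector_source_c([1 + 0j] * 100000)   # assumed 3.6-era source block
# M2M4 moments-based estimator (assumed enum); tag every 10000 samples,
# smooth the running estimate with alpha = 0.001 (the documented defaults).
est = digital.mpsk_snr_est_cc(digital.SNR_EST_M2M4, 10000, 0.001)
snk = gr.null_sink(gr.sizeof_gr_complex)      # assumed 3.6-era sink block
tb.connect(src, est, snk)                     # data passes through unchanged
tb.run()
print("estimated SNR (dB):", est.snr())      # snr() accessor assumed

As the query itself cautions, the estimators are derived under AWGN, so any reading from a real channel should be treated as approximate.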
[ "def type(self):\n return _digital_swig.digital_probe_mpsk_snr_est_c_sptr_type(self)", "def set_type(self, *args, **kwargs):\n return _digital_swig.digital_probe_mpsk_snr_est_c_sptr_set_type(self, *args, **kwargs)", "def rmse_and_cramer_rao(SNR_range, N_samples_range, iteration, A, angles, locations, K, method_code, return_name):\n \n import numpy as np\n from stochastic_cramer_rao import cramer_rao\n \n N_samples_zero = N_samples_range[0]\n SNR_zero = SNR_range[0]\n \n if SNR_range[1] == SNR_range[0]+1:\n snr_dB = SNR_range[1]\n if return_name == \"rmse\":\n MSE = np.zeros(N_samples_range[1]-N_samples_range[0])\n elif return_name == \"cramer\":\n cramer = np.zeros(N_samples_range[1]-N_samples_range[0])\n\n elif N_samples_range[1] == N_samples_range[0]+1:\n N_samples = N_samples_range[1]\n if return_name == \"rmse\":\n MSE = np.zeros(SNR_range[1]-SNR_range[0])\n elif return_name == \"cramer\":\n cramer = np.zeros(SNR_range[1]-SNR_range[0])\n \n for snr_dB in range(SNR_range[0],SNR_range[1]):\n \n for N_samples in range(N_samples_range[0], N_samples_range[1]):\n \n for i in range(500):\n\n # Signal(A*s) to noise(n) ratio\n received_snr = 10**(snr_dB/10)\n ratio_As_to_s = 1/4\n snr = received_snr*ratio_As_to_s\n #snr = received_snr\n \n # Source signal implementation (shape: (3,500))\n signal = np.random.normal(0,np.sqrt(snr),(3,N_samples))\n #w = np.atleast_2d([np.pi/3, np.pi/4, np.pi/5]).T\n #signal = (np.sqrt(snr))*np.exp(1j*w*(np.atleast_2d(np.arange(1,N_samples+1))))\n\n # Received signal power on sensors\n signal_power = sum(sum(np.abs(A.dot(signal))**2))/(12*N_samples)\n\n # Noise signal implementation (shape: (12,500))\n noise = np.random.normal(0,np.sqrt(0.5),(12,N_samples)) + 1j*np.random.normal(0,np.sqrt(0.5),(12,N_samples))\n noise_power = sum(sum(np.abs(noise)**2))/(12*N_samples)\n #if i == 0:\n # print()\n # print(\"SIGNAL POWER\")\n # print(signal_power)\n # print(\"NOISE POWER\")\n # print(noise_power)\n # print(\"SIGNAL TO NOISE RATIO\")\n # print(signal_power/noise_power)\n\n # Received signal (shape: (12,500))\n z = A.dot(signal) + noise\n\n # Sample covariance matrix\n R_sample = z.dot(z.conj().T)/N_samples\n\n # Eigenvalue and eigenvectors\n w_sample, v_sample = np.linalg.eig(R_sample)\n \n #if i == 0 and snr_dB == -20:\n # print()\n # print(\"EIGENVALUES OF SAMPLE COVARIANCE MATRIX\")\n # print(w_sample[0])\n # print(w_sample[1])\n # print(w_sample[2])\n # print(w_sample[3])\n\n # Sensor Selection Matrix (shape: (12,6))\n T = np.array([[1,0,0,0,0,0],\n [1,0,0,0,0,0],\n [0,1,0,0,0,0],\n [0,1,0,0,0,0],\n [0,0,1,0,0,0],\n [0,0,1,0,0,0],\n [0,0,0,1,0,0],\n [0,0,0,1,0,0],\n [0,0,0,0,1,0],\n [0,0,0,0,1,0],\n [0,0,0,0,0,1],\n [0,0,0,0,0,1]])\n\n # Push-Sum Matrix (shape: (6,6))\n P_push = np.array([[0.2,0.2,0.2,0 ,0 ,0],\n [0.2,0.2,0.2,0 ,0 ,0],\n [0.6,0.6,0.2,0.2,0 ,0],\n [0 ,0 ,0.4,0.2,0.2,0.2],\n [0 ,0 ,0 ,0.2,0.2,0.2],\n [0 ,0 ,0 ,0.4,0.6,0.6]])\n\n # Average-Consensus Matrix (shape: (6,6))\n P_ave = np.array([[0.17,0.5,0.33,0 ,0 ,0],\n [0.5,0.17,0.33,0 ,0 ,0],\n [0.33,0.33,0.01,0.33,0 ,0],\n [0 ,0 ,0.33,0.01,0.33,0.33],\n [0 ,0 ,0 ,0.33,0.17,0.5],\n [0 ,0 ,0 ,0.33,0.5,0.17]])\n\n # Weight Vector (shape: (6,1))\n w = np.atleast_2d([1,1,1,1,1,1]).T\n \n if method_code == 1:\n\n # Average Consensus Covariance Matrix Estimation \n R_ave_con = K * np.multiply(T.dot(np.linalg.matrix_power(P_ave,iteration)).dot(T.T), R_sample)\n R = R_ave_con\n \n if method_code == 2:\n # Push-Sum Covariance Matrix Estimation\n R_push_numerator = 
np.multiply(T.dot(np.linalg.matrix_power(P_push,iteration)).dot(T.T), R_sample)\n R_push_denominator = T.dot(np.linalg.matrix_power(P_push,iteration)).dot(w).dot(np.ones((1,6))).dot(T.T)\n\n # Push Sum Covariance Matrix (shape: (12,12))\n R_push = K*np.multiply(R_push_numerator, (1/(R_push_denominator)))\n R = R_push\n\n if method_code == 3:\n # Conventional ESPRIT Algorithm \n R = R_sample \n\n w_push, v_push = np.linalg.eig(R)\n\n # Upper group selection matrix J_up\n J_up = np.kron(np.eye(6),np.array([1,0]))\n\n # Lower group selection matrix J_down\n J_down = np.kron(np.eye(6),np.array([0,1]))\n\n # Push-Sum estimated signal eigenvector matrices\n U_s_push = v_push[:,:3]\n\n # Upper signal eigenvectors\n U_s_up = J_up.dot(U_s_push)\n\n # Lower signal eigenvectors\n U_s_down = J_down.dot(U_s_push)\n\n # Matrix including knowledge about DOAs of the source signals\n psi = np.linalg.inv((U_s_up.conj().T).dot(U_s_up)).dot((U_s_up.conj().T)).dot(U_s_down)\n\n w2, v2 = np.linalg.eig(psi)\n doa = []\n doa.append(np.arcsin(np.angle(w2[0])/np.pi)*360/(2*np.pi))\n doa.append(np.arcsin(np.angle(w2[1])/np.pi)*360/(2*np.pi))\n doa.append(np.arcsin(np.angle(w2[2])/np.pi)*360/(2*np.pi))\n\n #if i == 0:\n # print()\n # print(\" DOAs of the source signals in degrees with SNR: \" + str(snr_dB) )\n # print(\" DOAs of the source signals in degrees with N_samples: \" + str(N_samples) )\n # print(\"****************************************************************\")\n # print(\"****************************************************************\")\n # print(\"DOA of the first source signal: \" + str(doa[0]))\n # print(\"DOA of the second source signal: \" + str(doa[1]))\n # print(\"DOA of the third source signal: \" + str(doa[2]))\n \n diff_1 = min(abs(doa[0]-(angles*360/(2*np.pi))))\n diff_2 = min(abs(doa[1]-(angles*360/(2*np.pi))))\n diff_3 = min(abs(doa[2]-(angles*360/(2*np.pi))))\n \n if SNR_range[1] == SNR_range[0] + 1:\n if return_name == \"rmse\":\n MSE[N_samples - N_samples_zero] = MSE[N_samples - N_samples_zero]+1/3*1/500*((diff_1)**2+(diff_2)**2+(diff_3)**2)\n if i == 499: \n print(\"RMSE\")\n print(np.sqrt(MSE[N_samples - N_samples_zero]))\n elif return_name == \"cramer\":\n cramer[N_samples - N_samples_zero] = cramer[N_samples - N_samples_zero]+(1/500)*np.sqrt(cramer_rao(A, signal, angles, locations))*360/(2*np.pi)\n if i == 499: \n print(\"Cramer Rao Bound\")\n print(np.sqrt(cramer[N_samples - N_samples_zero]))\n\n elif N_samples_range[1] == N_samples_range[0] + 1:\n if return_name == \"rmse\":\n MSE[snr_dB - SNR_zero] = MSE[snr_dB - SNR_zero]+1/3*1/500*((diff_1)**2+(diff_2)**2+(diff_3)**2)\n if i == 499:\n print(\"RMSE\")\n print(np.sqrt(MSE[snr_dB - SNR_zero]))\n elif return_name == \"cramer\":\n cramer[snr_dB - SNR_zero] = cramer[snr_dB - SNR_zero]+(1/500)*np.sqrt(cramer_rao(A, signal, angles, locations))*360/(2*np.pi)\n if i == 499:\n print(\"Cramer Rao Bound\")\n print(np.sqrt(cramer[snr_dB - SNR_zero]))\n \n if return_name == \"rmse\":\n return np.sqrt(MSE)\n elif return_name == \"cramer\":\n return cramer", "def msr (riskfree_rate,er,cov):\r\n n=er.shape[0]\r\n init_guess=np.repeat(1/n,n)\r\n bounds=((0.0,1.0),)*n \r\n def neg_sharpe_ratio(weights,riskfree_rate,er,cov):\r\n \"\"\"\r\n Returns the negative of Sharpe Ratio, given weights\r\n \"\"\"\r\n r=portfolio_return(weights,er)\r\n vol=portfolio_vol(weights,cov)\r\n return -(r-riskfree_rate)/vol\r\n \r\n weights_sum_to_1={'type':'eq','fun':lambda weights:np.sum(weights)-1}\r\n 
results=minimize(neg_sharpe_ratio,init_guess,args=(riskfree_rate,er,cov,),method='SLSQP',options={'disp':False},constraints=(weights_sum_to_1),bounds=bounds)\r\n return results.x", "def estimate_snr(images):\n\n if len(images.shape) == 2: # in case of a single projection\n images = images[:, :, None]\n\n p = images.shape[1]\n n = images.shape[2]\n\n radius_of_mask = p // 2 - 1\n\n points_inside_circle = disc(p, r=radius_of_mask, inner=True)\n num_signal_points = np.count_nonzero(points_inside_circle)\n num_noise_points = p * p - num_signal_points\n\n noise = np.sum(np.var(images[~points_inside_circle], axis=0)) * num_noise_points / (num_noise_points * n - 1)\n\n signal = np.sum(np.var(images[points_inside_circle], axis=0)) * num_signal_points / (num_signal_points * n - 1)\n\n signal -= noise\n\n snr = signal / noise\n\n return snr, signal, noise", "def SNR(st, stations, components, mv=None, T=None):\n import operator\n ns = len(stations)\n nc = len(components)\n #==============================\n # == SORT BY SNR ==\n SNR_c = np.zeros((ns,nc), dtype=np.float32)\n SNR = np.zeros(ns, dtype=np.float32)\n SNR_dic = {}\n for s in range(ns):\n for c in range(nc):\n if mv is None:\n data = st.select(station=stations[s])[c].data\n else:\n id1 = max(0, T + mv[s] - np.int32(autodet.cfg.template_len/2 * autodet.cfg.sampling_rate))\n id2 = min(st.select(station=stations[s])[c].data.size, T + mv[s] + np.int32(autodet.cfg.template_len/2 * autodet.cfg.sampling_rate))\n if id2-id1 <= 0:\n data = np.float32([0.])\n else:\n data = st.select(station=stations[s])[c].data[id1:id2]\n if np.var(data) != 0.:\n SNR_c[s,c] = np.power(data, 2).max()/np.var(data)\n else:\n pass\n SNR[s] = np.mean(SNR_c[s,c])\n SNR_dic.update({stations[s]:SNR[s]})\n SNR_sorted = sorted(SNR_dic.items(), key=operator.itemgetter(1))\n SNR_sorted.reverse()\n return SNR_sorted", "def set_snr(self, snr):\n return _raw_util.raw_message_sptr_set_snr(self, snr)", "def psnr(noisy: np.ndarray, clean: np.ndarray, dynamic: float=1.0) -> float:\n assert noisy.shape == clean.shape, \"Shape mismatch when computing PSNR.\"\n peak = dynamic * dynamic\n return 10 * math.log10(peak / mse(noisy, clean))", "def spectralSNR(partarray, apix=1.0):\n\tt0 = time.time()\n\t### initialization\n\tpart0 = partarray[0]\n\tif isinstance(partarray, list):\n\t\tnumimg = len(partarray)\n\telse:\n\t\tnumimg = partarray.shape[0]\n\tif numimg < 2:\n\t\tapDisplay.printWarning(\"Cannot calculate the SSNR for less than 2 images\")\n\t\treturn 0.0\n\tfor partimg in partarray:\n\t\tif part0.shape != partimg.shape:\n\t\t\tapDisplay.printError(\"Cannot calculate the SSNR for images of different sizes\")\n\t\tif len(partimg.shape) != 2:\n\t\t\tapDisplay.printError(\"Cannot calculate the SSNR non-2D images\")\n\n\t### get fft\n\tfftlist = []\n\tfor partimg in partarray:\n\t\tfftim = real_fft2d(partimg)\n\t\tfftlist.append(fftim)\n\n\t### dimension init\n\tfftim0 = real_fft2d(partarray[0])\n\tfftshape = numpy.asarray(fftim0.shape, dtype=numpy.float32)\n\tfftcenter = fftshape/2.0\n\tlength = int(max(fftshape)/2.0)\n\tlinear = numpy.zeros((length), dtype=numpy.float32)\n\tlinear[0] = 1.0\n\n\t### figure out which pixels go with which ring\n\tringdict = getLinearIndices2d(fftshape)\n\n\t### for each ring calculate the FRC\n\tkeys = ringdict.keys()\n\tkeys.sort()\n\tfor key in keys:\n\t\tsys.stderr.write(\".\")\n\t\tindexlist = ringdict[key]\n\t\tnumer = 0.0\n\t\tdenom = 0.0\n\t\tfor indextuple in indexlist:\n\t\t\tn1, d1 = mini_ssnr1fft(fftlist, indextuple)\n\t\t\t#n1, d1 = 
mini_ssnr1(partarray, indextuple)\n\t\t\t#n2, d2 = mini_ssnr2(partarray, indextuple)\n\t\t\t#if indextuple[0] == 5 and indextuple[1] == 5:\n\t\t\t#print \"%d,%d (%.3f / %.3f) vs (%.3f / %.3f) %.3f\"%(indextuple[0], indextuple[1], n1, d1, n2, d2, n1/d1)\n\t\t\t#return\n\t\t\tnumer += n1\n\t\t\tdenom += d1\n\t\tK = len(indexlist)\n\t\tssnr = numer / ( K/(K-1.0) * denom ) - 1.0\n\t\tfrc = ssnr / (ssnr + 1)\n\t\t#if key >= 3 and key <= 5:\n\t\t#\tprint \"======================\"\n\t\t#\tprint \"numerring=\", key, numer\n\t\t#\tprint \"denomring=\", key, denom\n\t\t#\tprint \"ssnr=\", key, ssnr\n\t\t#print \"%02d %.3f %.3f (%.3f / %.3f)\"%(key, ssnr, frc, numer/K, denom/K)\n\t\t#print key, frc\n\t\tlinear[key] = frc\n\tsys.stderr.write(\"\\n\")\n\n\t### output\n\twriteFrcPlot(\"ssnr.dat\", linear, apix)\n\tres = getResolution(linear, apix, boxsize=linear.shape[0]*2)\n\tapDisplay.printMsg(\"Finished SSNR of res %.3f Angstroms in %s\"%(res, apDisplay.timeString(time.time()-t0)))\n\treturn res", "def msr(riskfree_rate, er, cov):\n n = er.shape[0]\n initial_weights = np.repeat(1/n, n) # Equally distr. weights\n bounds = ((0.0, 1.0),)*n # n bounds of (0,1) tuples\n constraint_weight_sum_is_one = {\n 'type': 'eq',\n 'fun': lambda weights: np.sum(weights) - 1\n }\n\n def neg_sharpe_ratio(weights, riskfree_rate, er, cov):\n \"\"\"\n Returns the inverse of the Sharpe ratio given:\n * weights: allocation of the assets\n \"\"\"\n r = portfolio_return(weights, er)\n v = portfolio_vol(weights, cov)\n return -(r - riskfree_rate)/v\n\n results = minimize(neg_sharpe_ratio, initial_weights, args=(riskfree_rate, er, cov,), method=\"SLSQP\", options={\n 'disp': False}, constraints=(constraint_weight_sum_is_one), bounds=bounds)\n return results.x", "def psnr_calc(noisy, real):\n numpix = noisy.size(1)*noisy.size(2)*noisy.size(3)\n bs = noisy.size(0)\n avg_sq_norm = (1/numpix)*torch.norm(0.5*(noisy.view(bs, -1)- real.view(bs,-1)), dim = 1)**2#multiplication by 0.5 because vals between [-1,1]\n psnrs = -10*torch.log10(avg_sq_norm)\n return psnrs, torch.tensor([torch.mean(psnrs), torch.std(psnrs)])", "def psnr(mse):\n return -10.0 * mse.log10()", "def snr_value(self):\n return _raw_util.raw_message_sptr_snr_value(self)", "def compute_snr(self, doplot='online'):\n\n # Apply extinction\n self.apply_atmos_ext()\n # Apply frontend throughput\n self.apply_throughput_front()\n # Apply injection efficiency\n self.apply_injeff()\n # Apply backend throughput\n self.apply_throughput_back()\n\n # Surface of the telescope (60 segments of 1.44m corner to corner)\n surf = 60. * 3. / 2. * np.sqrt(3.) * (1.44 * 100. / 2.) ** 2.\n\n # Counts per second and per resolution element\n self.tgtdetec = self.tgtflux['on_detec'] * surf * self.reselgrid / self.egrid\n self.skydetec = self.skyflux['on_detec'] * surf * self.reselgrid / self.egrid\n\n # Resolution element\n if self.spectro == 'LR' or self.spectro == 'MR':\n npixspat = 4.1\n npixspec = 3.5\n elif self.spectro == 'HR':\n npixspat = 4.5\n npixspec = 4.5\n\n # Detector/spectrographs characteristics (per pixel, per second)\n if self.spectro == 'LR' or self.spectro == 'MR':\n dark = np.ones_like(self.wgrid) * 0.02 / 3600.\n dark[self.armgrid > 2] = 72. / 3600.\n readout = np.ones_like(self.wgrid) * 5.\n readout[self.armgrid > 2] = 8.\n nreadout = np.ones_like(self.wgrid)\n nreadout[self.armgrid > 0] = 2. # (ASSUMING 2 READOUT IN GREEN ARM)\n nreadout[self.armgrid > 1] = 4. # (ASSUMING 4 READOUT IN RED ARM)\n nreadout[self.armgrid > 2] = 12. 
# (ASSUMING 12 READOUT UP THE RAMP IN NIR ARM)\n well = np.ones_like(self.wgrid) * 70000.\n well[self.armgrid > 2] = 45000.\n thermal = np.zeros_like(self.wgrid)\n thermal[self.armgrid > 2] = 9. / 3600.\n elif self.spectro == 'HR':\n dark = np.ones_like(self.wgrid) * 0.02 / 3600.\n readout = np.ones_like(self.wgrid) * 5.\n nreadout = np.ones_like(self.wgrid)\n well = np.ones_like(self.wgrid) * 70000.\n thermal = np.zeros_like(self.wgrid)\n\n # Contamination from other source\n # -- need to create contaminating target with same conditions\n if self.badtgtmag > 0:\n badtgt = MseSpectrum(sessionID=-1, tgtmag=self.badtgtmag, band=self.band, template='flat', redshift=0,\n airmass=self.airmass, skymag=self.skymag, seeing=self.seeing, coating=self.coating,\n fibdiam=self.fibdiam, spectro=self.spectro, src_type='point', lmr=self.lmr)\n badtgt.apply_atmos_ext()\n badtgt.apply_throughput_front()\n badtgt.apply_injeff()\n badtgt.apply_throughput_back()\n badspec = badtgt.tgtflux['on_detec'] * surf * self.reselgrid / self.egrid\n\n else:\n badspec = np.zeros_like(self.tgtdetec) # best case is, sky is the only contaminant so bad-target is null\n # -- compute x-talk (bad target + sky)\n self.xtalk = 0.001 * (badspec + self.skydetec)\n # -- compute ghost (bad target + sky)\n self.ghost = 0.001 * (badspec + self.skydetec)\n # -- compute instrument diffuse light (real target + sky, wrong wavelength)\n self.instdiffuse = np.zeros_like(self.tgtdetec)\n for i in range(1 + int(np.max(self.armgrid))): # max within the same arm\n self.instdiffuse[self.armgrid == i] = 0.01 * np.max((self.tgtdetec[self.armgrid == i] + signal.medfilt(self.skydetec[self.armgrid == i], 101)))\n # -- compute telescope diffuse light (sky)\n self.teldiffuse = np.zeros_like(self.tgtdetec)\n for i in range(1 + int(np.max(self.armgrid))): # max within the same arm\n self.teldiffuse[self.armgrid == i] = 0.01 * signal.medfilt(self.skydetec[self.armgrid == i], 101)\n\n # Dark current per resolution element\n self.dark = dark * npixspat * npixspec\n # Readout noise per resolution element\n self.readout = readout * np.sqrt(npixspat / self.spatbin * npixspec / self.specbin * nreadout)\n # Thermal per resolution element\n self.thermal = thermal * npixspat * npixspec\n\n # Compute SNR or exptime\n if self.meth == 'getSNR':\n # account for exposure time\n self.tgtdetec *= self.etime\n self.skydetec *= self.etime\n self.dark *= self.etime\n self.thermal *= self.etime\n self.xtalk *= self.etime\n self.ghost *= self.etime\n self.teldiffuse *= self.etime\n self.instdiffuse *= self.etime\n # Photon noise\n self.skynoise = np.sqrt(self.skydetec)\n self.tgtnoise = np.sqrt(self.tgtdetec)\n self.darknoise = np.sqrt(self.dark)\n self.thermalnoise = np.sqrt(self.thermal)\n self.xtalknoise = np.sqrt(self.xtalk)\n self.ghostnoise = np.sqrt(self.ghost)\n self.teldiffusenoise = np.sqrt(self.teldiffuse)\n self.instdiffusenoise = np.sqrt(self.instdiffuse)\n # compute SNR\n self.snr = self.tgtdetec / np.sqrt(\n self.tgtnoise ** 2 + self.skynoise ** 2 + self.darknoise ** 2 + self.thermalnoise ** 2 + self.readout ** 2 +\n self.xtalknoise ** 2 + self.ghostnoise ** 2 + self.teldiffusenoise ** 2 + self.instdiffusenoise ** 2)\n elif self.meth == 'getEtime':\n aa = self.tgtdetec ** 2\n bb = - self.snr ** 2 * (self.tgtdetec + self.skydetec + self.dark + self.thermal + self.xtalk + self.ghost +\n self.teldiffuse + self.instdiffuse)\n cc = - self.snr ** 2 * self.readout\n self.etime = (- bb + np.sqrt(bb ** 2 - 4. * aa * cc)) / (2. 
* aa)\n # account for exposure time\n self.tgtdetec *= self.etime\n self.skydetec *= self.etime\n self.dark *= self.etime\n self.thermal *= self.etime\n self.xtalk *= self.etime\n self.ghost *= self.etime\n self.teldiffuse *= self.etime\n self.instdiffuse *= self.etime\n # Photon noise\n self.skynoise = np.sqrt(self.skydetec)\n self.tgtnoise = np.sqrt(self.tgtdetec)\n self.darknoise = np.sqrt(self.dark)\n self.thermalnoise = np.sqrt(self.thermal)\n self.xtalknoise = np.sqrt(self.xtalk)\n self.ghostnoise = np.sqrt(self.ghost)\n self.teldiffusenoise = np.sqrt(self.teldiffuse)\n self.instdiffusenoise = np.sqrt(self.instdiffuse)\n\n # Plots\n output_file(\"output/plots.html\")\n\n # Prepare figure\n if self.meth == 'getSNR':\n fig1 = figure(title=\"SNR\", x_axis_label=\"Wavelength (A)\")\n else:\n fig1 = figure(title=\"Time\", y_axis_type=\"log\", x_axis_label=\"Wavelength (A)\", y_axis_label=\"Seconds\",\n y_range=(np.nanmin(self.etime)/1.1, np.nanmax(self.etime)*1.1))\n etime_hours = self.etime / 3600.\n fig1.extra_y_ranges = {\"hours\": Range1d(start=np.nanmin(etime_hours)/1.1, end=np.nanmax(etime_hours)*1.1)}\n fig1.add_layout(LogAxis(y_range_name=\"hours\", axis_label=\"Hours\"), 'right')\n\n if self.meth == 'getSNR':\n # SNR and plot\n for i in range(1 + int(np.max(self.armgrid))):\n arm = self.armgrid == i\n fig1.line(self.wgrid[arm], self.snr[arm], line_color='black', line_alpha=.25)\n fig1.line(self.wgrid[arm], signal.medfilt(self.snr[arm], 101), line_color='black')\n if self.spectro == 'LR':\n fig1.line([3600, 4000, 4000, 18000], [1, 1, 2, 2], line_color='cyan', line_dash=\"dashed\")\n elif self.spectro == 'MR':\n fig1.line([3600, 4000, 4000, 9500], [1, 1, 2, 2], line_color='cyan', line_dash=\"dashed\")\n fig1.line([9500, 18000], [2, 2], line_color='aquamarine', line_dash=\"dotted\")\n elif self.spectro == 'HR':\n fig1.line([3600, 4000, 4000, 9000], [5, 5, 10, 10], line_color='cyan', line_dash=\"dashed\")\n else:\n # Time and plot\n for i in range(1 + int(np.max(self.armgrid))):\n arm = self.armgrid == i\n fig1.line(self.wgrid[arm], self.etime[arm], line_color='black', line_alpha=.25)\n fig1.line(self.wgrid[arm], signal.medfilt(self.etime[arm], 101), line_color='black')\n\n if doplot == 'online':\n script, div = components(fig1)\n\n elif doplot == 'offline':\n fig2 = figure(title=\"Spectra\", y_axis_type=\"log\", y_axis_label=\"Flux (erg/s/cm2/A)\", x_axis_label=\"Wavelength (A)\")\n fig3 = figure(title=\"Counts\", y_axis_type=\"log\", y_axis_label=\"Counts (photons/s/cm2/res.elem)\", x_axis_label=\"Wavelength (A)\")\n fig4 = figure(title=\"Throughput\", x_axis_label=\"Wavelength (A)\")\n fig6 = figure(title=\"Noise\", y_axis_type=\"log\", y_axis_label=\"Counts (photons/s/cm2/res.elem)\", x_axis_label=\"Wavelength (A)\")\n\n # Plot intrinsic spectra\n for i in range(1 + int(np.max(self.armgrid))):\n arm = self.armgrid == i\n fig2.line(self.wgrid[arm], self.tgtflux['origin'][arm], line_color='#FFBB00', line_alpha=.25)\n fig2.line(self.wgrid[arm], self.skyflux['origin'][arm], line_color='#0088BB', line_alpha=.25)\n # Overline spectrum after extinction\n fig2.line(self.wgrid[arm], self.tgtflux['at_m1'][arm], line_color='#DD8800', line_alpha=.25)\n # Overline spectrum after throughput+injection\n fig2.line(self.wgrid[arm], self.tgtflux['in_fiber'][arm], line_color='#FF0000', line_alpha=.25)\n fig2.line(self.wgrid[arm], self.skyflux['in_fiber'][arm], line_color='#0000FF', line_alpha=.25)\n # Overline median filtered spectra\n fig2.line(self.wgrid[arm], 
signal.medfilt(self.tgtflux['origin'][arm], 101), line_color='#FFBB00', legend_label='Target')\n fig2.line(self.wgrid[arm], signal.medfilt(self.skyflux['origin'][arm], 101), line_color='#0088BB', legend_label='Sky')\n fig2.line(self.wgrid[arm], signal.medfilt(self.tgtflux['at_m1'][arm], 101), line_color='#DD8800', legend_label='Target + atmosphere')\n fig2.line(self.wgrid[arm], signal.medfilt(self.tgtflux['on_detec'][arm], 101), line_color='#FF0000', legend_label='Target out')\n fig2.line(self.wgrid[arm], signal.medfilt(self.skyflux['on_detec'][arm], 101), line_color='#0000FF', legend_label='Sky out')\n\n # Plot counts on detector\n for i in range(1 + int(np.max(self.armgrid))):\n arm = self.armgrid == i\n fig3.line(self.wgrid[arm], self.dark[arm], line_color='#00FF00', legend_label='Dark')\n fig3.line(self.wgrid[arm], self.tgtdetec[arm], line_color='#FF0000', line_alpha=.25)\n fig3.line(self.wgrid[arm], self.skydetec[arm], line_color='#0000FF', line_alpha=.25)\n # Overline median filtered spectra\n fig3.line(self.wgrid[arm], signal.medfilt(self.tgtdetec[arm], 101), line_color='#FF0000', legend_label='Target counts')\n fig3.line(self.wgrid[arm], signal.medfilt(self.skydetec[arm], 101), line_color='#0000FF', legend_label='Sky counts')\n\n # Throughput plot\n for i in range(1 + int(np.max(self.armgrid))):\n arm = self.armgrid == i\n fig4.line(self.wgrid[arm], self.thr_struc[arm], line_color='#FF0000', legend_label='Structure')\n fig4.line(self.wgrid[arm], self.thr_m1[arm], line_color='#0000FF', legend_label='M1')\n fig4.line(self.wgrid[arm], self.thr_pfue[arm], line_color='#AA4400', legend_label='PFUE')\n fig4.line(self.wgrid[arm], self.inj[arm], line_color='#00AA66', legend_label='Inj.Eff.')\n fig4.line(self.wgrid[arm], self.thr_poss[arm], line_color='#00FF88', legend_label='PosS')\n fig4.line(self.wgrid[arm], self.thr_fiber[arm], line_color='#8800FF', legend_label='FiTS')\n fig4.line(self.wgrid[arm], self.thr_spectro[arm], line_color='#CCCC00', legend_label='Spectro')\n # overall throughput\n fig4.line(self.wgrid[arm], (self.thr_struc * self.thr_m1 * self.thr_pfue * self.thr_poss * self.inj * self.thr_fiber\n * self.thr_spectro)[arm], line_color='black')\n\n # Dark, readout, Poisson noise ...\n for i in range(1 + int(np.max(self.armgrid))):\n arm = self.armgrid == i\n fig6.line(self.wgrid[arm], self.darknoise[arm], line_color='#00FF00', legend_label='Dark noise')\n fig6.line(self.wgrid[arm], self.readout[arm], line_color='#FF8800', legend_label='Read noise')\n fig6.line(self.wgrid[arm], self.tgtnoise[arm], line_color='#FF0000', alpha=.25)\n fig6.line(self.wgrid[arm], self.skynoise[arm], line_color='#0000FF', alpha=.25)\n fig6.line(self.wgrid[arm], signal.medfilt(self.tgtnoise[arm], 101), line_color='#FF0000', legend_label='Target noise')\n fig6.line(self.wgrid[arm], signal.medfilt(self.skynoise[arm], 101), line_color='#0000FF', legend_label='Sky noise')\n fig6.line(self.wgrid[arm], self.thermalnoise[arm], line_color='#FF4488', legend_label='Thermal noise')\n fig6.line(self.wgrid[arm], self.xtalknoise[arm], line_color='#004488', legend_label='X-talk noise')\n fig6.line(self.wgrid[arm], self.instdiffusenoise[arm], line_color='#440088', legend_label='Diffuse (inst.) noise')\n fig6.line(self.wgrid[arm], self.teldiffusenoise[arm], line_color='#00FF88', legend_label='Diffuse (tel.) 
noise')\n\n # make a grid\n grid = gridplot([[fig1, fig2, fig6], [fig4, None, fig3]])\n show(grid)\n script, div = components(grid)\n\n else:\n script = ''\n div = ''\n\n return script, div", "def pca_optimize_snr(cube, angle_list, source_xy, fwhm, cube_ref=None,\n mode='fullfr', annulus_width=20, range_pcs=None,\n svd_mode='lapack', scaling=None, mask_center_px=None, \n fmerit='px', min_snr=0, collapse='median', verbose=True, \n full_output=False, debug=False, plot=True, save_plot=None,\n plot_title=None): \n def truncate_svd_get_finframe(matrix, angle_list, ncomp, V):\n \"\"\" Projection, subtraction, derotation plus combination in one frame.\n Only for full-frame\"\"\"\n transformed = np.dot(V[:ncomp], matrix.T)\n reconstructed = np.dot(transformed.T, V[:ncomp])\n residuals = matrix - reconstructed\n frsize = int(np.sqrt(matrix.shape[1])) # only for square frames\n residuals_res = reshape_matrix(residuals, frsize, frsize)\n residuals_res_der = cube_derotate(residuals_res, angle_list)\n frame = cube_collapse(residuals_res_der, mode=collapse)\n return frame\n\n def truncate_svd_get_finframe_ann(matrix, indices, angle_list, ncomp, V):\n \"\"\" Projection, subtraction, derotation plus combination in one frame.\n Only for annular mode\"\"\"\n transformed = np.dot(V[:ncomp], matrix.T)\n reconstructed = np.dot(transformed.T, V[:ncomp])\n residuals_ann = matrix - reconstructed\n residuals_res = np.zeros_like(cube)\n residuals_res[:,indices[0],indices[1]] = residuals_ann\n residuals_res_der = cube_derotate(residuals_res, angle_list)\n frame = cube_collapse(residuals_res_der, mode=collapse)\n return frame\n\n def get_snr(matrix, angle_list, y, x, mode, V, fwhm, ncomp, fmerit,\n full_output):\n if mode=='fullfr':\n frame = truncate_svd_get_finframe(matrix, angle_list, ncomp, V)\n elif mode=='annular':\n frame = truncate_svd_get_finframe_ann(matrix, annind, angle_list,\n ncomp, V)\n else:\n raise RuntimeError('Wrong mode. 
Choose either full or annular')\n \n if fmerit=='max':\n yy, xx = draw.circle(y, x, fwhm/2.)\n res = [phot.snr_ss(frame, (x_,y_), fwhm, plot=False, verbose=False, \n full_output=True) for y_, x_ in zip(yy, xx)]\n snr_pixels = np.array(res)[:,-1]\n fluxes = np.array(res)[:,2]\n argm = np.argmax(snr_pixels)\n if full_output:\n # integrated fluxes for the max snr\n return np.max(snr_pixels), fluxes[argm], frame\n else:\n return np.max(snr_pixels), fluxes[argm]\n elif fmerit=='px':\n res = phot.snr_ss(frame, (x,y), fwhm, plot=False, verbose=False,\n full_output=True)\n snrpx = res[-1]\n fluxpx = np.array(res)[2]\n if full_output:\n # integrated fluxes for the given px\n return snrpx, fluxpx, frame\n else:\n return snrpx, fluxpx\n elif fmerit=='mean':\n yy, xx = draw.circle(y, x, fwhm/2.)\n res = [phot.snr_ss(frame, (x_,y_), fwhm, plot=False, verbose=False, \n full_output=True) for y_, x_ in zip(yy, xx)] \n snr_pixels = np.array(res)[:,-1]\n fluxes = np.array(res)[:,2]\n if full_output:\n # mean of the integrated fluxes (shifting the aperture)\n return np.mean(snr_pixels), np.mean(fluxes), frame\n else: \n return np.mean(snr_pixels), np.mean(fluxes)\n \n def grid(matrix, angle_list, y, x, mode, V, fwhm, fmerit, step, inti, intf, \n debug, full_output, truncate=True):\n nsteps = 0\n snrlist = []\n pclist = []\n fluxlist = []\n if full_output: frlist = []\n counter = 0\n if debug: \n print('Step current grid:', step)\n print('PCs | SNR')\n for pc in range(inti, intf+1, step):\n if full_output:\n snr, flux, frame = get_snr(matrix, angle_list, y, x, mode, V,\n fwhm, pc, fmerit, full_output)\n else:\n snr, flux = get_snr(matrix, angle_list, y, x, mode, V, fwhm, pc,\n fmerit, full_output)\n if np.isnan(snr): snr=0\n if nsteps>1 and snr<snrlist[-1]: counter += 1\n snrlist.append(snr)\n pclist.append(pc)\n fluxlist.append(flux)\n if full_output: frlist.append(frame)\n nsteps += 1\n if truncate and nsteps>2 and snr<min_snr: \n if debug: print('SNR too small')\n break\n if debug: print('{} {:.3f}'.format(pc, snr))\n if truncate and counter==5: break \n argm = np.argmax(snrlist)\n \n if len(pclist)==2: pclist.append(pclist[-1]+1)\n \n if debug:\n print('Finished current stage')\n try:\n pclist[argm+1]\n print('Interval for next grid: ', pclist[argm-1], 'to',\n pclist[argm+1])\n except:\n print('The optimal SNR seems to be outside of the given PC range')\n print()\n \n if argm==0: argm = 1 \n if full_output:\n return argm, pclist, snrlist, fluxlist, frlist\n else: \n return argm, pclist, snrlist, fluxlist\n \n #---------------------------------------------------------------------------\n if not cube.ndim==3:\n raise TypeError('Input array is not a cube or 3d array')\n \n if verbose: start_time = time_ini()\n n = cube.shape[0]\n x, y = source_xy \n \n if range_pcs is not None:\n if len(range_pcs)==2:\n pcmin, pcmax = range_pcs\n pcmax = min(pcmax, n)\n step = 1\n elif len(range_pcs)==3:\n pcmin, pcmax, step = range_pcs\n pcmax = min(pcmax, n)\n else:\n msg = 'Range_pcs tuple must be entered as (PC_INI, PC_MAX, STEP) '\n msg += 'or (PC_INI, PC_MAX)'\n raise TypeError(msg)\n else:\n pcmin = 1\n pcmax = 200\n pcmax = min(pcmax, n)\n \n # Getting `pcmax` principal components a single time\n if mode=='fullfr':\n matrix = prepare_matrix(cube, scaling, mask_center_px, verbose=False)\n if cube_ref is not None:\n ref_lib = prepare_matrix(cube_ref, scaling, mask_center_px,\n verbose=False)\n else:\n ref_lib = matrix\n\n elif mode=='annular':\n y_cent, x_cent = frame_center(cube[0])\n ann_radius = dist(y_cent, x_cent, 
y, x)\n matrix, annind = prepare_matrix(cube, scaling, None, mode='annular',\n annulus_radius=ann_radius,\n annulus_width=annulus_width,\n verbose=False)\n if cube_ref is not None:\n ref_lib, _ = prepare_matrix(cube_ref, scaling, mask_center_px,\n mode='annular', annulus_radius=ann_radius,\n annulus_width=annulus_width, verbose=False)\n else:\n ref_lib = matrix\n\n else:\n raise RuntimeError('Wrong mode. Choose either fullfr or annular')\n\n V = svd_wrapper(ref_lib, svd_mode, pcmax, False, verbose)\n\n\n # sequential grid\n if range_pcs is not None:\n grid1 = grid(matrix, angle_list, y, x, mode, V, fwhm, fmerit, step, \n pcmin, pcmax, debug, full_output, False)\n if full_output: argm, pclist, snrlist, fluxlist, frlist = grid1\n else: argm, pclist, snrlist, fluxlist = grid1\n \n opt_npc = pclist[argm] \n if verbose:\n print('Number of steps', len(pclist))\n msg = 'Optimal number of PCs = {}, for SNR={}'\n print(msg.format(opt_npc, snrlist[argm]))\n print()\n timing(start_time)\n \n if full_output: \n cubeout = np.array((frlist))\n\n # Plot of SNR as function of PCs \n if plot: \n plt.figure(figsize=(8,4))\n ax1 = plt.subplot(211) \n ax1.plot(pclist, snrlist, '-', alpha=0.5)\n ax1.plot(pclist, snrlist, 'o', alpha=0.5, color='blue')\n ax1.set_xlim(np.array(pclist).min(), np.array(pclist).max())\n ax1.set_ylim(0, np.array(snrlist).max()+1)\n ax1.set_ylabel('SNR')\n ax1.minorticks_on()\n ax1.grid('on', 'major', linestyle='solid', alpha=0.4)\n \n ax2 = plt.subplot(212)\n ax2.plot(pclist, fluxlist, '-', alpha=0.5, color='green')\n ax2.plot(pclist, fluxlist, 'o', alpha=0.5, color='green')\n ax2.set_xlim(np.array(pclist).min(), np.array(pclist).max())\n ax2.set_ylim(0, np.array(fluxlist).max()+1)\n ax2.set_xlabel('Principal components')\n ax2.set_ylabel('Flux in FWHM ap. 
[ADUs]')\n ax2.minorticks_on()\n ax2.grid('on', 'major', linestyle='solid', alpha=0.4)\n print()\n \n # automatic \"clever\" grid\n else:\n grid1 = grid(matrix, angle_list, y, x, mode, V, fwhm, fmerit, \n max(int(pcmax*0.1),1), pcmin, pcmax, debug, full_output)\n if full_output: argm, pclist, snrlist, fluxlist, frlist1 = grid1\n else: argm, pclist, snrlist, fluxlist = grid1\n \n grid2 = grid(matrix, angle_list, y, x, mode, V, fwhm, fmerit, \n max(int(pcmax*0.05),1), pclist[argm-1], pclist[argm+1], debug, \n full_output)\n if full_output: argm2, pclist2, snrlist2, fluxlist2, frlist2 = grid2\n else: argm2, pclist2, snrlist2, fluxlist2 = grid2\n \n grid3 = grid(matrix, angle_list, y, x, mode, V, fwhm, fmerit, 1, \n pclist2[argm2-1], pclist2[argm2+1], debug, full_output, \n False)\n if full_output: _, pclist3, snrlist3, fluxlist3, frlist3 = grid3\n else: _, pclist3, snrlist3, fluxlist3 = grid3\n \n argm = np.argmax(snrlist3)\n opt_npc = pclist3[argm] \n dfr = pd.DataFrame(np.array((pclist+pclist2+pclist3, \n snrlist+snrlist2+snrlist3,\n fluxlist+fluxlist2+fluxlist3)).T) \n dfrs = dfr.sort_values(0)\n dfrsrd = dfrs.drop_duplicates()\n ind = np.array(dfrsrd.index) \n \n if verbose:\n print('Number of evaluated steps', ind.shape[0])\n msg = 'Optimal number of PCs = {}, for SNR={}'\n print(msg.format(opt_npc, snrlist3[argm]), '\\n')\n timing(start_time)\n \n if full_output: \n cubefrs = np.array((frlist1+frlist2+frlist3))\n cubeout = cubefrs[ind]\n \n # Plot of SNR as function of PCs \n if plot: \n alpha = 0.4\n lw = 2\n plt.figure(figsize=(6,4)) \n ax1 = plt.subplot(211) \n ax1.plot(np.array(dfrsrd.loc[:,0]), np.array(dfrsrd.loc[:,1]), '-', \n alpha=alpha, color='blue', lw=lw)\n ax1.plot(np.array(dfrsrd.loc[:,0]), np.array(dfrsrd.loc[:,1]), 'o', \n alpha=alpha/2, color='blue')\n ax1.set_xlim(np.array(dfrsrd.loc[:,0]).min(), np.array(dfrsrd.loc[:,0]).max())\n ax1.set_ylim(0, np.array(dfrsrd.loc[:,1]).max()+1)\n #ax1.set_xlabel('')\n ax1.set_ylabel('S/N')\n ax1.minorticks_on()\n ax1.grid('on', 'major', linestyle='solid', alpha=0.2)\n if plot_title is not None:\n ax1.set_title('Optimal pc: ' + str(opt_npc) + ' for ' + plot_title)\n \n ax2 = plt.subplot(212)\n ax2.plot(np.array(dfrsrd.loc[:,0]), np.array(dfrsrd.loc[:,2]), '-', \n alpha=alpha, color='green', lw=lw)\n ax2.plot(np.array(dfrsrd.loc[:,0]), np.array(dfrsrd.loc[:,2]), 'o', \n alpha=alpha/2, color='green')\n ax2.set_xlim(np.array(pclist).min(), np.array(pclist).max())\n #ax2.set_ylim(0, np.array(fluxlist).max()+1)\n ax2.set_xlabel('Principal components')\n ax2.set_ylabel('Flux in FWHM aperture')\n ax2.minorticks_on()\n ax2.set_yscale('log')\n ax2.grid('on', 'major', linestyle='solid', alpha=0.2)\n #plt.savefig('figure.pdf', dpi=300, bbox_inches='tight')\n print()\n \n # Optionally, save the contrast curve\n if save_plot != None:\n plt.savefig(save_plot, dpi=100, bbox_inches='tight')\n\n if mode == 'fullfr':\n finalfr = pca(cube, angle_list, cube_ref=cube_ref, ncomp=opt_npc,\n svd_mode=svd_mode, mask_center_px=mask_center_px,\n scaling=scaling, collapse=collapse, verbose=False)\n elif mode == 'annular':\n finalfr = pca_annulus(cube, angle_list, ncomp=opt_npc,\n annulus_width=annulus_width, r_guess=ann_radius,\n cube_ref=cube_ref, svd_mode=svd_mode,\n scaling=scaling, collapse=collapse)\n\n _ = phot.frame_quick_report(finalfr, fwhm, (x,y), verbose=verbose)\n \n if full_output:\n return opt_npc, finalfr, cubeout\n else:\n return opt_npc", "def psnr_ycbcr(gt_im, interpolated_im):\r\n # convert the images from tensor to numpy arrays\r\n if 
torch.cuda.is_available():\r\n gt_im = gt_im.cpu().detach()\r\n gt_im = gt_im.permute(1, 2, 0).numpy()\r\n interpolated_im = interpolated_im.cpu().detach()\r\n interpolated_im = interpolated_im.permute(1, 2, 0).numpy()\r\n\r\n # convert the images into ycbcr color space, get only the first channels\r\n gt_im_ycbcr = rgb2ycbcr(gt_im)[:, :, 0]\r\n interpolated_im_ycbcr = rgb2ycbcr(interpolated_im)[:, :, 0]\r\n # find the psnr of the first channels\r\n e = np.abs(gt_im_ycbcr - interpolated_im_ycbcr) ** 2\r\n mse = np.sum(e) / e.size\r\n if mse > 0.001: # mse should not be zero\r\n psnr_err = 10 * np.log10(255**2 / mse)\r\n return psnr_err\r\n else:\r\n pass", "def psnr(gt_im, interpolated_im):\r\n e = torch.abs(gt_im - interpolated_im) ** 2\r\n mse = torch.sum(e) / e.numel()\r\n psnr_err = 10 * torch.log10(torch.tensor(255) * torch.tensor(255) / mse)\r\n\r\n return psnr_err.item()", "def compute_PSNR(out, lbl):\n out = out[0, :, :, 0]\n lbl = lbl[0, :, :, 0]\n diff = out - lbl\n rmse = np.sqrt(np.mean(diff**2))\n psnr = 20*np.log10(255/rmse)\n return psnr", "def calculatesReceiverSNR(self, SNR=None):\n pass", "def iter_gaussian_bitrate(rho, psnr_out_1D, psnr_out_2D, snr_out_1D,\n snr_out_2D):\n import numpy as np\n rho = float(rho)\n psnrs = []\n snrs = []\n for bit_rate in range(1, 5):\n result_dict = gaussian_train_test(bit_rate, rho)\n\n psnr_1d = result_dict['psnr']['1d']\n mean_psnr_1d = sum(psnr_1d)/len(psnr_1d)\n psnr_2d = result_dict['psnr']['2d']\n mean_psnr_2d = sum(psnr_2d)/len(psnr_2d)\n psnrs.append((bit_rate, mean_psnr_1d, mean_psnr_2d))\n\n snr_1d = result_dict['snr']['1d']\n mean_snr_1d = sum(snr_1d)/len(snr_1d)\n snr_2d = result_dict['snr']['2d']\n mean_snr_2d = sum(snr_2d)/len(snr_2d)\n snrs.append((bit_rate, mean_snr_1d, mean_snr_2d))\n\n with open(psnr_out_1D, \"w\") as f:\n for entry in psnrs:\n f.write(\"{},{}\\n\".format(entry[0], entry[1]))\n with open(psnr_out_2D, \"w\") as f:\n for entry in psnrs:\n f.write(\"{},{}\\n\".format(entry[0], entry[2]))\n\n with open(snr_out_1D, \"w\") as f:\n for entry in snrs:\n f.write(\"{},{}\\n\".format(entry[0], entry[1]))\n with open(snr_out_2D, \"w\") as f:\n for entry in snrs:\n f.write(\"{},{}\\n\".format(entry[0], entry[2]))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_ofdm_cyclic_prefixer_sptr __init__(self, p) > digital_ofdm_cyclic_prefixer_sptr
def __init__(self, *args): this = _digital_swig.new_digital_ofdm_cyclic_prefixer_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, prefix_set):\n self.prefix_set = prefix_set", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def make(self, *args, **kwargs):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_sptr_make(self, *args, **kwargs)", "def vamsi_OFDMCP_ff_make(*args, **kwargs):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_make(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_fff_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoBlinker()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __deref__(self):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_sptr___deref__(self)", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, fft_length, cp_length, occupied_tones, snr, ks, carrier_map_bin, nc_filter, logging=False):\n\n\tgr.hier_block2.__init__(self, \"ofdm_receiver\",\n\t\t\t\tgr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature\n gr.io_signature2(2, 2, gr.sizeof_gr_complex*occupied_tones, gr.sizeof_char)) # Output signature\n\n bw = (float(occupied_tones) / float(fft_length)) / 2.0\n tb = bw*0.04\n print \"ofdm_receiver:__init__:occupied_tones %s fft_length %d \" % (occupied_tones, fft_length)\n \n chan_coeffs = filter.firdes.low_pass (1.0, # gain\n 1.0, # sampling rate\n bw+tb, # midpoint of trans. band\n tb, # width of trans. 
band\n filter.firdes.WIN_HAMMING) # filter type\n \n self.chan_filt = filter.fft_filter_ccc(1, chan_coeffs)\n\n # linklab, get ofdm parameters\n self._fft_length = fft_length\n self._occupied_tones = occupied_tones\n self._cp_length = cp_length\n self._nc_filter = nc_filter\n self._carrier_map_bin = carrier_map_bin\n \n win = [1 for i in range(fft_length)]\n \n # linklab, initialization function\n self.initialize(ks, self._carrier_map_bin)\n \n\n zeros_on_left = int(math.ceil((fft_length - occupied_tones)/2.0))\n ks0 = fft_length*[0,]\n ks0[zeros_on_left : zeros_on_left + occupied_tones] = ks[0]\n\n ks0 = np_fft.ifftshift(ks0)\n ks0time = np_fft.ifft(ks0)\n # ADD SCALING FACTOR\n ks0time = ks0time.tolist()\n\n SYNC = \"pn\"\n if SYNC == \"ml\":\n nco_sensitivity = -1.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_ml(fft_length,\n cp_length,\n snr,\n ks0time,\n logging)\n elif SYNC == \"pn\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pn(fft_length,\n cp_length,\n logging)\n elif SYNC == \"pnac\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pnac(fft_length,\n cp_length,\n ks0time,\n logging)\n # for testing only; do not user over the air\n # remove filter and filter delay for this\n elif SYNC == \"fixed\":\n self.chan_filt = gr.multiply_const_cc(1.0)\n nsymbols = 18 # enter the number of symbols per packet\n freq_offset = 0.0 # if you use a frequency offset, enter it here\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_fixed(fft_length,\n cp_length,\n nsymbols,\n freq_offset,\n logging)\n\n # Set up blocks\n\n # Create a delay line, linklab\n self.delay = blocks.delay(gr.sizeof_gr_complex, fft_length)\n\n self.nco = analog.frequency_modulator_fc(nco_sensitivity) # generate a signal proportional to frequency error of sync block\n self.sigmix = blocks.multiply_cc()\n self.sampler = gr_papyrus.ofdm_sampler(fft_length, fft_length+cp_length)\n self.fft_demod = gr_fft.fft_vcc(fft_length, True, win, True)\n self.ofdm_frame_acq = gr_papyrus.ofdm_frame_acquisition(occupied_tones,\n fft_length,\n cp_length, ks[0])\n # linklab, check current mode: non-contiguous OFDM or not\n if self._nc_filter:\n print '\\nMulti-band Filter Turned ON!'\n # linklab, non-contiguous filter\n self.ncofdm_filt = ncofdm_filt(self._fft_length, self._occupied_tones, self._carrier_map_bin)\n self.connect(self, self.chan_filt, self.ncofdm_filt)\n self.connect(self.ncofdm_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. offset output to derotate input signal\n self.connect(self.ncofdm_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n else :\n print '\\nMulti-band Filter Turned OFF!'\n self.connect(self, self.chan_filt)\n self.connect(self.chan_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. 
offset output to derotate input signal\n self.connect(self.chan_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n\n self.connect(self.sigmix, (self.sampler,0)) # sample off timing signal detected in sync alg\n self.connect((self.ofdm_sync,1), (self.sampler,1)) # timing signal to sample at\n\n self.connect((self.sampler,0), self.fft_demod) # send derotated sampled signal to FFT\n self.connect(self.fft_demod, (self.ofdm_frame_acq,0)) # find frame start and equalize signal\n self.connect((self.sampler,1), (self.ofdm_frame_acq,1)) # send timing signal to signal frame start\n self.connect((self.ofdm_frame_acq,0), (self,0)) # finished with fine/coarse freq correction,\n self.connect((self.ofdm_frame_acq,1), (self,1)) # frame and symbol timing, and equalization\n\n if logging:\n self.connect(self.chan_filt, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-chan_filt_c.dat\"))\n self.connect(self.fft_demod, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-fft_out_c.dat\"))\n self.connect(self.ofdm_frame_acq,\n gr.file_sink(gr.sizeof_gr_complex*occupied_tones, \"ofdm_receiver-frame_acq_c.dat\"))\n self.connect((self.ofdm_frame_acq,1), gr.file_sink(1, \"ofdm_receiver-found_corr_b.dat\"))\n self.connect(self.sampler, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-sampler_c.dat\"))\n self.connect(self.sigmix, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-sigmix_c.dat\"))\n self.connect(self.nco, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-nco_c.dat\"))", "def __init__(self):\n this = _coin.new_SoMFName()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ofdm_cyclic_prefixer(size_t input_size, size_t output_size) > digital_ofdm_cyclic_prefixer_sptr Adds a cyclic prefix to an input_size-long OFDM symbol (vector) and converts the vector to a stream of length output_size.
def ofdm_cyclic_prefixer(*args, **kwargs): return _digital_swig.ofdm_cyclic_prefixer(*args, **kwargs)
[ "def vamsi_OFDMCP_ff_make(*args, **kwargs):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_make(*args, **kwargs)", "def gen_bin(length:int, prefix=\"\"):\n if length == 0:\n print(prefix)\n return\n\n gen_bin(length - 1, prefix + \"0\")\n gen_bin(length - 1, prefix + \"1\")", "def make(self, *args, **kwargs):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_sptr_make(self, *args, **kwargs)", "def addPrefix(self, prefix):\n \n pass", "def build_ids(size, id_start=0, prefix=\"id_\", max_length=10):\n return [prefix + str(x).zfill(max_length)\n for x in np.arange(id_start, id_start + size)]", "def prefix_bytes(listx):\n\n listy = []\n for item in listx:\n item = \"0x\" + item\n listy.append(item)\n return listy", "def cyclic(d, n):\n return call_polymake_function(b\"polytope\", b\"cyclic\", d, n)", "def getPrefixedPath(prefixLength, id):\n prefix = id[:prefixLength]\n rest = id[prefixLength:]\n path = ''\n for i in range(0, prefixLength):\n path = path + prefix[i] + '/'\n path = path + rest + '/' + id + '/'\n return path", "def symbol_factory(packager,prefix):\n i=1\n while True:\n yield packager(prefix+str(i))\n i +=1", "def _set_prefix(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"ip_prefix\",yc_prefix_openconfig_routing_policy__routing_policy_defined_sets_prefix_sets_prefix_set_prefixes_prefix, yang_name=\"prefix\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip-prefix', extensions=None), is_container='list', yang_name=\"prefix\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/routing-policy', defining_module='openconfig-routing-policy', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"prefix must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"ip_prefix\",yc_prefix_openconfig_routing_policy__routing_policy_defined_sets_prefix_sets_prefix_set_prefixes_prefix, yang_name=\"prefix\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip-prefix', extensions=None), is_container='list', yang_name=\"prefix\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/routing-policy', defining_module='openconfig-routing-policy', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__prefix = t\n if hasattr(self, '_set'):\n self._set()", "def _set_prefix(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"prefix\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"prefix must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"prefix\", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__prefix = t\n if hasattr(self, '_set'):\n self._set()", "def cyclic_caratheodory(d, n):\n return call_polymake_function(b\"polytope\", b\"cyclic_caratheodory\", d, n)", "def print_binary_sequences_with_prefix(prefix, n):\n if len(prefix) == n: # Base case, when the prefix is big as n\n print(prefix)\n else:\n # Calling again the function with prefix increased by 0\n print_binary_sequences_with_prefix(prefix + BINARY_FIRST, n)\n # Calling again the function with prefix increased by 0\n print_binary_sequences_with_prefix(prefix + BINARY_SECOND, n)", "def process_common_prefix(self):\n total_count = 0\n # This counts the number of iteration\n it_count = 0\n while True:\n it_count += 1\n\n count = 0\n # Always make a new copy here because LF will add\n # new terminals\n temp = self.non_terminal_set.copy()\n\n for symbol in temp:\n ret = symbol.left_factorize()\n if ret is True:\n count += 1\n\n if count == 0:\n break\n else:\n total_count += count\n\n dbg_printf(\"Left factorized for %d symbols in %d iterations\",\n total_count,\n it_count)\n\n return", "def build_bpe(\n corpus: List[str],\n max_vocab_size: int\n) -> List[int]:\n # Special tokens\n PAD = BytePairEncoding.PAD_token # Index of <PAD> must be 0\n UNK = BytePairEncoding.UNK_token # Index of <UNK> must be 1\n CLS = BytePairEncoding.CLS_token # Index of <CLS> must be 2\n SEP = BytePairEncoding.SEP_token # Index of <SEP> must be 3\n MSK = BytePairEncoding.MSK_token # Index of <MSK> must be 4\n SPECIAL = [PAD, UNK, CLS, SEP, MSK]\n\n WORD_END = BytePairEncoding.WORD_END # Use this token as the end of a word\n\n # YOUR CODE HERE (~22 lines)\n real_corpus = [x for x in corpus]\n idx2word: List[str] = SPECIAL\n words: Counter = Counter(\n [' '.join(list(x)) + ' ' + WORD_END for x in real_corpus])\n\n initial_words: Set = set()\n for word in real_corpus:\n initial_words.update(word)\n initial_words.add(WORD_END)\n\n subwords: List[str] = list(initial_words)\n while len(subwords) < max_vocab_size - len(SPECIAL):\n pairs: defaultdict = defaultdict(int)\n for word, freq in words.items():\n symbols = word.split()\n for i in range(len(symbols) - 1):\n pairs[symbols[i], symbols[i + 1]] += freq\n\n if len(pairs) == 0:\n break\n\n max_freq_pair = max(pairs, key=pairs.get)\n new_subword = ''.join(max_freq_pair)\n\n new_words: Counter = Counter()\n bigram = r'(?!\\s)(%s %s)(?!\\S)' % (max_freq_pair[0].replace(\n '.', '\\.'), max_freq_pair[1].replace('.', '\\.'))\n for word in words:\n new_words[re.sub(bigram, new_subword, word)] = words[word]\n\n if words == new_words:\n break\n words = new_words\n subwords.append(new_subword)\n subwords.sort(key=len, reverse=True)\n idx2word += subwords\n # END YOUR CODE\n\n return idx2word", "def no_repetition_sequences_with_prefix_list(char_list, prefix, n,\n strings_list):\n if len(prefix) == n: # Base case, when the prefix is big as n\n strings_list.append(prefix)\n else:\n # For each character, calling again the function with prefix increased\n # by the char\n for char in char_list:\n # Checking that's the char not already in the sequence to avoid\n # repetitions\n #if char not in prefix:\n no_repetition_sequences_with_prefix_list(char_list,\n prefix + char, n,\n strings_list)\n return strings_list", "def _set_prefix(self, v, load=False):\n if hasattr(v, 
\"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"destination\",prefix.prefix, yang_name=\"prefix\", rest_name=\"prefix\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='destination', extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}), is_container='list', yang_name=\"prefix\", rest_name=\"prefix\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"prefix must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"destination\",prefix.prefix, yang_name=\"prefix\", rest_name=\"prefix\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='destination', extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}), is_container='list', yang_name=\"prefix\", rest_name=\"prefix\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)\"\"\",\n })\n\n self.__prefix = t\n if hasattr(self, '_set'):\n self._set()", "def build_verhoeff_id(prefix, number, length=4):\n number_str = str(number).rjust(length, \"0\")\n checksum = verhoeff_digit(number_str)\n return prefix + number_str + checksum", "def _make_sync_word1(fft_len, occupied_carriers, pilot_carriers):\n active_carriers = _get_active_carriers(fft_len, occupied_carriers, pilot_carriers)\n numpy.random.seed(_seq_seed)\n bpsk = {0: numpy.sqrt(2), 1: -numpy.sqrt(2)}\n sw1 = [bpsk[numpy.random.randint(2)] if x in active_carriers and x % 2 else 0 for x in range(fft_len)]\n return numpy.fft.fftshift(sw1)", "def prefixes(s):\n\n size = len(s)\n word = s[0]\n for i in range(1, size):\n to_add = s[0:i+1]\n word = word + to_add \n \n return word" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_ofdm_frame_acquisition_sptr __init__(self, p) > digital_ofdm_frame_acquisition_sptr
def __init__(self, *args): this = _digital_swig.new_digital_ofdm_frame_acquisition_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, description, framecount=None):\n super().__init__(description)\n if 'parameters' in description:\n parameters = description['parameters']\n has_s2mm = parameters['C_INCLUDE_S2MM'] == '1'\n has_mm2s = parameters['C_INCLUDE_MM2S'] == '1'\n framecount = int(parameters['C_NUM_FSTORES'])\n s2mm_addr_width = int(parameters['C_M_AXI_S2MM_ADDR_WIDTH'])\n mm2s_addr_width = int(parameters['C_M_AXI_MM2S_ADDR_WIDTH'])\n if ((has_s2mm and s2mm_addr_width > 32) or\n (has_mm2s and mm2s_addr_width > 32)):\n raise UnsupportedConfiguration(\n 'VDMA driver only supports 32-bit addresses')\n\n else:\n has_s2mm = True\n has_mm2s = True\n framecount = 4 if framecount is None else framecount\n\n self.framecount = framecount\n if has_s2mm:\n self.readchannel = AxiVDMA.S2MMChannel(self, self.s2mm_introut)\n if has_mm2s:\n self.writechannel = AxiVDMA.MM2SChannel(self, self.mm2s_introut)", "def __init__(self, *args):\n this = _digital_swig.new_digital_simple_framer_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_fff_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_framer_sink_1_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoMFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\r\n\r\n super(Panel, self).__init__()\r\n\r\n # Define private dictionary attributes.\r\n\r\n # Define private list attributes.\r\n self._lambdab_count = []\r\n\r\n # Define private scalar attributes.\r\n\r\n # Define public dictionary attributes.\r\n\r\n # Define public list attributes.\r\n\r\n # Define public scalar attributes.\r\n self.quality = 0\r\n self.q_override = 0.0\r\n self.function = 0\r\n self.piA = 0.0\r\n self.piF = 0.0\r\n self.piQ = 0.0", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, description : dict):\n super().__init__(description)\n if 'parameters' in description:\n populate_params(self, description['parameters'])\n else:\n warnings.warn(\"Please use an hwh file with the SD-FEC driver\"\n \" - the default configuration is being used\")\n self._config = _lib.XSdFecLookupConfig(0)\n # TODO consider how we should set default LDPC and Turbo code params\n self._instance = _ffi.new(\"XSdFec*\")\n self._config.BaseAddress = self.mmio.array.ctypes.data\n _lib.XSdFecCfgInitialize(self._instance, self._config)", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, spi_rack, module, frequency=100e6):\n #def __init__(self, module, frequency=100e6):\n self.spi_rack = 
spi_rack\n self.module = module\n\n self.rf_frequency = frequency\n self.stepsize = 1e6\n self.ref_frequency = 10e6\n self.use_external = 0\n self.outputPower = None\n\n # These are the 6 registers present in the ADF4351\n self.registers = 6*[0]\n # In REG3: set ABP=1 (3 ns, INT-N) and CHARGE CANCEL=1\n self.registers[3] = (1<<22) | (1<<21) | 3\n # In REG5: set LD PIN MODE to 1 -> digital lock detect\n self.registers[5] = (1<<22) | (3<<19) | 5\n\n self.set_frequency(frequency)", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, channel=105, state=BusState.ACTIVE, bitrate=500000, *args, **kwargs):\n\n #super(CanFoxBus, self).__init__(self, channel='PCAN_USBBUS1', state=BusState.ACTIVE, bitrate=500000, *args, **kwargs)\n self.channel_info = channel\n self.fd = kwargs.get('fd', False)\n pcan_bitrate = CANFOX_bitrate_objs.get(bitrate, CANFOX_BAUD_250K)\n\n\n\n self.m_objCANFOXBasic = CANFOXBasic()\n self.m_PcanHandle = 105 #globals()[channel]\n self._filters = None\n\n if state is BusState.ACTIVE or state is BusState.PASSIVE:\n self.state = state\n else:\n raise ArgumentError(\"BusState must be Active or Passive\")\n\n\n if self.fd:\n f_clock_val = kwargs.get('f_clock', None)\n if f_clock_val is None:\n f_clock = \"{}={}\".format('f_clock_mhz', kwargs.get('f_clock_mhz', None))\n else:\n f_clock = \"{}={}\".format('f_clock', kwargs.get('f_clock', None))\n\n fd_parameters_values = [f_clock] + [\"{}={}\".format(key, kwargs.get(key, None)) for key in pcan_fd_parameter_list if kwargs.get(key, None) is not None]\n\n self.fd_bitrate = ' ,'.join(fd_parameters_values).encode(\"ascii\")\n\n\n result = self.m_objCANFOXBasic.InitializeFD(self.m_PcanHandle, self.fd_bitrate)\n else:\n if HAS_EVENTS:\n self._recv_event = CreateEvent(None, 0, 0, \"R2\")\n self._tran_event = CreateEvent(None, 0, 0, \"T2\")\n result = self.m_objCANFOXBasic.Initialize(self.m_PcanHandle, pcan_bitrate)\n\n if result != CANFOX_ERROR_OK:\n raise PcanError(self._get_formatted_error(result))\n\n if HAS_EVENTS:\n\n if 0:\n self._recv_event = CreateEvent(None, 0, 0, \"R2\")\n result = self.m_objCANFOXBasic.SetValue(\n self.m_PcanHandle, 1, self._recv_event) #\"\"\"PCAN_RECEIVE_EVENT\"\"\"\n if result != CANFOX_ERROR_OK:\n raise PcanError(self._get_formatted_error(result))\n\n super(CanFoxBus, self).__init__(channel=channel, state=state, bitrate=bitrate, *args, **kwargs)", "def __init__(self, fft_length, cp_length, occupied_tones, snr, ks, carrier_map_bin, nc_filter, logging=False):\n\n\tgr.hier_block2.__init__(self, \"ofdm_receiver\",\n\t\t\t\tgr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature\n gr.io_signature2(2, 2, gr.sizeof_gr_complex*occupied_tones, gr.sizeof_char)) # Output signature\n\n bw = (float(occupied_tones) / float(fft_length)) / 2.0\n tb = bw*0.04\n print \"ofdm_receiver:__init__:occupied_tones %s fft_length %d \" % (occupied_tones, fft_length)\n \n chan_coeffs = filter.firdes.low_pass (1.0, # gain\n 1.0, # sampling rate\n bw+tb, # midpoint of trans. band\n tb, # width of trans. 
band\n filter.firdes.WIN_HAMMING) # filter type\n \n self.chan_filt = filter.fft_filter_ccc(1, chan_coeffs)\n\n # linklab, get ofdm parameters\n self._fft_length = fft_length\n self._occupied_tones = occupied_tones\n self._cp_length = cp_length\n self._nc_filter = nc_filter\n self._carrier_map_bin = carrier_map_bin\n \n win = [1 for i in range(fft_length)]\n \n # linklab, initialization function\n self.initialize(ks, self._carrier_map_bin)\n \n\n zeros_on_left = int(math.ceil((fft_length - occupied_tones)/2.0))\n ks0 = fft_length*[0,]\n ks0[zeros_on_left : zeros_on_left + occupied_tones] = ks[0]\n\n ks0 = np_fft.ifftshift(ks0)\n ks0time = np_fft.ifft(ks0)\n # ADD SCALING FACTOR\n ks0time = ks0time.tolist()\n\n SYNC = \"pn\"\n if SYNC == \"ml\":\n nco_sensitivity = -1.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_ml(fft_length,\n cp_length,\n snr,\n ks0time,\n logging)\n elif SYNC == \"pn\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pn(fft_length,\n cp_length,\n logging)\n elif SYNC == \"pnac\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pnac(fft_length,\n cp_length,\n ks0time,\n logging)\n # for testing only; do not user over the air\n # remove filter and filter delay for this\n elif SYNC == \"fixed\":\n self.chan_filt = gr.multiply_const_cc(1.0)\n nsymbols = 18 # enter the number of symbols per packet\n freq_offset = 0.0 # if you use a frequency offset, enter it here\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_fixed(fft_length,\n cp_length,\n nsymbols,\n freq_offset,\n logging)\n\n # Set up blocks\n\n # Create a delay line, linklab\n self.delay = blocks.delay(gr.sizeof_gr_complex, fft_length)\n\n self.nco = analog.frequency_modulator_fc(nco_sensitivity) # generate a signal proportional to frequency error of sync block\n self.sigmix = blocks.multiply_cc()\n self.sampler = gr_papyrus.ofdm_sampler(fft_length, fft_length+cp_length)\n self.fft_demod = gr_fft.fft_vcc(fft_length, True, win, True)\n self.ofdm_frame_acq = gr_papyrus.ofdm_frame_acquisition(occupied_tones,\n fft_length,\n cp_length, ks[0])\n # linklab, check current mode: non-contiguous OFDM or not\n if self._nc_filter:\n print '\\nMulti-band Filter Turned ON!'\n # linklab, non-contiguous filter\n self.ncofdm_filt = ncofdm_filt(self._fft_length, self._occupied_tones, self._carrier_map_bin)\n self.connect(self, self.chan_filt, self.ncofdm_filt)\n self.connect(self.ncofdm_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. offset output to derotate input signal\n self.connect(self.ncofdm_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n else :\n print '\\nMulti-band Filter Turned OFF!'\n self.connect(self, self.chan_filt)\n self.connect(self.chan_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. 
offset output to derotate input signal\n self.connect(self.chan_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n\n self.connect(self.sigmix, (self.sampler,0)) # sample off timing signal detected in sync alg\n self.connect((self.ofdm_sync,1), (self.sampler,1)) # timing signal to sample at\n\n self.connect((self.sampler,0), self.fft_demod) # send derotated sampled signal to FFT\n self.connect(self.fft_demod, (self.ofdm_frame_acq,0)) # find frame start and equalize signal\n self.connect((self.sampler,1), (self.ofdm_frame_acq,1)) # send timing signal to signal frame start\n self.connect((self.ofdm_frame_acq,0), (self,0)) # finished with fine/coarse freq correction,\n self.connect((self.ofdm_frame_acq,1), (self,1)) # frame and symbol timing, and equalization\n\n if logging:\n self.connect(self.chan_filt, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-chan_filt_c.dat\"))\n self.connect(self.fft_demod, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-fft_out_c.dat\"))\n self.connect(self.ofdm_frame_acq,\n gr.file_sink(gr.sizeof_gr_complex*occupied_tones, \"ofdm_receiver-frame_acq_c.dat\"))\n self.connect((self.ofdm_frame_acq,1), gr.file_sink(1, \"ofdm_receiver-found_corr_b.dat\"))\n self.connect(self.sampler, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-sampler_c.dat\"))\n self.connect(self.sigmix, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-sigmix_c.dat\"))\n self.connect(self.nco, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-nco_c.dat\"))", "def __init__(self):\n this = _coin.new_SoSFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, gain=None, samp_rate=None, ppm=None, arfcn=None, capture_id=None, udp_ports=[], max_timeslot=0, store_capture=True, verbose=False, band=None, rec_length=None, test=False, args=\"\"):\n\n gr.top_block.__init__(self, \"Gr-gsm Capture\")\n\n ##################################################\n # Parameters\n ##################################################\n\n self.arfcn = arfcn\n for band in grgsm.arfcn.get_bands():\n if grgsm.arfcn.is_valid_arfcn(self.arfcn, band):\n self.fc = grgsm.arfcn.arfcn2downlink(arfcn, band)\n break\n\n self.gain = gain\n self.samp_rate = samp_rate\n self.ppm = ppm\n self.arfcn = arfcn\n self.band = band\n self.shiftoff = shiftoff = 400e3\n self.rec_length = rec_length\n self.store_capture = store_capture\n self.capture_id = capture_id\n self.udp_ports = udp_ports\n self.verbose = verbose\n\n ##################################################\n # Processing Blocks\n ##################################################\n\n self.rtlsdr_source = osmosdr.source( args=\"numchan=\" + str(1) + \" \" + \"\" )\n self.rtlsdr_source.set_sample_rate(samp_rate)\n self.rtlsdr_source.set_center_freq(self.fc - shiftoff, 0)\n self.rtlsdr_source.set_freq_corr(ppm, 0)\n self.rtlsdr_source.set_dc_offset_mode(2, 0)\n self.rtlsdr_source.set_iq_balance_mode(2, 0)\n self.rtlsdr_source.set_gain_mode(True, 0)\n self.rtlsdr_source.set_gain(gain, 0)\n self.rtlsdr_source.set_if_gain(20, 0)\n self.rtlsdr_source.set_bb_gain(20, 0)\n self.rtlsdr_source.set_antenna(\"\", 0)\n self.rtlsdr_source.set_bandwidth(250e3+abs(shiftoff), 0)\n self.blocks_rotator = blocks.rotator_cc(-2*pi*shiftoff/samp_rate)\n\n #RUn for the specified amount of seconds or indefenitely\n if self.rec_length is not None:\n self.blocks_head_0 = blocks.head(gr.sizeof_gr_complex, int(samp_rate*rec_length))\n\n self.gsm_receiver = grgsm.receiver(4, ([self.arfcn]), ([]))\n self.gsm_input 
= grgsm.gsm_input(\n ppm=0,\n osr=4,\n fc=self.fc,\n samp_rate_in=samp_rate,\n )\n self.gsm_clock_offset_control = grgsm.clock_offset_control(self.fc-shiftoff)\n\n #Control channel demapper for timeslot 0\n #self.gsm_bcch_ccch_demapper_0 = grgsm.universal_ctrl_chans_demapper(0, ([2,6,12,16,22,26,32,36,42,46]), ([1,2,2,2,2,2,2,2,2,2]))\n self.gsm_bcch_ccch_demapper_0 = grgsm.gsm_bcch_ccch_demapper(0)\n #For all other timeslots are assumed to contain sdcch8 logical channels, this demapping may be incorrect\n if max_timeslot >= 1 and max_timeslot <= 8:\n self.gsm_sdcch8_demappers = []\n for i in range(1,max_timeslot + 1):\n #self.gsm_sdcch8_demappers.append(grgsm.universal_ctrl_chans_demapper(i, ([0,4,8,12,16,20,24,28,32,36,40,44]), ([8,8,8,8,8,8,8,8,136,136,136,136])))\n self.gsm_sdcch8_demappers.append(grgsm.gsm_sdcch8_demapper(i))\n #Control channel decoder (extracts the packets), one for each timeslot\n self.gsm_control_channels_decoders = []\n for i in range(0,max_timeslot + 1):\n self.gsm_control_channels_decoders.append(grgsm.control_channels_decoder())\n# self.blocks_socket_pdu_0 = blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", \"4729\", 10000, False)# self.blocks_socket_pdu_0 = blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", \"4729\", 10000, False)\n\n #UDP client that sends all decoded C0T0 packets to the specified port on localhost if requested\n self.client_sockets = []\n self.server_sockets = []\n for udp_port in self.udp_ports:\n #The server is for testing only\n #WARNING remove the server if you want connect to a different one\n if test:\n self.server_sockets.append(blocks.socket_pdu(\"UDP_SERVER\", \"127.0.0.1\", str(udp_port), 10000))\n self.client_sockets.append(blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", str(udp_port), 10000))\n\n #Sinks to store the capture file if requested\n if self.store_capture:\n self.gsm_burst_file_sink = grgsm.burst_file_sink(str(self.capture_id) + \".burstfile\")\n self.blocks_file_sink = blocks.file_sink(gr.sizeof_gr_complex*1, str(self.capture_id) + \".cfile\", False)\n self.blocks_file_sink.set_unbuffered(False)\n\n #Printer for printing messages when verbose flag is True\n if self.verbose:\n self.gsm_message_printer = grgsm.message_printer(pmt.intern(\"\"), False)\n\n \"\"\"\n if self.verbose:\n self.gsm_bursts_printer_0 = grgsm.bursts_printer(pmt.intern(\"\"),\n False, False, False, False)\n \"\"\"\n ##################################################\n # Connections\n ##################################################\n\n if self.rec_length is not None: #if recording length is defined connect head block after the source\n self.connect((self.rtlsdr_source, 0), (self.blocks_head_0, 0))\n self.connect((self.blocks_head_0, 0), (self.blocks_rotator, 0))\n else:\n self.connect((self.rtlsdr_source, 0), (self.blocks_rotator, 0))\n\n #Connect the file sinks\n if self.store_capture:\n self.connect((self.blocks_rotator, 0), (self.blocks_file_sink, 0))\n self.msg_connect(self.gsm_receiver, \"C0\", self.gsm_burst_file_sink, \"in\")\n\n #Connect the GSM receiver\n self.connect((self.gsm_input, 0), (self.gsm_receiver, 0))\n self.connect((self.blocks_rotator, 0), (self.gsm_input, 0))\n self.msg_connect(self.gsm_clock_offset_control, \"ppm\", self.gsm_input, \"ppm_in\")\n self.msg_connect(self.gsm_receiver, \"measurements\", self.gsm_clock_offset_control, \"measurements\")\n\n #Connect the demapper and decoder for timeslot 0\n self.msg_connect((self.gsm_receiver, 'C0'), (self.gsm_bcch_ccch_demapper_0, 'bursts'))\n 
self.msg_connect((self.gsm_bcch_ccch_demapper_0, 'bursts'), (self.gsm_control_channels_decoders[0], 'bursts'))\n\n #Connect the demapper and decoders for the other timeslots\n for i in range(1,max_timeslot +1):\n self.msg_connect((self.gsm_receiver, 'C0'), (self.gsm_sdcch8_demappers[i-1], 'bursts'))\n self.msg_connect((self.gsm_sdcch8_demappers[i-1], 'bursts'), (self.gsm_control_channels_decoders[i], 'bursts'))\n\n\n #Connect the UDP clients if requested\n for client_socket in self.client_sockets:\n for i in range(0,max_timeslot + 1):\n self.msg_connect((self.gsm_control_channels_decoders[i], 'msgs'), (client_socket, 'pdus'))\n\n #Connect the printer is self.verbose is True\n if self.verbose:\n for i in range(0,max_timeslot + 1):\n self.msg_connect((self.gsm_control_channels_decoders[i], 'msgs'), (self.gsm_message_printer, 'msgs'))\n\n \"\"\"\n if self.verbose:\n self.msg_connect(self.gsm_receiver, \"C0\", self.gsm_bursts_printer_0, \"bursts\")\n \"\"\"", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ofdm_frame_acquisition(unsigned int occupied_carriers, unsigned int fft_length, unsigned int cplen, gr_complex_vector known_symbol, unsigned int max_fft_shift_len = 4) > digital_ofdm_frame_acquisition_sptr Takes a vector of complex constellation points from an FFT and performs correlation and equalization. This block takes the output of an FFT of a received OFDM symbol and finds the start of a frame based on two known symbols. It also looks at the surrounding bins in the FFT output for the correlation in case there is a large frequency shift in the data. This block assumes that the fine frequency shift has already been corrected and that the samples fall in the middle of one FFT bin. It then uses one of those known symbols to estimate the channel response over all subcarriers and performs a simple 1-tap equalization on all subcarriers, correcting the phase and amplitude distortion caused by the channel.
def ofdm_frame_acquisition(*args, **kwargs): return _digital_swig.ofdm_frame_acquisition(*args, **kwargs)
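Because the description above compresses two distinct steps — a correlation search across nearby FFT bins and a per-subcarrier (1-tap) equalization — into a few sentences, a small numerical sketch may help. The following NumPy snippet is illustrative only: the helper names, the smooth toy channel, and the integer-bin shift model are assumptions for demonstration, not the gr-digital implementation.

import numpy as np

def coarse_bin_shift(rx_known, known, max_shift=4):
    # Search surrounding FFT bins for the correlation peak, mirroring the
    # coarse frequency-shift search described above.
    shifts = np.arange(-max_shift, max_shift + 1)
    corr = [abs(np.vdot(known, np.roll(rx_known, -s))) for s in shifts]
    return int(shifts[np.argmax(corr)])

def one_tap_equalize(rx_known, rx_data, known):
    h_est = rx_known / known  # per-subcarrier channel estimate from the known symbol
    return rx_data / h_est    # simple 1-tap correction of phase and amplitude

rng = np.random.default_rng(0)
n = 64                                                 # occupied subcarriers
known = np.exp(1j * rng.uniform(0, 2 * np.pi, n))      # unit-modulus known symbol
channel = 0.8 * np.exp(1j * np.linspace(0.0, 1.5, n))  # smooth toy channel response
data = rng.choice(np.array([1 + 1j, 1 - 1j, -1 + 1j, -1 - 1j]), n)

shift = 2                                   # coarse frequency offset, in whole bins
rx_known = np.roll(channel * known, shift)  # received known symbol, shifted
rx_data = np.roll(channel * data, shift)    # received data symbol, same shift

s = coarse_bin_shift(rx_known, known)
eq = one_tap_equalize(np.roll(rx_known, -s), np.roll(rx_data, -s), known)
print(s, np.allclose(eq, data))             # 2 True

In the real block the known symbols come from the OFDM preamble, and the max_fft_shift_len parameter plays the role of max_shift here; the per-subcarrier arithmetic is the same idea.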
[ "def __init__(self, fft_length, cp_length, occupied_tones, snr, ks, carrier_map_bin, nc_filter, logging=False):\n\n\tgr.hier_block2.__init__(self, \"ofdm_receiver\",\n\t\t\t\tgr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature\n gr.io_signature2(2, 2, gr.sizeof_gr_complex*occupied_tones, gr.sizeof_char)) # Output signature\n\n bw = (float(occupied_tones) / float(fft_length)) / 2.0\n tb = bw*0.04\n print \"ofdm_receiver:__init__:occupied_tones %s fft_length %d \" % (occupied_tones, fft_length)\n \n chan_coeffs = filter.firdes.low_pass (1.0, # gain\n 1.0, # sampling rate\n bw+tb, # midpoint of trans. band\n tb, # width of trans. band\n filter.firdes.WIN_HAMMING) # filter type\n \n self.chan_filt = filter.fft_filter_ccc(1, chan_coeffs)\n\n # linklab, get ofdm parameters\n self._fft_length = fft_length\n self._occupied_tones = occupied_tones\n self._cp_length = cp_length\n self._nc_filter = nc_filter\n self._carrier_map_bin = carrier_map_bin\n \n win = [1 for i in range(fft_length)]\n \n # linklab, initialization function\n self.initialize(ks, self._carrier_map_bin)\n \n\n zeros_on_left = int(math.ceil((fft_length - occupied_tones)/2.0))\n ks0 = fft_length*[0,]\n ks0[zeros_on_left : zeros_on_left + occupied_tones] = ks[0]\n\n ks0 = np_fft.ifftshift(ks0)\n ks0time = np_fft.ifft(ks0)\n # ADD SCALING FACTOR\n ks0time = ks0time.tolist()\n\n SYNC = \"pn\"\n if SYNC == \"ml\":\n nco_sensitivity = -1.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_ml(fft_length,\n cp_length,\n snr,\n ks0time,\n logging)\n elif SYNC == \"pn\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pn(fft_length,\n cp_length,\n logging)\n elif SYNC == \"pnac\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pnac(fft_length,\n cp_length,\n ks0time,\n logging)\n # for testing only; do not user over the air\n # remove filter and filter delay for this\n elif SYNC == \"fixed\":\n self.chan_filt = gr.multiply_const_cc(1.0)\n nsymbols = 18 # enter the number of symbols per packet\n freq_offset = 0.0 # if you use a frequency offset, enter it here\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_fixed(fft_length,\n cp_length,\n nsymbols,\n freq_offset,\n logging)\n\n # Set up blocks\n\n # Create a delay line, linklab\n self.delay = blocks.delay(gr.sizeof_gr_complex, fft_length)\n\n self.nco = analog.frequency_modulator_fc(nco_sensitivity) # generate a signal proportional to frequency error of sync block\n self.sigmix = blocks.multiply_cc()\n self.sampler = gr_papyrus.ofdm_sampler(fft_length, fft_length+cp_length)\n self.fft_demod = gr_fft.fft_vcc(fft_length, True, win, True)\n self.ofdm_frame_acq = gr_papyrus.ofdm_frame_acquisition(occupied_tones,\n fft_length,\n cp_length, ks[0])\n # linklab, check current mode: non-contiguous OFDM or not\n if self._nc_filter:\n print '\\nMulti-band Filter Turned ON!'\n # linklab, non-contiguous filter\n self.ncofdm_filt = ncofdm_filt(self._fft_length, self._occupied_tones, self._carrier_map_bin)\n self.connect(self, self.chan_filt, self.ncofdm_filt)\n self.connect(self.ncofdm_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. 
offset output to derotate input signal\n self.connect(self.ncofdm_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n else :\n print '\\nMulti-band Filter Turned OFF!'\n self.connect(self, self.chan_filt)\n self.connect(self.chan_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. offset output to derotate input signal\n self.connect(self.chan_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n\n self.connect(self.sigmix, (self.sampler,0)) # sample off timing signal detected in sync alg\n self.connect((self.ofdm_sync,1), (self.sampler,1)) # timing signal to sample at\n\n self.connect((self.sampler,0), self.fft_demod) # send derotated sampled signal to FFT\n self.connect(self.fft_demod, (self.ofdm_frame_acq,0)) # find frame start and equalize signal\n self.connect((self.sampler,1), (self.ofdm_frame_acq,1)) # send timing signal to signal frame start\n self.connect((self.ofdm_frame_acq,0), (self,0)) # finished with fine/coarse freq correction,\n self.connect((self.ofdm_frame_acq,1), (self,1)) # frame and symbol timing, and equalization\n\n if logging:\n self.connect(self.chan_filt, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-chan_filt_c.dat\"))\n self.connect(self.fft_demod, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-fft_out_c.dat\"))\n self.connect(self.ofdm_frame_acq,\n gr.file_sink(gr.sizeof_gr_complex*occupied_tones, \"ofdm_receiver-frame_acq_c.dat\"))\n self.connect((self.ofdm_frame_acq,1), gr.file_sink(1, \"ofdm_receiver-found_corr_b.dat\"))\n self.connect(self.sampler, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-sampler_c.dat\"))\n self.connect(self.sigmix, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-sigmix_c.dat\"))\n self.connect(self.nco, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-nco_c.dat\"))", "def _get_active_carriers(fft_len, occupied_carriers, pilot_carriers):\n active_carriers = list()\n for carrier in list(occupied_carriers[0]) + list(pilot_carriers[0]):\n if carrier < 0:\n carrier += fft_len\n active_carriers.append(carrier)\n return active_carriers", "def measure_drive_cancellation(\n dev, driven_qubit, ramsey_qubits, sweep_points,\n phases=None, n_pulses=1, pulse='X180',\n n_cal_points_per_state=2, cal_states='auto', prep_params=None,\n exp_metadata=None, label=None, upload=True, analyze=True):\n if phases is None:\n phases = np.linspace(0, 360, 3, endpoint=False)\n\n if isinstance(driven_qubit, str):\n driven_qubit = dev.get_qb(driven_qubit)\n ramsey_qubits = [dev.get_qb(qb) if isinstance(qb, str) else qb\n for qb in ramsey_qubits]\n ramsey_qubit_names = [qb.name for qb in ramsey_qubits]\n\n MC = dev.instr_mc.get_instr()\n if label is None:\n label = f'drive_{driven_qubit.name}_cancel_'\\\n f'{list(sweep_points[0].keys())}'\n\n if prep_params is None:\n prep_params = dev.get_prep_params(ramsey_qubits)\n\n sweep_points = deepcopy(sweep_points)\n sweep_points.add_sweep_dimension()\n sweep_points.add_sweep_parameter('phase', phases, 'deg', 'Ramsey phase')\n\n if exp_metadata is None:\n exp_metadata = {}\n\n for qb in [driven_qubit] + ramsey_qubits:\n qb.prepare(drive='timedomain')\n\n cal_states = CalibrationPoints.guess_cal_states(cal_states,\n for_ef=False)\n cp = CalibrationPoints.multi_qubit(\n [qb.name for qb in ramsey_qubits], cal_states,\n n_per_state=n_cal_points_per_state)\n operation_dict = dev.get_operation_dict()\n\n drive_op_code = pulse + ' ' + driven_qubit.name\n # We get sweep_vals for only one 
dimension since drive_cancellation_seq\n # turns 2D sweep points into 1D-SegmentHardSweep.\n # FIXME: in the future, this should rather be implemented via\n # sequence.compress_2D_sweep\n seq, sweep_vals = mqs.drive_cancellation_seq(\n drive_op_code, ramsey_qubit_names, operation_dict, sweep_points,\n n_pulses=n_pulses, prep_params=prep_params, cal_points=cp,\n upload=False)\n\n [seq.repeat_ro(f\"RO {qbn}\", operation_dict)\n for qbn in ramsey_qubit_names]\n\n sweep_func = awg_swf.SegmentHardSweep(\n sequence=seq, upload=upload,\n parameter_name='segment_index')\n MC.set_sweep_function(sweep_func)\n MC.set_sweep_points(sweep_vals)\n\n det_func = get_multiplexed_readout_detector_functions(\n 'int_avg_det', ramsey_qubits,\n nr_averages=max([qb.acq_averages() for qb in ramsey_qubits]))\n MC.set_detector_function(det_func)\n\n # !!! Watch out with the call below. See docstring for this function\n # to see the assumptions it makes !!!\n meas_obj_sweep_points_map = sweep_points.get_meas_obj_sweep_points_map(\n [qb.name for qb in ramsey_qubits])\n exp_metadata.update({\n 'ramsey_qubit_names': ramsey_qubit_names,\n 'preparation_params': prep_params,\n 'cal_points': repr(cp),\n 'sweep_points': sweep_points,\n 'meas_obj_sweep_points_map': meas_obj_sweep_points_map,\n 'meas_obj_value_names_map':\n get_meas_obj_value_names_map(ramsey_qubits, det_func),\n 'rotate': len(cp.states) != 0,\n 'data_to_fit': {qbn: 'pe' for qbn in ramsey_qubit_names}\n })\n\n MC.run(label, exp_metadata=exp_metadata)\n\n if analyze:\n return tda.DriveCrosstalkCancellationAnalysis(\n qb_names=ramsey_qubit_names, options_dict={'TwoD': True})", "def reframe(mxds, vis, mode='channel', nchan=None, start=0, width=1, interpolation='linear', phasecenter=None, restfreq=None, outframe=None, veltype='radio'):\n import xarray\n import datetime\n import numpy as np\n from astropy import units as u\n from astropy.time import Time\n from astropy.coordinates import EarthLocation, SpectralCoord, SkyCoord\n \n xds = mxds.attrs[vis]\n\n fields = xds.FIELD_ID.values.clip(0).flatten()\n sources = mxds.FIELD.sel(field_id=fields).source_id.values #[[xds.FIELD_ID.values.clip(0)]]\n unique_sources = np.unique(sources)\n \n #directions = mxds.SOURCE.DIRECTION.where(mxds.SOURCE.source_id\n targets = SkyCoord(directions[...,0], directions[...,1], unit='rad')\n \n #location = EarthLocation.of_site(input_xds['OBS_TELESCOPE_NAME']).get_itrs(obstime=Time(reference_time))\n #location = EarthLocation(input_xds['ANT_POSITION'].mean()).get_itrs(obstime=Time(reference_time))\n location = EarthLocation.of_site('ALMA')\n alma = location.get_itrs(obstime=Time(xds.time.values))\n \n time = _reference_time(global_xds)\n place = _reference_location(global_xds, reference_time)\n source = _target_location(global_xds, ddi)\n\n # epoch lookup or assume J2000\n #target_frame = 'FK5'\n\n aspc = SpectralCoord(input_array,\n unit=u.Hz,\n observer=place,\n target=source)\n # doppler_reference=img_xds.attrs['spectral__reference'],\n # doppler_convention=img_xds.attrs['velocity__type'])\n\n\n output_xds = xarray.apply_ufunc(change_frame, place, source, vis_xds.DATA.chunk({'chan': -1}), input_core_dims=[['chan']],\n dask='parallelized', output_dtypes=[vis_xds.DATA.dtype])\n\n # update some properties of global_xds after conversion?\n\n # ouptut_xds.compute()\n return output_xds", "def nextframe(self):\n # extract the next frame, slice it by atoms if necessary.\n frame = self.cord.nextframe()\n self.ca_origframe = frame # not .copy()'ing it, when I wrote it we didn't need to.\n if 
self.atomlist is None:\n self.curframe = frame.copy()\n else:\n self.curframe = frame[self.atomlist].copy()\n\n if self._align_to_next_frame:\n # We wanted to align to the first frame of the DCD. See\n # above for an explanation.\n if self.atomlist is None:\n self.aligntoframe = frame.copy()\n else:\n self.aligntoframe = frame[self.atomlist].copy()\n self._align_to_next_frame = False\n\n X = self.aligntoframe.copy()\n Y = self.curframe.copy()\n\n natoms,ndimensions = numpy.shape(X)\n \n center1 = sum(X,0) / float(natoms)\n center2 = sum(Y,0) / float(natoms)\n X -= center1\n Y -= center2\n\n E0 = sum(sum(X * X)) + sum(sum(Y * Y))\n\n correlation_matrix = numpy.dot(numpy.transpose(Y), X)\n\n V, S, W_trans = numpy.linalg.svd(correlation_matrix)\n\n is_reflection = (numpy.linalg.det(V) * numpy.linalg.det(W_trans)) < 0.0\n if is_reflection:\n # reflect along smallest principal axis\n S[-1] = -S[-1]\n V[-1,:] = V[-1,:] * (-1.0)\n\n optimal_rotation = numpy.dot(V, W_trans)\n self._frame = numpy.dot(frame, optimal_rotation) - center2 + center1\n \n self.nextframe_end_hook(self)\n return self._frame\n \n \n # UPDATE (JRD, Sept. 2007): This section of code contained between the #**...*# markers\n # is not the correct way to align frames for a single protein MD trajectory. The proper \n # method for aligning frames is due to Kabsch: Kabsch, Wolfgang, (1976) \"A solution of \n # the best rotation to relate two sets of vectors\", Acta Crystallographica 32:922\n #*******************************************************************************************#\n # Create a wrapper function to take the msd between the two frames.\n # This is what is passed to the simplex optimizer.\n #rmsd = lambda vect: mp3.functions.rmsd(self.aligntoframe, \\\n # mp3.functions.cordtransform(self.curframe, move=vect[0:3], rotate=vect[3:6] ))\n #if self.verbose:\n # dispval = 1\n #else:\n # dispval = 0\n #if self.callback:\n # callback = lambda xk: self.callback(xk, self)\n # # your callback can increment this to figure out what step it is on\n # self.iterstep = 0 \n #else:\n # callback = None\n #if self.minimizer == \"scipy:powell\":\n # result = scipy.optimize.fmin_powell(rmsd,self.guess,disp=dispval,full_output=1,\n # ftol=1e-6,callback=callback)\n # self.iterations = result[3]\n # self.funcalls = result[4]\n #elif self.minimizer == \"scipy:simplex\":\n # result = scipy.optimize.fmin(rmsd,self.guess,disp=dispval,full_output=1,\n # callback=callback)\n # self.iterations = result[2]\n # self.funcalls = result[3]\n #else:\n # sys.stderr.write(\"ERROR: minimizer must be either scipy:powell or scipy:simplex\")\n # sys.exit()\n #self.guess = result[0]\n #self._frame = mp3.functions.cordtransform(frame, move=self.guess[0:3],\n # rotate=self.guess[3:6])\n #if self._saveaverage:\n # self._sum_of_frames += self._frame\n # self._sum_of_frames_count += 1\n #\n #self.nextframe_end_hook(self)\n #return self._frame\n #*******************************************************************************************# ", "def test_acq_fid_catalog_two_cand_fid(n_fid):\n box_size_thresh = 90\n dither = 20\n sim_offset = 29829\n offset = box_size_thresh + FID.spoiler_margin + dither\n\n dark = DARK40.copy()\n stars = StarsTable.empty()\n stars.add_fake_constellation(mag=[9.5, 9.6, 9.7, 10], n_stars=4)\n\n # Add stars near fid light positions.\n stars.add_fake_stars_from_fid(\n fid_id=[1, 2],\n id=[1, 2], # assigned catalog ID\n mag=[8.2, 11.5],\n offset_y=[offset, 10],\n detector=\"HRC-I\",\n sim_offset=sim_offset,\n )\n\n kwargs = mod_std_info(\n 
stars=stars,\n dark=dark,\n dither=dither,\n raise_exc=True,\n n_guide=0,\n n_fid=n_fid,\n n_acq=5,\n detector=\"HRC-I\",\n sim_offset=sim_offset,\n )\n aca = get_aca_catalog(**kwargs)\n\n assert aca.acqs[\"id\"].tolist() == [1, 100, 101, 102, 103]\n assert aca.acqs[\"halfw\"].tolist() == [80, 160, 160, 160, 120]\n\n assert aca.n_fid == n_fid\n assert aca.fids[\"id\"].tolist() == [1, 2]\n assert aca.acqs.fid_set == (1, 2)", "def first_correlate_and_fill(self, data, header, trb=1, freq_select=None):\n\n slots = set(header[:, 2])\n\n print \"Data has\", len(slots), \"slots: \", slots\n print data.shape\n data = data[:, ::2] + 1j * data[:, 1::2]\n\n data_corr = data.real**2 + data.imag**2\n data_corr = data_corr.reshape(-1, self.nperpacket, 8).mean(1)\n\n data_real = data.real.reshape(-1, self.nperpacket, 8).transpose((0, 2, 1))\n data_imag = data.imag.reshape(-1, self.nperpacket, 8).transpose((0, 2, 1))\n\n arr = np.zeros([data_corr.shape[0] / self.nfr / 2 / len(slots) + 256\n , 2*self.npol, self.nfreq], np.float64)\n\n tt = np.zeros([data_corr.shape[0] / self.nfr / 2 / len(slots) + 256\n , 2*self.npol, self.nfreq], np.float64)\n\n tlen = []\n for qq in xrange(self.nfr):\n for ii in slots:\n\n fin = ii + 16 * qq + 128 * np.arange(8)\n \n indpol0 = np.where((header[:, 0]==0) & \\\n (header[:, 1]==qq) & (header[:, 2]==ii))[0]\n\n indpol1 = np.where((header[:, 0]==1) & \\\n (header[:, 1]==qq) & (header[:, 2]==ii))[0]\n \n inl = min(len(indpol0), len(indpol1))\n\n maxlen = max(len(indpol0), len(indpol1))\n tlen.append(maxlen)\n\n if inl < 1:\n continue\n\n indpol0 = indpol0[:inl]\n indpol1 = indpol1[:inl]\n \n XYreal, XYimag, tt_xy = self.correlate_xy(\n data[indpol0], data[indpol1], header, indpol0, indpol1)\n\n XYreal = np.concatenate(XYreal, \n axis=0).reshape(-1, self.nperpacket, 8)\n XYimag = np.concatenate(XYimag, \n axis=0).reshape(-1, self.nperpacket, 8)\n\n arr[:len(indpol0), 0, fin] = data_corr[indpol0]\n arr[:len(indpol1), 3, fin] = data_corr[indpol1]\n \n arr[:len(XYreal), 1, fin] = XYreal.mean(1)\n arr[:len(XYimag), 2, fin] = XYimag.mean(1)\n\n tt[:len(tt_xy), 1, fin] = np.array(tt_xy).repeat(8).reshape(-1, 8)\n tt[:len(tt_xy), 2, fin] = tt[:len(tt_xy), 1, fin].copy()\n \n \n if (len(indpol0) >= 1) and (len(indpol0) < arr.shape[0]): \n tt[:len(indpol0), 0, fin] = self.get_times(\\\n header[indpol0])[0].repeat(8).reshape(-1, 8)\n\n if (len(indpol1) >= 1) and (len(indpol1) < arr.shape[0]):\n tt[:len(indpol1), 3, fin] = self.get_times(\\\n header[indpol1])[0].repeat(8).reshape(-1, 8)\n \n \n maxt = np.array(tlen).max()\n arr = arr[:maxt]\n tt = tt[:maxt]\n\n return arr, tt", "def __init__(self, file1, file2,\n df=0.1*u.kHz,\n dt=0.1*u.s,\n window=None,\n shift=0):\n\n self.file1=file1\n self.file2=file2\n self.df=df\n self.dt=dt\n \n self.s1=sdrdata(self.file1)\n self.s2=sdrdata(self.file2)\n\n if self.s1.rate != self.s2.rate:\n raise ValueError('Sampling rate for Rx2 (%f Hz) does not match rate for Rx1 (%f Hz)' % (self.s1.rate.value,self.s2.rate.value))\n if self.s1.center_freq != self.s2.center_freq:\n raise ValueError('Frequency for Rx2 (%f MHz) does not match rate for Rx1 (%f MHz)' % (self.s1.center_freq.to(u.MHz).value,\n self.s2.center_freq.to(u.MHz).value))\n \n\n # read in the data, convert to complex\n self.d1=self.s1.data\n self.d2=self.s2.data\n\n self.dtype=self.d1.dtype\n\n self.exptime=self.s1.exptime\n self.center_freq=self.s1.center_freq\n\n # this is the complex sampling rate\n self.rate=self.s1.rate\n \n # needs this long a fft\n 
self.nfft=int(round((self.rate/self.df).decompose()))\n # there are this many ffts\n self.nsamples=self.d1.shape[0]/self.nfft\n # so each chunk is this long\n self.chunk=(self.nfft/self.rate).decompose()\n # and we need to add this many together\n self.nint=int(round(self.dt/self.chunk))\n # and we have this many correlations\n self.ncorr=int((self.exptime/self.dt))\n\n # frequency of FFTs\n self.freq=np.fft.fftshift(np.fft.fftfreq(self.nfft,(1/self.rate).decompose().value))*u.Hz\n # and the actual frequency on the sky\n self.rf_freq=self.center_freq+self.freq\n\n # Hz per channel\n self.channel=np.diff(self.freq).mean()\n\n if window is None:\n self.window=np.kaiser(self.nfft,5).astype('complex')\n else:\n self.window=window(self.nfft).astype('complex')\n\n # apply a phase shift\n # to the second input\n phi=np.exp(2j*np.pi*(self.rf_freq*shift).decompose().value)\n \n # dynamic spectra\n self.DS1=np.zeros((self.nsamples,self.nfft),dtype=self.dtype)\n self.DS2=np.zeros((self.nsamples,self.nfft),dtype=self.dtype)\n\n # output correlation\n self.outcorr=np.zeros((self.ncorr,self.nfft),dtype=self.dtype)\n\n j=0\n for i in range(self.ncorr):\n corr=np.zeros((self.nfft),dtype=self.dtype)\n for k in range(self.nint):\n D1=self.d1[j*self.nfft:(j+1)*self.nfft]*self.window\n D2=self.d2[j*self.nfft:(j+1)*self.nfft]*self.window\n \n F1=np.fft.fftshift(np.fft.fft(D1))\n F2=np.fft.fftshift(np.fft.fft(D2))\n F2*=phi\n self.DS1[j]=F1\n self.DS2[j]=F2\n \n corr+=F1*np.conj(F2)\n j+=1\n\n self.outcorr[i]=corr\n \n self.phase=np.angle(self.outcorr)*u.rad\n self.amp=np.absolute(self.outcorr)", "def test_acq_fid_catalog_zero_cand_fid():\n dither = 20\n stars = StarsTable.empty()\n stars.add_fake_constellation(n_stars=5)\n dark = DARK40.copy()\n\n kwargs = mod_std_info(\n stars=stars,\n dark=dark,\n dither=dither,\n raise_exc=True,\n n_guide=0,\n n_fid=3,\n n_acq=5,\n detector=\"HRC-S\",\n sim_offset=300000,\n )\n aca = get_aca_catalog(**kwargs)\n\n assert len(aca.fids) == 0\n assert len(aca.fids.cand_fids) == 0\n assert aca.acqs.fid_set == ()\n assert len(aca.acqs) == 5", "def test_cf_contig(self):\n for ar in [self.ad, self.af, self.az, self.ac]:\n r_tol, a_tol = _get_rtol_atol(ar)\n d_ccont = ar.copy()\n d_fcont = np.asfortranarray(d_ccont)\n for a in range(ar.ndim):\n f1 = mkl_fft.fft(d_ccont, axis=a)\n f2 = mkl_fft.fft(d_fcont, axis=a)\n assert_allclose(f1, f2, rtol=r_tol, atol=a_tol)", "def get_mfcc_feature(data):\n\n try:\n ft1 = librosa.feature.mfcc(data, sr=SAMPLE_RATE, n_mfcc=NUM_MFCC)\n ft2 = librosa.feature.zero_crossing_rate(data, hop_length=FRAME)[0]\n ft3 = librosa.feature.spectral_rolloff(data, sr=SAMPLE_RATE, hop_length=FRAME)[0]\n ft4 = librosa.feature.spectral_centroid(data, sr=SAMPLE_RATE, hop_length=FRAME)[0]\n # ft5 = librosa.feature.spectral_contrast(data, sr=SAMPLE_RATE, n_bands=6, fmin=200.0)[0]\n # ft6 = librosa.feature.spectral_bandwidth(data, sr=SAMPLE_RATE, hop_length=FRAME)[0]\n ft1_trunc = np.hstack((np.mean(ft1, axis=1),\n np.std(ft1, axis=1),\n skew(ft1, axis=1),\n np.max(ft1, axis=1),\n np.median(ft1, axis=1),\n np.min(ft1, axis=1)))\n ft2_trunc = np.hstack((np.mean(ft2), np.std(ft2), skew(ft2), np.max(ft2), np.median(ft2), np.min(ft2)))\n ft3_trunc = np.hstack((np.mean(ft3), np.std(ft3), skew(ft3), np.max(ft3), np.median(ft3), np.min(ft3)))\n ft4_trunc = np.hstack((np.mean(ft4), np.std(ft4), skew(ft4), np.max(ft4), np.median(ft4), np.min(ft4)))\n # ft5_trunc = np.hstack((np.mean(ft5), np.std(ft5), skew(ft5), np.max(ft5), np.median(ft5), np.min(ft5)))\n # ft6_trunc = 
np.hstack((np.mean(ft6), np.std(ft6), skew(ft6), np.max(ft6), np.median(ft6), np.max(ft6)))\n return pd.Series(np.hstack((ft1_trunc, ft2_trunc, ft3_trunc, ft4_trunc)))\n # return pd.Series(np.hstack((ft1_trunc, ft2_trunc, ft3_trunc, ft4_trunc, ft5_trunc, ft6_trunc)))\n\n except Exception as error:\n print('bad file', error)\n # return pd.Series([0] * 210)\n return pd.Series([0] * 198)", "def barycorr(obs_start, burst_times, f_ref, dms, FRB='R1', telescope='Eff'):\n FRB = str(FRB)\n if FRB == 'R3':\n FRB_coord = coord.SkyCoord(\"01:58:00.7502\", \"+65:43:00.3152\", unit=(u.hourangle, u.deg),\n frame='icrs') # R3 obs pos\n if FRB == 'R1':\n FRB_coord = coord.SkyCoord(\"05:31:58.698\", \"+33:08:52.586\", unit=(u.hourangle, u.deg),\n frame='icrs')\n\n telescope = str(telescope)\n if telescope == 'Eff':\n telescope_coord = coord.EarthLocation.from_geodetic(\n lon=(06. + 53./60. + 00.99425/3600.)*u.deg, # Effelsberg geodetic coords in deg\n lat=(50. + 31./60. + 29.39459/3600.)*u.deg,\n height=369.082*u.m)\n if telescope == 'CHIME':\n telescope_coord = coord.EarthLocation.from_geodetic(\n lon=(-119. + 37./60. + 26./3600.)*u.deg, # CHIME geodetic coords in deg\n lat=(49. + 19./60.+16./3600.)*u.deg,\n height=545.0*u.m)\n if telescope == 'DSS43':\n telescope_coord = coord.EarthLocation.from_geodetic(\n lon=(148. + 58./60. + 52.55394/3600.)*u.deg, # DSS-43 geodetic coords in deg\n lat=(35. + 24./60. + 8.74388/3600.)*u.deg,\n height=689.608*u.m)\n if telescope == 'Arecibo':\n telescope_coord = coord.EarthLocation.from_geodetic(\n lon=-(66. + 45./60. + 11.1/3600.)*u.deg, # arecibo geodetic coords in deg\n lat=(18. + 20./60. + 36.6/3600.)*u.deg,\n height=497.*u.m)\n\n # obs_start from the filterbank header. Top of the top frequency channel (readfile reports\n # mid of channel) #in MJD\n burst_time_MJD = obs_start + burst_times/(24.*3600.)\n\n # TOA_correctDM = (TOA-dm_shift)\n TOA_bary = get_bary(burst_time_MJD, source=FRB_coord, location=telescope_coord)\n\n dm_const = (const.e.gauss**2/(2*pi*const.m_e*const.c)).to(u.cm**3/u.pc*u.MHz**2*u.s)\n\n dm_shift = (dm_const.value*(1./(f_ref)**2)*dms)/(24.*3600.)\n return TOA_bary, TOA_bary - dm_shift", "def init_bfield_diag13():\n global bt, iub\n fbname = \"bk1.\" + s1.cdrun\n in1.fbname[:] = fbname\n in1.modesxb = int(min(in1.modesxb,nxh+1))\n# bt = store selected fourier modes for magnetic field\n bt = numpy.empty((2,in1.modesxb),complex_type,'F')\n# open file: updates nbrec and possibly iub\n if (in1.nbrec==0):\n mdiag1.dafopenvc1(bt,iub,in1.nbrec,fbname)", "def FP_scan(ao_pts_half = 250,v_final = 6.0):\n ao_trig_src = '/Dev2/PFI0'\n ai_trig_src = '/Dev2/PFI1' \n ao_scan_rate = 5000\n\n dig_trig_src = '/Dev2/port0/line0'\n piezo_src = '/Dev2/ao0'\n pd_src = '/Dev2/ai0'\n\n a = np.linspace(0,v_final,ao_pts_half) \n b = np.linspace(v_final,0,ao_pts_half)\n v_ao_scan = np.concatenate((a,b),axis=None)\n ao_pts = ao_pts_half*2\n\n # time\n t0 = np.linspace(0,(ao_pts-1)/ao_scan_rate,ao_pts)\n\n dig_trig = DO(dig_trig_src)\n dig_trig.write(False)\n\n # set up AO AI\n FPtask = AO(piezo_src)\n cavityPDtask = AI(pd_src)\n\n FPtask.config_write(v_ao_scan,ao_scan_rate,ao_trig_src)\n cavityPDtask.config_read_rising(ao_pts,ao_scan_rate,ai_trig_src)\n\n # start FP scan\n dig_trig.write(True)\n FPtask.wait_until_done()\n aiV = cavityPDtask.read(number_of_samples_per_channel=ao_pts)\n\n FPtask.close()\n cavityPDtask.close() \n dig_trig.write(False)\n dig_trig.close()\n return t0,v_ao_scan,aiV", "def refine_candid(candid, indexprefix='new', ddm=50, npix_max=8192, 
npix_max_orig=None, mode='deployment', devicenum=None, cl=None):\n\n from rfpipe import reproduce\n from realfast import elastic\n\n doc = elastic.get_doc(indexprefix+'cands', Id=candid)\n if 'sdmname' not in doc['_source']:\n logger.warn(\"No SDM found for candId {0}\".format(candid))\n return\n sdmname = doc['_source']['sdmname']\n prefsname = doc['_source']['prefsname']\n prefsdoc = elastic.get_doc(indexprefix+'preferences', Id=prefsname)\n if npix_max_orig is None:\n npix_max_orig = prefsdoc['_source']['npix_max']\n\n workdir = '/lustre/evla/test/realfast/archive/refined'\n sdmloc0 = '/home/mctest/evla/mcaf/workspace/'\n sdmloc0b = '/home/mctest/evla/mcaf/workspace/realfast-archived/'\n sdmloc1 = '/lustre/evla/test/realfast/archive/sdm_archive'\n if os.path.exists(os.path.join(sdmloc0, sdmname)):\n sdmname_full = os.path.join(sdmloc0, sdmname)\n elif os.path.exists(os.path.join(sdmloc0b, sdmname)):\n sdmname_full = os.path.join(sdmloc0b, sdmname)\n else:\n sdmname_full = os.path.join(sdmloc1, sdmname)\n# sdmname_full = os.path.join(sdmloc0, sdmname) if os.path.exists(os.path.join(sdmloc0, sdmname)) else os.path.join(sdmloc1, sdmname)\n assert os.path.exists(sdmname_full)\n dm = doc['_source']['canddm']\n scanId = doc['_source']['scanId']\n refined_png = 'cands_{0}.1.1_refined.png'.format(sdmname)\n refined_loc = os.path.join(workdir, refined_png)\n refined_url = os.path.join(_candplot_url_prefix, 'refined', refined_png)\n\n def move_refined_plots(cc):\n if os.path.exists(refined_loc):\n logger.info(\"Refined candidate plot for candId {0} and sdm {1} found. Copying...\".format(candid, sdmname))\n moveplots('/lustre/evla/test/realfast/archive/refined/', sdmname, destination='claw@nmpost-master:/lustre/aoc/projects/fasttransients/realfast/plots/refined')\n else:\n logger.info(\"No refinement plot found for candId {0}.\".format(candid))\n\n# Ids = elastic.get_ids(indexprefix+'cands', sdmname)\n# if cc is not None:\n if os.path.exists(refined_loc):\n if len(cc):\n url = refined_url\n logger.info(\"Updating refinement plot for new new refined_url.\")\n else:\n url = 'No candidate found during refinement'\n logger.info(\"Updating refinement plot for no refined_url.\")\n else:\n url = 'No candidate found during refinement'\n logger.info(\"Updating refinement plot for no refined_url.\")\n\n# for Id in Ids:\n elastic.update_field(indexprefix+'cands', 'refined_url', url, Id=candid)\n for k,v in elastic.gettags(indexprefix, candid).items(): # remove notify tag\n if 'notify' in v: \n newtags = ','.join([tag for tag in v.split(',') if tag != 'notify'])\n elastic.update_field(indexprefix+'cands', k, newtags, Id=candid)\n\n # decide whether to submit or update index for known plots\n if os.path.exists(refined_loc):\n logger.info(\"Refined candidate plot for candId {0} and sdm {1} exists locally. 
Skipping.\".format(candid, sdmname))\n return\n\n if cl is not None:\n logger.info(\"Submitting refinement for candId {0} and sdm {1}\".format(candid, sdmname))\n workernames = [v['id'] for k, v in cl.scheduler_info()['workers'].items() if 'fetch' in v['id']]\n assert len(workernames)\n\n fut = cl.submit(reproduce.refine_sdm, sdmname_full, dm, preffile='/lustre/evla/test/realfast/realfast.yml',\n npix_max=npix_max, npix_max_orig=npix_max_orig,\n refine=True, classify=True, ddm=ddm, workdir=workdir,\n resources={\"GPU\": 1, \"MEMORY\": 10e9}, devicenum=devicenum, retries=1, workers=workernames)\n\n fut2 = cl.submit(move_refined_plots, fut)\n distributed.fire_and_forget(fut2)\n else:\n logger.info(\"Running refinement for candId {0} and sdm {1}\".format(candid, sdmname))\n cc = reproduce.refine_sdm(sdmname_full, dm, preffile='/lustre/evla/test/realfast/realfast.yml', npix_max_orig=npix_max_orig,\n npix_max=npix_max, refine=True, classify=True, ddm=ddm, workdir=workdir, devicenum=devicenum)\n move_refined_plots(cc)", "def get_multiplexed_readout_detector_functions(df_name, qubits,\n nr_averages=None,\n nr_shots=None,\n used_channels=None,\n correlations=None,\n add_channels=None,\n det_get_values_kws=None,\n enforce_pulsar_restart=False,\n **kw):\n if nr_averages is None:\n nr_averages = max(qb.acq_averages() for qb in qubits)\n if nr_shots is None:\n nr_shots = max(qb.acq_shots() for qb in qubits)\n\n uhfs = set()\n uhf_instances = {}\n max_int_len = {}\n int_channels = {}\n inp_channels = {}\n acq_classifier_params = {}\n acq_state_prob_mtxs = {}\n for qb in qubits:\n uhf = qb.instr_acq()\n uhfs.add(uhf)\n uhf_instances[uhf] = qb.instr_acq.get_instr()\n\n if uhf not in max_int_len:\n max_int_len[uhf] = 0\n max_int_len[uhf] = max(max_int_len[uhf], qb.acq_length())\n\n if uhf not in int_channels:\n int_channels[uhf] = {}\n inp_channels[uhf] = {}\n int_channels[uhf][qb.name] = qb.get_acq_int_channels()\n inp_channels[uhf][qb.name] = qb.get_acq_inp_channels()\n\n if uhf not in acq_classifier_params:\n acq_classifier_params[uhf] = []\n param = 'acq_classifier_params'\n acq_classifier_params[uhf] += [\n qb.get(param) if param in qb.parameters else {}]\n if uhf not in acq_state_prob_mtxs:\n acq_state_prob_mtxs[uhf] = []\n param = 'acq_state_prob_mtx'\n acq_state_prob_mtxs[uhf] += [\n qb.get(param) if param in qb.parameters else None]\n\n if add_channels is None:\n add_channels = {}\n elif isinstance(add_channels, list):\n add_channels = {uhf: add_channels for uhf in uhfs}\n for uhf, add_chs in add_channels.items():\n if isinstance(add_chs, dict):\n add_chs = [add_chs] # autocorrect to a list of dicts\n if uhf not in uhfs:\n uhfs.add(uhf)\n uhf_instances[uhf] = qubits[0].find_instrument(uhf)\n max_int_len[uhf] = 0\n int_channels[uhf] = {}\n inp_channels[uhf] = {}\n acq_classifier_params[uhf] = []\n acq_state_prob_mtxs[uhf] = []\n for params in add_chs:\n if not isinstance(params, dict):\n params = dict(acq_channels=params)\n\n # FIXME: the following is a hack that will work as long as all\n # detector functions below use either int_channels or inp_channels,\n # but not both: we just add the extra channels to both lists to\n # make sure that they will be passed to the detector function no\n # matter which list the particular detector function gets.\n int_channels[uhf]['add_channels'] = params.get('acq_channels', [])\n inp_channels[uhf]['add_channels'] = params.get('acq_channels', [])\n\n max_int_len[uhf] = max(max_int_len[uhf], params.get('acq_length',\n 0))\n acq_classifier_params[uhf] += 
[params.get('acq_classifier_params',\n {})]\n acq_state_prob_mtxs[uhf] += [params.get('acq_state_prob_mtx', None)]\n\n if det_get_values_kws is None:\n det_get_values_kws = {}\n det_get_values_kws_in = None\n else:\n det_get_values_kws_in = deepcopy(det_get_values_kws)\n for uhf in acq_state_prob_mtxs:\n det_get_values_kws_in.pop(uhf, False)\n for uhf in acq_state_prob_mtxs:\n if uhf not in det_get_values_kws:\n det_get_values_kws[uhf] = {}\n det_get_values_kws[uhf].update({\n 'classifier_params': acq_classifier_params[uhf],\n 'state_prob_mtx': acq_state_prob_mtxs[uhf]})\n if det_get_values_kws_in is not None:\n det_get_values_kws[uhf].update(det_get_values_kws_in)\n\n if correlations is None:\n correlations = {uhf: [] for uhf in uhfs}\n elif isinstance(correlations, list):\n correlations = {uhf: correlations for uhf in uhfs}\n else: # is a dict\n for uhf in uhfs:\n if uhf not in correlations:\n correlations[uhf] = []\n\n if used_channels is None:\n used_channels = {uhf: None for uhf in uhfs}\n elif isinstance(used_channels, list):\n used_channels = {uhf: used_channels for uhf in uhfs}\n else: # is a dict\n for uhf in uhfs:\n if uhf not in used_channels:\n used_channels[uhf] = None\n\n AWG = None\n for qb in qubits:\n qbAWG = qb.instr_pulsar.get_instr()\n if AWG is not None and qbAWG is not AWG:\n raise Exception('Multi qubit detector can not be created with '\n 'multiple pulsar instances')\n AWG = qbAWG\n trigger_dev = None\n for qb in qubits:\n qb_trigger = qb.instr_trigger.get_instr()\n if trigger_dev is not None and qb_trigger is not trigger_dev:\n raise Exception('Multi qubit detector can not be created with '\n 'multiple trigger device instances')\n trigger_dev = qb_trigger\n\n if df_name == 'int_log_det':\n return det.MultiPollDetector([\n det.IntegratingSingleShotPollDetector(\n acq_dev=uhf_instances[uhf], AWG=AWG,\n channels=int_channels[uhf],\n integration_length=max_int_len[uhf], nr_shots=nr_shots,\n data_type='raw', **kw)\n for uhf in uhfs])\n elif df_name == 'dig_log_det':\n return det.MultiPollDetector([\n det.IntegratingSingleShotPollDetector(\n acq_dev=uhf_instances[uhf], AWG=AWG,\n channels=int_channels[uhf],\n integration_length=max_int_len[uhf], nr_shots=nr_shots,\n data_type='digitized', **kw)\n for uhf in uhfs])\n elif df_name == 'int_avg_det':\n return det.MultiPollDetector([\n det.IntegratingAveragingPollDetector(\n acq_dev=uhf_instances[uhf], AWG=AWG,\n channels=int_channels[uhf],\n integration_length=max_int_len[uhf], nr_averages=nr_averages,\n **kw)\n for uhf in uhfs])\n elif df_name == 'int_hist_det':\n print(\"nr_shots\", nr_shots)\n return det.MultiPollDetector([\n det.IntegratingHistogramPollDetector(\n acq_dev=uhf_instances[uhf], AWG=AWG,\n channels=int_channels[uhf],\n integration_length=max_int_len[uhf], nr_shots=nr_shots,\n **kw)\n for uhf in uhfs])\n elif df_name == 'int_avg_det_spec':\n # Can be used to force a hard sweep by explicitly setting to False\n kw['single_int_avg'] = kw.get('single_int_avg', True)\n return det.MultiPollDetector([\n det.IntegratingAveragingPollDetector(\n acq_dev=uhf_instances[uhf],\n AWG=(AWG if enforce_pulsar_restart\n else uhf_instances[uhf].get_awg_control_object()[0]),\n channels=int_channels[uhf],\n prepare_and_finish_pulsar=(not enforce_pulsar_restart),\n integration_length=max_int_len[uhf], nr_averages=nr_averages,\n polar=False, **kw)\n for uhf in uhfs],\n AWG=trigger_dev if len(uhfs) > 1 and not enforce_pulsar_restart else None)\n elif df_name == 'dig_avg_det':\n return det.MultiPollDetector([\n 
det.IntegratingAveragingPollDetector(\n acq_dev=uhf_instances[uhf], AWG=AWG,\n channels=int_channels[uhf],\n integration_length=max_int_len[uhf], nr_averages=nr_averages,\n data_type='digitized', **kw)\n for uhf in uhfs])\n elif df_name == 'int_avg_classif_det':\n return det.MultiPollDetector([\n det.ClassifyingPollDetector(\n acq_dev=uhf_instances[uhf], AWG=AWG,\n channels=int_channels[uhf],\n integration_length=max_int_len[uhf], nr_shots=nr_shots,\n get_values_function_kwargs=det_get_values_kws[uhf],\n data_type='raw', **kw)\n for uhf in uhfs])\n elif df_name == 'inp_avg_det':\n return det.MultiPollDetector([\n det.AveragingPollDetector(\n acq_dev=uhf_instances[uhf], AWG=AWG, nr_averages=nr_averages,\n acquisition_length=max_int_len[uhf],\n channels=inp_channels[uhf],\n **kw)\n for uhf in uhfs])\n elif df_name == 'int_corr_det':\n return det.MultiPollDetector([\n det.UHFQC_correlation_detector(\n acq_dev=uhf_instances[uhf], AWG=AWG,\n channels=int_channels[uhf],\n used_channels=used_channels[uhf],\n integration_length=max_int_len[uhf], nr_averages=nr_averages,\n correlations=correlations[uhf], data_type='raw_corr', **kw)\n for uhf in uhfs])\n elif df_name == 'dig_corr_det':\n return det.MultiPollDetector([\n det.UHFQC_correlation_detector(\n acq_dev=uhf_instances[uhf], AWG=AWG,\n channels=int_channels[uhf],\n used_channels=used_channels[uhf],\n integration_length=max_int_len[uhf], nr_averages=nr_averages,\n correlations=correlations[uhf], data_type='digitized_corr',\n **kw)\n for uhf in uhfs])\n elif df_name == 'timetrace_avg_ss_det': # ss: single-shot\n return det.MultiPollDetector([\n det.ScopePollDetector(\n acq_dev=uhf_instances[uhf], AWG=AWG, channels=int_channels[uhf],\n nr_shots=nr_shots,\n acquisition_length=max_int_len[uhf], nr_averages=nr_averages,\n data_type='timedomain',\n **kw)\n for uhf in uhfs])\n elif df_name == 'psd_avg_det':\n return det.MultiPollDetector([\n det.ScopePollDetector(\n acq_dev=uhf_instances[uhf], AWG=AWG, channels=int_channels[uhf],\n nr_shots=nr_shots,\n acquisition_length=max_int_len[uhf], nr_averages=nr_averages,\n data_type='fft_power',\n **kw)\n for uhf in uhfs])", "def __init__(self, channel=105, state=BusState.ACTIVE, bitrate=500000, *args, **kwargs):\n\n #super(CanFoxBus, self).__init__(self, channel='PCAN_USBBUS1', state=BusState.ACTIVE, bitrate=500000, *args, **kwargs)\n self.channel_info = channel\n self.fd = kwargs.get('fd', False)\n pcan_bitrate = CANFOX_bitrate_objs.get(bitrate, CANFOX_BAUD_250K)\n\n\n\n self.m_objCANFOXBasic = CANFOXBasic()\n self.m_PcanHandle = 105 #globals()[channel]\n self._filters = None\n\n if state is BusState.ACTIVE or state is BusState.PASSIVE:\n self.state = state\n else:\n raise ArgumentError(\"BusState must be Active or Passive\")\n\n\n if self.fd:\n f_clock_val = kwargs.get('f_clock', None)\n if f_clock_val is None:\n f_clock = \"{}={}\".format('f_clock_mhz', kwargs.get('f_clock_mhz', None))\n else:\n f_clock = \"{}={}\".format('f_clock', kwargs.get('f_clock', None))\n\n fd_parameters_values = [f_clock] + [\"{}={}\".format(key, kwargs.get(key, None)) for key in pcan_fd_parameter_list if kwargs.get(key, None) is not None]\n\n self.fd_bitrate = ' ,'.join(fd_parameters_values).encode(\"ascii\")\n\n\n result = self.m_objCANFOXBasic.InitializeFD(self.m_PcanHandle, self.fd_bitrate)\n else:\n if HAS_EVENTS:\n self._recv_event = CreateEvent(None, 0, 0, \"R2\")\n self._tran_event = CreateEvent(None, 0, 0, \"T2\")\n result = self.m_objCANFOXBasic.Initialize(self.m_PcanHandle, pcan_bitrate)\n\n if result != 
CANFOX_ERROR_OK:\n raise PcanError(self._get_formatted_error(result))\n\n if HAS_EVENTS:\n\n if 0:\n self._recv_event = CreateEvent(None, 0, 0, \"R2\")\n result = self.m_objCANFOXBasic.SetValue(\n self.m_PcanHandle, 1, self._recv_event) #\"\"\"PCAN_RECEIVE_EVENT\"\"\"\n if result != CANFOX_ERROR_OK:\n raise PcanError(self._get_formatted_error(result))\n\n super(CanFoxBus, self).__init__(channel=channel, state=state, bitrate=bitrate, *args, **kwargs)", "def synch_data_mux(self, time_ofdm_data_symbols):\n\n buffer_tx_time = self.synch.synch_mask # Add data into this\n # plt.plot(buffer_tx_time[0, :].real)\n # plt.plot(buffer_tx_time[0, :].imag)\n # plt.show()\n total_symb_count = 0\n synch_symb_count = 0\n data_symb_count = 0\n for symb in self.symb_pattern.tolist():\n symb_start = total_symb_count*self.OFDMsymb_len\n symb_end = symb_start + self.OFDMsymb_len\n # print(symb_start, symb_end)\n if int(symb) == 0:\n synch_symb_count += 1\n else:\n # print(symb, symb_start, symb_end)\n data_start = data_symb_count*self.OFDMsymb_len\n data_end = data_start + self.OFDMsymb_len\n\n buffer_tx_time[:, symb_start: symb_end] = time_ofdm_data_symbols[:, data_start: data_end]\n data_symb_count += 1\n\n total_symb_count += 1\n\n # plt.plot(buffer_tx_time[0, :].real)\n # plt.plot(buffer_tx_time[0, :].imag)\n # plt.show()\n return buffer_tx_time", "def ofdm_modulate(self, num_data_symb, freq_bin_data):\n min_pow = 1e-30\n time_ofdm_symbols = zeros((self.num_ant, num_data_symb * self.OFDMsymb_len), dtype=complex)\n for symb in range(num_data_symb):\n freq_data_start = symb * self.num_data_bins\n freq_data_end = freq_data_start + self.num_data_bins\n\n time_symb_start = symb * self.OFDMsymb_len\n time_symb_end = time_symb_start + self.OFDMsymb_len\n\n P = 0\n for ant in range(self.num_ant):\n\n ofdm_symb = zeros(self.NFFT, dtype=complex)\n ofdm_symb[self.used_data_bins] = freq_bin_data[ant, freq_data_start:freq_data_end]\n # plt.stem(array(range(-int(self.NFFT/2), int(self.NFFT/2))), abs(ofdm_symb))\n # plt.show()\n data_ifft = ifft(ofdm_symb, self.NFFT)\n cyclic_prefix = data_ifft[-self.CP:]\n data_time = concatenate((cyclic_prefix, data_ifft)) # add CP\n\n sig_energy = abs(dot(data_time, conj(data_time).T))\n # power scaling to normalize to 1\n if sig_energy > min_pow and ant == 0:\n scale_factor = sqrt(len(data_time) / sig_energy)\n else:\n scale_factor = 1\n data_time *= scale_factor\n P += var(data_time)\n time_ofdm_symbols[ant, time_symb_start: time_symb_end] = data_time\n\n for ant in range(self.num_ant):\n time_ofdm_symbols[ant, time_symb_start: time_symb_end] *= (1 / sqrt(P))\n\n return time_ofdm_symbols", "def test_wrong_ref_power_mfcc():\n MFCC(file_struct, FeatureTypes.framesync, ref_power=\"caca\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_ofdm_frame_sink_sptr
__init__(self, p) > digital_ofdm_frame_sink_sptr
def __init__(self, *args): this = _digital_swig.new_digital_ofdm_frame_sink_sptr(*args) try: self.this.append(this) except: self.this = this
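The try/except around self.this is the stock SWIG smart-pointer idiom rather than anything specific to this block: depending on the SWIG runtime, self.this either already exists as an appendable holder or must be assigned outright, and the wrapper ends up owning the underlying shared pointer either way. A minimal pure-Python sketch of the same defensive pattern — the class name and raw_ptr argument are stand-ins for illustration, not SWIG internals:

class _sptr_wrapper(object):
    def __init__(self, raw_ptr):
        this = raw_ptr
        try:
            self.this.append(this)  # runtime pre-installed an appendable holder
        except AttributeError:
            self.this = this        # otherwise fall back to plain assignment

w = _sptr_wrapper("ptr-0xdeadbeef")
print(w.this)  # ptr-0xdeadbeef (the fallback path: no pre-existing holder)

The generated code uses a bare except: for maximum compatibility across runtimes; AttributeError is the case that actually occurs when no holder exists.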
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_framer_sink_1_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_simple_framer_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, sink):\n\n self.sink = sink", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, description, framecount=None):\n super().__init__(description)\n if 'parameters' in description:\n parameters = description['parameters']\n has_s2mm = parameters['C_INCLUDE_S2MM'] == '1'\n has_mm2s = parameters['C_INCLUDE_MM2S'] == '1'\n framecount = int(parameters['C_NUM_FSTORES'])\n s2mm_addr_width = int(parameters['C_M_AXI_S2MM_ADDR_WIDTH'])\n mm2s_addr_width = int(parameters['C_M_AXI_MM2S_ADDR_WIDTH'])\n if ((has_s2mm and s2mm_addr_width > 32) or\n (has_mm2s and mm2s_addr_width > 32)):\n raise UnsupportedConfiguration(\n 'VDMA driver only supports 32-bit addresses')\n\n else:\n has_s2mm = True\n has_mm2s = True\n framecount = 4 if framecount is None else framecount\n\n self.framecount = framecount\n if has_s2mm:\n self.readchannel = AxiVDMA.S2MMChannel(self, self.s2mm_introut)\n if has_mm2s:\n self.writechannel = AxiVDMA.MM2SChannel(self, self.mm2s_introut)", "def __init__(self, fft_length, cp_length, occupied_tones, snr, ks, carrier_map_bin, nc_filter, logging=False):\n\n\tgr.hier_block2.__init__(self, \"ofdm_receiver\",\n\t\t\t\tgr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature\n gr.io_signature2(2, 2, gr.sizeof_gr_complex*occupied_tones, gr.sizeof_char)) # Output signature\n\n bw = (float(occupied_tones) / float(fft_length)) / 2.0\n tb = bw*0.04\n print \"ofdm_receiver:__init__:occupied_tones %s fft_length %d \" % (occupied_tones, fft_length)\n \n chan_coeffs = filter.firdes.low_pass (1.0, # gain\n 1.0, # sampling rate\n bw+tb, # midpoint of trans. band\n tb, # width of trans. 
band\n filter.firdes.WIN_HAMMING) # filter type\n \n self.chan_filt = filter.fft_filter_ccc(1, chan_coeffs)\n\n # linklab, get ofdm parameters\n self._fft_length = fft_length\n self._occupied_tones = occupied_tones\n self._cp_length = cp_length\n self._nc_filter = nc_filter\n self._carrier_map_bin = carrier_map_bin\n \n win = [1 for i in range(fft_length)]\n \n # linklab, initialization function\n self.initialize(ks, self._carrier_map_bin)\n \n\n zeros_on_left = int(math.ceil((fft_length - occupied_tones)/2.0))\n ks0 = fft_length*[0,]\n ks0[zeros_on_left : zeros_on_left + occupied_tones] = ks[0]\n\n ks0 = np_fft.ifftshift(ks0)\n ks0time = np_fft.ifft(ks0)\n # ADD SCALING FACTOR\n ks0time = ks0time.tolist()\n\n SYNC = \"pn\"\n if SYNC == \"ml\":\n nco_sensitivity = -1.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_ml(fft_length,\n cp_length,\n snr,\n ks0time,\n logging)\n elif SYNC == \"pn\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pn(fft_length,\n cp_length,\n logging)\n elif SYNC == \"pnac\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pnac(fft_length,\n cp_length,\n ks0time,\n logging)\n # for testing only; do not user over the air\n # remove filter and filter delay for this\n elif SYNC == \"fixed\":\n self.chan_filt = gr.multiply_const_cc(1.0)\n nsymbols = 18 # enter the number of symbols per packet\n freq_offset = 0.0 # if you use a frequency offset, enter it here\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_fixed(fft_length,\n cp_length,\n nsymbols,\n freq_offset,\n logging)\n\n # Set up blocks\n\n # Create a delay line, linklab\n self.delay = blocks.delay(gr.sizeof_gr_complex, fft_length)\n\n self.nco = analog.frequency_modulator_fc(nco_sensitivity) # generate a signal proportional to frequency error of sync block\n self.sigmix = blocks.multiply_cc()\n self.sampler = gr_papyrus.ofdm_sampler(fft_length, fft_length+cp_length)\n self.fft_demod = gr_fft.fft_vcc(fft_length, True, win, True)\n self.ofdm_frame_acq = gr_papyrus.ofdm_frame_acquisition(occupied_tones,\n fft_length,\n cp_length, ks[0])\n # linklab, check current mode: non-contiguous OFDM or not\n if self._nc_filter:\n print '\\nMulti-band Filter Turned ON!'\n # linklab, non-contiguous filter\n self.ncofdm_filt = ncofdm_filt(self._fft_length, self._occupied_tones, self._carrier_map_bin)\n self.connect(self, self.chan_filt, self.ncofdm_filt)\n self.connect(self.ncofdm_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. offset output to derotate input signal\n self.connect(self.ncofdm_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n else :\n print '\\nMulti-band Filter Turned OFF!'\n self.connect(self, self.chan_filt)\n self.connect(self.chan_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. 
offset output to derotate input signal\n self.connect(self.chan_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n\n self.connect(self.sigmix, (self.sampler,0)) # sample off timing signal detected in sync alg\n self.connect((self.ofdm_sync,1), (self.sampler,1)) # timing signal to sample at\n\n self.connect((self.sampler,0), self.fft_demod) # send derotated sampled signal to FFT\n self.connect(self.fft_demod, (self.ofdm_frame_acq,0)) # find frame start and equalize signal\n self.connect((self.sampler,1), (self.ofdm_frame_acq,1)) # send timing signal to signal frame start\n self.connect((self.ofdm_frame_acq,0), (self,0)) # finished with fine/coarse freq correction,\n self.connect((self.ofdm_frame_acq,1), (self,1)) # frame and symbol timing, and equalization\n\n if logging:\n self.connect(self.chan_filt, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-chan_filt_c.dat\"))\n self.connect(self.fft_demod, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-fft_out_c.dat\"))\n self.connect(self.ofdm_frame_acq,\n gr.file_sink(gr.sizeof_gr_complex*occupied_tones, \"ofdm_receiver-frame_acq_c.dat\"))\n self.connect((self.ofdm_frame_acq,1), gr.file_sink(1, \"ofdm_receiver-found_corr_b.dat\"))\n self.connect(self.sampler, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-sampler_c.dat\"))\n self.connect(self.sigmix, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-sigmix_c.dat\"))\n self.connect(self.nco, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-nco_c.dat\"))", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_fff_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, gain=None, samp_rate=None, ppm=None, arfcn=None, capture_id=None, udp_ports=[], max_timeslot=0, store_capture=True, verbose=False, band=None, rec_length=None, test=False, args=\"\"):\n\n gr.top_block.__init__(self, \"Gr-gsm Capture\")\n\n ##################################################\n # Parameters\n ##################################################\n\n self.arfcn = arfcn\n for band in grgsm.arfcn.get_bands():\n if grgsm.arfcn.is_valid_arfcn(self.arfcn, band):\n self.fc = grgsm.arfcn.arfcn2downlink(arfcn, band)\n break\n\n self.gain = gain\n self.samp_rate = samp_rate\n self.ppm = ppm\n self.arfcn = arfcn\n self.band = band\n self.shiftoff = shiftoff = 400e3\n self.rec_length = rec_length\n self.store_capture = store_capture\n self.capture_id = capture_id\n self.udp_ports = udp_ports\n self.verbose = verbose\n\n ##################################################\n # Processing Blocks\n ##################################################\n\n self.rtlsdr_source = osmosdr.source( args=\"numchan=\" + str(1) + \" \" + \"\" )\n self.rtlsdr_source.set_sample_rate(samp_rate)\n self.rtlsdr_source.set_center_freq(self.fc - shiftoff, 0)\n self.rtlsdr_source.set_freq_corr(ppm, 0)\n self.rtlsdr_source.set_dc_offset_mode(2, 0)\n self.rtlsdr_source.set_iq_balance_mode(2, 0)\n self.rtlsdr_source.set_gain_mode(True, 0)\n self.rtlsdr_source.set_gain(gain, 0)\n self.rtlsdr_source.set_if_gain(20, 0)\n self.rtlsdr_source.set_bb_gain(20, 0)\n self.rtlsdr_source.set_antenna(\"\", 0)\n self.rtlsdr_source.set_bandwidth(250e3+abs(shiftoff), 0)\n 
self.blocks_rotator = blocks.rotator_cc(-2*pi*shiftoff/samp_rate)\n\n #RUn for the specified amount of seconds or indefenitely\n if self.rec_length is not None:\n self.blocks_head_0 = blocks.head(gr.sizeof_gr_complex, int(samp_rate*rec_length))\n\n self.gsm_receiver = grgsm.receiver(4, ([self.arfcn]), ([]))\n self.gsm_input = grgsm.gsm_input(\n ppm=0,\n osr=4,\n fc=self.fc,\n samp_rate_in=samp_rate,\n )\n self.gsm_clock_offset_control = grgsm.clock_offset_control(self.fc-shiftoff)\n\n #Control channel demapper for timeslot 0\n #self.gsm_bcch_ccch_demapper_0 = grgsm.universal_ctrl_chans_demapper(0, ([2,6,12,16,22,26,32,36,42,46]), ([1,2,2,2,2,2,2,2,2,2]))\n self.gsm_bcch_ccch_demapper_0 = grgsm.gsm_bcch_ccch_demapper(0)\n #For all other timeslots are assumed to contain sdcch8 logical channels, this demapping may be incorrect\n if max_timeslot >= 1 and max_timeslot <= 8:\n self.gsm_sdcch8_demappers = []\n for i in range(1,max_timeslot + 1):\n #self.gsm_sdcch8_demappers.append(grgsm.universal_ctrl_chans_demapper(i, ([0,4,8,12,16,20,24,28,32,36,40,44]), ([8,8,8,8,8,8,8,8,136,136,136,136])))\n self.gsm_sdcch8_demappers.append(grgsm.gsm_sdcch8_demapper(i))\n #Control channel decoder (extracts the packets), one for each timeslot\n self.gsm_control_channels_decoders = []\n for i in range(0,max_timeslot + 1):\n self.gsm_control_channels_decoders.append(grgsm.control_channels_decoder())\n# self.blocks_socket_pdu_0 = blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", \"4729\", 10000, False)# self.blocks_socket_pdu_0 = blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", \"4729\", 10000, False)\n\n #UDP client that sends all decoded C0T0 packets to the specified port on localhost if requested\n self.client_sockets = []\n self.server_sockets = []\n for udp_port in self.udp_ports:\n #The server is for testing only\n #WARNING remove the server if you want connect to a different one\n if test:\n self.server_sockets.append(blocks.socket_pdu(\"UDP_SERVER\", \"127.0.0.1\", str(udp_port), 10000))\n self.client_sockets.append(blocks.socket_pdu(\"UDP_CLIENT\", \"127.0.0.1\", str(udp_port), 10000))\n\n #Sinks to store the capture file if requested\n if self.store_capture:\n self.gsm_burst_file_sink = grgsm.burst_file_sink(str(self.capture_id) + \".burstfile\")\n self.blocks_file_sink = blocks.file_sink(gr.sizeof_gr_complex*1, str(self.capture_id) + \".cfile\", False)\n self.blocks_file_sink.set_unbuffered(False)\n\n #Printer for printing messages when verbose flag is True\n if self.verbose:\n self.gsm_message_printer = grgsm.message_printer(pmt.intern(\"\"), False)\n\n \"\"\"\n if self.verbose:\n self.gsm_bursts_printer_0 = grgsm.bursts_printer(pmt.intern(\"\"),\n False, False, False, False)\n \"\"\"\n ##################################################\n # Connections\n ##################################################\n\n if self.rec_length is not None: #if recording length is defined connect head block after the source\n self.connect((self.rtlsdr_source, 0), (self.blocks_head_0, 0))\n self.connect((self.blocks_head_0, 0), (self.blocks_rotator, 0))\n else:\n self.connect((self.rtlsdr_source, 0), (self.blocks_rotator, 0))\n\n #Connect the file sinks\n if self.store_capture:\n self.connect((self.blocks_rotator, 0), (self.blocks_file_sink, 0))\n self.msg_connect(self.gsm_receiver, \"C0\", self.gsm_burst_file_sink, \"in\")\n\n #Connect the GSM receiver\n self.connect((self.gsm_input, 0), (self.gsm_receiver, 0))\n self.connect((self.blocks_rotator, 0), (self.gsm_input, 0))\n 
self.msg_connect(self.gsm_clock_offset_control, \"ppm\", self.gsm_input, \"ppm_in\")\n self.msg_connect(self.gsm_receiver, \"measurements\", self.gsm_clock_offset_control, \"measurements\")\n\n #Connect the demapper and decoder for timeslot 0\n self.msg_connect((self.gsm_receiver, 'C0'), (self.gsm_bcch_ccch_demapper_0, 'bursts'))\n self.msg_connect((self.gsm_bcch_ccch_demapper_0, 'bursts'), (self.gsm_control_channels_decoders[0], 'bursts'))\n\n #Connect the demapper and decoders for the other timeslots\n for i in range(1,max_timeslot +1):\n self.msg_connect((self.gsm_receiver, 'C0'), (self.gsm_sdcch8_demappers[i-1], 'bursts'))\n self.msg_connect((self.gsm_sdcch8_demappers[i-1], 'bursts'), (self.gsm_control_channels_decoders[i], 'bursts'))\n\n\n #Connect the UDP clients if requested\n for client_socket in self.client_sockets:\n for i in range(0,max_timeslot + 1):\n self.msg_connect((self.gsm_control_channels_decoders[i], 'msgs'), (client_socket, 'pdus'))\n\n #Connect the printer is self.verbose is True\n if self.verbose:\n for i in range(0,max_timeslot + 1):\n self.msg_connect((self.gsm_control_channels_decoders[i], 'msgs'), (self.gsm_message_printer, 'msgs'))\n\n \"\"\"\n if self.verbose:\n self.msg_connect(self.gsm_receiver, \"C0\", self.gsm_bursts_printer_0, \"bursts\")\n \"\"\"", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_f_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, src):\n self.src = src", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, channel=105, state=BusState.ACTIVE, bitrate=500000, *args, **kwargs):\n\n #super(CanFoxBus, self).__init__(self, channel='PCAN_USBBUS1', state=BusState.ACTIVE, bitrate=500000, *args, **kwargs)\n self.channel_info = channel\n self.fd = kwargs.get('fd', False)\n pcan_bitrate = CANFOX_bitrate_objs.get(bitrate, CANFOX_BAUD_250K)\n\n\n\n self.m_objCANFOXBasic = CANFOXBasic()\n self.m_PcanHandle = 105 #globals()[channel]\n self._filters = None\n\n if state is BusState.ACTIVE or state is BusState.PASSIVE:\n self.state = state\n else:\n raise ArgumentError(\"BusState must be Active or Passive\")\n\n\n if self.fd:\n f_clock_val = kwargs.get('f_clock', None)\n if f_clock_val is None:\n f_clock = \"{}={}\".format('f_clock_mhz', kwargs.get('f_clock_mhz', None))\n else:\n f_clock = \"{}={}\".format('f_clock', kwargs.get('f_clock', None))\n\n fd_parameters_values = [f_clock] + [\"{}={}\".format(key, kwargs.get(key, None)) for key in pcan_fd_parameter_list if kwargs.get(key, None) is not None]\n\n self.fd_bitrate = ' ,'.join(fd_parameters_values).encode(\"ascii\")\n\n\n result = self.m_objCANFOXBasic.InitializeFD(self.m_PcanHandle, self.fd_bitrate)\n else:\n if HAS_EVENTS:\n self._recv_event = CreateEvent(None, 0, 0, \"R2\")\n self._tran_event = CreateEvent(None, 0, 0, \"T2\")\n result = self.m_objCANFOXBasic.Initialize(self.m_PcanHandle, pcan_bitrate)\n\n if result != CANFOX_ERROR_OK:\n raise 
PcanError(self._get_formatted_error(result))\n\n if HAS_EVENTS:\n\n if 0:\n self._recv_event = CreateEvent(None, 0, 0, \"R2\")\n result = self.m_objCANFOXBasic.SetValue(\n self.m_PcanHandle, 1, self._recv_event) #\"\"\"PCAN_RECEIVE_EVENT\"\"\"\n if result != CANFOX_ERROR_OK:\n raise PcanError(self._get_formatted_error(result))\n\n super(CanFoxBus, self).__init__(channel=channel, state=state, bitrate=bitrate, *args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_probe_density_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, stream):\n self.send = stream.send" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ofdm_frame_sink(gr_complex_vector sym_position, __dummy_0__ sym_value_out, gr_msg_queue_sptr target_queue, unsigned int occupied_tones, float phase_gain = 0.25, float freq_gain = 0.25*0.25/4) > digital_ofdm_frame_sink_sptr Takes an OFDM symbol in, demaps it into bits of 0's and 1's, packs them into packets, and sends them to a message queue sink.
def ofdm_frame_sink(*args, **kwargs): return _digital_swig.ofdm_frame_sink(*args, **kwargs)
[ "def ofdm_modulate(self, num_data_symb, freq_bin_data):\n min_pow = 1e-30\n time_ofdm_symbols = zeros((self.num_ant, num_data_symb * self.OFDMsymb_len), dtype=complex)\n for symb in range(num_data_symb):\n freq_data_start = symb * self.num_data_bins\n freq_data_end = freq_data_start + self.num_data_bins\n\n time_symb_start = symb * self.OFDMsymb_len\n time_symb_end = time_symb_start + self.OFDMsymb_len\n\n P = 0\n for ant in range(self.num_ant):\n\n ofdm_symb = zeros(self.NFFT, dtype=complex)\n ofdm_symb[self.used_data_bins] = freq_bin_data[ant, freq_data_start:freq_data_end]\n # plt.stem(array(range(-int(self.NFFT/2), int(self.NFFT/2))), abs(ofdm_symb))\n # plt.show()\n data_ifft = ifft(ofdm_symb, self.NFFT)\n cyclic_prefix = data_ifft[-self.CP:]\n data_time = concatenate((cyclic_prefix, data_ifft)) # add CP\n\n sig_energy = abs(dot(data_time, conj(data_time).T))\n # power scaling to normalize to 1\n if sig_energy > min_pow and ant == 0:\n scale_factor = sqrt(len(data_time) / sig_energy)\n else:\n scale_factor = 1\n data_time *= scale_factor\n P += var(data_time)\n time_ofdm_symbols[ant, time_symb_start: time_symb_end] = data_time\n\n for ant in range(self.num_ant):\n time_ofdm_symbols[ant, time_symb_start: time_symb_end] *= (1 / sqrt(P))\n\n return time_ofdm_symbols", "def __init__(self, fft_length, cp_length, occupied_tones, snr, ks, carrier_map_bin, nc_filter, logging=False):\n\n\tgr.hier_block2.__init__(self, \"ofdm_receiver\",\n\t\t\t\tgr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature\n gr.io_signature2(2, 2, gr.sizeof_gr_complex*occupied_tones, gr.sizeof_char)) # Output signature\n\n bw = (float(occupied_tones) / float(fft_length)) / 2.0\n tb = bw*0.04\n print \"ofdm_receiver:__init__:occupied_tones %s fft_length %d \" % (occupied_tones, fft_length)\n \n chan_coeffs = filter.firdes.low_pass (1.0, # gain\n 1.0, # sampling rate\n bw+tb, # midpoint of trans. band\n tb, # width of trans. 
band\n filter.firdes.WIN_HAMMING) # filter type\n \n self.chan_filt = filter.fft_filter_ccc(1, chan_coeffs)\n\n # linklab, get ofdm parameters\n self._fft_length = fft_length\n self._occupied_tones = occupied_tones\n self._cp_length = cp_length\n self._nc_filter = nc_filter\n self._carrier_map_bin = carrier_map_bin\n \n win = [1 for i in range(fft_length)]\n \n # linklab, initialization function\n self.initialize(ks, self._carrier_map_bin)\n \n\n zeros_on_left = int(math.ceil((fft_length - occupied_tones)/2.0))\n ks0 = fft_length*[0,]\n ks0[zeros_on_left : zeros_on_left + occupied_tones] = ks[0]\n\n ks0 = np_fft.ifftshift(ks0)\n ks0time = np_fft.ifft(ks0)\n # ADD SCALING FACTOR\n ks0time = ks0time.tolist()\n\n SYNC = \"pn\"\n if SYNC == \"ml\":\n nco_sensitivity = -1.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_ml(fft_length,\n cp_length,\n snr,\n ks0time,\n logging)\n elif SYNC == \"pn\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pn(fft_length,\n cp_length,\n logging)\n elif SYNC == \"pnac\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pnac(fft_length,\n cp_length,\n ks0time,\n logging)\n # for testing only; do not user over the air\n # remove filter and filter delay for this\n elif SYNC == \"fixed\":\n self.chan_filt = gr.multiply_const_cc(1.0)\n nsymbols = 18 # enter the number of symbols per packet\n freq_offset = 0.0 # if you use a frequency offset, enter it here\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_fixed(fft_length,\n cp_length,\n nsymbols,\n freq_offset,\n logging)\n\n # Set up blocks\n\n # Create a delay line, linklab\n self.delay = blocks.delay(gr.sizeof_gr_complex, fft_length)\n\n self.nco = analog.frequency_modulator_fc(nco_sensitivity) # generate a signal proportional to frequency error of sync block\n self.sigmix = blocks.multiply_cc()\n self.sampler = gr_papyrus.ofdm_sampler(fft_length, fft_length+cp_length)\n self.fft_demod = gr_fft.fft_vcc(fft_length, True, win, True)\n self.ofdm_frame_acq = gr_papyrus.ofdm_frame_acquisition(occupied_tones,\n fft_length,\n cp_length, ks[0])\n # linklab, check current mode: non-contiguous OFDM or not\n if self._nc_filter:\n print '\\nMulti-band Filter Turned ON!'\n # linklab, non-contiguous filter\n self.ncofdm_filt = ncofdm_filt(self._fft_length, self._occupied_tones, self._carrier_map_bin)\n self.connect(self, self.chan_filt, self.ncofdm_filt)\n self.connect(self.ncofdm_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. offset output to derotate input signal\n self.connect(self.ncofdm_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n else :\n print '\\nMulti-band Filter Turned OFF!'\n self.connect(self, self.chan_filt)\n self.connect(self.chan_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. 
offset output to derotate input signal\n self.connect(self.chan_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n\n self.connect(self.sigmix, (self.sampler,0)) # sample off timing signal detected in sync alg\n self.connect((self.ofdm_sync,1), (self.sampler,1)) # timing signal to sample at\n\n self.connect((self.sampler,0), self.fft_demod) # send derotated sampled signal to FFT\n self.connect(self.fft_demod, (self.ofdm_frame_acq,0)) # find frame start and equalize signal\n self.connect((self.sampler,1), (self.ofdm_frame_acq,1)) # send timing signal to signal frame start\n self.connect((self.ofdm_frame_acq,0), (self,0)) # finished with fine/coarse freq correction,\n self.connect((self.ofdm_frame_acq,1), (self,1)) # frame and symbol timing, and equalization\n\n if logging:\n self.connect(self.chan_filt, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-chan_filt_c.dat\"))\n self.connect(self.fft_demod, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-fft_out_c.dat\"))\n self.connect(self.ofdm_frame_acq,\n gr.file_sink(gr.sizeof_gr_complex*occupied_tones, \"ofdm_receiver-frame_acq_c.dat\"))\n self.connect((self.ofdm_frame_acq,1), gr.file_sink(1, \"ofdm_receiver-found_corr_b.dat\"))\n self.connect(self.sampler, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-sampler_c.dat\"))\n self.connect(self.sigmix, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-sigmix_c.dat\"))\n self.connect(self.nco, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-nco_c.dat\"))", "def synch_data_mux(self, time_ofdm_data_symbols):\n\n buffer_tx_time = self.synch.synch_mask # Add data into this\n # plt.plot(buffer_tx_time[0, :].real)\n # plt.plot(buffer_tx_time[0, :].imag)\n # plt.show()\n total_symb_count = 0\n synch_symb_count = 0\n data_symb_count = 0\n for symb in self.symb_pattern.tolist():\n symb_start = total_symb_count*self.OFDMsymb_len\n symb_end = symb_start + self.OFDMsymb_len\n # print(symb_start, symb_end)\n if int(symb) == 0:\n synch_symb_count += 1\n else:\n # print(symb, symb_start, symb_end)\n data_start = data_symb_count*self.OFDMsymb_len\n data_end = data_start + self.OFDMsymb_len\n\n buffer_tx_time[:, symb_start: symb_end] = time_ofdm_data_symbols[:, data_start: data_end]\n data_symb_count += 1\n\n total_symb_count += 1\n\n # plt.plot(buffer_tx_time[0, :].real)\n # plt.plot(buffer_tx_time[0, :].imag)\n # plt.show()\n return buffer_tx_time", "def message_sink(itemsize, num_symbol, msgq, dont_block):\n return _raw_util.message_sink(itemsize, num_symbol, msgq, dont_block)", "def dw2000q_graph(**kwargs):\n target_graph = dnx.generators.chimera_graph(16, 16, 4, **kwargs)\n target_graph.graph['chip_id'] = 'DW_2000Q'\n return target_graph", "def _post(self, which_port, msg):\n return _raw_util.raw_pnc_frequency_modulator_fc_sptr__post(self, which_port, msg)", "def setDiamFin(sinkDims, dcFinWidth, dcFinGap,\n model, z\n ): \n\n widDim = dcFinWidth / cos( radians(45) )\n gapDim = dcFinGap / cos( radians(45) )\n \n fct = 1. 
+ 1.e-8\n if sinkDims[0] == sinkDims[1]:\n finNumX = int((sinkDims[0] + fct*gapDim) / ( gapDim + widDim ))\n sideGapX= sinkDims[0] - finNumX * widDim - (finNumX - 1) * gapDim\n finNumY = finNumX\n sideGapY= sideGapX \n finNum = 2 * finNumX \n \n else:\n finNumX = int((sinkDims[0] + fct*gapDim) / ( gapDim + widDim ))\n finNumY = int((sinkDims[1] + fct*gapDim) / ( gapDim + widDim ))\n sideGapX= sinkDims[0] - finNumX * widDim - (finNumX - 1) * gapDim\n sideGapY= sinkDims[0] - finNumY * widDim - (finNumY - 1) * gapDim\n finNum = finNumX + finNumY\n\n if finNumX <= 1 or finNumY <= 1:\n raise HeatSinkCADError, \"The model dimensions are invalid.\"\n\n x0 = -(sinkDims[0] - sideGapX) / 2\n y0 = -(sinkDims[1] - sideGapY) / 2\n z0 = z\n x1 = (sinkDims[0] - sideGapX) / 2\n y1 = (sinkDims[1] - sideGapY) / 2\n z1 = z0 + sinkDims[2]\n\n x0 += (dcFinWidth / 2)\n x1 += (dcFinWidth / 2)\n y0 += (dcFinWidth / 2)\n y1 += (dcFinWidth / 2)\n\n for finN in range(0,finNumX):\n (rc,fin)= GO_BODY_create_box_cross_section(\n (x0 + (finN * gapDim) + (finN * widDim), y0, z1),\n (x1, y1 - (finN * gapDim) - (finN * widDim) ,z1), \n dcFinWidth ,\n sinkDims[2] )\n \n if fin == pk.PK_ENTITY_null: \n raise HeatSinkCADError,\"There is not an entity for fin.\"\n \n \n (rc,model)= GO_BODY_unite( model,\n fin )\n if model == pk.PK_ENTITY_null:\n raise HeatSinkCADError,\"There is not an entity.\"\n \n \n for finN in range(0,finNumY):\n \n (rc,fin)= GO_BODY_create_box_cross_section(\n (x0 ,y0 + (finN * gapDim) + (finN * widDim), z1),\n (x1 - (finN * gapDim) - (finN * widDim), y1 ,z1), \n dcFinWidth ,\n sinkDims[2] )\n \n \n if fin == pk.PK_ENTITY_null: \n raise HeatSinkCADError,\"There is not an entity for fin.\"\n \n \n (rc,model)= GO_BODY_unite( model,\n fin )\n if model == pk.PK_ENTITY_null:\n raise HeatSinkCADError,\"There is not an entity.\"\n \n for finN in range(0,finNum):\n \n (rc,fin)= GO_BODY_create_box_cross_section(\n (x0 + (finN + 2)* widDim + finN * gapDim, y0, z1),\n (x0, y0 + (finN + 2)*widDim + finN * gapDim , z1), \n dcFinGap ,\n sinkDims[2] )\n \n if fin == pk.PK_ENTITY_null: \n raise HeatSinkCADError,\"There is not an entity for fin.\"\n \n \n (rc,model)= GO_BODY_subtract( model,\n fin )\n if model == pk.PK_ENTITY_null:\n raise HeatSinkCADError,\"There is not an entity.\"", "def match_target_amplitude(sound, target_dbfs):\n change_in_dbfs = target_dbfs - sound.dBFS\n print(\"change_in_dBFS = \", change_in_dbfs)\n return sound.apply_gain(change_in_dbfs)", "def make(param_mode, debug):\n return _wmbus_swig.wmbus_packet_sink_make(param_mode, debug)", "def write_framebuf(self) -> None:\n self.dc_pin.value = 1\n with self.spi_device as spi:\n spi.write(self.buffer)", "def wmbus_packet_sink_make(param_mode, debug):\n return _wmbus_swig.wmbus_packet_sink_make(param_mode, debug)", "def DMFluxneuDet(flavor,Enu,ch,DMm,DMsig,body,param,osc): \n ##B From Arxiv: 0506298 ec. 
21 & 24\n #DM_annihilation_rate_Earth = 1.0e14*(100*param.GeV/DMm)**2/param.sec #[annhilations/s]\n #DM_annihilation_rate_Sun = ((1.0*param.AU)/(param.EARTHRADIUS*param.km))**2*DM_annihilation_rate_Earth\n DM_annihilation_rate_Sun = float(np.sum(DMSunAnnihilationRate(DMm,DMsig,param)))# [eV]\n ##E\n \n flux = 0.0\n \n if param.neutype == \"neutrino\":\n if osc :\n for flv in range(3):\n #p = DMParameters(flv)\n #if param.name == \"STD\":\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2,ch,DMm/param.GeV)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #else :\n # flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2,ch,DMm/param.GeV)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n else :\n #p = DMParameters(flavor)\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flavor*2,ch,DMm/param.GeV)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n return flux\n elif param.neutype == \"antineutrino\":\n if osc :\n for flv in range(3):\n #p = DMParameters(flv)\n #if param.name == \"STD\":\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2+1,ch,DMm/param.GeV)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #else :\n # flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2+1,ch,DMm/param.GeV)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n else :\n #p = DMParameters(flavor)\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flavor*2+1,ch,DMm/param.GeV)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n return flux\n else :\n print \"Wrong neutrino type.\"\n quit()", "def __deref__(self):\n return _wmbus_swig.wmbus_packet_sink_sptr___deref__(self)", "def bode(tf: Union[control.TransferFunction, control.StateSpace],\n deg: bool=True, dB: bool=True, Hz: bool=True, Plot: bool=True):\n mag, phase, w = control.bode(tf, deg=deg, dB=dB, Hz=Hz, Plot=False)\n if Plot:\n plt.subplot(211)\n plt.semilogx(w, mag)\n plt.grid(which='both')\n plt.ylabel('magnitude (dB)')\n\n plt.subplot(212)\n plt.semilogx(w, phase)\n plt.xlabel('Hz')\n plt.ylabel('phase (deg)')\n plt.grid(which='both')\n return mag, phase, w", "def set_Df_sweep(instrument, f_start, f_stop, unit='MHZ', channel_num=1):\n command1 = ':SENSe%d:FREQuency:STARt %G %s' % (channel_num, f_start, unit)\n command2 = ':SENSe%d:FREQuency:STOP %G %s' % (channel_num, f_stop, unit)\n instrument.write(command1)\n instrument.write(command2)", "def framer_sink_1(*args, **kwargs):\n return 
_digital_swig.framer_sink_1(*args, **kwargs)", "def pc_output_buffers_full(self, *args):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_sptr_pc_output_buffers_full(self, *args)", "def min_output_buffer(self, *args, **kwargs):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_sptr_min_output_buffer(self, *args, **kwargs)", "def _post(self, which_port, msg):\n return _wmbus_swig.wmbus_packet_sink_sptr__post(self, which_port, msg)", "def message_ports_out(self):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_sptr_message_ports_out(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_ofdm_insert_preamble_sptr __init__(self, p) > digital_ofdm_insert_preamble_sptr
def __init__(self, *args): this = _digital_swig.new_digital_ofdm_insert_preamble_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __deref__(self):\n return _wmbus_swig.wmbus_preamble_sptr___deref__(self)", "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def make(self, usleep, debug):\n return _wmbus_swig.wmbus_preamble_sptr_make(self, usleep, debug)", "def __init__(self, *args):\n this = _digital_swig.new_digital_simple_framer_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, ciphertext):\n self.preamble = ciphertext[:30]\n self.ciphertext = ciphertext[30:]", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_ofdm_mapper_bcv_sptr __init__(self, p) > digital_ofdm_mapper_bcv_sptr
def __init__(self, *args): this = _digital_swig.new_digital_ofdm_mapper_bcv_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoMFVec2b()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoMFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n self.parameters = {}", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n _itkImagePython.vectoritkImageCVF44_swiginit(self, _itkImagePython.new_vectoritkImageCVF44(*args))", "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n _itkImagePython.vectoritkImageCVF43_swiginit(self, _itkImagePython.new_vectoritkImageCVF43(*args))", "def __init__(self):\n this = _coin.new_SoSFVec2b()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n _itkImagePython.vectoritkImageCVD44_swiginit(self, _itkImagePython.new_vectoritkImageCVD44(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageCVD43_swiginit(self, _itkImagePython.new_vectoritkImageCVD43(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageCVD22_swiginit(self, _itkImagePython.new_vectoritkImageCVD22(*args))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ofdm_mapper_bcv(gr_complex_vector constellation, unsigned int msgq_limit, unsigned int bits_per_symbol, unsigned int fft_length) > digital_ofdm_mapper_bcv_sptr Takes a stream of bytes in and maps it to a vector of complex constellation points suitable for IFFT input, to be used in an OFDM modulator. Abstract class; must be subclassed with a specific mapping.
def ofdm_mapper_bcv(*args, **kwargs): return _digital_swig.ofdm_mapper_bcv(*args, **kwargs)
[ "def _convert_to_cbf_convention(self, xparm_filename):\n # Create a coordinate frame converter and extract other quantities\n cfc = coordinate_frame_converter(xparm_filename)\n self._detector_origin = cfc.get(\"detector_origin\")\n self._rotation_axis = cfc.get(\"rotation_axis\")\n self._fast_axis = cfc.get(\"detector_fast\")\n self._slow_axis = cfc.get(\"detector_slow\")\n self._wavelength = cfc.get(\"wavelength\")\n self._image_size = cfc.get(\"detector_size_fast_slow\")\n self._pixel_size = cfc.get(\"detector_pixel_size_fast_slow\")\n self._starting_angle = cfc.get(\"starting_angle\")\n self._oscillation_range = cfc.get(\"oscillation_range\")\n self._starting_frame = cfc.get(\"starting_frame\")\n self._data_range = cfc.get(\"data_range\")\n self._divergence = 0.0\n self._sigma_divergence = cfc.get(\"sigma_divergence\")\n sample_vector = cfc.get(\"sample_to_source\")\n self._beam_vector = tuple(matrix.col(sample_vector))\n self._panel_offset = cfc.get(\"panel_offset\")\n self._panel_size = cfc.get(\"panel_size\")\n self._panel_origin = cfc.get(\"panel_origin\")\n self._panel_fast = cfc.get(\"panel_fast\")\n self._panel_slow = cfc.get(\"panel_slow\")", "def _process_bitmapped_descriptor(self,\n func_process_string,\n func_process_codeflag,\n func_process_numeric,\n func_process_numeric_with_new_refval,\n marker_id,\n nbytes_new,\n nbits_offset,\n scale_offset,\n nbits_increment,\n scale_increment,\n refval_factor):\n\n idx_descriptor, bitmapped_descriptor = self.next_bitmapped_descriptor()\n self.bitmap_links[len(self.decoded_descriptors)] = idx_descriptor\n\n # difference statistical values marker has different refval and nbits values\n if marker_id == 225255:\n bitmapped_descriptor = MarkerDescriptor.from_element_descriptor(\n bitmapped_descriptor,\n marker_id,\n refval=-2 ** bitmapped_descriptor.nbits,\n nbits=bitmapped_descriptor.nbits + 1,\n )\n else:\n bitmapped_descriptor = MarkerDescriptor.from_element_descriptor(\n bitmapped_descriptor,\n marker_id,\n )\n\n if bitmapped_descriptor.unit == 'CCITT IA5':\n nbytes = nbytes_new if nbytes_new else bitmapped_descriptor.nbits // 8\n func_process_string(bitmapped_descriptor, nbytes)\n\n elif bitmapped_descriptor.unit in ('FLAG TABLE', 'CODE TABLE'):\n func_process_codeflag(bitmapped_descriptor, bitmapped_descriptor.nbits)\n\n else: # numeric\n nbits = bitmapped_descriptor.nbits + nbits_offset + nbits_increment\n scale = bitmapped_descriptor.scale + scale_offset + scale_increment\n scale_powered = 1.0 * 10 ** scale\n\n if bitmapped_descriptor.id not in self.refval_new:\n refval = bitmapped_descriptor.refval * refval_factor\n func_process_numeric(bitmapped_descriptor, nbits, scale_powered, refval)\n else:\n func_process_numeric_with_new_refval(\n bitmapped_descriptor, nbits, scale_powered, refval_factor)", "def __init__(self, fft_length, cp_length, occupied_tones, snr, ks, carrier_map_bin, nc_filter, logging=False):\n\n\tgr.hier_block2.__init__(self, \"ofdm_receiver\",\n\t\t\t\tgr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature\n gr.io_signature2(2, 2, gr.sizeof_gr_complex*occupied_tones, gr.sizeof_char)) # Output signature\n\n bw = (float(occupied_tones) / float(fft_length)) / 2.0\n tb = bw*0.04\n print \"ofdm_receiver:__init__:occupied_tones %s fft_length %d \" % (occupied_tones, fft_length)\n \n chan_coeffs = filter.firdes.low_pass (1.0, # gain\n 1.0, # sampling rate\n bw+tb, # midpoint of trans. band\n tb, # width of trans. 
band\n filter.firdes.WIN_HAMMING) # filter type\n \n self.chan_filt = filter.fft_filter_ccc(1, chan_coeffs)\n\n # linklab, get ofdm parameters\n self._fft_length = fft_length\n self._occupied_tones = occupied_tones\n self._cp_length = cp_length\n self._nc_filter = nc_filter\n self._carrier_map_bin = carrier_map_bin\n \n win = [1 for i in range(fft_length)]\n \n # linklab, initialization function\n self.initialize(ks, self._carrier_map_bin)\n \n\n zeros_on_left = int(math.ceil((fft_length - occupied_tones)/2.0))\n ks0 = fft_length*[0,]\n ks0[zeros_on_left : zeros_on_left + occupied_tones] = ks[0]\n\n ks0 = np_fft.ifftshift(ks0)\n ks0time = np_fft.ifft(ks0)\n # ADD SCALING FACTOR\n ks0time = ks0time.tolist()\n\n SYNC = \"pn\"\n if SYNC == \"ml\":\n nco_sensitivity = -1.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_ml(fft_length,\n cp_length,\n snr,\n ks0time,\n logging)\n elif SYNC == \"pn\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pn(fft_length,\n cp_length,\n logging)\n elif SYNC == \"pnac\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pnac(fft_length,\n cp_length,\n ks0time,\n logging)\n # for testing only; do not user over the air\n # remove filter and filter delay for this\n elif SYNC == \"fixed\":\n self.chan_filt = gr.multiply_const_cc(1.0)\n nsymbols = 18 # enter the number of symbols per packet\n freq_offset = 0.0 # if you use a frequency offset, enter it here\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_fixed(fft_length,\n cp_length,\n nsymbols,\n freq_offset,\n logging)\n\n # Set up blocks\n\n # Create a delay line, linklab\n self.delay = blocks.delay(gr.sizeof_gr_complex, fft_length)\n\n self.nco = analog.frequency_modulator_fc(nco_sensitivity) # generate a signal proportional to frequency error of sync block\n self.sigmix = blocks.multiply_cc()\n self.sampler = gr_papyrus.ofdm_sampler(fft_length, fft_length+cp_length)\n self.fft_demod = gr_fft.fft_vcc(fft_length, True, win, True)\n self.ofdm_frame_acq = gr_papyrus.ofdm_frame_acquisition(occupied_tones,\n fft_length,\n cp_length, ks[0])\n # linklab, check current mode: non-contiguous OFDM or not\n if self._nc_filter:\n print '\\nMulti-band Filter Turned ON!'\n # linklab, non-contiguous filter\n self.ncofdm_filt = ncofdm_filt(self._fft_length, self._occupied_tones, self._carrier_map_bin)\n self.connect(self, self.chan_filt, self.ncofdm_filt)\n self.connect(self.ncofdm_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. offset output to derotate input signal\n self.connect(self.ncofdm_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n else :\n print '\\nMulti-band Filter Turned OFF!'\n self.connect(self, self.chan_filt)\n self.connect(self.chan_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. 
offset output to derotate input signal\n self.connect(self.chan_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n\n self.connect(self.sigmix, (self.sampler,0)) # sample off timing signal detected in sync alg\n self.connect((self.ofdm_sync,1), (self.sampler,1)) # timing signal to sample at\n\n self.connect((self.sampler,0), self.fft_demod) # send derotated sampled signal to FFT\n self.connect(self.fft_demod, (self.ofdm_frame_acq,0)) # find frame start and equalize signal\n self.connect((self.sampler,1), (self.ofdm_frame_acq,1)) # send timing signal to signal frame start\n self.connect((self.ofdm_frame_acq,0), (self,0)) # finished with fine/coarse freq correction,\n self.connect((self.ofdm_frame_acq,1), (self,1)) # frame and symbol timing, and equalization\n\n if logging:\n self.connect(self.chan_filt, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-chan_filt_c.dat\"))\n self.connect(self.fft_demod, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-fft_out_c.dat\"))\n self.connect(self.ofdm_frame_acq,\n gr.file_sink(gr.sizeof_gr_complex*occupied_tones, \"ofdm_receiver-frame_acq_c.dat\"))\n self.connect((self.ofdm_frame_acq,1), gr.file_sink(1, \"ofdm_receiver-found_corr_b.dat\"))\n self.connect(self.sampler, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-sampler_c.dat\"))\n self.connect(self.sigmix, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-sigmix_c.dat\"))\n self.connect(self.nco, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-nco_c.dat\"))", "def broadcast_and_convert_baselines(u, v, chan_freq):\n\n nchan = len(chan_freq)\n\n # broadcast to the same shape as the data\n # stub to broadcast u, v to all channels\n broadcast = np.ones((nchan, 1))\n uu = u * broadcast\n vv = v * broadcast\n\n # calculate wavelengths in meters\n wavelengths = c.value / chan_freq[:, np.newaxis] # m\n\n # calculate baselines in klambda\n uu = 1e-3 * uu / wavelengths # [klambda]\n vv = 1e-3 * vv / wavelengths # [klambda]\n\n return (uu, vv)", "def to_basic_block(self):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_sptr_to_basic_block(self)", "def vnl_vector_vcl_complexF_read(*args):\n return _vnl_vectorPython.vnl_vector_vcl_complexF_read(*args)", "def get_mfcc_coefficients_from_filter_banks(filter_banks, cep_lifter=DEFAULT_CEP_LIFTER):\n mfcc = dct(filter_banks, axis=1, norm='ortho')\n if cep_lifter:\n (n_frames, n_coeff) = mfcc.shape\n n = np.arange(n_coeff)\n lift = 1 + (cep_lifter / 2) * np.sin(np.pi * n / cep_lifter)\n mfcc *= lift\n return mfcc", "def fid_cb(msg):\n global tf_broadcaster\n global tf_listener\n global FIDUCIAL_NAMES\n global pose_pub\n\n # if driving, don't interupt\n if get_state() not in [States.LOST, States.TELEOP]: #removed docked state\n return\n # if fiducials found, take the first\n if len(msg.transforms) == 0:\n return\n transform = msg.transforms[0]\n\n # swap y and z axes to fit map frame of reference\n pos = transform.transform.translation\n rot = transform.transform.rotation\n pos.x, pos.y, pos.z = pos.x, pos.z, pos.y\n rot.x, rot.y, rot.z = rot.x, rot.z, rot.y\n transform.transform.translation = pos\n transform.transform.rotation = rot\n\n # invert the transform\n homo_mat = PoseConv().to_homo_mat(transform.transform)\n inverted_tf = PoseConv().to_tf_msg(np.linalg.inv(homo_mat))\n\n # send a transform from camera to fiducial\n m = TransformStamped()\n m.transform = inverted_tf\n m.header.frame_id = FIDUCIAL_NAMES.get(str(transform.fiducial_id))\n m.header.stamp = rospy.Time.now()\n m.child_frame_id = \"fiducial_camera\"\n 
tf_broadcaser.sendTransform(m)\n\n # calculate transform from map to base\n try:\n latest_time = tf_listener.getLatestCommonTime(\"/map\",\"/fiducial_base\")\n base_to_map = tf_listener.lookupTransform(\"/map\",\"/fiducial_base\",latest_time)\n except tf2_ros.TransformException:\n rospy.logwarn(\"failed to transform, is fiducial {} mapped?\".format(transform.fiducial_id))\n return\n\n # convert transform to PoseWithCovarianceStamped\n robot_pose = PoseConv().to_pose_msg(base_to_map)\n pose_w_cov_stamped = PoseWithCovarianceStamped()\n pose_w_cov_stamped.pose.pose = robot_pose\n pose_w_cov_stamped.header.stamp = rospy.Time.now()\n pose_w_cov_stamped.header.frame_id = \"map\"\n rospy.logdebug(\"Sending fiducial pose:\\n{}\".format(robot_pose))\n\n # publish to /initialpose\n pose_pub.publish(pose_w_cov_stamped)\n\n # update state\n try:\n prev_state = get_state()\n if prev_state == States.LOST:\n change_state(States.WAITING)\n rospy.loginfo(\"Fiducial {} seen, no longer lost\".format(transform.fiducial_id))\n except rospy.ServiceException:\n rospy.logerr(\"Could not access state service\")", "def vamsi_OFDMCP_ff_make(*args, **kwargs):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_make(*args, **kwargs)", "def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoMFVec2b_copyFrom(self, field)", "def translate_into_fbz(df):\n # First, find all the vectors defining the boundary\n coords = df[['kx [1/A]', 'ky [1/A]', 'kz [1/A]']]\n b1, b2, b3 = c.b1, c.b2, c.b3\n b1pos = 0.5 * b1[:, np.newaxis]\n b2pos = 0.5 * b2[:, np.newaxis]\n b3pos = 0.5 * b3[:, np.newaxis]\n lpos = 0.5 * (b1 + b2 + b3)[:, np.newaxis]\n b1neg = -1 * b1pos\n b2neg = -1 * b2pos\n b3neg = -1 * b3pos\n lneg = -1 * lpos\n xpos = -0.5 * (b1 + b3)[:, np.newaxis]\n ypos = 0.5 * (b2 + b3)[:, np.newaxis]\n zpos = 0.5 * (b1 + b2)[:, np.newaxis]\n xneg = -1 * xpos\n yneg = -1 * ypos\n zneg = -1 * zpos\n\n # Place them into octants to avoid problems when finding points\n # (naming is based on positive or negative for coordinate so octpmm means x+ y- z-. p=plus, m=minus)\n vecs_ppp = np.concatenate((b2pos, xpos, ypos, zpos), axis=1)[:, :, np.newaxis]\n vecs_ppm = np.concatenate((b1neg, xpos, ypos, zneg), axis=1)[:, :, np.newaxis]\n vecs_pmm = np.concatenate((lneg, xpos, yneg, zneg), axis=1)[:, :, np.newaxis]\n vecs_mmm = np.concatenate((b2neg, xneg, yneg, zneg), axis=1)[:, :, np.newaxis]\n vecs_mmp = np.concatenate((b1pos, xneg, yneg, zpos), axis=1)[:, :, np.newaxis]\n vecs_mpp = np.concatenate((lpos, xneg, ypos, zpos), axis=1)[:, :, np.newaxis]\n vecs_mpm = np.concatenate((b3pos, xneg, ypos, zneg), axis=1)[:, :, np.newaxis]\n vecs_pmp = np.concatenate((b3neg, xpos, yneg, zpos), axis=1)[:, :, np.newaxis]\n # Construct matrix which is 3 x 4 x 8 where we have 3 Cartesian coordinates, 4 vectors per octant, and 8 octants\n allvecs = np.concatenate((vecs_ppp, vecs_ppm, vecs_pmm, vecs_mmm, vecs_mmp, vecs_mpp, vecs_mpm, vecs_pmp), axis=2)\n\n # Since the number of points in each octant is not equal, can't create array of similar shape. 
Instead the 'octant'\n # array below is used as a boolean map where 1 (true) indicates positive, and 0 (false) indicates negative\n octants = np.array([[1, 1, 1],\n [1, 1, 0],\n [1, 0, 0],\n [0, 0, 0],\n [0, 0, 1],\n [0, 1, 1],\n [0, 1, 0],\n [1, 0, 1]])\n\n fbzcoords = coords.copy(deep=True).values\n exitvector = np.zeros((8, 1))\n iteration = 0\n while not np.all(exitvector): # don't exit until all octants have points inside\n exitvector = np.zeros((8, 1))\n for i in range(8):\n oct_vecs = allvecs[:, :, i]\n whichoct = octants[i, :]\n if whichoct[0]:\n xbool = fbzcoords[:, 0] >= 0\n else:\n xbool = fbzcoords[:, 0] <= 0\n if whichoct[1]:\n ybool = fbzcoords[:, 1] >= 0\n else:\n ybool = fbzcoords[:, 1] <= 0\n if whichoct[2]:\n zbool = fbzcoords[:, 2] >= 0\n else:\n zbool = fbzcoords[:, 2] <= 0\n octindex = np.logical_and(np.logical_and(xbool, ybool), zbool)\n octcoords = fbzcoords[octindex, :]\n allplanes = 0\n for j in range(oct_vecs.shape[1]):\n diffvec = octcoords[:, :] - np.tile(oct_vecs[:, j], (octcoords.shape[0], 1))\n dist2plane = np.dot(diffvec, oct_vecs[:, j]) / np.linalg.norm(oct_vecs[:, j])\n outside = dist2plane[:] > 0\n if np.any(outside):\n octcoords[outside, :] = octcoords[outside, :] - \\\n (2 * np.tile(oct_vecs[:, j], (np.count_nonzero(outside), 1)))\n # Times 2 because the vectors that define FBZ are half of the full recip latt vectors\n # print('number outside this plane is %d' % np.count_nonzero(outside))\n else:\n allplanes += 1\n if allplanes == 4:\n exitvector[i] = 1\n fbzcoords[octindex, :] = octcoords\n iteration += 1\n print('Finished %d iterations of bringing points into FBZ' % iteration)\n uniqkx = np.sort(np.unique(fbzcoords[:, 0]))\n deltakx = np.diff(uniqkx)\n smalldkx = np.concatenate((deltakx < (np.median(deltakx) * 1E-2), [False]))\n if np.any(smalldkx):\n for kxi in np.nditer(np.nonzero(smalldkx)):\n kx = uniqkx[kxi]\n fbzcoords[fbzcoords[:, 0] == kx, 0] = uniqkx[kxi+1]\n print('Shifted points that were slightly misaligned in kx.\\n')\n df[['kx [1/A]', 'ky [1/A]', 'kz [1/A]']] = fbzcoords\n print('Done bringing points into FBZ!')\n\n return df", "def _convertToVcf(self, beagleFile, chrom, origVcf):\r\n \r\n vcfFile = VcfFile.VcfFile(origVcf.pool, origVcf.pool.outputDir + chrom + \"_\" + os.path.basename(origVcf.fileName), chrom=chrom)\r\n cmd = (\"java -jar -Xmx30g \" + Program.config.getPath(\"gatk\") + \r\n \" -R \" + Program.config.getPath(\"refGenome\") + \r\n \" -T BeagleOutputToVCF\" +\r\n \" -V \" + origVcf.fileName +\r\n \" -beagleR2:BEAGLE \" + beagleFile.rTwoFile +\r\n \" -beaglePhased:BEAGLE \" + beagleFile.getFile(\"phased\") +\r\n \" -beagleProbs:BEAGLE \" + beagleFile.getFile(\"gprobs\") +\r\n \" -o \" + vcfFile.fileName +\r\n \" --unsafe LENIENT_VCF_PROCESSING\")\r\n self.execute(cmd, \"gatk\", vcfFile)\r\n beagleFile.pool.vcf[chrom] = vcfFile\r\n vcfFile.phased = True\r\n return vcfFile", "def to_basic_block(self):\n return _wavelet_swig.wvps_ff_sptr_to_basic_block(self)", "def golomb_decode(bytes, bits):\n bit_seq = unpack_bits(bytes)\n\n while True: # or until bit_seq throws a StopIteration\n qq = 0\n while next(bit_seq) == 1:\n qq += 1\n\n rr = 0\n for ii in range(bits):\n rr = rr << 1 | next(bit_seq)\n\n yield qq << bits | rr", "def read(*args):\n return _vnl_vectorPython.vnl_vector_vcl_complexF_read(*args)", "def test_record_to_bf(self):\n config = {\n \"blocking-features\": [1, 2],\n \"Lambda\": 38,\n \"bf-len\": 2000,\n \"num-hash-funcs\": 2,\n \"K\": 30,\n \"random_state\": 0,\n \"input-clks\": False\n }\n lambdafold = 
PPRLIndexLambdaFold(config)\n record = [1, 'Xu', 'Li']\n bloom_filter = lambdafold.__record_to_bf__(record, config['blocking-features'])\n assert sum(bloom_filter) == 6", "def map_bb(*args, **kwargs):\n return _digital_swig.map_bb(*args, **kwargs)", "def MBfilter_CF(st, frequencies,\n CN_HP, CN_LP,\n filter_norm, filter_npoles=2,\n var_w=True,\n CF_type='envelope', CF_decay_win=1.0,\n hos_order=4,\n rosenberger_decay_win=1.0,\n rosenberger_filter_power=1.0,\n rosenberger_filter_threshold=None,\n rosenberger_normalize_each=False,\n wave_type='P',\n hos_sigma=None,\n rec_memory=None,\n full_output=False):\n delta = st[0].stats.delta\n Tn = 1. / frequencies\n Nb = len(frequencies)\n CF_decay_nsmps = CF_decay_win / delta\n rosenberger_decay_nsmps = rosenberger_decay_win / delta\n\n if hos_sigma is None:\n hos_sigma = -1.\n\n # Single component analysis\n if len(st) < 2:\n # Use just the first trace in stream\n tr = st[0]\n y = tr.data\n\n YN1 = np.zeros((Nb, len(y)), float)\n CF1 = np.zeros((Nb, len(y)), float)\n\n for n in range(Nb):\n if rec_memory is not None:\n rmem = rec_memory[(tr.id, wave_type)][n]\n else:\n rmem = None\n\n YN1[n] = recursive_filter(y, CN_HP[n], CN_LP[n],\n filter_npoles, rmem)\n YN1[n] /= filter_norm[n]\n\n if var_w and CF_type == 'envelope':\n CF_decay_nsmps_mb = (Tn[n]/delta) * CF_decay_nsmps\n else:\n CF_decay_nsmps_mb = CF_decay_nsmps\n\n # Define the decay constant\n CF_decay_constant = 1 / CF_decay_nsmps_mb\n\n # Calculates CF for each MBF signal\n if CF_type == 'envelope':\n CF1[n] = recursive_rms(YN1[n], CF_decay_constant, rmem)\n\n if CF_type == 'kurtosis':\n CF1[n] = recursive_hos(YN1[n], CF_decay_constant,\n hos_order, hos_sigma, rmem)\n\n # 2 (horizontal) components analysis\n elif len(st) == 2:\n # Assumes that 2 horizontal components are used\n tr1 = st.select(channel='*[E,W,1]')[0]\n tr2 = st.select(channel='*[N,S,2]')[0]\n\n y1 = tr1.data\n y2 = tr2.data\n\n # Initializing arrays\n YN_E = np.zeros((Nb, len(y1)), float)\n YN_N = np.zeros((Nb, len(y1)), float)\n YN1 = np.zeros((Nb, len(y1)), float)\n CF1 = np.zeros((Nb, len(y1)), float)\n\n for n in range(Nb):\n if rec_memory is not None:\n rmem1 = rec_memory[(tr1.id, wave_type)][n]\n rmem2 = rec_memory[(tr2.id, wave_type)][n]\n else:\n rmem1 = None\n rmem2 = None\n\n YN_E[n] = recursive_filter(y1, CN_HP[n], CN_LP[n],\n filter_npoles, rmem1)\n YN_E[n] /= filter_norm[n]\n YN_N[n] = recursive_filter(y2, CN_HP[n], CN_LP[n],\n filter_npoles, rmem2)\n YN_N[n] /= filter_norm[n]\n # Combining horizontal components\n YN1[n] = np.sqrt(np.power(YN_E[n], 2) + np.power(YN_N[n], 2))\n\n if var_w and CF_type == 'envelope':\n CF_decay_nsmps_mb = (Tn[n] / delta) * CF_decay_nsmps\n else:\n CF_decay_nsmps_mb = CF_decay_nsmps\n\n # Define the decay constant\n CF_decay_constant = 1 / CF_decay_nsmps_mb\n\n # Calculates CF for each MBF signal\n if CF_type == 'envelope':\n CF1[n] = recursive_rms(YN1[n], CF_decay_constant, rmem1)\n\n if CF_type == 'kurtosis':\n CF1[n] = recursive_hos(YN1[n], CF_decay_constant,\n hos_order, hos_sigma, rmem1)\n\n # 3 components analysis, includes polarization P and S decomposition\n else:\n # Vertical\n tr1 = st.select(channel='*[Z,U,D]')[0]\n # Horizontals\n tr2 = st.select(channel='*[E,W,1]')[0]\n tr3 = st.select(channel='*[N,S,2]')[0]\n\n y1 = tr1.data\n y2 = tr2.data\n y3 = tr3.data\n\n # Initializing arrays\n YN1 = np.zeros((Nb, len(y1)), float)\n YN2 = np.zeros((Nb, len(y1)), float)\n YN3 = np.zeros((Nb, len(y1)), float)\n CF1 = np.zeros((Nb, len(y1)), float)\n filteredDataP = np.zeros((Nb, 
len(y1)), float)\n filteredDataS = np.zeros((Nb, len(y1)), float)\n if full_output:\n CF2 = np.zeros((Nb, len(y1)), float)\n\n for n in range(Nb):\n if rec_memory is not None:\n rmem1 = rec_memory[(tr1.id, wave_type)][n]\n rmem2 = rec_memory[(tr2.id, wave_type)][n]\n rmem3 = rec_memory[(tr3.id, wave_type)][n]\n else:\n rmem1 = None\n rmem2 = None\n rmem3 = None\n\n YN1[n] = recursive_filter(y1, CN_HP[n], CN_LP[n],\n filter_npoles, rmem1)\n YN1[n] /= filter_norm[n]\n YN2[n] = recursive_filter(y2, CN_HP[n], CN_LP[n],\n filter_npoles, rmem2)\n YN2[n] /= filter_norm[n]\n YN3[n] = recursive_filter(y3, CN_HP[n], CN_LP[n],\n filter_npoles, rmem3)\n YN3[n] /= filter_norm[n]\n\n # Define the decay constant\n rosenberger_decay_constant = 1 / rosenberger_decay_nsmps\n\n # print('Rosenberger in process {}/{}\\r'.format(n+1, Nb),\n # sys.stdout.flush())\n\n # third value returned by rosenberger() is the polarizaion filter,\n # which we do not use here\n filt_dataP, filt_dataS, _ =\\\n rosenberger(YN2[n], YN3[n], YN1[n],\n rosenberger_decay_constant,\n pol_filter_power=rosenberger_filter_power,\n pol_filter_threshold=rosenberger_filter_threshold,\n normalize_each=rosenberger_normalize_each)\n\n # Use vertical component for P data\n filteredDataP[n] = filt_dataP[0, :]\n # Use vector composition of the two horizontal component for S data\n filteredDataS[n] = np.sqrt(np.power(filt_dataS[1, :], 2) +\n np.power(filt_dataS[2, :], 2))\n\n if var_w and CF_type == 'envelope':\n CF_decay_nsmps_mb = (Tn[n]/delta) * CF_decay_nsmps\n else:\n CF_decay_nsmps_mb = CF_decay_nsmps\n\n # Define the decay constant\n CF_decay_constant = 1 / CF_decay_nsmps_mb\n\n if CF_type == 'envelope':\n if wave_type == 'P':\n CF1[n] = recursive_rms(filteredDataP[n],\n CF_decay_constant, rmem1)\n if full_output:\n CF2[n] = recursive_rms(filteredDataS[n],\n CF_decay_constant, rmem2)\n else:\n CF1[n] = recursive_rms(filteredDataS[n],\n CF_decay_constant, rmem1)\n if full_output:\n CF2[n] = recursive_rms(filteredDataP[n],\n CF_decay_constant, rmem2)\n\n if CF_type == 'kurtosis':\n if wave_type == 'P':\n CF1[n] = recursive_hos(filteredDataP[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem1)\n if full_output:\n CF2[n] = recursive_hos(filteredDataS[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem2)\n else:\n CF1[n] = recursive_hos(filteredDataS[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem1)\n if full_output:\n CF2[n] = recursive_hos(filteredDataP[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem2)\n\n if full_output:\n return YN1, CF1, CF2, Tn, Nb, filteredDataP, filteredDataS\n else:\n return YN1, CF1, Tn, Nb", "def get_mfccs(sig):\n# loginfo('[wav2ivec.get_mfccs] Extracting MFCC features ...')\n fbank_mx = features.mel_fbank_mx(winlen_nfft=WINDOWSIZE / SOURCERATE,\n fs=fs,\n NUMCHANS=NUMCHANS,\n LOFREQ=LOFREQ,\n HIFREQ=HIFREQ)\n fea = features.mfcc_htk(sig,\n window=WINDOWSIZE / SOURCERATE,\n noverlap=(WINDOWSIZE - TARGETRATE) / SOURCERATE,\n fbank_mx=fbank_mx,\n _0='first',\n NUMCEPS=NUMCEPS,\n RAWENERGY=RAWENERGY,\n PREEMCOEF=PREEMCOEF,\n CEPLIFTER=CEPLIFTER,\n ZMEANSOURCE=ZMEANSOURCE,\n ENORMALISE=ENORMALISE,\n ESCALE=0.1,\n SILFLOOR=50.0,\n USEHAMMING=True)\n\n# loginfo('[wav2ivec.get_mfccs] Adding derivatives ...')\n fea = features.add_deriv(fea, (deltawindow, accwindow))\n\n# loginfo('[wav2ivec.get_mfccs] Reshaping to SFeaCat conventions ...')\n return fea.reshape(fea.shape[0], 3, -1).transpose((0, 2, 1)).reshape(fea.shape[0], -1)", "def data_block(self, *args):\n return 
_vnl_vectorPython.vnl_vector_vcl_complexF_data_block(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_ofdm_sampler_sptr __init__(self, p) -> digital_ofdm_sampler_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_ofdm_sampler_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_probe_density_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_framer_sink_1_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self.array = None\n self.target = None", "def __init__(self, *args):\n this = _digital_swig.new_digital_simple_framer_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, mem, inp, outp):\n self.pc = 0\n self.mem = mem\n self.inp = inp\n self.outp = outp", "def __init__(self, rng, func):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n SliceSamplerBase.__init__(self, rng, func)", "def __init__(self, *args, **kwargs):\r\n super(AudioDataLoader, self).__init__(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_f_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, annotations):\n super(_SimulationStepStream, self).__init__()\n self._annotations = annotations", "def __init__(self, *args):\n _itkOptimizerParametersPython.itkOptimizerParametersD_swiginit(self, _itkOptimizerParametersPython.new_itkOptimizerParametersD(*args))", "def __init__(self, *args):\n _itkOptimizerParametersPython.itkOptimizerParametersHelperD_swiginit(self, _itkOptimizerParametersPython.new_itkOptimizerParametersHelperD(*args))", "def __init__(self, spi_rack, module, frequency=100e6):\n #def __init__(self, module, frequency=100e6):\n self.spi_rack = spi_rack\n self.module = module\n\n self.rf_frequency = frequency\n self.stepsize = 1e6\n self.ref_frequency = 10e6\n self.use_external = 0\n self.outputPower = None\n\n # These are the 6 registers present in the ADF4351\n self.registers = 6*[0]\n # In REG3: set ABP=1 (3 ns, INT-N) and CHARGE CANCEL=1\n self.registers[3] = (1<<22) | (1<<21) | 3\n # In REG5: set LD PIN MODE to 1 -> digital lock detect\n self.registers[5] = (1<<22) | (3<<19) | 5\n\n self.set_frequency(frequency)", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ofdm_sampler(unsigned int fft_length, unsigned int symbol_length, unsigned int timeout = 1000) -> digital_ofdm_sampler_sptr does the rest of the OFDM stuff
def ofdm_sampler(*args, **kwargs):
    return _digital_swig.ofdm_sampler(*args, **kwargs)
[ "def __init__(self, fft_length, cp_length, occupied_tones, snr, ks, carrier_map_bin, nc_filter, logging=False):\n\n\tgr.hier_block2.__init__(self, \"ofdm_receiver\",\n\t\t\t\tgr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature\n gr.io_signature2(2, 2, gr.sizeof_gr_complex*occupied_tones, gr.sizeof_char)) # Output signature\n\n bw = (float(occupied_tones) / float(fft_length)) / 2.0\n tb = bw*0.04\n print \"ofdm_receiver:__init__:occupied_tones %s fft_length %d \" % (occupied_tones, fft_length)\n \n chan_coeffs = filter.firdes.low_pass (1.0, # gain\n 1.0, # sampling rate\n bw+tb, # midpoint of trans. band\n tb, # width of trans. band\n filter.firdes.WIN_HAMMING) # filter type\n \n self.chan_filt = filter.fft_filter_ccc(1, chan_coeffs)\n\n # linklab, get ofdm parameters\n self._fft_length = fft_length\n self._occupied_tones = occupied_tones\n self._cp_length = cp_length\n self._nc_filter = nc_filter\n self._carrier_map_bin = carrier_map_bin\n \n win = [1 for i in range(fft_length)]\n \n # linklab, initialization function\n self.initialize(ks, self._carrier_map_bin)\n \n\n zeros_on_left = int(math.ceil((fft_length - occupied_tones)/2.0))\n ks0 = fft_length*[0,]\n ks0[zeros_on_left : zeros_on_left + occupied_tones] = ks[0]\n\n ks0 = np_fft.ifftshift(ks0)\n ks0time = np_fft.ifft(ks0)\n # ADD SCALING FACTOR\n ks0time = ks0time.tolist()\n\n SYNC = \"pn\"\n if SYNC == \"ml\":\n nco_sensitivity = -1.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_ml(fft_length,\n cp_length,\n snr,\n ks0time,\n logging)\n elif SYNC == \"pn\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pn(fft_length,\n cp_length,\n logging)\n elif SYNC == \"pnac\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pnac(fft_length,\n cp_length,\n ks0time,\n logging)\n # for testing only; do not user over the air\n # remove filter and filter delay for this\n elif SYNC == \"fixed\":\n self.chan_filt = gr.multiply_const_cc(1.0)\n nsymbols = 18 # enter the number of symbols per packet\n freq_offset = 0.0 # if you use a frequency offset, enter it here\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_fixed(fft_length,\n cp_length,\n nsymbols,\n freq_offset,\n logging)\n\n # Set up blocks\n\n # Create a delay line, linklab\n self.delay = blocks.delay(gr.sizeof_gr_complex, fft_length)\n\n self.nco = analog.frequency_modulator_fc(nco_sensitivity) # generate a signal proportional to frequency error of sync block\n self.sigmix = blocks.multiply_cc()\n self.sampler = gr_papyrus.ofdm_sampler(fft_length, fft_length+cp_length)\n self.fft_demod = gr_fft.fft_vcc(fft_length, True, win, True)\n self.ofdm_frame_acq = gr_papyrus.ofdm_frame_acquisition(occupied_tones,\n fft_length,\n cp_length, ks[0])\n # linklab, check current mode: non-contiguous OFDM or not\n if self._nc_filter:\n print '\\nMulti-band Filter Turned ON!'\n # linklab, non-contiguous filter\n self.ncofdm_filt = ncofdm_filt(self._fft_length, self._occupied_tones, self._carrier_map_bin)\n self.connect(self, self.chan_filt, self.ncofdm_filt)\n self.connect(self.ncofdm_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. 
offset output to derotate input signal\n self.connect(self.ncofdm_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n else :\n print '\\nMulti-band Filter Turned OFF!'\n self.connect(self, self.chan_filt)\n self.connect(self.chan_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. offset output to derotate input signal\n self.connect(self.chan_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n\n self.connect(self.sigmix, (self.sampler,0)) # sample off timing signal detected in sync alg\n self.connect((self.ofdm_sync,1), (self.sampler,1)) # timing signal to sample at\n\n self.connect((self.sampler,0), self.fft_demod) # send derotated sampled signal to FFT\n self.connect(self.fft_demod, (self.ofdm_frame_acq,0)) # find frame start and equalize signal\n self.connect((self.sampler,1), (self.ofdm_frame_acq,1)) # send timing signal to signal frame start\n self.connect((self.ofdm_frame_acq,0), (self,0)) # finished with fine/coarse freq correction,\n self.connect((self.ofdm_frame_acq,1), (self,1)) # frame and symbol timing, and equalization\n\n if logging:\n self.connect(self.chan_filt, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-chan_filt_c.dat\"))\n self.connect(self.fft_demod, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-fft_out_c.dat\"))\n self.connect(self.ofdm_frame_acq,\n gr.file_sink(gr.sizeof_gr_complex*occupied_tones, \"ofdm_receiver-frame_acq_c.dat\"))\n self.connect((self.ofdm_frame_acq,1), gr.file_sink(1, \"ofdm_receiver-found_corr_b.dat\"))\n self.connect(self.sampler, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-sampler_c.dat\"))\n self.connect(self.sigmix, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-sigmix_c.dat\"))\n self.connect(self.nco, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-nco_c.dat\"))", "def __init__(self, fft, dB = None, time_domain = None):\n\n self.fft = fft\n self.dB = dB\n self.time_domain = time_domain", "def test_4d_freq_stream():\n dic, data = ng.pipe.read_lowmem(NMRPIPE_4D_FREQ_STREAM)\n assert data.shape == (2, 3, 4, 5)\n assert data.dtype == 'float32'\n assert data[0, 0, 0, 0] == 1.\n assert data[0, 0, 0, 1] == 1.\n assert data[0, 0, 1, 0] == 1.\n assert data[0, 1, 0, 0] == 1.\n assert data[1, 0, 0, 0] == 2.\n check_ppm_limits(dic, data, 0, [180.00, 80.00])\n check_ppm_limits(dic, data, 1, [186.67, 53.33])\n check_ppm_limits(dic, data, 2, [179.00, 59.00])\n check_ppm_limits(dic, data, 3, [44.70, -35.30])\n check_simple_roundtrip(dic, data, lowmem=True)", "def ofdm_modulate(self, num_data_symb, freq_bin_data):\n min_pow = 1e-30\n time_ofdm_symbols = zeros((self.num_ant, num_data_symb * self.OFDMsymb_len), dtype=complex)\n for symb in range(num_data_symb):\n freq_data_start = symb * self.num_data_bins\n freq_data_end = freq_data_start + self.num_data_bins\n\n time_symb_start = symb * self.OFDMsymb_len\n time_symb_end = time_symb_start + self.OFDMsymb_len\n\n P = 0\n for ant in range(self.num_ant):\n\n ofdm_symb = zeros(self.NFFT, dtype=complex)\n ofdm_symb[self.used_data_bins] = freq_bin_data[ant, freq_data_start:freq_data_end]\n # plt.stem(array(range(-int(self.NFFT/2), int(self.NFFT/2))), abs(ofdm_symb))\n # plt.show()\n data_ifft = ifft(ofdm_symb, self.NFFT)\n cyclic_prefix = data_ifft[-self.CP:]\n data_time = concatenate((cyclic_prefix, data_ifft)) # add CP\n\n sig_energy = abs(dot(data_time, conj(data_time).T))\n # power scaling to normalize to 1\n if sig_energy > min_pow and ant == 0:\n scale_factor = 
sqrt(len(data_time) / sig_energy)\n else:\n scale_factor = 1\n data_time *= scale_factor\n P += var(data_time)\n time_ofdm_symbols[ant, time_symb_start: time_symb_end] = data_time\n\n for ant in range(self.num_ant):\n time_ofdm_symbols[ant, time_symb_start: time_symb_end] *= (1 / sqrt(P))\n\n return time_ofdm_symbols", "def gen_samples(f, duration, fs=44100):\n\n samples = (np.sin(2*np.pi*np.arange(fs*duration)*f/fs)).astype(np.float32)\n\n return samples", "def singleFreqLUT(f, iq, sampleRate=540e6, resolution=1e4, phase=0, amplitude=2**15-1):\r\n size = int(sampleRate/resolution)\r\n data = []\r\n for i in range(0, size):\r\n t = i/sampleRate\r\n if iq == 'I':\r\n data.append(int(amplitude*math.cos(2*math.pi*f*t+phase)))\r\n else:\r\n data.append(int(-amplitude*math.sin(2*math.pi*f*t+phase)))\r\n\r\n return data", "def __init__(self, file1, file2,\n df=0.1*u.kHz,\n dt=0.1*u.s,\n window=None,\n shift=0):\n\n self.file1=file1\n self.file2=file2\n self.df=df\n self.dt=dt\n \n self.s1=sdrdata(self.file1)\n self.s2=sdrdata(self.file2)\n\n if self.s1.rate != self.s2.rate:\n raise ValueError('Sampling rate for Rx2 (%f Hz) does not match rate for Rx1 (%f Hz)' % (self.s1.rate.value,self.s2.rate.value))\n if self.s1.center_freq != self.s2.center_freq:\n raise ValueError('Frequency for Rx2 (%f MHz) does not match rate for Rx1 (%f MHz)' % (self.s1.center_freq.to(u.MHz).value,\n self.s2.center_freq.to(u.MHz).value))\n \n\n # read in the data, convert to complex\n self.d1=self.s1.data\n self.d2=self.s2.data\n\n self.dtype=self.d1.dtype\n\n self.exptime=self.s1.exptime\n self.center_freq=self.s1.center_freq\n\n # this is the complex sampling rate\n self.rate=self.s1.rate\n \n # needs this long a fft\n self.nfft=int(round((self.rate/self.df).decompose()))\n # there are this many ffts\n self.nsamples=self.d1.shape[0]/self.nfft\n # so each chunk is this long\n self.chunk=(self.nfft/self.rate).decompose()\n # and we need to add this many together\n self.nint=int(round(self.dt/self.chunk))\n # and we have this many correlations\n self.ncorr=int((self.exptime/self.dt))\n\n # frequency of FFTs\n self.freq=np.fft.fftshift(np.fft.fftfreq(self.nfft,(1/self.rate).decompose().value))*u.Hz\n # and the actual frequency on the sky\n self.rf_freq=self.center_freq+self.freq\n\n # Hz per channel\n self.channel=np.diff(self.freq).mean()\n\n if window is None:\n self.window=np.kaiser(self.nfft,5).astype('complex')\n else:\n self.window=window(self.nfft).astype('complex')\n\n # apply a phase shift\n # to the second input\n phi=np.exp(2j*np.pi*(self.rf_freq*shift).decompose().value)\n \n # dynamic spectra\n self.DS1=np.zeros((self.nsamples,self.nfft),dtype=self.dtype)\n self.DS2=np.zeros((self.nsamples,self.nfft),dtype=self.dtype)\n\n # output correlation\n self.outcorr=np.zeros((self.ncorr,self.nfft),dtype=self.dtype)\n\n j=0\n for i in range(self.ncorr):\n corr=np.zeros((self.nfft),dtype=self.dtype)\n for k in range(self.nint):\n D1=self.d1[j*self.nfft:(j+1)*self.nfft]*self.window\n D2=self.d2[j*self.nfft:(j+1)*self.nfft]*self.window\n \n F1=np.fft.fftshift(np.fft.fft(D1))\n F2=np.fft.fftshift(np.fft.fft(D2))\n F2*=phi\n self.DS1[j]=F1\n self.DS2[j]=F2\n \n corr+=F1*np.conj(F2)\n j+=1\n\n self.outcorr[i]=corr\n \n self.phase=np.angle(self.outcorr)*u.rad\n self.amp=np.absolute(self.outcorr)", "def specgram(x, NFFT=256, Fs=2, Fc=0, detrend=None, window = pl.hanning, noverlap=128, cmap=None, hold=None, dtype = np.float32, threads=1, im_obj=None, interpolation='nearest', pylab_scaling=True, fast=True):\n siglen = len(x)\n if 
Fc != 0: print('strange Fc value: {0}'.format(Fc))\n \n if np.issubdtype(dtype, np.float32): \n outdtype = np.complex64\n elif np.issubdtype(dtype, np.float64): \n outdtype = np.complex128\n elif np.issubdtype(dtype, np.float128): \n outdtype = np.complex256\n elif np.issubdtype(dtype, np.complex): \n raise ValueError('Use numpy.specgram for your complex ({dt}) input data'\n .format(dt=dtype))\n else: raise ValueError(\"specgram_fft23 can't use data type {dt}\" .format(dt=dtype))\n\n winarr1 = window(NFFT).astype(np.dtype)\n\n if noverlap == 0:\n nbins = siglen/NFFT\n seglen = NFFT\n winarr_all = np.tile(winarr1, nbins) \n windata32 = winarr_all*x # tiling and bulk multiply saves 30ms/180ms\n\n else:\n seglen = NFFT - noverlap\n nbins = (siglen-noverlap)/seglen # make sure noverlap left over at end\n windata32 = np.empty(NFFT*nbins, dtype = dtype)\n for b in range(nbins): \n windata32[b*NFFT:(b+1)*NFFT] = \\\n x[seglen*b:seglen*b+NFFT] * winarr1\n\n simd_align = pyfftw.simd_alignment # 16 at the moment.\n inp = pyfftw.n_byte_align_empty(NFFT, simd_align, dtype)\n out = pyfftw.n_byte_align_empty(NFFT/2+1, simd_align, dtype = outdtype)\n allout = np.empty((nbins,NFFT/2+1),dtype=outdtype)\n F = pyfftw.FFTW(inp,out,threads=threads)#, force_nogil=force_nogil)\n\n for b in range(nbins):\n inp[:] = windata32[b*NFFT:(b+1)*NFFT]\n F.execute()\n allout[b,:] = out\n\n P = np.real(allout.T)**2 + np.imag(allout.T)**2\n\n P[[0,-1]] *=0.5\n Fs = float(Fs) # make sure!\n if pylab_scaling: P = P*2/Fs/(np.abs(winarr1)**2).sum()\n im = 10/np.log(10)*np.log(P)\n extent = (0, nbins*seglen/float(Fs),0,Fs/2)\n\n # first check a few things\n # in spite of this logic, you can fool it by putting a different image in\n # really should carefully find child 1 of figure, and check that the\n # newest image child of that has the same extent!\n fig=pl.gcf()\n if im_obj is not None:\n if not (np.allclose(extent, im_obj.get_extent()) and \n im_obj.get_animated() and\n (im_obj.get_interpolation() == interpolation) and\n (im_obj.get_figure() == fig)):\n print('not using old object - mismatch with {ii} or not animated'\n .format(ii=im_obj))\n im_obj=None\n\n try: # is there an image that looks right?\n chs = fig.get_children()[1].get_children()\n is_img = [hasattr(ch,'write_png') for ch in chs]\n latest_image = chs[np.where(is_img)[0][-1]] # last image\n if not latest_image==im_obj: \n im_obj=None\n print('image (im_obj) does not match image in figure')\n except:\n print('Failed to find similar image in figure')\n im_obj=None\n \n if im_obj is None: # seems OK, lets do it.\n #im_obj = pl.imshow(im,origin='lower',aspect='auto',hold=hold)\n im_obj = pl.imshow(im,origin='lower',aspect='auto',animated=True, interpolation=interpolation, hold=hold, extent=extent)\n pl.show()\n else:\n sh = np.shape(im)\n pix = fig.get_size_inches()*fig.get_dpi()\n if fast and (np.prod(sh)>2e5):\n im_obj.set_data(im[::max(1,0.5*sh[0]/pix[0]),\n ::max(1,0.5*sh[1]/pix[1])]),\n pl.draw() \n print('now full res')\n im_obj.set_data(im)\n pl.draw() \n frange = np.linspace(extent[2], extent[3], np.shape(im)[0]) #agrees with pylabn\n t0range = np.linspace(extent[0], extent[1], np.shape(im)[1]+1)\n trange = t0range[0:-1]+NFFT/(2*Fs) # midway point\n return(P, frange, trange, im_obj)", "def find_dominantfreq(self,fs=100,N_wf=256,overlap=128):\n signals=[self.acc_magnitude,self.gyro_magnitude]\n f0v=8\n fmin=0.5\n fmax=4\n Nf=int(f0v*(N_wf/2))\n Ns=10\n self.acc_features=pd.DataFrame(columns=['stepfreq','meandominantfreq','Variance','Energy','VarXmedian'])\n 
self.gyro_features=pd.DataFrame(columns=['stepfreq','meandominantfreq','Variance','Energy','VarXmedian'])\n s=0\n for sig in signals:\n dom_freq=[]\n step_freq=[]\n var=[]\n ener=[]\n VarXMedian=[]\n for i in range(0,len(sig)-N_wf,overlap):\n mag=sig[i:i+N_wf]\n VarXMedian.append(np.var(mag)*np.median(mag))\n mag=mag-np.mean(mag)\n# mx=np.amax(mag)\n# mn=np.amin(mag)\n# mag=(mag-mn)/(mx-mn)\n \n var.append(np.var(mag))\n# print(var)\n ener.append(np.max(mag**2))\n domfreq=np.zeros((1,3))\n fourcoef=np.zeros((1,3), dtype=complex)\n \n freq=np.arange(0,(2*Nf))/(2*Nf)*(fs)\n lowind=np.where(freq>fmin)[0][0]\n upind=np.max(np.where(freq<fmax))\n \n haming= np.hamming(N_wf)\n \n furval=fft(mag*haming,n=2*Nf)\n \n fourcoef[0,0]=(furval[lowind+np.argmax(np.abs(furval[lowind:upind]))])\n ind=lowind+np.argmax(np.abs(furval[lowind:upind]))\n idx=np.where(furval==fourcoef[0,0])[0][0]\n domfreq[0,0]=freq[idx]\n furval[np.maximum(1,ind-Ns):(ind+Ns)]=0 # furval[np.maximum(1,ind-Ns):(ind+Ns)+lowind+1]=0\n \n fourcoef[0,1]=(furval[lowind+np.argmax(np.abs(furval[lowind:upind]))])\n ind=lowind+np.argmax(np.abs(furval[lowind:upind]))\n idx=np.where(furval==fourcoef[0,1])[0][0]\n domfreq[0,1]=freq[idx]\n furval[np.maximum(1,ind-Ns):(ind+Ns)]=0\n \n fourcoef[0,2]=(furval[lowind+np.argmax(np.abs(furval[lowind:upind]))])\n ind=lowind+np.argmax(np.abs(furval[lowind:upind]))\n idx=np.where(furval==fourcoef[0,2])[0][0]\n domfreq[0,2]=freq[idx]\n \n stepfreq=domfreq[0,0]\n \n# if domfreq[0,0]<1.2:\n# stepfreq=domfreq[0,1]\n \n step_freq.append(stepfreq)\n dom_freq.append(domfreq)\n# plt.plot(mag)\n if s==0:\n \n self.acc_features['stepfreq']=step_freq\n self.acc_features['meandominantfreq']=np.mean(np.vstack(dom_freq),axis=1)\n self.acc_features['Variance']=var\n self.acc_features['Energy']=ener\n self.acc_features['VarXmedian']=VarXMedian\n self.dom_freqacc=dom_freq\n \n if s==1:\n \n self.gyro_features['stepfreq']=step_freq\n self.gyro_features['meandominantfreq']=np.mean(np.vstack(dom_freq),axis=1)\n self.gyro_features['Variance']=var\n self.gyro_features['Energy']=ener\n self.gyro_features['VarXmedian']=VarXMedian\n self.dom_freqgyro=dom_freq\n \n s=s+1", "def fft_filter(\n data, \n samplerate, \n low_pass: float = None, \n high_pass: float = None, \n psd_threshold: float = None, \n amount: float = 1\n ):\n nyq = int(np.floor(data.shape[0] / 2))\n freq, psd, fhat = fft(data, samplerate)\n freq = freq[:nyq]\n if low_pass is not None:\n i = freq < low_pass\n i = np.concatenate((i, np.flip(i))) \n fhat = fhat * (i + (1 - amount) * np.invert(i))\n if high_pass is not None:\n i = freq > high_pass\n i = np.concatenate((i, np.flip(i))) \n fhat = fhat * (i + (1 - amount) * np.invert(i))\n if psd_threshold is not None:\n i = psd > psd_threshold\n fhat = fhat * (i + (1 - amount) * np.invert(i))\n\n return np.fft.ifft(fhat).real.reshape(-1, 1)", "def init_freq_sweep(self, start_freq, stop_freq, num_pts):\n curr_phase = 'Running'\n printMsg(curr_phase,\"Setting frequency range from \" + start_freq + \" to \" + stop_freq + \" with \" + str(num_pts) + \" points\")\n self.instrument.write(\"STAR \" + start_freq)\n self.instrument.write(\"STOP \" + stop_freq)\n self.instrument.write(\"POIN \" + str(num_pts)) # Will round up to be one of the following values: 3, 11, 21, 51, 101, 201, 401, 801, 1601\n #NOTE: there is a min freq span for each number of points see operating manual page 52\n self.freq_sweep_type() # Set the freq sweep mode for all following measurements (lin or log)\n \n \n self.degree = 
self.instrument.query('OUTPFORM')\n\n #Checking if Start and Stop Freq changed due to selected number of points or log freq sweep\n real_start_freq = self.instrument.query(\"STAR?\") #Ask VNA for set start frequency\n real_stop_freq = self.instrument.query(\"STOP?\") # Ask VNA for set stop frequency\n self.instrument.write(\"NOOP\") # No operation + sets operation bit to complete (puts VNA back in listen mode so can use front panel)\n\n #Check if the span is too small for log freq sweep (automatically defaults to lin sweep in that case)\n if self.freq_mode == \"log\" and int(self.instrument.query(\"LOGFREQ?\")) == 0:\n printError(curr_phase,\"You need > 2 octaves in span to run a logarithmic frequency sweep\")\n return False, real_start_freq, real_stop_freq\n\n user_start_freq = start_freq.split(\" \") #Split the units from the number for start freq\n if user_start_freq[1] == \"GHz\":\n user_start_freq[0] = float(user_start_freq[0]) * 10**9 #Convert from GHz to Hz\n elif user_start_freq[1] == \"MHz\":\n user_start_freq[0] = float(user_start_freq[0]) * 10**6 #Convert MHz to Hz\n else:\n printError(curr_phase,\"Units other than MHz or GHz were used\")\n return False, real_start_freq, real_stop_freq #Note returns as string not float\n\n user_stop_freq = stop_freq.split(\" \") # Split the units from the number for start freq\n if user_stop_freq[1] == \"GHz\":\n user_stop_freq[0] = float(user_stop_freq[0]) * 10 ** 9 # Convert from GHz to Hz\n elif user_stop_freq[1] == \"MHz\":\n user_stop_freq[0] = float(user_stop_freq[0]) * 10 ** 6 # Convert MHz to Hz\n else:\n printError(curr_phase,\"Units other than MHz or GHz were used\")\n return False, real_start_freq, real_stop_freq #Note returns as string not float\n\n if float(real_start_freq) - user_start_freq[0] != 0 or float(real_stop_freq) - user_stop_freq[0] != 0:\n return False, real_start_freq, real_stop_freq #Note returns as string not float\n else:\n return True, real_start_freq, real_stop_freq #Note returns as string not float", "def fftfreq(n, dtype=torch.float, device=torch.device(\"cpu\")):\n return (torch.arange(n, dtype=dtype, device=device) + n // 2) % n - n // 2", "def test_4d_freq_single_index():\n dic, data = ng.pipe.read_lowmem(NMRPIPE_4D_FREQ_1)\n assert data.shape == (2, 3, 4, 5)\n assert data.dtype == 'float32'\n assert data[0, 0, 0, 0] == 1.\n assert data[0, 0, 0, 1] == 1.\n assert data[0, 0, 1, 0] == 1.\n assert data[0, 1, 0, 0] == 1.\n assert data[1, 0, 0, 0] == 2.\n check_ppm_limits(dic, data, 0, [180.00, 80.00])\n check_ppm_limits(dic, data, 1, [186.67, 53.33])\n check_ppm_limits(dic, data, 2, [179.00, 59.00])\n check_ppm_limits(dic, data, 3, [44.70, -35.30])\n check_simple_roundtrip(dic, data, 1, lowmem=True)", "def set_Df_sweep(instrument, f_start, f_stop, unit='MHZ', channel_num=1):\n command1 = ':SENSe%d:FREQuency:STARt %G %s' % (channel_num, f_start, unit)\n command2 = ':SENSe%d:FREQuency:STOP %G %s' % (channel_num, f_stop, unit)\n instrument.write(command1)\n instrument.write(command2)", "def realfft(eventfile,segment_length):\n parent_folder = str(pathlib.Path(eventfile).parent)\n dat_files = sorted(glob.glob(parent_folder+'/accelsearch_' + str(segment_length) + 's/*.dat')) #not that order matters here I think, but just in case\n # recall that un-truncated data is \"*bary.dat\", so \"*bary_*.dat\" is truncated data!\n logfile = parent_folder + '/accelsearch_' + str(segment_length) + 's/realfft.log'\n\n print('Doing realfft now!')\n with open(logfile,'w') as logtextfile:\n for i in tqdm(range(len(dat_files))):\n if 
os.path.exists(dat_files[i][:-3] + 'fft')==False:\n output = subprocess.run(['realfft',dat_files[i]],capture_output=True,text=True)\n logtextfile.write(output.stdout)\n logtextfile.write('*------------------------------* \\n')\n logtextfile.write(output.stderr)\n logtextfile.close()\n\n return", "def harmonicModelAnal(x, fs, window, fft_size, hop_size, min_fft_val, nSines, minf0, maxf0, f0et, harmDevSlope=0.01, minSineDur=.02):\n\n\tif (minSineDur <0): # raise exception if minSineDur is smaller than 0\n\t\traise ValueError(\"Minimum duration of sine tracks smaller than 0\")\n\t\t\n\t#hN = fft_size / 2 # size of positive spectrum\n\thM1 = int(math.floor((window.size + 1) / 2)) # half analysis window size by rounding\n\thM2 = int(math.floor(window.size / 2)) # half analysis window size by floor\n\tx = np.append(np.zeros(hM2), x) # add zeros at beginning to center first window at sample 0\n\tx = np.append(x, np.zeros(hM2)) # add zeros at the end to analyze last sample\n\tpin = hM1 # init sound pointer in middle of anal window \n\tpend = x.size - hM1 # last sample to start a frame\n\t#fftbuffer = np.zeros(fft_size) # initialize buffer for FFT\n\twindow = window / sum(window) # normalize analysis window\n\thfreqp = [] # initialize harmonic frequencies of previous frame\n\tf0t = 0 # initialize f0 track\n\tf0stable = 0 # initialize f0 stable\n\n\twhile pin<=pend:\n\t\t#print(\"pin:\", pin, \" pend:\", pend)\n\t\tx1 = x[pin-hM1:pin+hM2] # select frame\n\t\t#--------- harmonic Analysis frame\n\t\t# mX, pX = DFT.dftAnal(x1, w, N) # compute dft \n\t\t# ploc = UF.peakDetection(mX, t) # detect peak locations \n\t\t# iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc) # refine peak values\n\t\t# ipfreq = fs * iploc/N # convert locations to Hz\n\t\t# f0t = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0, f0stable) # find f0\n\t\t# if ((f0stable==0)&(f0t>0)) \\\n\t\t# \t\tor ((f0stable>0)&(np.abs(f0stable-f0t)<f0stable/5.0)):\n\t\t# \tf0stable = f0t # consider a stable f0 if it is close to the previous one\n\t\t# else:\n\t\t# \tf0stable = 0\n\t\t# hfreq, hmag, hphase = harmonicDetection(ipfreq, ipmag, ipphase, f0t, nH, hfreqp, fs, harmDevSlope) # find harmonics\n\t\t#-----------\n\t\tuseTWM=0\n\t\tmX, f0stable, f0t, hfreq, hmag, hphase = harmonicModelAnalFrame (x1, window, fft_size, min_fft_val, fs, hfreqp, f0et, minf0, maxf0, nSines, f0stable, harmDevSlope, useTWM)\n\t\thfreqp = hfreq #hfreq(previous)\n\t\tif pin == hM1: # first frame\n\t\t\txhfreq = np.array([hfreq])\n\t\t\txhmag = np.array([hmag])\n\t\t\txhphase = np.array([hphase])\n\t\telse: # next frames\n\t\t\txhfreq = np.vstack((xhfreq,np.array([hfreq])))\n\t\t\txhmag = np.vstack((xhmag, np.array([hmag])))\n\t\t\txhphase = np.vstack((xhphase, np.array([hphase])))\n\t\tpin += hop_size # advance sound pointer\n\txhfreq = SM.cleaningSineTracks(xhfreq, round(fs * minSineDur / hop_size)) # delete tracks shorter than minSineDur\n\treturn xhfreq, xhmag, xhphase, f0stable", "def process_data(m):\n\n logging.debug(\"Sample rate is %0.3f Hz\"%(args.fs))\n\n # data type for binary file\n if args.swapiq:\n dt=[('imag','float32'),('real','float32')]\n else:\n dt=[('real','float32'),('imag','float32')]\n\n # Number of samples to work with\n fs=float(args.fs)\n\n # Starting phase\n phi=0;\n samples_read=0\n elapsed_sec=0\n fc=args.fc;\n fc_rate=args.fc_rate;\n\n if m:\n if m.has_key('utc'):\n indarray=args.utc==m['utc']\n ind=np.where(indarray==True)\n if len(ind) is not 1:\n logging.error(\"Couldn't find UTC time %s in doppler file\"%())\n sys.exit(-1)\n 
ind=ind[0]\n else:\n # Starts at 0 if no UTC time given\n ind=0\n\n past_predicts_warning_printed=False\n\n if args.save_doppler:\n doppler_out_fid=open(args.save_doppler,\"w\")\n else:\n doppler_out_fid=None\n \n\n while 1:\n # Compute number of samples to read\n n=int(round((elapsed_sec+1.0)*fs-samples_read))\n data=np.fromfile(sys.stdin,dtype=dt,count=n)\n\n current_utc = args.utc + TimeDelta(elapsed_sec,format='sec')\n\n logging.debug(\"%s (%d): Reading %d samples\"%(current_utc,elapsed_sec,n))\n\n if len(data)==0:\n break\n\n # Reserve arrays\n iq_out = np.zeros ( [ 2*len(data) ] , dtype = np.float32 )\n iq = np.zeros ( [ len(data) ] , dtype = np.complex )\n\n # Make complex number\n iq.real=data['real']\n iq.imag=data['imag']\n\n # Predicts provided?\n if m:\n # If we provide a model, use the model doppler and doppler rate\n # Note, if args.fc is given, we will apply this args.fc offset to\n # all data in predicts file. We will NOT apply a args.fc_rate offset though\n # and only use the fc_rate from the predicts file\n try:\n fc=args.fc+m['doppler_hz'][ind+elapsed_sec]\n fc_rate=m['doppler_rate_hz_s'][ind+elapsed_sec]\n if m.has_key('utc'):\n delta_time=current_utc-m['utc'][ind+elapsed_sec]\n # Make sure our model time and current time align\n delta_sec=(delta_time*86400).value\n if delta_sec > 0.1 :\n logging.error(\"Model time and current time mismatch by %f seconds\"%(delta_sec))\n sys.exit(-1)\n\n except IndexError:\n # Just keep using fc, fc_rate and keep propagating\n if not past_predicts_warning_printed:\n past_predicts_warning_printed=True\n if m.has_key('utc'):\n logging.warning(\"Past predicts time %s, using model %f Hz %f Hz/s\"\\\n %(m['utc'][-1],fc,fc_rate))\n else:\n logging.warning(\"Past predicts time %d, using model %f Hz %f Hz/s\"\\\n %(elapsed_sec-1,fc,fc_rate))\n\n if doppler_out_fid:\n if m.has_key['utc']:\n doppler_out_fid.write(\"%s %f %f\\n\"%(current_utc,fc,fc_rate))\n else:\n doppler_out_fid.write(\"%d %f %f\\n\"%(elapsed_sec,fc,fc_rate))\n\n doppler_out_fid.flush()\n\n # Counter rotate\n iq_cr,fc,phi=counterrotate(iq,args.fs,fc,fc_rate,phi)\n\n # Interleave and dump to disk\n iq_out[::2]=iq_cr.real\n iq_out[1::2]=iq_cr.imag\n iq_out.tofile(sys.stdout,format='float32');\n\n # Increment counters\n elapsed_sec+=1\n samples_read+=n\n \n if doppler_out_fid:\n doppler_out_fid.close()", "def test_4d_freq_single_index_slice():\n dic, data = ng.pipe.read(NMRPIPE_4D_FREQ_1 % (1))\n assert data.shape == (4, 5)\n assert data.dtype == 'float32'\n assert data[0, 0] == 1.\n assert data[0, 1] == 1.\n assert data[1, 0] == 1.\n check_ppm_limits(dic, data, 0, [179.00, 59.00])\n check_ppm_limits(dic, data, 1, [44.70, -35.30])\n check_simple_roundtrip(dic, data)", "def fft(data, samplerate):\n data = data.flatten()\n n = len(data)\n dt = 1 / samplerate\n fhat = np.fft.fft(data, n)\n psd = fhat * np.conj(fhat) / n # magnitude of each fourier transformed\n freq = (1/(dt * n)) * np.arange(n) # create x axis of increasing frequencies\n return freq, psd.real, fhat", "def stft(self, frame_length, hop, fft_size=None, return_onesided=True, window=None, pad_end=0): # real signature unknown; restored from __doc__\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_packet_sink_sptr __init__(self, p) -> digital_packet_sink_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_packet_sink_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_framer_sink_1_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, sink):\n\n self.sink = sink", "def __deref__(self):\n return _wmbus_swig.wmbus_packet_sink_sptr___deref__(self)", "def __init__(self, stream):\n self.send = stream.send", "def __init__(self, *args):\n this = _digital_swig.new_digital_simple_framer_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, stream):\n self.stream = stream\n self.recv = stream.recv", "def __init__(self, sender, source_id=None):\n self.sender = sender\n self.source_id = None\n self.source_id_buff = None\n self.time_id = 0\n self.set_source_id(source_id)", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, src):\n self.src = src", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoByteStream()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, mem, inp, outp):\n self.pc = 0\n self.mem = mem\n self.inp = inp\n self.outp = outp", "def make(param_mode, debug):\n return _wmbus_swig.wmbus_packet_sink_make(param_mode, debug)", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(__self__, *,\n filters: Optional[pulumi.Input[Sequence[pulumi.Input['PacketCaptureFilterArgs']]]] = None,\n maximum_bytes_per_packet: Optional[pulumi.Input[int]] = None,\n maximum_bytes_per_session: Optional[pulumi.Input[int]] = None,\n maximum_capture_duration_in_seconds: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n network_watcher_id: Optional[pulumi.Input[str]] = None,\n storage_location: Optional[pulumi.Input['PacketCaptureStorageLocationArgs']] = None,\n virtual_machine_id: Optional[pulumi.Input[str]] = None):\n if filters is not None:\n pulumi.set(__self__, \"filters\", filters)\n if maximum_bytes_per_packet is not None:\n pulumi.set(__self__, \"maximum_bytes_per_packet\", maximum_bytes_per_packet)\n if maximum_bytes_per_session is not None:\n pulumi.set(__self__, \"maximum_bytes_per_session\", maximum_bytes_per_session)\n if maximum_capture_duration_in_seconds is not None:\n pulumi.set(__self__, \"maximum_capture_duration_in_seconds\", maximum_capture_duration_in_seconds)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if network_watcher_id is not None:\n pulumi.set(__self__, \"network_watcher_id\", network_watcher_id)\n if storage_location is not None:\n pulumi.set(__self__, \"storage_location\", storage_location)\n if virtual_machine_id is not None:\n pulumi.set(__self__, \"virtual_machine_id\", virtual_machine_id)", "def __init__(self, name, string='hello'):\r\n Server.__init__(self, name)\r\n PypadData.__init__(self, string)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
carrier_sensed(self) -> bool return true if we detect carrier
def carrier_sensed(self):
    return _digital_swig.digital_packet_sink_sptr_carrier_sensed(self)
[ "def is_cisco(self):\n # Initialize key variables\n value = False\n\n # Checks system object ID\n if self.enterprise_id == 9:\n value = True\n\n # Return\n return value", "def is_saloon(self):\n return self.car_type == 'saloon'", "def _carrier_supports_ipv6(self,dut):\n carrier_supports_ipv6 = [\"vzw\", \"tmo\"]\n operator = get_operator_name(self.log, dut)\n self.log.info(\"Carrier is %s\" % operator)\n return operator in carrier_supports_ipv6", "def get_is_fedex_shipping(self, name):\n return self.carrier and \\\n self.carrier.carrier_cost_method == 'fedex' or False", "def alternative_carriers(self):\n return self.alternative_carrier_records", "def is_carnivore(self) -> bool:\n return self.has_trait(CarnivoreCard)", "def is_strain(self) -> bool:\n return self._is_strain", "def is_species(self) -> bool:\n return True", "def has_sres(self) -> bool:\n return self.check_sensi_orders((1,), MODE_RES)", "def is_ceph_enabled(self):\n return self.is_backend_enabled('ceph')", "def is_sparecore(self):\n return self._is_sparecore", "def detect() -> bool:\n if Usb.find(idVendor = 1118, idProduct = 654):\n return True\n else:\n return False", "def is_sensitive(self):\n return True if self.price_sens == 1 else False", "def _is_stackable(self):\n if (\n self.acceptance_off is None\n or self.acceptance is None\n or self.counts_off is None\n ):\n return False\n else:\n return True", "def HasSRA(self):\n return self.__has('SRA')", "def is_queue_first_car(self):\n first_car = self.parent_object.get_first_car()\n if first_car is None:\n return False\n if first_car.get_stream() == self:\n return True\n else:\n return False", "def chargeable(self):\n return not self.internal and self.charged", "def is_serp_attractive(self):\n # Work out the patch type first, using code from the base (SimpleSERPImpression) class.\n judgements = self._get_patch_judgements()\n patch_type = self._calculate_patch_type(judgements)\n self._set_query_patch_type(patch_type)\n \n results_len = self._search_context.get_current_results_length()\n results_list = self._search_context.get_current_results()\n judged_precision = sum(judgements) / float(len(judgements))\n \n if judged_precision <= self.__viewport_precision_threshold:\n # We have a poor quality SERP.\n return False\n \n # If we get here, the SERP will be of quality good enough to enter.\n return True", "def has_shirked(self):\n return ('bad' in self.technology_choices)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_pfb_clock_sync_ccf_sptr __init__(self, p) -> digital_pfb_clock_sync_ccf_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def pfb_clock_sync_ccf(*args, **kwargs):\n return _digital_swig.pfb_clock_sync_ccf(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_fff_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def pfb_clock_sync_fff(*args, **kwargs):\n return _digital_swig.pfb_clock_sync_fff(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_channel_taps(self, *args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_channel_taps(self, *args, **kwargs)", "def __init__(self, fft_length, cp_length, occupied_tones, snr, ks, carrier_map_bin, nc_filter, logging=False):\n\n\tgr.hier_block2.__init__(self, \"ofdm_receiver\",\n\t\t\t\tgr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature\n gr.io_signature2(2, 2, gr.sizeof_gr_complex*occupied_tones, gr.sizeof_char)) # Output signature\n\n bw = (float(occupied_tones) / float(fft_length)) / 2.0\n tb = bw*0.04\n print \"ofdm_receiver:__init__:occupied_tones %s fft_length %d \" % (occupied_tones, fft_length)\n \n chan_coeffs = filter.firdes.low_pass (1.0, # gain\n 1.0, # sampling rate\n bw+tb, # midpoint of trans. band\n tb, # width of trans. band\n filter.firdes.WIN_HAMMING) # filter type\n \n self.chan_filt = filter.fft_filter_ccc(1, chan_coeffs)\n\n # linklab, get ofdm parameters\n self._fft_length = fft_length\n self._occupied_tones = occupied_tones\n self._cp_length = cp_length\n self._nc_filter = nc_filter\n self._carrier_map_bin = carrier_map_bin\n \n win = [1 for i in range(fft_length)]\n \n # linklab, initialization function\n self.initialize(ks, self._carrier_map_bin)\n \n\n zeros_on_left = int(math.ceil((fft_length - occupied_tones)/2.0))\n ks0 = fft_length*[0,]\n ks0[zeros_on_left : zeros_on_left + occupied_tones] = ks[0]\n\n ks0 = np_fft.ifftshift(ks0)\n ks0time = np_fft.ifft(ks0)\n # ADD SCALING FACTOR\n ks0time = ks0time.tolist()\n\n SYNC = \"pn\"\n if SYNC == \"ml\":\n nco_sensitivity = -1.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_ml(fft_length,\n cp_length,\n snr,\n ks0time,\n logging)\n elif SYNC == \"pn\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pn(fft_length,\n cp_length,\n logging)\n elif SYNC == \"pnac\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pnac(fft_length,\n cp_length,\n ks0time,\n logging)\n # for testing only; do not user over the air\n # remove filter and filter delay for this\n elif SYNC == \"fixed\":\n self.chan_filt = gr.multiply_const_cc(1.0)\n nsymbols = 18 # enter the number of symbols per packet\n freq_offset = 0.0 # if you use a frequency offset, enter it here\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_fixed(fft_length,\n cp_length,\n nsymbols,\n freq_offset,\n logging)\n\n # Set up blocks\n\n # Create a delay line, linklab\n self.delay = blocks.delay(gr.sizeof_gr_complex, fft_length)\n\n self.nco 
= analog.frequency_modulator_fc(nco_sensitivity) # generate a signal proportional to frequency error of sync block\n self.sigmix = blocks.multiply_cc()\n self.sampler = gr_papyrus.ofdm_sampler(fft_length, fft_length+cp_length)\n self.fft_demod = gr_fft.fft_vcc(fft_length, True, win, True)\n self.ofdm_frame_acq = gr_papyrus.ofdm_frame_acquisition(occupied_tones,\n fft_length,\n cp_length, ks[0])\n # linklab, check current mode: non-contiguous OFDM or not\n if self._nc_filter:\n print '\\nMulti-band Filter Turned ON!'\n # linklab, non-contiguous filter\n self.ncofdm_filt = ncofdm_filt(self._fft_length, self._occupied_tones, self._carrier_map_bin)\n self.connect(self, self.chan_filt, self.ncofdm_filt)\n self.connect(self.ncofdm_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. offset output to derotate input signal\n self.connect(self.ncofdm_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n else :\n print '\\nMulti-band Filter Turned OFF!'\n self.connect(self, self.chan_filt)\n self.connect(self.chan_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. offset output to derotate input signal\n self.connect(self.chan_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n\n self.connect(self.sigmix, (self.sampler,0)) # sample off timing signal detected in sync alg\n self.connect((self.ofdm_sync,1), (self.sampler,1)) # timing signal to sample at\n\n self.connect((self.sampler,0), self.fft_demod) # send derotated sampled signal to FFT\n self.connect(self.fft_demod, (self.ofdm_frame_acq,0)) # find frame start and equalize signal\n self.connect((self.sampler,1), (self.ofdm_frame_acq,1)) # send timing signal to signal frame start\n self.connect((self.ofdm_frame_acq,0), (self,0)) # finished with fine/coarse freq correction,\n self.connect((self.ofdm_frame_acq,1), (self,1)) # frame and symbol timing, and equalization\n\n if logging:\n self.connect(self.chan_filt, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-chan_filt_c.dat\"))\n self.connect(self.fft_demod, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-fft_out_c.dat\"))\n self.connect(self.ofdm_frame_acq,\n gr.file_sink(gr.sizeof_gr_complex*occupied_tones, \"ofdm_receiver-frame_acq_c.dat\"))\n self.connect((self.ofdm_frame_acq,1), gr.file_sink(1, \"ofdm_receiver-found_corr_b.dat\"))\n self.connect(self.sampler, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-sampler_c.dat\"))\n self.connect(self.sigmix, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-sigmix_c.dat\"))\n self.connect(self.nco, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-nco_c.dat\"))", "def __init__(self):\n super(ClockFace, self).__init__()\n\n # Set to True when the variables to draw the clock are set:\n self.initialized = False\n\n # The time on the clock face\n self._time = datetime.now()\n self._old_minute = self._time.minute\n\n # Update the clock only when the widget is active to save\n # resource\n self._active = False\n\n # The display mode of the clock\n self._mode = _MODE_SIMPLE_CLOCK\n\n # SVG Background cache\n self._cache_pixbuf = None\n self._radius = -1\n\n # The graphic context used for drawings\n self._gc = None\n self._line_width = 2\n\n # Color codes (approved colors for XO screen:\n # http://wiki.laptop.org/go/XO_colors)\n colormap = self.get_colormap()\n\n # XO Medium Blue\n self._COLOR_HOURS = colormap.alloc_color(\"#005FE4\")\n\n # XO 
Medium Green\n self._COLOR_MINUTES = colormap.alloc_color(\"#00B20D\")\n\n # XO Medium Red\n self._COLOR_SECONDS = colormap.alloc_color(\"#E6000A\")\n\n # White\n self._COLOR_WHITE = colormap.alloc_color(\"#FFFFFF\")\n\n # Black\n self._COLOR_BLACK = colormap.alloc_color(\"#000000\")\n\n # gtk.Widget signals\n self.connect(\"expose-event\", self._expose_cb)\n self.connect(\"size-allocate\", self._size_allocate_cb)\n\n # The masks to capture the events we are interested in\n self.add_events(gdk.EXPOSURE_MASK | gdk.VISIBILITY_NOTIFY_MASK)\n\n # Define a new signal to notify the application when minutes\n # change. If the user wants to display the time in full\n # letters, the method of the activity will be called back to\n # refresh the display.\n gobject.signal_new(\"time_minute\", ClockFace,\n gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [])", "def make(self, *args, **kwargs):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_sptr_make(self, *args, **kwargs)", "def __init__(self):\r\n# self.__init__(defaultPixels)\r\n #TODO nicht möglich mehrere Konstruktoren zu haben?\r\n #def __init__(self, pixels):\r\n #\"\"\"init the countdown with pixel positions (a list of (x, y) tuples)\"\"\"\r\n #self.pixels = pixels\r\n self.pixels = self.defaultPixels\r\n self.initColors()\r\n self.t = 0", "def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_channel_taps(self, *args, **kwargs)", "def __init__(self, p, i, d, get_current_time, get_feedback_value):\r\n # p, i, and d constants\r\n self.p, self.i, self.d = p, i, d\r\n\r\n # saves the functions that return the time and the feedback\r\n self.get_current_time = get_current_time\r\n self.get_feedback_value = get_feedback_value", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_channel_taps(self, *args, **kwargs)", "def __init__(self):\n this = _coin.new_SoMFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, channel=105, state=BusState.ACTIVE, bitrate=500000, *args, **kwargs):\n\n #super(CanFoxBus, self).__init__(self, channel='PCAN_USBBUS1', state=BusState.ACTIVE, bitrate=500000, *args, **kwargs)\n self.channel_info = channel\n self.fd = kwargs.get('fd', False)\n pcan_bitrate = CANFOX_bitrate_objs.get(bitrate, CANFOX_BAUD_250K)\n\n\n\n self.m_objCANFOXBasic = CANFOXBasic()\n self.m_PcanHandle = 105 #globals()[channel]\n self._filters = None\n\n if state is BusState.ACTIVE or state is BusState.PASSIVE:\n self.state = state\n else:\n raise ArgumentError(\"BusState must be Active or Passive\")\n\n\n if self.fd:\n f_clock_val = kwargs.get('f_clock', None)\n if f_clock_val is None:\n f_clock = \"{}={}\".format('f_clock_mhz', kwargs.get('f_clock_mhz', None))\n else:\n f_clock = \"{}={}\".format('f_clock', kwargs.get('f_clock', None))\n\n fd_parameters_values = [f_clock] + [\"{}={}\".format(key, kwargs.get(key, None)) for key in pcan_fd_parameter_list if kwargs.get(key, None) is not None]\n\n self.fd_bitrate = ' ,'.join(fd_parameters_values).encode(\"ascii\")\n\n\n result = self.m_objCANFOXBasic.InitializeFD(self.m_PcanHandle, self.fd_bitrate)\n else:\n if HAS_EVENTS:\n self._recv_event = CreateEvent(None, 0, 0, \"R2\")\n self._tran_event = CreateEvent(None, 0, 0, \"T2\")\n result = 
self.m_objCANFOXBasic.Initialize(self.m_PcanHandle, pcan_bitrate)\n\n if result != CANFOX_ERROR_OK:\n raise PcanError(self._get_formatted_error(result))\n\n if HAS_EVENTS:\n\n if 0:\n self._recv_event = CreateEvent(None, 0, 0, \"R2\")\n result = self.m_objCANFOXBasic.SetValue(\n self.m_PcanHandle, 1, self._recv_event) #\"\"\"PCAN_RECEIVE_EVENT\"\"\"\n if result != CANFOX_ERROR_OK:\n raise PcanError(self._get_formatted_error(result))\n\n super(CanFoxBus, self).__init__(channel=channel, state=state, bitrate=bitrate, *args, **kwargs)", "def __init__(self, spi_rack, module, frequency=100e6):\n #def __init__(self, module, frequency=100e6):\n self.spi_rack = spi_rack\n self.module = module\n\n self.rf_frequency = frequency\n self.stepsize = 1e6\n self.ref_frequency = 10e6\n self.use_external = 0\n self.outputPower = None\n\n # These are the 6 registers present in the ADF4351\n self.registers = 6*[0]\n # In REG3: set ABP=1 (3 ns, INT-N) and CHARGE CANCEL=1\n self.registers[3] = (1<<22) | (1<<21) | 3\n # In REG5: set LD PIN MODE to 1 -> digital lock detect\n self.registers[5] = (1<<22) | (3<<19) | 5\n\n self.set_frequency(frequency)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_taps(self) -> __dummy_11__ Returns all of the taps of the matched filter
def get_taps(self):
    return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_taps(self)
[ "def get_taps(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_taps(self)", "def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps(self)", "def get_swaps(self, t: int) -> list:\n swaps = []\n for (i, j) in self._arcs:\n if i >= j:\n continue\n for q in range(self.num_vqubits):\n if self.solution.get_value(f\"x_{t}_{q}_{i}_{j}\") > 0.5:\n swaps.append((self.global_qubit[i], self.global_qubit[j]))\n return swaps", "def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps(self)", "def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_channel_taps(self, *args, **kwargs)", "def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_channel_taps(self, *args, **kwargs)", "def _locate_gaps(self, t):\n true_gap_starts = []\n true_gap_ends = []\n for i in range(len(t)-1):\n if t[i+1] - t[i] > 0.1 * self.period.value:\n true_gap_starts += [i]\n true_gap_ends += [i+1]\n\n return true_gap_starts, true_gap_ends", "def __call__(self, data: np.ndarray, threshold: float):\n t_list = []\n time = 0\n # Find all threshold crossings\n data_thresh = data[data[:, 2] >= threshold, :]\n while time < self.max_time:\n # Find threshold crossings less than \"time\" before the time of event\n inds = np.logical_and(data_thresh[:, 1] >= (time), data_thresh[:, 1] <= (time + self.step_size))\n # Store a boolean indicating if a warning was ever \"On\"\n t_list.append(any(inds))\n time += self.step_size\n return t_list", "def filterBreakerTrips(events):\n filters = []\n filters.append( UndervoltageMerge() )\n filters.append( RunstopMerge() )\n filters.append( CircuitBreakerMerge() )\n filters.append( KeepEventTypes(['CircuitBreakerTrip']) )\n return runFilters(filters,events)", "def multi_t_filter(w_in, a_threshold_in, vt_max_in, vt_min_in, t_out):\n \n # initialize arrays, padded with the elements we want\n t_out[:] = np.nan \n \n # checks \n if (np.isnan(w_in).any() or np.isnan(a_threshold_in)):\n return\n if (np.isnan(vt_max_in).all() and np.isnan(vt_min_in).all()):\n return \n if (not len(t_out)<=len(w_in)):\n raise DSPFatal('The length of your return array must be smaller than the length of your waveform')\n\n # Initialize an intermediate array to hold the tp0 values before we remove duplicates from it\n intermediate_t_out = np.full_like(t_out, np.nan, dtype=np.float32)\n \n # Go through the list of maxima, calling time_point_thresh (the refactored version ignores the nan padding)\n time_point_thresh(w_in, a_threshold_in, vt_max_in, 0, intermediate_t_out)\n\n # Remove duplicates from the t_out list\n remove_duplicates(intermediate_t_out, vt_min_in, t_out)", "def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_channel_taps(self, *args, **kwargs)", "def _find_ttl(t, x, thresh=500, polarity=1):\n\n times = []\n inpulse = 0\n if polarity > 0:\n for i in range(0, len(t)):\n if (not inpulse) and (x[i] < thresh):\n times.append(t[i])\n inpulse = 1\n elif inpulse and (x[i] > thresh):\n inpulse = 0\n else:\n for i in range(0, len(t)):\n if (not inpulse) and (x[i] > thresh):\n times.append(t[i])\n inpulse = 1\n elif inpulse and (x[i] < thresh):\n inpulse = 0\n return times", "def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_channel_taps(self, *args, **kwargs)", "def rippleBandFilterSimulated(lfp, time, FS, 
bpFilterTaps, lpFilterTaps):\n #Bandpass filter into ripple band\n rippleData = signal.lfilter(bpFilterTaps,1,lfp)\n #Envelope\n rippleEnvelope = np.absolute(rippleData)\n #smooth\n smoothed_envelope = signal.lfilter(lpFilterTaps,1,rippleEnvelope)\n return smoothed_envelope, rippleData", "def trapfilt(xt, Fs, fL, k, alfa): \n ixk = round(Fs*k/float(2*fL)) # Tail cutoff index \n tt = arange(-ixk,ixk+1)/float(Fs) # Time axis for h(t) \n n = len(tt)-1 # Filter order \n ht = zeros(len(tt))\n ix = where(logical_and(tt>=-ixk,tt<ixk+1))[0]\n ht[int(len(ix)/2)] = 2*fL\n ixn = ix[0:n/2]\n ixp = ix[(n/2)+1:n+1]\n ix = hstack((ixn,ixp))\n ht[ix] = (sin(2*pi*fL*tt[ix])/(pi*tt[ix])) * (sin(2*pi*alfa*fL*tt[ix])/(2*pi*alfa*fL*tt[ix]))\n #ht[int(len(ix)/2)] = 2*fL\n if alfa == 0 :\n ixk = round(Fs*k/float(2*fL))\n ix = where(logical_and(tt>=-ixk,tt<ixk+1))[0]\n ixn = ix[0:160]\n ixp = ix[161:321]\n ix = hstack((ixn,ixp))\n TL = 1/float(2*fL)\n ht[int(len(ix)/2)] = 1 # At exception t=0, assign value of sinc directly at t =0 point\n ht[ix] = sin(pi*tt[ix]/TL)/(pi*tt[ix]/TL)\n \n yt = lfilter(ht, 1, hstack((xt, zeros(ixk)))) \n # Compute filter output y(t) \n yt = yt[ixk:] # Filter delay compensation \n return yt, n # Return y(t) and filter order", "def hot_segments(self):\n return [s for s in self.segments if s.heat_flow > 0]", "def get_frame_gaps(self):\n first_index = self.frames.get_first_frame_value('fixed_index')\n first_mjd = self.frames.get_first_frame_value('mjd')\n\n dt = self.info.sampling_interval\n measured_time = (self.frames.mjd - first_mjd) * units.Unit('day')\n expected_time = (self.frames.fixed_index - first_index) * dt\n gap_time = (measured_time - expected_time).decompose().to(dt.unit)\n frame_gaps = round_values((gap_time / dt).decompose().value)\n frame_gaps[~self.frames.valid] = 0\n gap_time[~self.frames.valid] = np.nan\n return frame_gaps, gap_time", "def _masks(signal,r_peaks, p_start, p_end, t_start, t_end):\n phases = np.zeros(len(signal))\n nni = np.diff(r_peaks) \n ppi =[]\n tti =[]\n qrsi = []\n tpi = []\n zzi = []\n \n for i in range(len(r_peaks)-1): \n # if nni[i] > 400 and nni[i] < 1400:\n ppl = p_end[i]-p_start[i]\n if ppl < 400 and ppl > 0:\n phases[p_start[i]: p_end[i]] = 1\n ppi.append(ppl)\n ttl = t_end[i]-t_start[i]\n if ttl < 700 and ttl > 0:\n phases[t_start[i]: t_end[i]] = 3\n tti.append(ttl)\n tpl = p_start[i]-t_end[i]\n if tpl < 1400 and tpl >0:\n phases[t_end[i]: p_start[i]] = 4 \n tpi.append(tpl)\n for i in range(len(r_peaks)-2): \n # if nni[i] > 400 and nni[i] < 1400 and nni[i+1] > 400 and nni[i+1] < 1400:\n qrsl = t_start[i+1] - p_end[i]\n if qrsl < 500 and qrsl > 0:\n phases[p_end[i]: t_start[i+1]] = 2\n qrsi.append(qrsl)\n return phases", "def get_trips(self):\n return self.trips" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_diff_taps(self) > __dummy_11__ Returns all of the taps of the derivative filter
def get_diff_taps(self): return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps(self)
[ "def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps(self)", "def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_channel_taps(self, *args, **kwargs)", "def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_channel_taps(self, *args, **kwargs)", "def get_taps(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_taps(self)", "def get_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_taps(self)", "def get_diffs(self):\n return list(self.iter_diffs())", "def get_diff_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps_as_string(self)", "def test_backward_divided_difference_gaps(self):\n orig = [(1, 5), (2, 4), (3, 4), (4, 4), (6, 4), (7, 3), (10, 6)]\n expected = [(2, -1), (3, 0), (4, 0), (6, 0), (7, -1), (10, 1)]\n walker = (('a', p, v) for p, v in orig)\n assert_equal([(p, v) for _, p, v in backward_divided_difference(walker)], expected)", "def test_timeseries_get_diffs(self):\n\n ts = self.ts.get_diffs()\n\n self.assertListEqual(\n ts.tseries.tolist(),\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n )\n\n self.assertEqual(len(ts.tseries), len(self.ts.tseries) - 1)\n\n self.assertTrue(np.array_equal(self.ts.dseries[1:], ts.dseries))", "def periods(t, y, threshold):\n transition_times = find_transition_times(t, y, threshold)\n deltas = np.diff(transition_times)\n return deltas", "def test_backward_divided_difference_gaps_step(self):\n orig = [(1, 5), (2, 4), (3, 4), (4, 4), (6, 4), (7, 3), (10, 6)]\n expected = [(2, -1), (3, 0), (4, 0), (7, -1)]\n walker = (('a', p, v) for p, v in orig)\n assert_equal([(p, v) for _, p, v in backward_divided_difference(walker, step=1)], expected)", "def get_diff_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps_as_string(self)", "def test_forward_divided_difference_gaps_step(self):\n orig = [(1, 5), (2, 4), (3, 4), (4, 4), (6, 4), (7, 3), (10, 6)]\n expected = [(1, -1), (2, 0), (3, 0), (6, -1)]\n walker = (('a', p, v) for p, v in orig)\n assert_equal([(p, v) for _, p, v in forward_divided_difference(walker, step=1)], expected)", "def test_backward_divided_difference_gaps_step2(self):\n orig = [(1, 5), (2, 4), (3, 4), (4, 4), (6, 4), (8, 3), (10, 6)]\n expected = [(6, 0), (8, -0.5), (10, 1.5)]\n walker = (('a', p, v) for p, v in orig)\n assert_equal([(p, v) for _, p, v in backward_divided_difference(walker, step=2)], expected)", "def test_forward_divided_difference_gaps_step2(self):\n orig = [(1, 5), (2, 4), (3, 4), (4, 4), (6, 4), (8, 3), (10, 6)]\n expected = [(4, 0), (6, -0.5), (8, 1.5)]\n walker = (('a', p, v) for p, v in orig)\n assert_equal([(p, v) for _, p, v in forward_divided_difference(walker, step=2)], expected)", "def calc_delta_thresh(self):\n threshold_vals = self.calc_threshold_vals()\n return np.array(threshold_vals - threshold_vals[0])", "def backward_differences(T):\n\tnumOfTimes = len(T)\n\t#the number of steps in the method\n\tm = numOfTimes - 1\n\t#generate the initial differences, which\n\t#is just the standard basis.\n\tD = np.array([ [np.float64((i+1)==(numOfTimes-j)) for i in range(numOfTimes)] for j in range(numOfTimes)])\n\tdifferences = np.zeros_like(D)\n\tdifferences[0] = D[0]\n\t\n\t\n\tfor q in range(1,numOfTimes):\n\t\tfor j in range(numOfTimes - q):\n\t\t\tD[j] = first_difference([T[m-j],T[m-j-q]],[D[j],D[j+1]])\n\t\t\tdifferences[q] = D[0]\n\treturn 
differences", "def test_backward_divided_difference_gaps_auto_step(self):\n orig = [(1, 5), (2, 4), (3, 4), (4, 4), (6, 4), (7, 3), (10, 6)]\n expected = [(2, -1), (3, 0), (4, 0), (7, -1)]\n walker = (('a', p, v) for p, v in orig)\n assert_equal([(p, v) for _, p, v in backward_divided_difference(walker, auto_step=True)], expected)", "def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_channel_taps(self, *args, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_channel_taps(self, int channel) > __dummy_4__ Returns the taps of the matched filter for a particular channel
def get_channel_taps(self, *args, **kwargs): return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_channel_taps(self, *args, **kwargs)
[ "def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_channel_taps(self, *args, **kwargs)", "def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_channel_taps(self, *args, **kwargs)", "def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_channel_taps(self, *args, **kwargs)", "def get_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_taps(self)", "def get_channels():", "def get_taps(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_taps(self)", "def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps(self)", "def take_channels(data, channelmap):\n channelmap = [c-1 for c in channelmap]\n return data[:, channelmap]", "def get_channel_history(channel):\n incidents = list(Incident.objects.filter(channel=channel).all())\n notes = list(ChannelNote.objects.filter(channel=channel).all())\n\n history = incidents + notes\n history.sort(key=lambda x: x.created_at, reverse=True)\n\n return history", "def get_existing_traces_by_channel(self) -> Dict[int, List[Tuple[int, str]]]:\n ret = {}\n for i in range(1, 9):\n traces = self.ask(f\"CALC{i}:PAR:CAT:EXT?\").strip('\"')\n if traces == \"NO CATALOG\":\n continue\n else:\n ret[i] = []\n traces = traces.split(',')\n names = traces[::2]\n params = traces[1::2]\n for n, p in zip(names, params):\n ret[i].append((int(n.split('_')[-1]), p))\n return ret", "def feed_comparison(self, channel):\n comparison_results = []\n retval = []\n # Alert if tower is not in feed DB\n if (channel[\"cgi_str\"] not in self.bad_cgis and\n channel[\"cgi_str\"] not in self.cgi_whitelist and\n channel[\"cgi_str\"] not in self.good_cgis):\n comparison_results.append(self.check_channel_against_feed(channel))\n # Else, be willing to alert if channel is not in range\n if (channel[\"cgi_str\"] not in self.bad_cgis and\n channel[\"cgi_str\"] not in self.cgi_whitelist and\n channel[\"cgi_str\"] not in self.good_cgis):\n comparison_results.append(self.check_channel_range(channel))\n # Test for primary BTS change\n if channel[\"cell\"] == '0':\n comparison_results.append(self.process_cell_zero(channel))\n for result in comparison_results:\n if result != ():\n retval.append(result)\n if len(retval) == 0:\n if channel[\"cgi_str\"] not in self.good_cgis:\n self.good_cgis.append(channel[\"cgi_str\"])\n return retval", "def get_channel_events(self, channel_index, source='xform', subsample=False):\n events = self.get_events(source=source, subsample=subsample)\n events = events[:, channel_index]\n\n return events", "def get_keys( self, step=None, channel=None):\r\n if step==None:\r\n return self.channels[channel] if channel<self.nchannels else []\r\n if channel==None:\r\n return self.steps[step] if step<self.nsteps else []\r\n return self.steps[step][channel] if (steps<self.nsteps and channel<nchannels) else []", "def get_channel_dict(self):\n return self.channels", "def _get_all_channels(self):\n\n try:\n available_channel_tuple = list(\n self._tagger.getChannelList(TT.TT_CHANNEL_RISING_AND_FALLING_EDGES)\n )\n # handle exception in the call (TT functions normally produce NotImplementedError)\n except NotImplementedError:\n # self.log.error('_get_all_channels(): communication with the device failed')\n return []\n # handle the case of self._tagger = None\n except AttributeError:\n # self.log.error('_get_all_channels(): _tagger is None. 
Initialize device first')\n return []\n\n return list(available_channel_tuple)", "def extract(self, ev):\n ch_events = channelEvents(atype='Physical')\n channel = ev.get_ad() >> self.nBitsTotal # Get channel information.\n\n for channelIdx in xrange(self.nChannels):\n t = pylab.find(channelIdx == channel)\n # Much faster than boolean list or filter\n if len(t) > 0:\n ad = (ev.get_ad()[t]) & (2 ** self[channelIdx].nBitsTotal - 1)\n ch_events.add_adtmch(channelIdx, ad, ev.get_tm()[t])\n return ch_events", "def get_channel_aliases(self, channel):\n chan_key = channel.key.lower()\n nicktuples = self.caller.nicks.get(category=\"channel\", return_tuple=True, return_list=True)\n if nicktuples:\n return [tup[2] for tup in nicktuples if tup[3].lower() == chan_key]\n return []", "def get_active_channels(self):\n sources = {}\n for inp in self.IFswitch.inputs.keys():\n if self.IFswitch.inputs[inp].source:\n sources[self.IFswitch.inputs[inp].name] = self.IFswitch.inputs[inp].source\n return sources", "def get_channel_list(self):\r\n channels = self.items()\r\n channels.sort()\r\n return [value for key, value in channels]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_diff_channel_taps(self, int channel) > __dummy_4__ Returns the taps in the derivative filter for a particular channel
def get_diff_channel_taps(self, *args, **kwargs): return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_channel_taps(self, *args, **kwargs)
[ "def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_channel_taps(self, *args, **kwargs)", "def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_channel_taps(self, *args, **kwargs)", "def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_channel_taps(self, *args, **kwargs)", "def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps(self)", "def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps(self)", "def get_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_taps(self)", "def describe_diff(channels: List[np.ndarray],\n diff_fraction_threshold: float = 0.01) -> Diff:\n diff_fraction, nearby_variants = analyze_diff_and_nearby_variants(channels)\n # Thresholds were chosen by visual experimentation, i.e. human curation.\n if diff_fraction > diff_fraction_threshold:\n return Diff.MANY_DIFFS\n elif nearby_variants >= 5:\n return Diff.NEARBY_VARIANTS\n else:\n return Diff.FEW_DIFFS", "def get_diff_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps_as_string(self)", "def get_taps(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_taps(self)", "def periods(t, y, threshold):\n transition_times = find_transition_times(t, y, threshold)\n deltas = np.diff(transition_times)\n return deltas", "def get_channel_history(channel):\n incidents = list(Incident.objects.filter(channel=channel).all())\n notes = list(ChannelNote.objects.filter(channel=channel).all())\n\n history = incidents + notes\n history.sort(key=lambda x: x.created_at, reverse=True)\n\n return history", "def channel_distances_downstream(self, ch_nodes):\n ch_links = self._grid.at_node[\"flow__link_to_receiver_node\"][ch_nodes]\n ch_dists = np.empty_like(ch_nodes, dtype=float)\n # dists from ch head, NOT drainage divide\n ch_dists[0] = 0.0\n np.cumsum(self._grid.length_of_d8[ch_links[:-1]], out=ch_dists[1:])\n return ch_dists", "def get_diff_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps_as_string(self)", "def receive( self, channel, differential ):\n\t\tresponse = self._spi.xfer([1, channel | differential, 0])\n\t\t# Capture 11 bits (null bit + 10 bit result)\n\t\tvalue = (((response[1] & 0b11) << 8) | (response[2]))\n\n\t\treturn value", "def get_existing_traces_by_channel(self) -> Dict[int, List[Tuple[int, str]]]:\n ret = {}\n for i in range(1, 9):\n traces = self.ask(f\"CALC{i}:PAR:CAT:EXT?\").strip('\"')\n if traces == \"NO CATALOG\":\n continue\n else:\n ret[i] = []\n traces = traces.split(',')\n names = traces[::2]\n params = traces[1::2]\n for n, p in zip(names, params):\n ret[i].append((int(n.split('_')[-1]), p))\n return ret", "def feed_comparison(self, channel):\n comparison_results = []\n retval = []\n # Alert if tower is not in feed DB\n if (channel[\"cgi_str\"] not in self.bad_cgis and\n channel[\"cgi_str\"] not in self.cgi_whitelist and\n channel[\"cgi_str\"] not in self.good_cgis):\n comparison_results.append(self.check_channel_against_feed(channel))\n # Else, be willing to alert if channel is not in range\n if (channel[\"cgi_str\"] not in self.bad_cgis and\n channel[\"cgi_str\"] not in self.cgi_whitelist and\n channel[\"cgi_str\"] not in self.good_cgis):\n comparison_results.append(self.check_channel_range(channel))\n # Test for primary BTS change\n if channel[\"cell\"] 
== '0':\n comparison_results.append(self.process_cell_zero(channel))\n for result in comparison_results:\n if result != ():\n retval.append(result)\n if len(retval) == 0:\n if channel[\"cgi_str\"] not in self.good_cgis:\n self.good_cgis.append(channel[\"cgi_str\"])\n return retval", "def calc_delta_thresh(self):\n threshold_vals = self.calc_threshold_vals()\n return np.array(threshold_vals - threshold_vals[0])", "def get_keys( self, step=None, channel=None):\r\n if step==None:\r\n return self.channels[channel] if channel<self.nchannels else []\r\n if channel==None:\r\n return self.steps[step] if step<self.nsteps else []\r\n return self.steps[step][channel] if (steps<self.nsteps and channel<nchannels) else []", "def calculate_distances(deltas: np.ndarray, sampling_freq_hz: float, c: float = 343) -> np.ndarray:\n conversion_factor = c / (2 * sampling_freq_hz)\n\n deltas_t = deltas.T\n\n k1 = deltas * np.eye(deltas.shape[0]) @ np.ones(deltas.shape)\n k2 = k1.T\n k = k1 + k2\n\n return conversion_factor * (np.abs(deltas - deltas_t) + k)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_taps_as_string(self) > string Return the taps as a formatted string for printing
def get_taps_as_string(self): return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_taps_as_string(self)
[ "def get_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_taps_as_string(self)", "def get_diff_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps_as_string(self)", "def get_diff_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps_as_string(self)", "def printable(self):\n\t\tif not self.is_set:\n\t\t\treturn(\" \")\n\n\t\ttoPrint = \"A: ({}, {}, {})\".format(self.node_A,self.port_A,self.app_id_A) + \" \"\n\t\ttoPrint += \"B: ({}, {}, {})\".format(self.node_B,self.port_B,self.app_id_B) + \" \"\n\t\ttoPrint = toPrint + \"Entanglement ID: \" + str(self.id_AB) + \" \"\n\t\ttoPrint = toPrint + \"Timestamp: \" + str(self.timestamp) + \" \"\n\t\ttoPrint = toPrint + \"Time of Goodness: \" + str(self.ToG) + \" \"\n\t\ttoPrint = toPrint + \"Goodness: \" + str(self.goodness) + \" \"\n\t\ttoPrint = toPrint + \"Directionality Flag: \" + str(self.DF)\n\t\treturn(toPrint)", "def _stack_values_to_string(self, stack_values):\n\n strings = []\n for stack_value in stack_values:\n if self.solver.symbolic(stack_value):\n concretized_value = \"SYMBOLIC - %s\" % repr(stack_value)\n else:\n if len(self.solver.eval_upto(stack_value, 2)) == 2:\n concretized_value = repr(stack_value)\n else:\n concretized_value = repr(stack_value)\n strings.append(concretized_value)\n\n return \" .. \".join(strings)", "def get_perfdata(self) -> str:\n return ' '.join([str(x) for x in self._perfdata])", "def state_to_string(state):\n return ('i: \\t' + str(state[2][0]) + '\\t' + str(state[2][1]) + '\\n'\n 'v: \\t' + str(state[1][0]) + '\\t'+str(state[1][1]) + '\\n'\n 'o: \\t' + str(state[0][0]) + '\\t'+str(state[0][1]) + '\\n'\n 'h: \\t' + str(state[3][0]) + '\\t'+str(state[3][1]) + '\\n'\n 'p: \\t' + str(state[4][0]) + '\\t'+str(state[4][1]) + '\\n')", "def attractorstring(self):\n attractorstring = \"\"\n _, attractor = RBN.get_cycle(self.nodes)\n for count, state in enumerate(attractor):\n attractorstring += str(count) + \" \" + str(state) + linesep\n return attractorstring", "def do_str(self, indent):\n items = [\" \" * indent + \"{:32}\".format(self.name.split(_SEP)[-1]) + \" \" * (10 - indent) +\n \": {:8.4f} seconds\".format(self.time)]\n ctimers = sorted(self.children(), key=lambda t: t.name)\n if ctimers:\n indent += _INDENT\n self_time = self.time - self.child_time()\n items.append(\" \" * indent + \"{:32}\".format(\"self time\") + \" \" * (10 - indent) +\n \": {:8.4f} seconds\".format(self_time))\n for t in ctimers:\n items.append(t.do_str(indent))\n return \"\\n\".join(items)", "def __str__(self):\n r = ''\n r += 'Timings:\\n' + \\\n '\\tOrdering:\\t\\t{}s\\n'.format(self.ordering_time) + \\\n '\\tConstruction:\\t{}s\\n'.format(self.construction_time) + \\\n '\\tMinimising:\\t{}s\\n'.format(self.minimising_time)\n r += 'Nodes:\\n' + \\\n '\\tNot minimized:\\t\\t{}\\n'.format(self.bdd_nodes) + \\\n '\\tMinimised:\\t\\t\\t{}'.format(self.min_bdd_nodes)\n return r", "def __str__(self):\n if self.is_empty():\n return \"Stack is empty\"\n string_repr = \"Top of stack\\n===================\\n\"\n for node in reversed(self.data):\n string_repr += \"{{left_visited: {}, node: {}, right_visited: {}}}\".format(node[\"left_visited\"], node[\"node\"], node[\"right_visited\"])\n return string_repr", "def __str__(self):\n text = \"Attractor \" + self.label + \"\\n\"\n text += \"\\tLength: \"+ str(len(self.states)) + \"\\n\"\n text += \"\\tBasin: \"+ str(self.basin) + \"\\n\"\n text += \"\\tWith nodes: \"+ ', 
'.join(self.node_names) + \"\\n\" \n text += \"\\tWith states: \"\n for a in self.states: text += \" -> \" + state_to_str(a)\n return text.strip()", "def get_print(self):\n return ('Trip\\n\\tstart date: {}\\n\\tfinal date: {}\\n\\tgasoline: {}'.\n format(time.strftime(\"%Y.%m.%d %H:%M\",\n time.localtime(self.start_date)),\n time.strftime(\"%Y.%m.%d %H:%M\",\n time.localtime(self.end_date)),\n self.fuel))", "def __str__(self):\n return '\\t'.join(self._flowgram.split())", "def __str__(self):\n return str(self.stack)", "def getPhasesAsString(self):\n result = \"\"\n for phase in self.iterPhases():\n result += \"%s\\n\" % str(phase)\n return result", "def tprint(s):\n print(\"[\" + time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) + \"] \" + s)", "def __str__(self):\n import abjad\n items = [str(_) for _ in self]\n separator = ' '\n if self.item_class is abjad.NumberedPitchClass:\n separator = ', '\n return 'PC<{}>'.format(separator.join(items))", "def as_str(terminals):\n return \"\".join(map(str, terminals))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_diff_taps_as_string(self) > string Return the derivative filter taps as a formatted string for printing
def get_diff_taps_as_string(self): return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps_as_string(self)
[ "def get_diff_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps_as_string(self)", "def get_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_taps_as_string(self)", "def get_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_taps_as_string(self)", "def get_perfdata(self) -> str:\n return ' '.join([str(x) for x in self._perfdata])", "def delta2str(td: timedelta) -> str:\n s = \"\"\n\n def build_s(v, suffix):\n nonlocal s\n if v > 0:\n s += f\"{v}{suffix}\"\n\n days_left, seconds_left = float(td.days), td.seconds\n y = int(days_left / DAYS_PER_YEAR)\n days_left -= y * DAYS_PER_YEAR\n build_s(y, \"y\")\n d = int(days_left)\n build_s(d, \"d\")\n seconds_left += int((days_left - d) * SEC_PER_HOUR * 24)\n build_s(int(seconds_left / SEC_PER_HOUR), \"h\")\n build_s(int(seconds_left % SEC_PER_HOUR), \"s\")\n return s", "def generate_output(tree_diff):\n output_diff = []\n for diff in tree_diff:\n if diff['previous_value']:\n output_diff.append(\n '{0}{1}: {2}\\n{3}{4}: {5}\\n'.format(\n STATES['deleted'],\n diff['name'],\n diff['previous_value'],\n STATES['added'],\n diff['name'],\n diff['actual_value'],\n ),\n )\n else:\n output_diff.append(\n '{0}{1}: {2}\\n'.format(\n STATES[diff['state']],\n diff['name'],\n diff['actual_value'],\n ),\n )\n return ''.join(['{\\n'] + output_diff + ['}'])", "def _fmt_tags(self):\n return \"\\t\".join(\n \"{}:{}:{}\".format(k, REV_TYPES.get(k), v) for k, v in self[-1].items()\n )", "def __str__(self):\n return ''.join(list(self.signal_history))", "def __repr__(self):\n s = ''\n if self.tags != []:\n s += f'{\".\".join(self.tags)}.'\n s += f'{self.transfer_number}'\n return s", "def __repr__(self):\n k = self._k\n if k == 1:\n kth = 'First'\n elif k == 2:\n kth = 'Second'\n elif k == 3:\n kth = 'Third'\n else:\n kth = '%s-th'%k\n return \"%s derivative of %s\"%(kth, self._lseries)", "def diff2str(diff):\n diff_str = str(diff)\n diff_str = diff_str.replace(\"\\n---\", _(\"\\n\\n::\\n\\n\"), 1)\n diff_str = diff_str.replace(\"\\nlhs\", _(\"\\n\\nAvant\")).replace(\"\\nrhs\", _(\"\\n\\nAprès\"))\n diff_str = diff_str.replace(\"\\nfile added in rhs\", \"\")\n diff_str = diff_str.replace(\"\\nfile deleted in rhs\", _(\"\\n\\n::\\n\\nFichier supprimé\"))\n diff_str = diff_str.replace(\"\\nfile renamed from \", _(\"\\n\\n::\\n\\nFichier renommé, à l'origine \"))\n diff_str = diff_str.replace(\"\\nfile renamed to \", _(\"\\n\\n::\\n\\nFichier renommé en \"))\n diff_str = diff_str.replace(\"\\nOMITTED BINARY DATA\", _(\"\\n\\n::\\n\\nDonnées binaires omises\"))\n\n double_colon_idx = diff_str.find(\"::\")\n diff_str = diff_str[:double_colon_idx] + diff_str[double_colon_idx:].replace(\"\\n\", \"\\n \")\n\n return diff_str", "def _stack_values_to_string(self, stack_values):\n\n strings = []\n for stack_value in stack_values:\n if self.solver.symbolic(stack_value):\n concretized_value = \"SYMBOLIC - %s\" % repr(stack_value)\n else:\n if len(self.solver.eval_upto(stack_value, 2)) == 2:\n concretized_value = repr(stack_value)\n else:\n concretized_value = repr(stack_value)\n strings.append(concretized_value)\n\n return \" .. 
\".join(strings)", "def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps(self)", "def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps(self)", "def to_string(self):\n string = []\n\n if isinstance(self.weights, list): # This State is belong to dur model, print name only\n string.append(\"~s\" + ' \"' + self.name + '\"')\n for ste in self.pdf:\n if ste:\n string.append(ste.to_string())\n\n if \"\" in string:\n string.remove(\"\")\n\n return \"\\n\".join(string)", "def dxfstr(self) -> str:\n return ''.join(tag.dxfstr() for tag in self.dxftags())", "def attractorstring(self):\n attractorstring = \"\"\n _, attractor = RBN.get_cycle(self.nodes)\n for count, state in enumerate(attractor):\n attractorstring += str(count) + \" \" + str(state) + linesep\n return attractorstring", "def __str__(self):\n r = ''\n r += 'Timings:\\n' + \\\n '\\tOrdering:\\t\\t{}s\\n'.format(self.ordering_time) + \\\n '\\tConstruction:\\t{}s\\n'.format(self.construction_time) + \\\n '\\tMinimising:\\t{}s\\n'.format(self.minimising_time)\n r += 'Nodes:\\n' + \\\n '\\tNot minimized:\\t\\t{}\\n'.format(self.bdd_nodes) + \\\n '\\tMinimised:\\t\\t\\t{}'.format(self.min_bdd_nodes)\n return r", "def toString(self):\n ln0 = Interval.toString(self)\n ln1 = \"Tone Frequency = %d Hz \\n\"%self.freq\n ln2 = \"freqType= %s\"%self.freqType #raj-change for freq\n return ln0+ln1+ln2 #raj- added ln2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }