query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
negatives: list (length 19 to 20)
metadata: dict
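The schema above describes one retrieval row: a natural-language query, the positive code document, a list of hard-negative documents, and a metadata dict giving the training objective. A minimal loading sketch, assuming the dump comes from a Hugging Face datasets-style table with these columns; the dataset identifier below is a placeholder, not the real path.

from datasets import load_dataset

# Placeholder identifier -- substitute the actual dataset path/name.
ds = load_dataset("org/code-retrieval-dump", split="train")

row = ds[0]
print(row["query"])            # natural-language description of the code
print(row["document"])         # the matching (positive) code snippet
print(len(row["negatives"]))   # 19 or 20 hard-negative snippets
print(row["metadata"])         # e.g. {"objective": {"triplet": [["query", "document", "negatives"]]}}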
Returns an array of the vertex indices of profiles needed to fill the section
def get_indices_section(self):
    return np.unique(self.sv_map.volume_surf_coordinates['triangles'])
[ "def expand_indexed_profiles(self,indices,profiles):\n profiles_full = np.zeros((655362,np.shape(profiles)[1]))\n profiles_full[indices]=profiles\n return profiles_full", "def neighbor_indices(self):", "def get_index_profile(self):\n return np.copy(self._index_profile)", "def get_peak_indices( SP_obj, peaklist_ppm ):\n index_pl = []\n for entry in peaklist_ppm:\n\tresid = entry[0]\n\tNindex = SP_obj.uc0( str(entry[1]) + \" ppm\" )\n\tHindex = SP_obj.uc1( str(entry[2]) + \" ppm\" )\n\tindex_pl.append( [ resid, Nindex, Hindex ] )\n return index_pl", "def indices_for_prop_level_2d(prop_level: int, start_point: tuple) -> set:\n x, y = start_point\n indices = set([])\n # Will double add corners, but no worries because we use a set\n for i in range(-prop_level, prop_level + 1):\n indices.add((x - prop_level, y + i))\n indices.add((x + prop_level, y + i))\n\n indices.add((x + i, y - prop_level))\n indices.add((x + i, y + prop_level))\n return indices", "def indices_for_prop_level_3d(prop_level: int, start_point: tuple) -> set:\n x, y, z = start_point\n indices = set([])\n # Will double add corners, but no worries because we use a set\n for i in range(-prop_level, prop_level + 1):\n for j in range(-prop_level, prop_level + 1):\n indices.add((x - prop_level, y + i, z + j))\n indices.add((x + prop_level, y + i, z + j))\n\n indices.add((x + i, y - prop_level, z + j))\n indices.add((x + i, y + prop_level, z + j))\n\n indices.add((x + i, y + j, z - prop_level))\n indices.add((x + i, y + j, z + prop_level))\n return indices", "def _getPtychographyPositions(self) -> np.ndarray:\n\n p1 = self._scan_params.scan_area_buffer_npix\n p2 = self._probe_params.npix - p1 - self._obj_params.obj_w_border_npix\n positions_x = np.arange(p1, p2, self._scan_params.scan_step_npix)\n positions = []\n\n for r in positions_x:\n for c in positions_x:\n positions.append([r,c])\n return np.array(positions)", "def get_super_pixels(segments, seg):\r\n colors = []\r\n for row, i in enumerate(segments):\r\n for column, val in enumerate(i):\r\n if seg == val:\r\n colors.append((row,column))\r\n return colors", "def init_vertex_ids(self):\n for i, ndd in enumerate(self.altruists):\n ndd.index = i\n for i, v in enumerate(self.graph.vs):\n v.index = i + len(self.altruists)", "def profile(self, domain, component):\n np = domain.nPoints()\n x = zeros(np,'d')\n for n in range(np):\n x[n] = self.value(domain, component, n)\n return x", "def indices():\n return [1.0, 3.0, 1.0, 3.0, 1.0]", "def getcontour_points(lower, upper, array):\n for n in range(lower, upper + 1):\n x = landmarks.part(n).x\n y = landmarks.part(n).y\n array.append((x, y))\n # to return the eye array\n return array", "def prop2part(state_space, cont_props_dict):\n first_poly = [] #Initial Region's polytopes\n first_poly.append(state_space)\n\n regions = [pc.Region(first_poly)]\n\n for cur_prop in cont_props_dict:\n cur_prop_poly = cont_props_dict[cur_prop]\n\n num_reg = len(regions)\n prop_holds_reg = []\n\n for i in range(num_reg): #i region counter\n region_now = regions[i].copy()\n #loop for prop holds\n prop_holds_reg.append(0)\n\n prop_now = regions[i].props.copy()\n\n dummy = region_now.intersect(cur_prop_poly)\n\n # does cur_prop hold in dummy ?\n if pc.is_fulldim(dummy):\n dum_prop = prop_now.copy()\n dum_prop.add(cur_prop)\n\n # is dummy a Polytope ?\n if len(dummy) == 0:\n regions[i] = pc.Region([dummy], dum_prop)\n else:\n # dummy is a Region\n dummy.props = dum_prop.copy()\n regions[i] = dummy.copy()\n prop_holds_reg[-1] = 1\n else:\n #does not hold in the 
whole region\n # (-> no need for the 2nd loop)\n regions.append(region_now)\n continue\n\n #loop for prop does not hold\n regions.append(pc.Region([], props=prop_now) )\n dummy = region_now.diff(cur_prop_poly)\n\n if pc.is_fulldim(dummy):\n dum_prop = prop_now.copy()\n\n # is dummy a Polytope ?\n if len(dummy) == 0:\n regions[-1] = pc.Region([pc.reduce(dummy)], dum_prop)\n else:\n # dummy is a Region\n dummy.props = dum_prop.copy()\n regions[-1] = dummy.copy()\n else:\n regions.pop()\n\n count = 0\n for hold_count in range(len(prop_holds_reg)):\n if prop_holds_reg[hold_count]==0:\n regions.pop(hold_count-count)\n count+=1\n\n mypartition = PropPreservingPartition(\n domain = copy.deepcopy(state_space),\n regions = regions,\n prop_regions = copy.deepcopy(cont_props_dict)\n )\n\n mypartition.adj = pc.find_adjacent_regions(mypartition).copy()\n\n return mypartition", "def peers_indices_unit(cell):\n bins = lambda x: x//3*3\n (r, c) = cell\n return {\n (i, j)\n for i in range(bins(r), bins(r)+3)\n for j in range(bins(c), bins(c)+3)\n }", "def get_grain_positions(self):\n positions = np.empty((self.get_number_of_grains(), 3))\n for i in range(self.get_number_of_grains()):\n positions[i] = self.grains[i].position\n return positions", "def indices(self):\n nx, ny, nz = self.shape()\n return [(ix,iy,iz) for ix in range(nx) for iy in range(ny) for iz in range(nz)]", "def _plaquette_indices(cls, code):\n max_site_x, max_site_y = code.site_bounds\n row, rows = [], []\n for y in range(max_site_y, -2, -1):\n row = []\n for x in range(-1, max_site_x + 1):\n index = x, y\n row.append(tuple(index))\n rows.append(row)\n # construct empty array of indices then assign elements of rows\n # Note: We cannot construct array directly from rows because numpy will interpret tuples as an extra dimension.\n # An alternative with (non-hashable) numpy.void types is \"np.array(rows, dtype=[('x', int), ('y', int)])\"\n indices = np.empty((len(rows), len(row)), dtype=object)\n indices[...] = rows\n return indices", "def __fetch_heatmap_data_from_profile(self):\n # Read lines from file.\n with open(self.pyfile.path, 'r') as file_to_read:\n for line in file_to_read:\n # Remove return char from the end of the line and add a\n # space in the beginning for better visibility.\n self.pyfile.lines.append(' ' + line.strip('\\n'))\n\n # Total number of lines in file.\n self.pyfile.length = len(self.pyfile.lines)\n\n # Fetch line profiles.\n line_profiles = self.__get_line_profile_data()\n\n # Creating an array of data points. As the profile keys are 1 indexed\n # we should range from 1 to line_count + 1 and not 0 to line_count.\n arr = []\n for line_num in xrange(1, self.pyfile.length + 1):\n if line_profiles.has_key(line_num):\n arr.append([line_profiles[line_num][-1]])\n else:\n arr.append([0.0])\n\n # Create nd-array from list of data points.\n self.pyfile.data = np.array(arr)", "def computePindices(self):\n\n self.surf_index_P = PUBSlib.computesurfindices(self.nsurf, self.nedge, self.ngroup, self.surf_edge, self.edge_group, self.group_n)\n self.edge_index_P = PUBSlib.computeedgeindices(self.nedge, self.ngroup, self.edge_group, self.group_n)\n self.nT = 0\n self.nT += self.edge_index_P[-1,1]\n self.nT += 2*self.surf_index_P[-1,1]\n self.nP = self.nvert\n self.nP += self.edge_index_P[-1,1]\n self.nP += self.surf_index_P[-1,1]\n\n if self.printInfo:\n print '# Points =',self.nP" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a set of profile indices and the matching profiles, expand the profiles into a full matrix with zeros in the gaps
def expand_indexed_profiles(self, indices, profiles):
    profiles_full = np.zeros((655362, np.shape(profiles)[1]))
    profiles_full[indices] = profiles
    return profiles_full
[ "def expand_profiles(sarr, full_players, profiles): # pylint: disable=too-many-locals\n reduced_players = np.add.reduceat(profiles, sarr.role_starts, 1)\n utils.check(\n np.all(full_players >= reduced_players),\n \"full_players must be at least as large as reduced_players\",\n )\n utils.check(\n np.all((reduced_players > 0) | ((full_players == 0) & (reduced_players == 0))),\n \"reduced_players must be greater than zero\",\n )\n # Maximum prevents divide by zero error; equivalent to + eps\n rep_red_players = np.maximum(reduced_players, 1).repeat(sarr.num_role_strats, -1)\n rep_full_players = full_players.repeat(sarr.num_role_strats, -1)\n num_profs = profiles.shape[0]\n expand_profs = profiles * rep_full_players // rep_red_players\n unassigned = full_players - np.add.reduceat(expand_profs, sarr.role_starts, 1)\n\n # Order all possible strategies to find which to increment\n role_order = np.broadcast_to(sarr.role_indices, (num_profs, sarr.num_strats))\n error = profiles * rep_full_players / rep_red_players - expand_profs\n alpha_inds = np.arange(sarr.num_strats)\n alpha_ord = np.broadcast_to(alpha_inds, (num_profs, sarr.num_strats))\n inds = np.asarray(\n np.argsort(np.rec.fromarrays([role_order, -error, -profiles, alpha_ord]), 1)\n )\n\n # Map them to indices in the expand_profs array, and mask out the first\n # that are necessary to meet unassigned\n rectified_inds = inds + np.arange(num_profs)[:, None] * sarr.num_strats\n ind_mask = np.arange(sarr.num_strats) < np.repeat(\n sarr.role_starts + unassigned, sarr.num_role_strats, 1\n )\n expand_profs.flat[rectified_inds[ind_mask]] += 1\n return expand_profs", "def build_matrix_A(API2idx, apk2call, apk2idx):\n# matrix_A = np.zeros((len(apk2idx), len(API2idx)))\n matrix_A = scipy.sparse.lil_matrix((len(apk2idx), len(API2idx)))\n total = len(apk2idx)\n counter = 0\n for apk in apk2idx:\n counter += 1\n print(\"{:.2f}%\".format(counter / total * 100), apk)\n apk_idx = apk2idx[apk]\n API_indices = apk2call[apk]\n for API_idx in API_indices:\n matrix_A[apk_idx, API_idx] = 1\n return matrix_A", "def sparse_to_full(\n joint_angles_sparse, sparse_joints_idxs, tot_nr_joints, rep=\"rotmat\"\n):\n joint_idxs = sparse_joints_idxs\n # assert rep in [\"rotmat\", \"quat\", \"aa\"]\n assert rep in [\"rotmat\"]\n dof = 9 if rep == \"rotmat\" else 4 if rep == \"quat\" else 3\n n_sparse_joints = len(sparse_joints_idxs)\n angles_sparse = np.reshape(joint_angles_sparse, [-1, n_sparse_joints, dof])\n\n # fill in the missing indices with the identity element\n smpl_full = np.zeros(\n shape=[angles_sparse.shape[0], tot_nr_joints, dof]\n ) # (N, tot_nr_joints, dof)\n if rep == \"quat\":\n smpl_full[..., 0] = 1.0\n elif rep == \"rotmat\":\n smpl_full[..., 0] = 1.0\n smpl_full[..., 4] = 1.0\n smpl_full[..., 8] = 1.0\n else:\n pass # nothing to do for angle-axis\n\n smpl_full[:, joint_idxs] = angles_sparse\n smpl_full = np.reshape(smpl_full, [-1, tot_nr_joints * dof])\n return smpl_full", "def pad(self, matrices, pad_value):\n shapes = [m.shape for m in matrices]\n M, N = sum([s[0] for s in shapes]), sum([s[1] for s in shapes])\n zeros = torch.FloatTensor(np.zeros((M, N))).to(self.device)\n pad_matrices = pad_value + zeros\n i, j = 0, 0\n for k, matrix in enumerate(matrices):\n m, n = shapes[k]\n pad_matrices[i:i+m, j:j+n] = matrix\n i += m\n j += n\n return pad_matrices", "def create_to_hit_matrices(self) -> None:\n\t\tk_range = self.k_range\n\t\tmatch_tuples = self.match_tuples\n\t\tsketches = self.sketches\n\t\tnum_hashes = self.num_hashes\n\n\t\t# create k_range 
spare matrices. Rows index by genomes (sketch/hash index), columns index by k_mer_loc\n\t\trow_ind_dict = dict()\n\t\tcol_ind_dict = dict()\n\t\tvalue_dict = dict()\n\t\tunique_kmers = dict() # this will keep track of the unique k-mers seen in each genome (sketch/hash loc)\n\t\tfor k_size in k_range:\n\t\t\trow_ind_dict[k_size] = []\n\t\t\tcol_ind_dict[k_size] = []\n\t\t\tvalue_dict[k_size] = []\n\n\t\tmatch_tuples = set(match_tuples) # uniquify, so we don't make the row/col ind dicts too large\n\n\t\t# convert the match tuples to the necessary format to be turned into a matrix/tensor\n\t\tfor hash_loc, k_size_loc, kmer_loc in match_tuples:\n\t\t\tif hash_loc not in unique_kmers:\n\t\t\t\tunique_kmers[hash_loc] = set()\n\t\t\tk_size = k_range[k_size_loc]\n\t\t\tkmer = sketches[hash_loc]._kmers[kmer_loc][:k_size]\n\t\t\tif kmer not in unique_kmers[\n\t\t\t\thash_loc]: # if you've seen this k-mer before, don't add it. NOTE: this makes sure we don't over count\n\t\t\t\trow_ind_dict[k_size].append(hash_loc)\n\t\t\t\tcol_ind_dict[k_size].append(kmer_loc)\n\t\t\t\tvalue_dict[k_size].append(1) # only counting presence/absence, so just a 1 for the value\n\t\t\t\tunique_kmers[hash_loc].add(kmer)\n\n\t\t# list of matrices that contain the hits: len(hit_matrices) == k_sizes\n\t\t# each hit_matrices[i] has rows indexed by which genome/sketch they belong to\n\t\t# columns indexed by where the k-mer appeared in the sketch/hash list\n\t\tfor k_size in k_range:\n\t\t\t# convert to matrices\n\t\t\tmat = csc_matrix((value_dict[k_size], (row_ind_dict[k_size], col_ind_dict[k_size])),\n\t\t\t\t\t\t\t shape=(len(sketches), num_hashes))\n\t\t\tself.hit_matrices.append(mat)", "def populate_score_matrices(self, align_params):\n\t\tfor i in range(1,len(align_params.seq_a)+1):\n\t\t\tfor j in range(1,len(align_params.seq_b)+1):\n\t\t\t\tself.update(i, j, align_params)", "def build_key_profile_matrix(key_prof_maj, key_prof_min):\n # Normalize Key profiles\n key_prof_maj /= np.sum(key_prof_maj)\n key_prof_min /= np.sum(key_prof_min)\n\n # Create matrix of key profiles\n Key_prof_mat = np.vstack(\n (circulant(key_prof_maj).transpose(), circulant(key_prof_min).transpose())\n )\n\n return Key_prof_mat", "def zero_fill_2d(dst):\n for i in range(dst.shape[0]):\n for j in range(dst.shape[1]):\n dst[i, j] = 0", "def build_matrix(counts):\n total_counts = get_total_counts(counts)\n for w, contexts in counts.items():\n for c in contexts:\n counts[w][c] = ppmi(w, c, counts, total_counts)\n return counts", "def fill_zeros(heading):\n\theading_np = heading.detach().cpu().numpy()\n\theading_pd = pd.DataFrame(heading_np)\n\theading_pd = heading_pd.replace(to_replace=0, method=\"ffill\").replace(to_replace=0, method=\"bfill\")\n\treturn torch.from_numpy(heading_pd.values).to(heading) \n\t\n#\tprint(heading_pd)\n#\tinput(\"pause..\")\n\t\"\"\"\n\tneighbors = heading_np.shape[1]\n\tslen = heading_np.shape[0]\n\tfor n in range(neighbors):\n\t\tif not (heading_np[:,n]==0).any():\n\t\t\tcontinue\n\t\tidx = np.arange(slen)\n\t\tidx[heading_np[:,n]==0]=0\n\t\tidx = np.maximum.accumulate(idx,axis=0)\n\t\tprint(idx)\n\t\theading_np[:,n] = heading_np[idx,n]\n\t\tprint(heading_np) \n\t\tif (heading_np[:,n]==0).any():\n\t\t\tidx = np.arange(slen)\n\t\t\tidx[heading_np[:,n]==0]=0\n\t\t\tidx = np.minimum.accumulate(idx[::-1],axis=0)\n\t\t\tprint(idx)\n\t\t\theading_np[:,n] = heading_np[idx[::-1],n]\n\t\"\"\"", "def _create_pick_matrix(num_views, num_assets, pick_list, asset_names):\n\n pick_matrix = np.zeros((num_views, num_assets))\n pick_matrix 
= pd.DataFrame(pick_matrix, columns=asset_names)\n for view_index, pick_dict in enumerate(pick_list):\n assets = list(pick_dict.keys())\n values = list(pick_dict.values())\n pick_matrix.loc[view_index, assets] = values\n return pick_matrix.values", "def _create_pick_matrix(num_views, num_assets, pick_list, asset_names):\r\n\r\n pick_matrix = np.zeros((num_views, num_assets))\r\n pick_matrix = pd.DataFrame(pick_matrix, columns=asset_names)\r\n for view_index, pick_dict in enumerate(pick_list):\r\n assets = list(pick_dict.keys())\r\n values = list(pick_dict.values())\r\n pick_matrix.loc[view_index, assets] = values\r\n return pick_matrix.values", "def create_transition_matrix(self):\n\n # For each state, create a transition probability for state_i --> state_j\n # We initialize the transition probabilites as decreasing to more distant states\n transition_list = []\n for state in range(self.n_profiles):\n init_probs = [1] * self.n_profiles\n init_mult = [(1 / ((abs(x - state) + 1) * 1.5)) * init_probs[x] for x in range(len(init_probs))]\n state_transition_prob = np.divide(init_mult, sum(init_mult))\n transition_list.append(state_transition_prob)\n\n transition_matrix = np.array(transition_list)\n print('Initial transition matrix created for {} states: '.format(self.n_profiles))\n print(transition_matrix)\n return transition_matrix", "def upstream_genmatrix(upid):\n\n N = upid.shape[0]\n\n # Preallocate the sparse matrix.\n # Since we know that each cell flows into at most one other cell (some don't flow into any),\n # we can be sure we will need at most N nonzero slots.\n ivals = np.zeros((N,), dtype=int)\n jvals = np.zeros((N,), dtype=int)\n lb = 0 # Lower bound: the first index for each group of entries\n\n for i in range(N):\n numUp = upid[i, 8] # Number of upstream cells for the current cell\n if numUp > 0: # Skip if no upstream cells\n ub = lb + numUp\n jvals[lb:ub] = upid[i, 0:numUp]\n ivals[lb:ub] = i + 1\n lb = ub\n\n data = np.ones_like(ivals[0:ub])\n row = ivals[0:ub] - 1\n col = jvals[0:ub] - 1\n\n UM = sparse.coo_matrix((data, (row, col)), shape=(N, N)) - sparse.eye(N, dtype=int)\n\n return UM", "def generate_submatrices (m, absorbing_index, nonabsorbing_index, identity, zero, sub_r, sub_q):\r\n standard_form_by_index = absorbing_index + nonabsorbing_index\r\n\r\n # Define zero matrix\r\n for x in range(len(absorbing_index)):\r\n zero.append([0] * (len(standard_form_by_index) - len(absorbing_index)))\r\n\r\n # Define sub_r and sub_q matrix\r\n for na_index in nonabsorbing_index:\r\n temp_r, temp_q = [], []\r\n for index, value in enumerate(m[na_index]):\r\n if index in absorbing_index:\r\n temp_r.append(Fraction(value, sum(m[na_index])))\r\n else:\r\n temp_q.append(Fraction(value, sum(m[na_index])))\r\n\r\n sub_r.append(temp_r)\r\n sub_q.append(temp_q)\r\n\r\n #Define Identity matrix\r\n for x in range(len(nonabsorbing_index) - 1):\r\n identity[0].append(0)\r\n for x in range(len(nonabsorbing_index) - 1):\r\n identity.append(identity[x][-1:] + identity[x][:-1])", "def generate_sparse_matrices(final_results, ordered_tags_map, top_cells):\n umi_results_matrix = sparse.dok_matrix(\n (len(ordered_tags_map), len(top_cells)), dtype=int32\n )\n read_results_matrix = sparse.dok_matrix(\n (len(ordered_tags_map), len(top_cells)), dtype=int32\n )\n for i, cell_barcode in enumerate(top_cells):\n for j, TAG in enumerate(final_results[cell_barcode]):\n if final_results[cell_barcode][TAG]:\n umi_results_matrix[ordered_tags_map[TAG], i] = len(\n final_results[cell_barcode][TAG]\n )\n 
read_results_matrix[ordered_tags_map[TAG], i] = sum(\n final_results[cell_barcode][TAG].values()\n )\n return (umi_results_matrix, read_results_matrix)", "def __init__(self, topics: List[int]):\n base_topics = np.array(sorted(set(topics)))\n topics = base_topics.copy().reshape(-1, 1)\n self.mappings_ = np.hstack([topics.copy(), topics.copy()]).tolist()", "def permutation_matrix(N, srcs, dsts):\n M = np.identity(N, dtype='int64')\n M[srcs, srcs] = 0\n M[dsts, srcs] = 1\n return M", "def set_row_zero(mat: List[List[int]], i: int) -> None:\n n = len(mat[0])\n mat[i] = [0] * n" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plots indexed profiles back onto the 2D histology using the surface-to-volume mapper
def plot_indexed_profiles(self, indices, profiles, interpolation='linear'):
    expanded_profiles = self.expand_indexed_profiles(indices, profiles)
    block = self.orient_local_mncfile(
        np.squeeze(self.sv_map.map_profiles_to_block(expanded_profiles, interpolation=interpolation)))
    return block
[ "def profile_asterix(data, center=None, nprofiles=5, clim=None):\n fig = plt.figure(figsize=(17,11))\n ax1 = fig.add_subplot(121)\n im = plt.imshow(data,cmap=plt.cm.jet)\n cb = plt.colorbar()\n if clim:\n plt.clim(clim)\n plt.xlabel('col #')\n plt.ylabel('row #')\n\n ax2 = fig.add_subplot(122)\n plt.axhline(0,c='k')\n plt.title('profiles')\n plt.ylabel('deformation')\n plt.xlabel('pixel')\n\n nrow, ncol = data.shape\n if center:\n r0, c0 = center\n else:\n r0 = nrow/2\n c0 = ncol/2\n\n #profiles = {}\n #colors = ['b', 'g', 'r', 'c', 'm','y','k']\n slopes = np.linspace(0, np.pi, nprofiles)\n for i,rad in enumerate(slopes[:-1]): #don't repeat 0 & pi\n # Add profile line to interferogram\n #print i, rad, colors[i]\n #special case division by zeros\n if rad == 0: #could also do m=np.inf\n start = (0, r0)\n end = (ncol, r0)\n elif rad == np.pi/2:\n start = (c0, 0)\n end = (c0, nrow)\n else:\n m = np.tan(rad)\n leftIntercept = r0 + m*-c0 #NOTE: imshow takes care of axes flipping automatically!\n rightIntercept = r0 + m*(ncol-c0)\n start = (0, leftIntercept)\n end = (ncol, rightIntercept)\n ax1.plot([start[0],end[0]], [start[1],end[1]], scalex=0, scaley=0)\n\n # Add profile to adjacent plot\n #NOTE: mean, probably more representative\n length = np.floor(np.hypot(start[0]-end[0], start[1]-end[1])) #sample each pixel line passes through\n cols = np.linspace(start[0], end[0]-1, length) #NOTE end-2 to make sure indexing works\n rows = np.linspace(start[1], end[1]-1, length)\n\n # Radial-plot\n radii = np.hypot(cols-c0, rows-r0)\n\n # East Positive (to check for E-W symmetry)\n if rad == np.pi/2: #special case for vertical profile\n radii[np.where(rows>r0)] *= -1\n else:\n radii[np.where(cols<c0)] *= -1\n\n # North Positive\n #if rad == 0:\n # radii[np.where(cols<c0)] *= -1\n #else:\n # radii[np.where(rows>r0)] *= -1\n\n # not sure why there are indexing errors:\n good = (rows <= data.shape[0]) & (cols <= data.shape[1])\n rows = rows[good]\n indrows = rows.astype(np.int)\n cols = cols[good]\n indcols = cols.astype(np.int)\n pPoints = data[indrows, indcols]\n ax2.plot(radii[good], pPoints, marker='.')\n\n #ax1.plot(c0,r0, marker='s', mec='k', mew=2, mfc='none', scalex=0, scaley=0)\n ax1.plot(c0,r0,'ko', ms=2, scalex=0, scaley=0)", "def manual_pv_slice_series():\n\n \"\"\"\n PV cut orientation, vertical or horizontal\n Vertical means slice at a single RA and plot velocity vs Dec\n Horizontal means slice at a single Dec and plot velocity vs RA\n \"\"\"\n orientation = 'horizontal'\n start_idx, step_idx = 25, 50\n\n # Load cube\n line_stub = 'cii'\n if line_stub in large_map_filenames:\n # Use the custom filename rather than the default\n filename = large_map_filenames[line_stub]\n else:\n # Use default filename from cube_utils (many of these are centered around Pillars)\n filename = line_stub\n cube_obj = cube_utils.CubeData(filename).convert_to_K().convert_to_kms()\n dimension_size = (cube_obj.data.shape[2] if orientation=='vertical' else cube_obj.data.shape[1])\n\n # Make image\n ref_vel_lims = (10*kms, 35*kms)\n ref_mom0 = cube_obj.data.spectral_slab(*ref_vel_lims).moment0()\n ref_img = ref_mom0.to_value()\n\n # Set colors\n pv_cmap = 'plasma'\n img_cmap = 'Greys_r'\n line_color = marcs_colors[1]\n\n # Loop thru slice index\n for slice_idx in range(start_idx, dimension_size, step_idx):\n\n if orientation == 'vertical':\n # Cube index order is V,Y,X = Velocity,Dec,RA = V,I,J\n cube_slices = (slice(None), slice(None), slice_idx)\n else:\n cube_slices = (slice(None), slice_idx, slice(None))\n\n 
pv_slice = cube_obj.data[cube_slices]\n\n # First try to remake fig/axes each time. Try persistent if slow\n fig = plt.figure(figsize=(8, 10))\n gs = fig.add_gridspec(2, 1)\n ax_img = fig.add_subplot(gs[0,0], projection=cube_obj.wcs_flat)\n ax_pv = fig.add_subplot(gs[1,0], projection=pv_slice.wcs)\n\n im = ax_img.imshow(ref_img, origin='lower', vmin=0, cmap=img_cmap)\n fig.colorbar(im, ax=ax_img, label=ref_mom0.unit.to_string('latex_inline'))\n\n im = ax_pv.imshow(pv_slice.to_value(), origin='lower', vmin=0, cmap=pv_cmap)\n fig.colorbar(im, ax=ax_pv, label=pv_slice.unit.to_string('latex_inline'), orientation='horizontal')\n\n # Plot line\n if orientation == 'vertical':\n plot_line = ax_img.axvline\n else:\n plot_line = ax_img.axhline\n plot_line(slice_idx, color=line_color, linewidth=2)\n # Reference image velocity interval stamp\n ax_img.text(0.1, 0.9, make_vel_stub(ref_vel_lims), color=line_color, ha='left', va='bottom')\n\n # Clean up axes labels\n # ax_img.set_xlabel(\"RA\")\n # ax_img.set_ylabel(\"Dec\")\n ax_pv.coords[1].set_format_unit(kms)\n # 2023-04-26, 06-07\n savename = f\"/home/ramsey/Pictures/2023-04-26/m16_pv_{orientation}_{slice_idx:03d}.png\"\n fig.savefig(savename, metadata=catalog.utils.create_png_metadata(title=f'{line_stub}, using stub/file {filename}', file=__file__, func='manual_pv_slice_series'))", "def pspec(psd2, return_index=True, wavenumber=False, return_stddev=False, azbins=1, binsize=1.0, view=False, **kwargs):\n #freq = 1 + numpy.arange( numpy.floor( numpy.sqrt((image.shape[0]/2)**2+(image.shape[1]/2)**2) ) )\n\n azbins,(freq,zz) = azimuthalAverageBins(psd2,azbins=azbins,interpnan=True, binsize=binsize, **kwargs)\n if len(zz) == 1: zz=zz[0]\n # the \"Frequency\" is the spatial frequency f = 1/x for the standard numpy fft, which follows the convention\n # A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}\n # or\n # F_f = Sum( a_m e^(-2 pi i f x_m) over the range m,m_max where a_m are the values of the pixels, x_m are the\n # indices of the pixels, and f is the spatial frequency\n freq = freq.astype('float') # there was a +1.0 here before, presumably to deal with div-by-0, but that shouldn't happen and shouldn't have been \"accounted for\" anyway\n\n if return_index:\n if wavenumber:\n fftwavenum = (numpy.fft.fftfreq(zz.size*2)[:zz.size])\n return_vals = list((fftwavenum,zz))\n #return_vals = list((len(freq)/freq,zz))\n else:\n return_vals = list((freq,zz))\n # return_vals = list((freq/len(freq),zz))\n else:\n return_vals = list(zz)\n if return_stddev:\n zzstd = azimuthalAverageBins(psd2,azbins=azbins,stddev=True,interpnan=True, binsize=binsize, **kwargs)\n return_vals.append(zzstd)\n\n if view and pyplotOK:\n pyplot.loglog(freq,zz)\n pyplot.xlabel(\"Spatial Frequency\")\n pyplot.ylabel(\"Spectral Power\")\n\n return return_vals", "def flatten_LPF(dictionary, N = 50, scale = 'log', cmap = None, \n\t\tlegend = True, add_face_nums = True, colorbar = True):\n\n\timport matplotlib\n\timport matplotlib.pyplot as plt\n\timport numpy as np \n\timport pandas as pd\n\tfrom matplotlib import cm\n\timport itertools\n\timport matplotlib.lines as mlines\n\n\t#3D\n\tfrom mpl_toolkits.mplot3d import Axes3D\n\timport matplotlib\n\timport matplotlib.pyplot as plt\n\n\t#3D\n\tfrom mpl_toolkits.mplot3d import Axes3D\n\tfrom mpl_toolkits.mplot3d import art3d\n\tfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\n\n\t#2D Hist\n\timport matplotlib.patches as patches\n\tfrom matplotlib.path import Path\n\n\timport matplotlib.colors\n\tfrom 
matplotlib.colors import LogNorm\n\timport copy\n\n\tif not cmap:\n\t\timport colormap\n\t\tcmap = colormap.parula\n\n\t# Converts dictionary to a pandas dataframe\n\tdf = dictionaryToDataFrame(dictionary)\n\n\n\t#Cycles through faces on the LPF\n\tfaces = np.arange(0,10)\n\t#Facecolor of patches, currently transparent\n\talpha = 0 \t\t\t\n\n\t#Parameterizing Visuals\n\tfig = plt.figure(figsize = (10,10)) #size of figure\n\tax = fig.add_subplot(1,1,1, aspect = 'equal') #add subplot with equal axes\n\tax.set_xlim(-2,2) #xlim\n\tax.set_ylim(-2,4) #ylim\n\tax.get_xaxis().set_visible(False)\n\tax.get_yaxis().set_visible(False)\n\t\n\n\t#Total length and width of LPF\n\tLtotal = xsc[5] - xsc[0] \n\tWtotal = ysc[2] - ysc[7] \n\t\n\t# Bins per unit length, Bins / LPF length\n\tndensity = N / Ltotal \n\n\t# Bins along the Y axis on top and bottom\n\tbinsheight = int(Wtotal * ndensity) \n\n\t#number of Bins in the y(z) direction\n\tbinsy = int(H * N / Ltotal) \n\t\n\t#Find Length of sides for normalizing \n\tindicies = []\n\tfor i in faces:\n\t\t# facenumna is array of ONLY hits on face i\n\t\tfacenumna = df.where(df['face'] == i)\n\n\t\t# Makes place where face =! i dissapear\n\t\tfacenum = facenumna[np.isfinite(facenumna['face'])]\n\t\t# only a list of values that are not NaN\n\t\tindicies.append(len(list(facenum.index.values)))\n\t\n\t#colormap stuff\n\tif scale == 'log': \n\t\tvmax = max(indicies)\n\t\tvmin = .5\n\t\tnorm = LogNorm(vmin = vmin, vmax = vmax)\n\t\tfilename = 'flatLPF_log.png'\n\n\telse:\n\t\tvmin = 0\n\t\tvmax = max(indicies) / N\n\t\tnorm = matplotlib.colors.Normalize(vmin, vmax)\n\t\tfilename = 'flatLPF_lin.png'\n\n\t#Sets bad values (ie: log(0) to the lowest value on the map)\n\tmy_cmap = copy.copy(matplotlib.cm.get_cmap(cmap))\n\tmy_cmap.set_bad(my_cmap(0))\n\n\t#Facecolors\n\tsidecolor = '#FF8C00' #orange\n\tcolortop = 'navy' #navy\n\n\tcount = 0\n\n\t# finds what percentage of hits are on that face\n\tlennums = []\n\t\n\t#Parameterizes the faces\n\t# Loops through Faces to create the flattened LPF\n\tfor i in faces:\n\t\tcount += 1\n\n\t\tfacenum, index = getfinite(df,i)\n\t\t# finds what percentage of hits are on that face\n\t\tlennums.append((len(facenum['face'])) * 100.0 / len(df['face']))\n\n\t\t#X and Z switched\n\t\tif i == 0: #Parameterized Correctly, Check done\n\t\t\t#The left most face\n\t\t\tz = xsc[0]\n\t\t\txpush = xsc[0] - H #Places the X correctly on the flattened LPF, does not change data\n\t\t\typush = 0\n\n\t\t\tminfacex = 0 + xpush\n\t\t\tmaxfacex = H + xpush\n\n\t\t\tminfacey = ysc[0] + ypush\n\t\t\tmaxfacey = ysc[1] + ypush\n\n\t\t\twidth = maxfacey - minfacey\n\t\n\t\t\t# Creates the bins, based on area\n\t\t\tbinsx = int((width / Ltotal) * N)\n\t\t\tbinsy = int(H * ndensity)\n\t\t\txs = facenum['zloc'] + xpush\n\t\t\tys = facenum['yloc'] + ypush\n\n\t\t\txs = np.asarray(xs)\n\t\t\tys = np.asarray(ys) \n\t\t \n\t\t\tHist, xedges, yedges = np.histogram2d(xs,ys, bins = [binsy,binsx], \n\t\t\t\t\t\t\trange = [[minfacex, maxfacex], [minfacey, maxfacey]])\n\n\t\t\t# Transform Hist about an axis\n\t\t\tHist = Hist.T \n\n\t\t\t#Makes Patch\n\t\t\txyside0 = [[minfacex, minfacey], [maxfacex, minfacey], [maxfacex, maxfacey], [minfacex, maxfacey]]\n\t\t\tpath0 = Path(xyside0)\n\t\t\tpatch0 = patches.PathPatch(path0, facecolor=sidecolor, lw=2, alpha = alpha)\n\t\t\tax.add_patch(patch0)\n\n\t\t\t#Plots Color and clips onto patch\n\t\t\tax.pcolormesh(xedges, yedges, Hist, \n\t\t\t\t\tnorm = norm, cmap = my_cmap, \n\t\t\t\t\tclip_path = patch0, clip_on = 
True)\n\n\t\t# this side is transformed like 5\n\t\telif i == 1:\n\t\t\t#base vector, pointing from 1 to 2\n\t\t\tbasevectorx = xsc[2] - xsc[1]\n\t\t\tbasevectory = ysc[2] - ysc[1]\n\t\t\tbasevector = [basevectorx,basevectory]\n\t\t\t\n\t\t\t#width of base\n\t\t\twidth = np.sqrt(basevectorx ** 2 + basevectory ** 2)\n\t \n\t\t\txpush = 0\n\t\t\typush = 0\n\n\t\t\tminfacex = 0 \n\t\t\tmaxfacex = width\n\t\t\t\n\t\t\tminfacey = 0 + ypush\n\t\t\tmaxfacey = H + ypush\n\t\t\t \n\t\t\tbinsx = int(width * ndensity) \n\t\t\t\n\t\t\t#point that plot is turning around\n\t\t\txorigin = xsc[2]\n\t\t\tyorigin = ysc[2]\n\t\t\t\n\t\t\t#direction transforming to, unit vector \n\t\t\tgotovector = [1,0]\n\n\t\t\t#data to be transformed\n\t\t\txin = facenum['xloc']\n\t\t\tyin = facenum['yloc']\n\n\t\t\t#transform data, flattens the side so that there are no angles\n\t\t\txprime, yprime = transform(xin, yin, xorigin, yorigin, gotovector, index)\n\t\t\t\n\t\t\t#transformed data, figure out why xorigin must be added\n\t\t\txs = xprime - xorigin \n\t\t\tys = facenum['zloc']\n\n\t\t\tnp.asarray(xs)\n\t\t\tnp.asarray(ys) \n\n\t\t\t#create hist and edges from transformed data\n\t\t\tHist, xedges, yedges = np.histogram2d(xs, ys, bins = [binsx,binsy],\n\t\t\t\t\trange = [[minfacex,maxfacex],[minfacey,maxfacey]])\n\t\t\tHist = Hist.T\n\t\t\t\n\t\t\t#find angles between sides\n\t\t\t# vector perpendicular to the base\n\t\t\tperpbase = [-1 * basevector[1], basevector[0]] \n\t\t\t\n\t\t\tvec1 = basevector\n\t\t\tvec2 = [-1, 0]\n\n\t\t\t# angle between sides\n\t\t\ttheta = 1 * np.arccos(np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2)))\n\t\t\t\n\t\t\tfarvec_constant = (H / (np.linalg.norm(basevector)))\n\t\t\tfarvec = np.multiply(farvec_constant, perpbase)\n\t\t\t\n\t\t\t#creating vertecies for patch\n\t\t\txmax = xsc[1] + farvec[0]\n\t\t\tymax = ysc[1] + farvec[1]\n\t\t\tcornx = xsc[2] + farvec[0]\n\t\t\tcorny = ysc[2] + farvec[1]\n\t\t\txyside1 = [[xsc[1], ysc[1]], [xsc[2], ysc[2]], [cornx, corny], [xmax, ymax]]\n\t\t\t \n\t\t\t#places patch in the right spot\n\t\t\txplace = xorigin - H * np.sin(theta) \n\t\t\typlace = yorigin - H * np.cos(theta) \n\t\t\t\n\t\t\t#patch stuff\n\t\t\tpath1 = Path(xyside1)\n\t\t\tpatch1 = patches.PathPatch(path1, facecolor=sidecolor, lw=2, alpha = alpha)\n\t\t\tax.add_patch(patch1)\n\t\t\t\n\t\t\t#rotate hist, rotate the histogram so that the sides are angled\n\t\t\tx,y = DoRotation(xedges,yedges,(theta))\n\t\t\tax.pcolormesh(x + xplace, y + yplace,Hist, \n\t\t\t\t\tnorm = norm, cmap = my_cmap, alpha = 1, \n\t\t\t\t\tclip_path = patch1, clip_on = True)\n\t\t\t\n\t\t### Y is actually Z\n\t\t# Topmost side that is not the octogon (8)\n\t\telif i == 2: \n\t\t\txpush = 0\n\t\t\typush = 0 \n\t\t\t\n\t\t\tminfacex = xsc[7] + xpush\n\t\t\tmaxfacex = xsc[6] + xpush\n\t\t\n\t\t\tmaxfacey = H + ypush\n\t\t\tminfacey = 0 + ypush \n\t\t\t\n\t\t\twidth = xsc[6] - xsc[7]\n\t\t\t\n\t\t\t# Bins based on area\n\t\t\tbinsx = int((width / Ltotal) * N)\n\n\t\t\txs = facenum['xloc'] + xpush\n\t\t\tys = facenum['zloc'] + ypush\n\n\t\t\tHist, xedges, yedges = np.histogram2d(xs, ys, bins = [binsx, binsy], \n\t\t\t\t\trange = [[minfacex,maxfacex],[minfacey,maxfacey]])\n\t\t\tHist = Hist.T \n\t\t\t\n\t\t\t#Flips histogram up and down (visual purposes)\n\t\t\tHist = np.flipud(Hist)\n\t\t\txyside2 = [[minfacex, ysc[2]], [maxfacex, ysc[2]], \n\t\t\t\t\t[maxfacex, ysc[2] + H], [minfacex, ysc[2] + H]]\n\t\t\t\n\t\t\t# Create patch\n\t\t\tpath2 = Path(xyside2)\n\t\t\tpatch2 = patches.PathPatch(path2, 
facecolor=sidecolor, lw=2, alpha = alpha)\n\t\t\tax.add_patch(patch2)\n\n\t\t\txedges = np.linspace(xsc[2], xsc[3], len(xedges))\n\t\t\tyedges = np.linspace(ysc[2], ysc[2] + H, len(yedges))\n\t\t\t\n\t\t\t#Plots the hist\n\t\t\tax.pcolormesh(xedges, yedges, Hist, norm = norm, cmap = my_cmap, \n\t\t\t\t\tclip_path = patch2, clip_on=True)\n\n\t\t#This side is rotated \n\t\telif i == 3:\n\t\t\t\n\t\t\t#creates the vector pointing from vertex 4 to vertex 3, the base of side 3\n\t\t\tbasevectorx = xsc[3] - xsc[4]\n\t\t\tbasevectory = ysc[3] - ysc[4]\n\t\t\tbasevector = [basevectorx, basevectory]\n\t\t\t\n\t\t\t#Length of the Base\n\t\t\twidth = np.sqrt(basevectorx **2 + basevectory **2)\n\t\t\tbinsx = int(width * ndensity) # bins based on area\n\t\t \n\t\t\t#Bins are not exactly the same, but they are pretty close \n\t\t\tlenbinsx = width / binsx\n\t\t\tlenbinsy = H / binsy\n\n\t\t\t#point that plot is turning around\n\t\t\txorigin = xsc[4] \n\t\t\tyorigin = ysc[4] \n\t\t\t \n\t\t\tmaxfacex = width \n\t\t\tminfacex = 0 \n\t\t\n\t\t\tminfacey = 0 \n\t\t\tmaxfacey = H \n\t\t\t\n\t\t\t#vector points towards transformation\n\t\t\tgotovector = [1,0]\n\t\t\t\n\t\t\t#Data to be Transformed \n\t\t\txin = facenum['xloc']\n\t\t\tyin = facenum['yloc'] \n\t\t \n\t\t\t#transforms data to y = yorigin \n\t\t\txprime, yprime = transform(xin, yin, xorigin, yorigin, gotovector, index)\n\t\t\t\n\t\t\txs = xprime - xorigin \n\t\t\tys = facenum['zloc']\n\t\t\t\n\t\t\tnp.asarray(xs)\n\t\t\tnp.asarray(ys) \n\n\t\t\t#Creates Histogram in Easy (X,Z) reference frame\n\t\t\tHist, xedges, yedges = np.histogram2d(xs, ys, bins = [binsx,binsy],\n\t\t\t\t\trange = [[minfacex, maxfacex],[minfacey,maxfacey]])\n\t\t\tHist = Hist.T\n\t\t \n\t\t\t#vector perpendicular to the base of the side \n\t\t\tperpbase = [basevector[1], -1 * basevector[0]]\n\t\t \n\t\t\t#Find angle between vectors \n\t\t\tvec1 = basevector\n\t\t\tvec2 = [1, 0]\n\t\t\t\n\t\t\t#Angle between vectors, radians\n\t\t\ttheta = 1 * np.arccos(np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))) \n\t\t\t#print(np.degrees(theta)) \n\n\t\t\tfarvec_constant = (H / (np.linalg.norm(basevector))) \n\t\t\tfarvec = np.multiply(farvec_constant, perpbase) #Unit vector point towards top corner\n\t\t\t\n\t\t\txmax = xsc[3] + farvec[0] #X position of top right\n\t\t\tymax = ysc[3] + farvec[1] #Y position of top right\n\t\t\tcornx = xsc[4] + farvec[0] #X position of bot right\n\t\t\tcorny = ysc[4] + farvec[1] #Y position of bot right\n\n\t\t\t# Corners for patch\n\t\t\txyside3 = [[cornx, corny], [xsc[4], ysc[4]], [xsc[3], ysc[3]],\n\t\t\t\t\t\t[xmax, ymax], [cornx, corny]]\n\n\t\t\t# i dont know what these numbers are but they work\n\t\t\t# Constants that kept appearing\n\t\t\toffsetx = 0.062009 \n\t\t\toffsety = -1 * 0.0873899\n\n\t\t\t#Trig to figure out placement on flattened LPF\n\t\t\txplace = xsc[4] + H * np.sin(theta)\n\t\t\typlace = ysc[4] - H * np.cos(theta)\n\n\t\t\tpath3 = Path(xyside3)\n\t\t\tpatch3 = patches.PathPatch(path3, facecolor = sidecolor, lw = 2, alpha = alpha)\n\t\t\tax.add_patch(patch3)\n\t\t\t\n\t\t\t#Rotates Matrix by theta radians\n\t\t\tx, y = DoRotation(xedges, yedges, (-1 * theta))\n\t\t\tax.pcolormesh(x + xplace,y + yplace, Hist, \n\t\t\t\t\tnorm = norm, cmap = my_cmap, clip_path = patch3, clip_on = True)\n\t\t\t\n\t\t### X is actually Z\n\t\telif i == 4: # Checked, parameterized correctly\n\t\t\tz = xsc[5]\n\t\t\txpush = xsc[5] + H\n\t\t\typush = 0 \n\n\t\t\tmaxfacex = 0 + xpush\n\t\t\tminfacex = -1 * H + xpush\n\t\t \n\t\t\tminfacey 
= ysc[0] + ypush\n\t\t\tmaxfacey = ysc[1] + ypush\n\t\t\t\n\t\t\twidth = maxfacey - minfacey\n\n\t\t\t# bins based on area\n\t\t\tbinsx = int((width / Ltotal) * N)\n\n\t\t\txs = -1 * facenum['zloc'] + xpush\n\t\t\tys = facenum['yloc'] + ypush\n\n\t\t\tHist,xedges,yedges = np.histogram2d(xs, ys, bins = [binsy, binsx], \n\t\t\t\t\trange = [[minfacex, maxfacex], [minfacey, maxfacey]])\n\t\t\tHist = Hist.T\n\n\t\t\t#Create patch\n\t\t\txyside4 = [[minfacex, minfacey], [maxfacex, minfacey], \n\t\t\t\t\t\t[maxfacex, maxfacey], [minfacex, maxfacey]]\n\t\t\tpath4 = Path(xyside4)\n\t\t\tpatch4 = patches.PathPatch(path4, facecolor=sidecolor, lw=2, alpha = alpha)\n\t\t\tax.add_patch(patch4)\n\n\t\t\tax.pcolormesh(xedges,yedges,Hist, norm = norm, cmap = my_cmap, \n\t\t\t\t\tclip_path = patch4, clip_on = True)\n\n\t\t#This side is transformed like 1\n\t\telif i == 5:\n\t\t\t\n\t\t\t#base vector, pointing from 6 to 5\n\t\t\tbasevectorx = xsc[5] - xsc[6]\n\t\t\tbasevectory = ysc[5] - ysc[6]\n\t\t\tbasevector = [basevectorx,basevectory]\n\t\t\t\n\t\t\t#width of base\n\t\t\twidth = np.sqrt(basevectorx**2+basevectory**2)\n\t \n\t\t\txpush = 0\n\t\t\typush = 0\n\t\t\n\t\t\t#Pretend that this side is not rotated\n\t\t\tminfacex = 0 \n\t\t\tmaxfacex = width \n\t\t\n\t\t\tminfacey = 0 + ypush\n\t\t\tmaxfacey = H + ypush\n\n\t\t\t# bins based on area\n\t\t\tbinsx = int(width * ndensity)\n\t\t\t \n\t\t\t#point that plot is turning around\n\t\t\txorigin = xsc[6]\n\t\t\tyorigin = ysc[6]\n\t\t\t\n\t\t\t#direction transforming to, unit vector \n\t\t\tgotovector = [1, 0]\n\n\t\t\t#data to be transformed, currently dummy data\n\t\t\txin = facenum['xloc']\n\t\t\tyin = facenum['yloc']\n\n\t\t\t#transform data\n\t\t\txprime, yprime = transform(xin, yin, xorigin, yorigin, gotovector, index)\n\t\t\t\n\t\t\t#transformed data, figure out why xorigin must be added\n\t\t\txs = xprime - xorigin \n\t\t\tys = facenum['zloc']\n\n\t\t\tnp.asarray(xs)\n\t\t\tnp.asarray(ys) \n\n\t\t\t#create hist and edges from transformed data\n\t\t\tHist, xedges, yedges = np.histogram2d(xs, ys, bins = [binsx,binsy],\n\t\t\t\t\trange = [[minfacex, maxfacex],[minfacey, maxfacey]])\n\t\t\tHist = Hist.T\n\t\t\t\n\t\t\t#find angles between sides\n\t\t\t# Vector perpendicular to the base\n\t\t\tperpbase = [-1 * basevector[1], basevector[0]]\n\n\t\t\tvec1 = basevector\n\t\t\tvec2 = [-1, 0]\n\n\t\t\t# Angle between sides\n\t\t\ttheta = np.arccos(np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))) #angle between sides\n\t\t\t\n\t\t\tfarvec_constant = (H / (np.linalg.norm(basevector)))\n\t\t\tfarvec = np.multiply(farvec_constant, perpbase)\n\t\t\t\n\t\t\t#creating vertecies for patch\n\t\t\txmax = xsc[6] - farvec[0]\n\t\t\tymax = ysc[6] - farvec[1]\n\t\t\tcornx = xsc[5] - farvec[0]\n\t\t\tcorny = ysc[5] - farvec[1]\n\t\t\txyside5 = [[xsc[6], ysc[6]], [xsc[5], ysc[5]], [cornx, corny], [xmax, ymax], [xsc[6], ysc[6]]]\n\t\t\t \n\t\t\t#places patch in the right spot\n\t\t\txplace = xorigin + H * np.sin(theta) \n\t\t\typlace = yorigin + H * np.cos(theta) \n\t\t\t\n\t\t\t#patch stuff\n\t\t\tpath5 = Path(xyside5)\n\t\t\tpatch5 = patches.PathPatch(path5, facecolor=sidecolor, lw=2, alpha = alpha)\n\t\t\tax.add_patch(patch5)\n\t\t\t\n\t\t\t#rotate hist\n\t\t\tx,y = DoRotation(-1 * xedges, -1 * yedges, (theta))\n\t\t\tax.pcolormesh(x + xplace,y + yplace, Hist, \n\t\t\t\t\tnorm = norm,cmap = my_cmap, alpha = 1, clip_path = patch5, clip_on = True)\n\t\t\t\n\t\t### Y is actually Z\n\t\telif i == 6: \n\t\t\txpush = 0\n\t\t\typush = 0 \n\t\t\t\n\t\t\tminfacex 
= xsc[7] + xpush\n\t\t\tmaxfacex = xsc[6] + xpush\n\t\t\n\t\t\tminfacey = 0 + ypush\n\t\t\tmaxfacey = H + ypush \n\t\t\t\n\t\t\twidth = xsc[6] - xsc[7]\n\n\t\t\tbinsx = int((width / Ltotal) * N) # bins based on area\n\t\t \n\n\t\t\txs = facenum['xloc'] + xpush\n\t\t\tys = facenum['zloc'] + ypush\n\n\t\t\tHist, xedges, yedges = np.histogram2d(xs, ys, bins = [binsx, binsy], \n\t\t\t\t\trange = [[minfacex, maxfacex], [minfacey, maxfacey]])\n\t\t\tHist = Hist.T \n\t\t\t \n\t\t\txyside6 = [[minfacex, ysc[7] - H], [maxfacex, ysc[7] - H],\n\t\t\t\t\t[maxfacex, ysc[7]], [minfacex, ysc[7]]]\n\n\t\t\tpath6 = Path(xyside6)\n\t\t\tpatch6 = patches.PathPatch(path6, facecolor = sidecolor, lw = 2, alpha = alpha)\n\t\t\t\n\t\t\tax.add_patch(patch6)\n\t\t\t\n\t\t\txedges = np.linspace(xsc[7], xsc[6], len(xedges))\n\t\t\tyedges = np.linspace(ysc[7] - H, ysc[7], len(yedges))\n\t\t\t\n\t\t\t#Plots the hist\n\t\t\tax.pcolormesh(xedges,yedges,Hist, norm = norm, cmap = my_cmap, #interpolation='nearest', origin='lower',\n\t\t\t\t\tclip_path = patch6, clip_on=True)\n\n\t\t#this side is transformed, like 3\n\t\telif i == 7: \n\t\t\t\n\t\t\t#creates the vector pointing from vertex 0 to vertex 7, the base of side 7\n\t\t\tbasevectorx = xsc[0] - xsc[7]\n\t\t\tbasevectory = ysc[0] - ysc[7]\n\t\t\tbasevector = [basevectorx,basevectory]\n\t\t\t\n\t\t\t#Length of the Base\n\t\t\twidth = np.sqrt(basevectorx **2 + basevectory **2)\n\t\t\t# Bins based on area\n\t\t\tbinsx = int(width * ndensity)\n\t\t \n\t\t\t#Bins are not exactly the same, but they are pretty close \n\t\t\tlenbinsx = width / binsx\n\t\t\tlenbinsy = H / binsy\n\n\t\t\t#point that plot is turning around\n\t\t\txorigin = xsc[7] \n\t\t\tyorigin = ysc[7] \n\t\t\t \n\t\t\tmaxfacex = width \n\t\t\tminfacex = 0 \n\t\t\n\t\t\tminfacey = 0 \n\t\t\tmaxfacey = H \n\t\t\t\n\t\t\t#vector points towards transformation\n\t\t\tgotovector = [1,0]\n\t\t\t\n\t\t\t#Data to be Transformed \n\t\t\txin = facenum['xloc']\n\t\t\tyin = facenum['yloc'] \n\t\t \n\t\t\t#transforms data to y = yorigin \n\t\t\txprime, yprime = transform(xin,yin,xorigin, yorigin, gotovector, index)\n\t\t\t\n\t\t\txs = xprime - xorigin \n\t\t\tys = facenum['zloc']\n\t\t\tnp.asarray(xs)\n\t\t\tnp.asarray(ys) \n\t\t\t\n\t\t\t#Creates Histogram in Easy (X,Z) reference frame\n\t\t\tHist, xedges, yedges = np.histogram2d(xs, ys, bins = [binsx, binsy],\n\t\t\t\t\trange = [[minfacex, maxfacex], [minfacey, maxfacey]])\n\t\t\tHist = Hist.T\n\t\t \n\t\t\t#vector perpendicular to the base of the side \n\t\t\tperpbase = [basevector[1], -1 * basevector[0]]\n\t\t \n\t\t\t#Find angle between vectors \n\t\t\tvec1 = basevector\n\t\t\tvec2 = [1, 0]\n\t\t\t\n\t\t\t#Angle between vectors, radians\n\t\t\ttheta = 1 * np.arccos(np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))) \n\n\t\t\tfarvec_constant = (H / (np.linalg.norm(basevector))) \n\t\t\t# Unit vector pointing towards top corner\n\t\t\tfarvec = np.multiply(farvec_constant, perpbase)\n\t\t\t\n\t\t\txmax = xsc[0] - farvec[0] #X position of top right\n\t\t\tymax = ysc[0] - farvec[1] #Y position of top right\n\t\t\tcornx = xsc[7] - farvec[0] #X position of bot right\n\t\t\tcorny = ysc[7] - farvec[1] #Y position of bot right\n\t\t\t# Corners for patch\n\t\t\txyside7 = [[cornx, corny], [xsc[7], ysc[7]], \n\t\t\t\t\t[xsc[0], ysc[0]], [xmax, ymax], [cornx, corny]]\n\n\t\t\txplace = xsc[7] - H * np.sin(theta) \n\t\t\typlace = ysc[7] + H * np.cos(theta) \n\n\t\t\tpath7 = Path(xyside7)\n\t\t\tpatch7 = patches.PathPatch(path7, facecolor = sidecolor , lw=2, alpha = 
alpha)\n\t\t\tax.add_patch(patch7)\n\t\t\t\n\t\t\t#Rotates Matrix by theta radians\n\t\t\tx, y = DoRotation(xedges, -1 * yedges, (-1 * theta))\n\t\t\tax.pcolormesh(x + xplace, y + yplace, Hist, \n\t\t\t\t\tnorm = norm, cmap = my_cmap,\n\t\t\t\t\tclip_path = patch7, clip_on = True)\n\t\t\t\n\t\t#This is the bottom, flip initial conditions, x = x, y = -y, z = z \n\t\telif i == 8: \n\t\t\tz = 0 #Z position\n\n\t\t\txpush = 0 #Shift in the x direction\n\t\t\typush = ysc[2] + H - ysc[7] #Shift in the y direction\n\n\t\t\tminfacex = xsc[0] + xpush \n\t\t\tmaxfacex = xsc[5] + xpush\n\t\t\n\t\t\tmaxfacey = -1 * ysc[7] + ypush\n\t\t\tminfacey = -1 * ysc[2] + ypush\n\t\t\n\t\t\txbins = np.linspace(minfacex,maxfacex,N)\n\t\t\tybins = np.linspace(minfacey,maxfacey,N)\n\t\t\t\n\t\t\txs = facenum['xloc'] + xpush\n\t\t\t# Flipped y because the bottom is viewed upside down\n\t\t\tys = -1 * facenum['yloc'] + ypush\n\t\t\txs = np.asarray(xs)\n\t\t\tys = np.asarray(ys) \n\n\t\t\t#Create Histogram\n\t\t\tHist, xedges, yedges = np.histogram2d(xs, ys, bins = [N, binsheight], \n\t\t\t\t\trange = [[minfacex, maxfacex], [minfacey, maxfacey]])\n\t\t\tHist = Hist.T\n\t\t\t\n\t\t\t#Creates Patch for bottom \n\t\t\txybot = []\n\t\t\tfor i in range(len(xsc)):\n\t\t\t\txybot.append([xsc[i] + xpush, ysc[i] + ypush])\n\n\t\t\tpathbot = Path(xybot)\n\t\t\tpatchbot = patches.PathPatch(pathbot, facecolor=sidecolor, lw = 2, alpha = alpha)\n\t\t\tpatchbot1 = patchbot\n\t\t\tax.add_patch(patchbot) \n\n\t\t\t#Plots the hist, and gets cropped by the octogon\n\t\t\tax.pcolormesh(xedges, yedges, Hist, norm = norm, cmap = my_cmap,\n\t\t\t\t\tclip_path = patchbot, clip_on = True)\n\t\n\n\t\t# This is the top, keep initial conditions, x = x, y = y ... \n\t\telif i == 9: \n\t\t\t\n\t\t\tz = H # Zposition \n\t\t\t\n\t\t\t#To Shift graphing Position, Must shift everything\n\t\t\txpush = 0\t#Shift Parameter for x\n\t\t\typush = 0 \t#Shift Parameter for y\n\n\t\t\tminfacex = xsc[0] + xpush\n\t\t\tmaxfacex = xsc[5] + xpush\n\t\t\n\t\t\tminfacey = ysc[7] + ypush\n\t\t\tmaxfacey = ysc[2] + ypush\n\t\t \n\t\t\t#Input data\n\t\t\txs = facenum['xloc'] + xpush \n\t\t\tys = facenum['yloc'] + ypush \n\n\t\t\txs = np.asarray(xs)\n\t\t\tys = np.asarray(ys)\n\t\t\t\n\t\t\t#Creates Histogram (NxN), Xedges (N), and Yedges (N)\n\t\t\tHist,xedges,yedges = np.histogram2d(xs,ys,bins = [N,binsheight],range = [[minfacex,maxfacex],[minfacey,maxfacey]])\n\t\t\t\n\t\t\t#Transforms the Histogram so it can be graphed\n\t\t\tHist = Hist.T \n\t\t\t\n\t\t\t#Creates the Octogon Patch \n\t\t\txytop = []\n\t\t\tfor i in range(len(xsc)):\n\t\t\t\txytop.append([xsc[i] + xpush, ysc[i] + ypush])\n\n\t\t\tpathtop = Path(xytop)\n\t\t\tpatchtop = patches.PathPatch(pathtop, facecolor=colortop, lw=2, alpha = alpha)\n\t\t\tpatchtop1 = patchtop\n\t\t\tax.add_patch(patchtop) \n\n\t\t\t#Plots the hist, and gets cropped by the octogon\n\t\t\tplottop = ax.pcolormesh(xedges, yedges, Hist, norm = norm, cmap = my_cmap, #interpolation='nearest', origin='lower',\n\t\t\t\t\tclip_path = patchtop, clip_on=True)\n\t\t\n\t\t\t#Makes the colorbar for all graphs, normalization is the same \n\t\t\tif colorbar:\n\t\t\t\tplt.colorbar(plottop) \n\t\n\t#Labels the facenumbers, 2 and 9 are missing because they are ugly when put on \n\tif add_face_nums:\n\t\tmarking = my_cmap(0)\n\t\tax.annotate('0', xy=(-1.85, 0), color = marking)#, xytext=(xsc[0] - H -.5,0))\n\t\tax.annotate('1', xy=(-1.5, 1), color = marking)#, xytext=(xsc[0] - H -.5,0))\n\t\tax.annotate('3', xy=(1.5, 1), color = marking)#, 
xytext=(xsc[0] - H -.5,0))\n\t\tax.annotate('4', xy=(1.85, 0), color = marking)#, xytext=(xsc[0] - H -.5,0)) \n\t\tax.annotate('5', xy=(1.5, -1), color = marking)#, xytext=(xsc[0] - H -.5,0))\n\t\tax.annotate('6', xy=(0, -1.9), color = marking)#, xytext=(xsc[0] - H -.5,0))\n\t\tax.annotate('7', xy=(-1.6,-1), color = marking)#, xytext=(xsc[0] - H -.5,0))\n\t\tax.annotate('8', xy=(0, 3.6), color = marking)#, xytext=(xsc[0] - H -.5,0))\n\t\t#ax.annotate('9', xy=(1.5, 1), color = marking)#, xytext=(xsc[0] - H -.5,0))\n\n\tif legend:\n\t\t#Makes Legend for percent hit of side numbers\n\t\ttextstr = 'Percent Hit \\n'\n\t\tfor i in range(10):\n\t\t\ttextstr += 'Face %i = %i'%(i, lennums[i])\n\t\t\tif i != 9:\n\t\t\t\ttextstr +='\\n'\n\t\tprops = dict(boxstyle='round', facecolor='grey', alpha=0.3)\n\n\t\tax.text(0.05, 0.97, textstr, transform=ax.transAxes, fontsize=10,\n\t\t\tverticalalignment='top', bbox=props) \n\n\tax.set_title('LPF Micrometeroid Impact Location %s'%(scale))\n\treturn fig", "def plot_facets(self):\r\n for i in self.sides:\r\n i.regularise_grid()\r\n\r\n fig = plt.figure()\r\n for i, facet in enumerate(self.sides):\r\n print(i)\r\n fig.add_subplot(16, 1, i + 1)\r\n plt.imshow(facet.regular_grid[2], cmap='gray')\r\n plt.title(str(i)), plt.xticks([]), plt.yticks([])", "def _plot_profiles(example_dict, example_indices, plot_shortwave):\n\n heating_rate_matrix_k_day01 = example_utils.get_field_from_dict(\n example_dict=example_dict,\n field_name=\n example_utils.SHORTWAVE_HEATING_RATE_NAME if plot_shortwave\n else example_utils.LONGWAVE_HEATING_RATE_NAME\n )\n\n figure_object = None\n axes_object = None\n\n for i in example_indices:\n this_colour = numpy.random.uniform(low=0., high=1., size=3)\n\n figure_object, axes_object = profile_plotting.plot_one_variable(\n values=heating_rate_matrix_k_day01[i, :],\n heights_m_agl=example_dict[example_utils.HEIGHTS_KEY],\n use_log_scale=True, line_colour=this_colour,\n figure_object=figure_object\n )\n\n axes_object.set_xlabel(r'Heating rate (K day$^{-1}$)')\n\n return figure_object, axes_object", "def npv_histograms():\n\n\t### Initialize path to CSV file\n\tfn = 'Combined_AtRiskProperties.csv'\n\tcsv_uri = os.path.join(paths.outputs_dir, fn)\n\n\t### Set plot parameters and style\n\tsb.set(style='ticks')\n\tfig, axes = plt.subplots(ncols=2, figsize=(15/1.5, 6/1.5))\n\n\t### Read CSV file to Pandas DataFrame\n\tdf = pd.read_csv(csv_uri)\n\tdf = df[df['npv_dr3']>0]\n\tdf['Flood Zone'] = np.where(df['flood_zone']=='100yr', 'SFHA', 'Non-SFHA')\n\n\tbins = 150\n\n\t### Plot data\n\tsb.histplot(df, x='npv_dr3', hue='Flood Zone', bins=bins, \n\t\talpha=0.8,\n\t\tbinrange=[0,1*10**4],\n\t\thue_order=['SFHA', 'Non-SFHA'],\n\t\tpalette=['r', 'b'],\n\t\tax=axes[0],\n\t\tlegend=False\n\t\t)\n\n\t### Plot data\n\tsb.histplot(df, x='npv_dr3', hue='Flood Zone', bins=bins, \n\t\talpha=0.8,\n\t\tbinrange=[0,5*10**5],\n\t\thue_order=['SFHA', 'Non-SFHA'],\n\t\tpalette=['r', 'b'],\n\t\tax=axes[1],\n\t\tlegend=True\n\t\t)\n\n\tsfha_median = df['npv_dr3'][df['Flood Zone']=='SFHA'].median()\n\taxes[0].axvline(sfha_median, color='r', ls='--')\n\n\tnonsfha_median = df['npv_dr3'][df['Flood Zone']=='Non-SFHA'].median()\n\taxes[0].axvline(nonsfha_median, color='b', ls='--')\n\n\tprint(sfha_median)\n\tprint(nonsfha_median)\n\t\n\t### Format axes\n\taxes[0].set_xlim(0, 1*10**4)\n\taxes[1].set_xlim(0, 5*10**5)\n\t\n\tfor i in 
range(2):\n\t\taxes[i].set_yscale('log')\n\t\taxes[i].set_xlabel('NPV')\n\t\taxes[i].set_yticks([])\n\t\taxes[i].set_ylabel('')\n\t\taxes[i].set_xticklabels(\n\t\t\t[\"${:,.0f}\".format(int(t)) for t in axes[i].get_xticks()])\n\t\taxes[i].tick_params(axis='y', which='minor', left=False)\n\n\t\tfor s in ['top', 'left', 'right']:\n\t\t\taxes[i].spines[s].set_visible(False)\n\n\t### Save figure\n\tfn = 'npv_histograms.png'\n\turi = os.path.join(paths.figures_dir, fn)\n\tplt.savefig(uri, bbox_inches='tight', dpi=600)\n\tplt.savefig(uri.replace('png', 'pdf'), bbox_inches='tight')\n\n\t### Open figure\n\ttime.sleep(0.5)\n\tsubprocess.run(['open', uri])\n\n\treturn None", "def plot_3d_hist(x,y,name):\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111,projection = '3d')\r\n hist, xedges, yedges = np.histogram2d(x,y,bins=4, normed=True)\r\n xpos, ypos = np.meshgrid(xedges[:-1] + 0.25, yedges[:-1] + 0.25)\r\n xpos = xpos.flatten('F')\r\n ypos = ypos.flatten('F')\r\n zpos = np.zeros_like(xpos)\r\n dx = 0.5 * np.ones_like(zpos)\r\n dy = dx.copy()\r\n dz = hist.flatten()\r\n\r\n ax.bar3d(xpos, ypos, zpos, dx, dy, dz, color='b', zsort='average')\r\n plt.savefig(name)\r\n plt.close()", "def plot_catalog_on_sky(self,fighandle):\n import numpy as np\n from astropy import coordinates, units \n ax = fighandle.add_subplot(111)\n B2mR2 = [-0.8,+2.4]\n for a,b,c,d in zip(self.ub1ra,self.ub1dec,self.ub1b2mag,self.ub1r2mag): \n B2mR2.append(c-d)\n B2mR2 = np.array( B2mR2 ) \n R = ax.scatter(self.ub1ra,self.ub1dec,\n s=(20.-self.ub1b2mag)*2.,\n c=B2mR2[2:],norm=None,cmap='plasma')\n ax.plot(self.ra.value,self.dec.value,'+',markersize=20,color='purple',lw=2,label='source') \n fighandle.colorbar(R,fraction=0.05,pad=0.05,label=\"blue=hot yellow=cool\")\n ax.set_xlabel('RA (deg)')\n ax.set_ylabel('Dec (deg)') \n # plot det frame\n #detwcs = wcs.WCS(header=self._det_header(),key='S')\n z = self.det_frame(WCS=self.detWcs)\n ax.plot(z[0][:],z[1][:],'k',lw=1)\n ax.plot(z[0][0],z[1][0],'*k',ms=7,label='det origin') # origin\n ax.plot(z[0][:2],z[1][:2],'or--',lw=2.7) # bottom detector image\n # slit (uvgrism) for source at anchor\n x,y,z,c = self.slit()\n xd,yd = self.rotate_slit(x,y)\n z1 = self.detWcs.pixel_to_world(xd,yd)\n ax.plot(z1.ra,z1.dec,'b-',label='first order')\n ax.legend()\n ax.invert_xaxis() # RA runs opposite to longitude on ground", "def _plot_counts_one_height(\n count_matrix, metadata_dict, height_index, colour_map_object,\n colour_norm_object, border_latitudes_deg_n, border_longitudes_deg_e,\n output_dir_name):\n\n latitudes_deg_n = metadata_dict[count_obs.LATITUDES_KEY]\n longitudes_deg_e = metadata_dict[count_obs.LONGITUDES_KEY]\n\n figure_object, axes_object = pyplot.subplots(\n 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)\n )\n\n plotting_utils.plot_borders(\n border_latitudes_deg_n=border_latitudes_deg_n,\n border_longitudes_deg_e=border_longitudes_deg_e,\n axes_object=axes_object\n )\n\n matrix_to_plot = count_matrix.astype(float)\n matrix_to_plot[matrix_to_plot < 0.5] = numpy.nan\n\n radar_plotting.plot_latlng_grid(\n field_matrix=matrix_to_plot, field_name=DUMMY_FIELD_NAME,\n axes_object=axes_object,\n min_grid_point_latitude_deg=numpy.min(latitudes_deg_n),\n min_grid_point_longitude_deg=numpy.min(longitudes_deg_e),\n latitude_spacing_deg=numpy.diff(latitudes_deg_n[:2])[0],\n longitude_spacing_deg=numpy.diff(longitudes_deg_e[:2])[0],\n colour_map_object=colour_map_object,\n colour_norm_object=colour_norm_object\n )\n\n gg_plotting_utils.plot_colour_bar(\n 
axes_object_or_matrix=axes_object, data_matrix=matrix_to_plot,\n colour_map_object=colour_map_object,\n colour_norm_object=colour_norm_object,\n orientation_string='vertical', extend_min=False, extend_max=True\n )\n\n plotting_utils.plot_grid_lines(\n plot_latitudes_deg_n=latitudes_deg_n,\n plot_longitudes_deg_e=longitudes_deg_e, axes_object=axes_object,\n parallel_spacing_deg=2., meridian_spacing_deg=2.\n )\n\n height_m_asl = metadata_dict[count_obs.HEIGHTS_KEY][height_index]\n title_string = 'Observation counts at {0:d} m AGL'.format(\n int(numpy.round(height_m_asl))\n )\n axes_object.set_title(title_string)\n\n output_file_name = '{0:s}/observation_counts_{1:05d}-metres-agl.jpg'.format(\n output_dir_name, int(numpy.round(height_m_asl))\n )\n\n print('Saving figure to file: \"{0:s}\"...'.format(output_file_name))\n figure_object.savefig(\n output_file_name, dpi=FIGURE_RESOLUTION_DPI,\n pad_inches=0, bbox_inches='tight'\n )\n pyplot.close(figure_object)", "def plot_repavg(dataset, plotspecs):\n\n grid_dims = plotspecs[\"grid_dims\"]\n for t in range(len(dataset.topologies)):\n names = dataset.top_names[t]\n for n in range(len(names)):\n # Plot whatever for a protein\n fig, axes = plt.subplots(*grid_dims, sharex=True, sharey=True, figsize=(12,10))\n for j in range(len(dataset.b_values)):\n ax = axes[j / grid_dims[1], j % grid_dims[1]]\n rep_xdata = dataset.xdata[t][n][j]\n rep_ydata = dataset.ydata[t][n][j]\n if len(rep_xdata) > 0:\n # \n if hasattr(rep_ydata[0], \"mask\"):\n # plot profile for each sample \n for r in range(len(rep_xdata)): \n #ax.plot(rep_xdata[r], rep_ydata[r], color=cubecmap(plotspecs[\"coloridxs\"][j]), alpha=0.8)\n ax.plot(rep_xdata[r][~ rep_ydata[r].mask], rep_ydata[r][~ rep_ydata[r].mask], color=cmap(plotspecs[\"coloridxs\"][j]), alpha=0.8)\n else:\n # plot profile for each sample \n for r in range(len(rep_xdata)): \n #ax.plot(rep_xdata[r], rep_ydata[r], color=cubecmap(plotspecs[\"coloridxs\"][j]), alpha=0.8)\n ax.plot(rep_xdata[r], rep_ydata[r], color=cmap(plotspecs[\"coloridxs\"][j]), alpha=0.8)\n\n x_repavg = dataset.avgxdata[t][n][j]\n y_repavg = dataset.avgydata[t][n][j]\n # plot disorder-averaged profile\n ax.plot(x_repavg, y_repavg, lw=2, color='k')\n\n if plotspecs.has_key(\"xytext\"):\n xytext = plotspecs[\"xytext\"]\n else:\n xytext = (0.3, 0.1)\n\n ax.annotate(\"b = \" + dataset.b_values[j], xy=(0,0), xytext=xytext,\n bbox={\"boxstyle\":\"square\",\"facecolor\":\"w\",\"edgecolor\":\"k\"},\n xycoords=\"axes fraction\", textcoords=\"axes fraction\")\n\n if plotspecs.has_key(\"ylims\"): \n ax.set_ylim(*plotspecs[\"ylims\"])\n if plotspecs.has_key(\"xlims\"): \n ax.set_xlim(*plotspecs[\"xlims\"])\n if plotspecs.has_key(\"ylog\"):\n if plotspecs[\"ylog\"]:\n ax.semilogy()\n\n big_ax = fig.add_subplot(111)\n big_ax.grid(False)\n big_ax.set_axis_bgcolor('none')\n big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')\n big_ax.set_ylabel(plotspecs[\"ylabel\"])\n big_ax.set_xlabel(plotspecs[\"xlabel\"])\n if plotspecs.has_key(\"title\"):\n big_ax.set_title(plotspecs[\"title\"].format(names[n]))\n\n plt.subplots_adjust(wspace=0, hspace=0)\n\n if not (plotspecs[\"saveas\"] is None):\n if not os.path.exists(\"plots\"):\n os.mkdir(\"plots\")\n os.chdir(\"plots\")\n for format in plotspecs[\"saveas_formats\"]:\n fig.savefig(plotspecs[\"saveas\"][t][n] + \".\" + format, bbox_inches=\"tight\")\n os.chdir(\"..\")\n\n \n #plt.figure()\n #levels = np.linspace(0, 1.3, 11)\n #CS3 = plt.contourf([[0,0],[0,0]], levels, cmap='viridis')\n 
#plt.clf()\n\n #plt.figure()\n #for j in range(len(name_repavg_x)):\n # plt.plot(name_repavg_x[j], name_repavg_y[j], color=cmap(plotspecs[\"coloridxs\"][j]))\n #plt.xlabel(plotspecs[\"xlabel\"])\n #plt.ylabel(plotspecs[\"ylabel\"])\n #plt.title(plotspecs[\"title\"].format(names[n]))\n\n #cbar = plt.colorbar(CS3)\n #cbar.set_label(\"Frustration b\")\n if plotspecs.has_key(\"avg_ylims\"): \n plt.ylim(*plotspecs[\"avg_ylims\"])\n if plotspecs.has_key(\"avg_xlims\"): \n plt.xlim(*plotspecs[\"avg_xlims\"])\n\n if not (plotspecs[\"saveas\"] is None):\n if not os.path.exists(\"plots\"):\n os.mkdir(\"plots\")\n os.chdir(\"plots\")\n for format in plotspecs[\"saveas_formats\"]:\n plt.savefig(plotspecs[\"saveas\"][t][n] + \"_avg.\" + format, bbox_inches=\"tight\")\n os.chdir(\"..\")", "def plot_slices_3D(slow: np.ndarray, \n fast: np.ndarray, \n TG: np.ndarray, \n Z: np.ndarray, \n transition_list: List[List[dict]]):\n \n fig,(ax0,ax1,ax2) = plt.subplots(1, 3, figsize=[10,2.5])\n \n ind = 0\n ax0.pcolormesh(fast, TG, Z[ind,:,:], cmap='hot')\n ax0.set_xlabel('Fast Gate Voltage (V)', fontsize=14)\n ax0.set_ylabel('TG Voltage (V)', fontsize=14)\n ax0.set_title('Slow Gate = '+str(slow[ind])+'V', fontsize=14, fontweight='semibold')\n\n if len(transition_list[ind]) is not 0:\n x_base = transition_list[ind][0]['location']\n yvals = ax0.get_ylim()\n xvals = [x_base, x_base+ (yvals[1] - yvals[0])/transition_list[ind][0]['gradient']]\n ax0.plot(xvals, yvals, linewidth=6,color='lawngreen')\n \n \n ind = np.floor(slow.shape[0]/2).astype(int)\n ax1.pcolormesh(fast, TG, Z[ind,:,:], cmap='hot')\n ax1.set_xlabel('Fast Gate Voltage (V)', fontsize=14)\n ax1.yaxis.set_ticklabels([])\n ax1.set_title('Slow Gate = '+str(slow[ind])+'V', fontsize=14, fontweight='semibold')\n\n if len(transition_list[ind]) is not 0:\n x_base = transition_list[ind][0]['location']\n yvals = ax1.get_ylim()\n xvals = [x_base, x_base+ (yvals[1] - yvals[0])/transition_list[ind][0]['gradient']]\n ax1.plot(xvals, yvals, linewidth=6,color='magenta')\n\n \n ind = -1\n ax2.pcolormesh(fast, TG, Z[ind,:,:], cmap='hot')\n ax2.set_xlabel('Fast Gate Voltage (V)', fontsize=14)\n ax2.yaxis.set_ticklabels([])\n ax2.set_title('Slow Gate = '+str(slow[ind])+'V', fontsize=14, fontweight='semibold')\n\n if len(transition_list[ind]) is not 0:\n x_base = transition_list[ind][0]['location']\n yvals = ax2.get_ylim()\n xvals = [x_base, x_base+ (yvals[1] - yvals[0])/transition_list[ind][0]['gradient']]\n ax2.plot(xvals, yvals, linewidth=6,color='aqua')", "def generate_histogram(avg_histogram_df, pass_counter, chip_name, metric_str, histo_metric, histo_dir):\n\n\n bin_array = np.array(avg_histogram_df.index, dtype='float')\n\n smooth_histo_df = avg_histogram_df.filter(regex='rollingmean').rename(columns=lambda x: x[:-12])\n\n sdm_histo_df = avg_histogram_df.filter(regex='sdm').rename(columns=lambda x: x[:-4])\n\n # smooth_max = np.max(np.max(smooth_histo_df))\n # sdm_max = np.max(np.max(sdm_histo_df))\n # if np.isnan(sdm_max):\n # sdm_max = 0\n # histo_max\n\n min_cont, max_cont = metric_str.split(\"-\")\n\n if pass_counter < 10:\n passes_to_show = 1\n else:\n passes_to_show = 2\n pass_counter // 10\n line_settings = dict(alpha=0.75, elinewidth = 0.5)\n vhf_colormap = get_vhf_colormap()\n\n\n\n for i in range(1, pass_counter+1, passes_to_show):\n sns.set_style('darkgrid')\n fig = plt.figure(figsize=(8,6))\n ax = fig.add_subplot(111)\n # ax.set_xscale('log')\n sns.set(style='ticks')\n\n c = 0\n max_list = []\n for col in smooth_histo_df:\n 
max_list.append(np.max(smooth_histo_df[col]))\n histo_max = np.ceil(max(max_list))\n splitcol = col.split(\"_\")\n if len(splitcol) == 2:\n spot_type, pass_num = splitcol\n else:\n spot_type, pass_num = splitcol[::2]\n pass_num = int(pass_num)\n if pass_num == i:\n ax.errorbar(x=bin_array,\n y=smooth_histo_df[col],\n yerr=sdm_histo_df[col],\n color = vhf_colormap[c],\n label = None,\n lw = 0,\n **line_settings\n )\n ax.step(x=bin_array,\n y=smooth_histo_df[col],\n color = vhf_colormap[c],\n label = spot_type,\n lw = 1,\n where= 'mid',\n alpha=0.75\n )\n c += 1\n\n ax.axhline(y=0, ls='dotted', c='k', alpha=0.75)\n ax.axvline(x=float(min_cont), ls='dashed', c='k', alpha=0.8)\n\n plt.legend(loc = 'best', fontsize = 10)\n\n plt.ylabel(\"Frequency (kparticles/mm\" + r'$^2$'+\")\", size = 14)\n plt.xlabel(\"{} (%)\".format(histo_metric), size = 14)\n\n if histo_max < 0.5:\n ysteps = 0.1\n else:\n ysteps = round(histo_max/10,1)\n\n plt.yticks(np.arange(0, histo_max, ysteps), size = 12)\n\n xlabels = np.append(bin_array, int(max_cont))[::(len(bin_array) // 10)]\n plt.xticks(xlabels, size = 12, rotation = 90)\n\n plt.title(chip_name+\" Pass \"+str(i)+\" Average Histograms\")\n\n figname = ('{}_combohisto_pass_{}_{}_{}.png'.format(chip_name,i,histo_metric,metric_str))\n plt.savefig('{}/{}'.format(histo_dir,figname), bbox_inches = 'tight', dpi = 300)\n print(\"File generated: {}\".format(figname))\n plt.clf()", "def AtlasPlots(cf, p, atlas, m_array, EnergyHistory): \n\n fig = plt.figure(1)\n fig.patch.set_facecolor('white')\n\n TE = [sum(x) for x in EnergyHistory] \n VE = [row[0] for row in EnergyHistory] \n IE = [row[1] for row in EnergyHistory] \n\n plt.subplot(1,3,1)\n plt.plot(TE)\n plt.title('Total Energy')\n plt.hold(False)\n plt.subplot(1,3,2)\n plt.plot(VE)\n plt.title('Vector Energy')\n plt.hold(False)\n plt.subplot(1,3,3)\n plt.plot(IE)\n plt.title('Image Energy')\n plt.hold(False)\n plt.draw()\n plt.show()\n\n if cf.io.outputPrefix != None: \n energyFilename = cf.io.outputPrefix + \"Energy.pdf\"\n plt.savefig(energyFilename)", "def exo1():\n nblist = round(linspace(10, nb, 4))\n for i in 1: length(nblist):\n V = U(: , 1: nblist(i))\n subplot(2, 2, i)\n plot_mesh((vertex*V)*V', faces)", "def make3DLPF(dictionary, N = 50, scale = 'log', cmap = colormap.parula, return_ax = False):\n\n\t# initalize colors\n\tmy_cmap = copy.copy(matplotlib.cm.get_cmap(cmap))\n\tmy_cmap.set_bad(my_cmap(0))\n\t\n\tif scale == 'log':\n\t\tfrom matplotlib.colors import LogNorm\n\t\tnorm = LogNorm(vmin = 1e-3, vmax = 1)\n\telse:\n\t\tnorm = matplotlib.colors.Normalize(0, 1)\n\n\t# converts dictionary to pandas Dataframe\n\tdf = dictionaryToDataFrame(dictionary)\n\n\t# initialize figure\n\tfig3D = plt.figure(figsize = (4,4))\n\n\t# add subplot with equal axes\n\tax3d = fig3D.add_subplot(1,1,1, projection = '3d')\n\tax3d.set_axis_off()\n\tax3d.set_xlim(-1, 1)\n\tax3d.set_ylim(-1, 1)\n\tax3d.set_zlim(-1, 1)\n\tax3d.set_aspect('equal')\n\n\n\tfor f in np.arange(0, 10):\n\t\t#Gets only values on one face\n\t\tdf_new = dataFrameOnlyFace(df, f)\n\t\t\n\t\t#vertical sides\n\t\tif f < 8:\n\t\t\t# Translated\n\t\t\tdf_new = translate_to_origin(df_new, f)\n\n\t\t\t# Rotated\n\t\t\tdf_new, theta = rotate_at_origin(df_new, f)\n\n\t\t\t# makes historgram\n\t\t\tHist, xedges, zedges = hist(df_new, f, N, length_df = len(df.index) / N)\n\n\t\t\t# rotates histogram edges (creates y edges)\n\t\t\txedges, yedges = rotate_at_origin(xedges, f, back = True)\n\n\t\t\t# translates histogram edges\n\t\t\txedges, yedges = 
translate_from_origin(xedges, yedges, f)\n\t\t\tmakeSidePatch(ax3d, Hist, xedges, yedges, zedges, f, norm, cmap = my_cmap)\n\n\t\telse:\n\t\t\t# makes top and bottom patches\n\t\t\tmakeTopPatch(ax3d, df_new, f, N, \n\t\t\t\t\tlength_df = len(df.index) / N, norm = norm, cmap = my_cmap)\n\tif return_ax:\n\t\treturn ax3d, fig3D\n\telse:\n\t\treturn fig3D", "def vectormap(x,y,vx,vy,nbins=10,ax=None,cmap=\"YlGnBu\",colorlines=False,density=1.,linecolor='k'):\n #Simon's hack for getting the means quick\n H,yedges,xedges = np.histogram2d(y,x,bins=nbins) #density histogram \n Hx,yedges,xedges = np.histogram2d(y,x,bins=nbins,weights=vx) #sum of x-component of vector in each pixel\n Hy,yedges,xedges = np.histogram2d(y,x,bins=nbins,weights=vy) #sum of y-component of vector in each pixel\n vxm,vym = Hx/H,Hy/H #the means\n #bin centres\n xc = np.array([.5*(xedges[i]+xedges[i+1]) for i in np.arange(nbins)])\n yc = np.array([.5*(yedges[i]+yedges[i+1]) for i in np.arange(nbins)])\n #meshgrid to make arrays for streamplot\n xx,yy = np.meshgrid(xc,yc)\n if ax is None:\n fig,ax = plt.subplots()\n if colorlines is False:\n ax.streamplot(xx,yy,vxm,vym,density=density,color=linecolor)\n ax.set_xlim((xedges[0],xedges[-1]))\n ax.set_ylim((yedges[0],yedges[-1]))\n else:\n ax.streamplot(xx,yy,vxm,vym,density=density,color=np.sqrt(vxm**2.+vym**2.),cmap=cmap)\n ax.set_xlim((xedges[0],xedges[-1]))\n ax.set_ylim((yedges[0],yedges[-1]))\n return None", "def draw(axes, img, name):\n # cols = int(len(fig.axes) / 3) + 1\n\n # ax, ax_hist = axes\n # ax_cdf = ax_hist.twinx()\n \n # ax.set_title(name)\n # ax.imshow(img, cmap='gray')\n\n # # Display histogram\n # ax_hist.hist(img.ravel(), bins=256, histtype='step', color='black')\n # ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))\n # ax_hist.set_xlabel('Pixel intensity')\n # ax_hist.set_xlim(0, 1)\n # ax_hist.set_yticks([])\n\n # # Display cumulative distribution\n # img_cdf, bins = exposure.cumulative_distribution(img, 256)\n # ax_cdf.plot(bins, img_cdf, 'r')\n # ax_cdf.set_yticks([])\n\n # return ax, ax_hist, ax_cdf\n bins = 256\n image = img_as_float(img)\n ax_img, ax_hist = axes\n ax_cdf = ax_hist.twinx()\n\n # Display image\n ax_img.imshow(image, cmap=plt.cm.gray)\n ax_img.set_axis_off()\n ax_img.set_title(name)\n\n # Display histogram\n ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black')\n ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))\n ax_hist.set_xlabel('Pixel intensity')\n ax_hist.set_xlim(0, 1)\n ax_hist.set_yticks([])\n\n # Display cumulative distribution\n img_cdf, bins = exposure.cumulative_distribution(image, bins)\n ax_cdf.plot(bins, img_cdf, 'r')\n ax_cdf.set_yticks([])\n\n return ax_img, ax_hist, ax_cdf", "def create_histograms(rat_one, bin_width=0.5):\r\n\r\n # Define the bins.\r\n custom_bins = np.arange(rat_one[['u', 'v']].min().min(), rat_one[['u', 'v']].max().max()+bin_width, bin_width)\r\n\r\n # Cast to numpy vectors.\r\n u = rat_one['u'].to_numpy()\r\n v = rat_one['v'].to_numpy()\r\n\r\n # Prepare the rotation matrix.\r\n theta = np.radians(120)\r\n c, s = np.cos(theta), np.sin(theta)\r\n R = np.array(((c, -s), (s, c)))\r\n\r\n # Concatenate the vectors into a matrix.\r\n points = np.column_stack((u,v))\r\n\r\n # Create rotated images.\r\n rotated1 = points @ R.T\r\n rotated2 = rotated1 @ R.T\r\n\r\n # Create histograms of the images.\r\n H1, xedges, yedges = np.histogram2d(v, u, bins=[custom_bins, custom_bins])\r\n H2, xedges, yedges = np.histogram2d(rotated1[:, 1], rotated1[:, 0], 
bins=[custom_bins, custom_bins])\r\n H3, xedges, yedges = np.histogram2d(rotated2[:, 1], rotated1[:, 0], bins=[custom_bins, custom_bins])\r\n return H1,H2,H3" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
load locally generated mnc file. Checks, loads and orients correctly for plotting
def import_results_mncfile(self, filename):
    if not os.path.isfile(filename):
        print("Can't find {}".format(filename))
        print("Consider generating one with the .generate_results_mncfile")
    array_data = self.load_2D_mnc(filename)
    array_data = self.orient_local_mncfile(array_data)
    # array_data = self.filter_zeros(array_data)
    return array_data
[ "def load_2D_mnc(self,filename):\n import pyminc.volumes.factory as pyminc\n mncfile=pyminc.volumeFromFile(filename)\n array_data = np.squeeze(np.array(mncfile.data))\n return array_data", "def load_and_track(directory,fileprefix,fileext=\".dat\",radius = 3,minmass=None,preprocess=False):\n# directory = r'C:\\data\\200203\\FSM-2D'\n# fileprefix = '200203FSM54 FSM-2D'\n# fileext = \".dat\"\n# filename = '200203FSM54 FSM-2D Z.dat'\n filename = fileprefix +' Z'+fileext\n # fileprefix = \n # also read from file name X Y Z\n usersDf = pd.read_csv(directory+'\\\\' + fileprefix + ' Z' + fileext, skiprows=0,delim_whitespace=True,header=None).values\n numX,numY=usersDf.shape\n f = tp.locate(usersDf, radius, invert=False,minmass=minmass,preprocess=preprocess)\n\n usersDfX = pd.read_csv(directory+'\\\\' + fileprefix + ' X' + fileext, skiprows=0,delim_whitespace=True,header=None).values\n usersDfY = pd.read_csv(directory+'\\\\' + fileprefix + ' Y' + fileext, skiprows=0,delim_whitespace=True,header=None).values\n\n usersDfX = np.reshape(usersDfX,usersDfX.size)\n usersDfY = np.reshape(usersDfY,usersDfY.size)\n\n if usersDfY[0]<usersDfY[-1]:\n originPosition = 'lower'\n else:\n originPosition = 'upper'\n \n pltstyle = {\n \"origin\": originPosition\n }\n \n extent = [min(usersDfX[0], usersDfX[-1]),max(usersDfX[0], usersDfX[-1]), min(usersDfY[0], usersDfY[-1]),max(usersDfY[0], usersDfY[-1]) ]\n fig2,ax2 = plt.subplots(figsize=(6,6))\n\n plt.imshow(usersDf,origin=originPosition,extent=extent,cmap=black_blue_white1)\n # use lifeplot and take into account of the X,Y axis data\n# lp = LivePlot2DV2((6, 8), usersDfX, usersDfY, usersDf, 'x (um)', 'y (um)', 'APD (kct/s)')\n# lp = LivePlot2DV2( usersDfX, usersDfY, usersDf)\n \n # tp.annotate(f, usersDf,plot_style={'markersize':7},imshow_style=pltstyle)\n\n fnew = f\n fnew.index = list(range(len(f)))\n for i in list(range( len(f))):\n \n xcoor = usersDfX[0] + (usersDfX[-1]-usersDfX[0])*(fnew.iloc[i].x )/( numX )\n ycoor = usersDfY[0] + (usersDfY[-1]-usersDfY[0])*(fnew.iloc[i].y )/( numY )\n \n fnew.at[i,'x'] = xcoor\n fnew.at[i,'y'] = ycoor\n# print(i,len(f),xcoor,ycoor,f.iloc[i].x,f.iloc[i].y)\n\n plt.scatter(xcoor, ycoor, s=100, facecolors='none', edgecolors='r')\n ax2.annotate(str(i),(xcoor,ycoor),fontsize=12,color='black',\n bbox=dict(boxstyle='round,pad=0.2', fc='white', alpha=0.6))\n\n plt.xlabel('', labelpad=16, fontsize=16)\n plt.ylabel('', labelpad=16, fontsize=16) \n ax2.tick_params(axis='both', labelsize=16)\n plt.title(directory+\"\\\\\"+filename,fontsize=16)\n \n \n return fnew\n # # https://stackoverflow.com/questions/24108063/matplotlib-two-different-colors-in-the-same-annotate/49041502", "def _init_netcdf(netcdf_file,\r\n work_dir):\r\n \r\n # read data description info\r\n data_desc = _read_description(work_dir)\r\n \r\n # get the years covered\r\n years = _get_years()\r\n \r\n # create a corresponding NetCDF\r\n with netCDF4.Dataset(netcdf_file, 'w') as output_dataset:\r\n \r\n # create the time, x, and y dimensions\r\n output_dataset.createDimension('time', None)\r\n output_dataset.createDimension('lon', data_desc['xdef_count'])\r\n output_dataset.createDimension('lat', data_desc['ydef_count'])\r\n \r\n #TODO provide additional attributes for CF compliance, data discoverability, etc.\r\n output_dataset.title = data_desc['title']\r\n \r\n # create the coordinate variables\r\n time_variable = output_dataset.createVariable('time', 'i4', ('time',))\r\n x_variable = output_dataset.createVariable('lon', 'f4', ('lon',))\r\n y_variable = 
output_dataset.createVariable('lat', 'f4', ('lat',))\r\n \r\n # set the coordinate variables' attributes\r\n data_desc['units_since_year'] = 1800\r\n time_variable.units = 'days since %s-01-01 00:00:00' % data_desc['units_since_year']\r\n x_variable.units = 'degrees_east'\r\n y_variable.units = 'degrees_north'\r\n \r\n # generate longitude and latitude values, assign these to the NetCDF coordinate variables\r\n lon_values = list(_frange(data_desc['xdef_start'], data_desc['xdef_start'] + (data_desc['xdef_count'] * data_desc['xdef_increment']), data_desc['xdef_increment']))\r\n lat_values = list(_frange(data_desc['ydef_start'], data_desc['ydef_start'] + (data_desc['ydef_count'] * data_desc['ydef_increment']), data_desc['ydef_increment']))\r\n x_variable[:] = np.array(lon_values, 'f4')\r\n y_variable[:] = np.array(lat_values, 'f4')\r\n \r\n # read the variable data from the CMORPH file, mask and reshape accordingly, and then assign into the variable\r\n data_variable = output_dataset.createVariable('prcp', \r\n 'f8', \r\n ('time', 'lat', 'lon',), \r\n fill_value=np.NaN)\r\n\r\n # variable attributes\r\n data_variable.units = 'mm'\r\n data_variable.standard_name = 'precipitation'\r\n data_variable.long_name = 'precipitation, monthly cumulative'\r\n data_variable.description = data_desc['title']\r\n\r\n return data_desc", "def readFile(self, blended_grid, suntans_output, roms_output):\n #### step 1) SUNTANS output\n #filename='GalvCoarse_0000.nc'\n nc = Dataset(suntans_output,'r')\n print \"#### Reading SUNTANS output file !!!! ####\\n\"\n #print nc\n\n self.uc=nc.variables['uc'][:][:,0,:]\n self.vc=nc.variables['vc'][:][:,0,:]\n xi=nc.variables['xv'][:]\n yi=nc.variables['yv'][:]\n timei=nc.variables['time']\n self.stime = num2date(timei[:],timei.units) ## SUNTANS time: seconds since 1990-01-01\n \n \n #### step 2) ROMS output\n nc2 = Dataset(roms_output,'r')\n print \"#### Reading ROMS output file !!!! 
####\\n\"\n lon0=nc2.variables['lon'][:]\n lat0=nc2.variables['lat'][:]\n self.mask0=nc2.variables['mask'][:]\n self.u_roms=nc2.variables['water_u'][:]\n self.v_roms=nc2.variables['water_v'][:]\n ftime=nc2.variables['time']\n rtime = num2date(ftime[:],ftime.units) ## ROMS time: seconds since 1970-01-01 \n (y0,x0)=lon0.shape\n \n #### subset roms time, note that SUNTANS time period is shorted than ROMS time period\n t0 = self.stime[0]\n t1 = self.stime[-1]\n self.ind0 = self.findNearest(t0,rtime)\n self.ind1 = self.findNearest(t1,rtime)\n self.time_ss = rtime[self.ind0:self.ind1+1] \n \n xroms0 = np.zeros_like(lon0)\n yroms0 = np.zeros_like(lat0)\n for i in range(y0):\n for j in range(x0):\n (yroms0[i,j],xroms0[i,j])=utm.from_latlon(lat0[i,j],lon0[i,j])[0:2]\n \n #### step 3) Blended grid\n #filename='blended_grid.nc'\n nc1 = Dataset(blended_grid,'r')\n print \"#### Reading curvilinear blended grid !!!!####\\n\"\n #print nc1\n \n xr = nc1.variables['xr'][:]\n yr = nc1.variables['yr'][:]\n lon = nc1.variables['lon_rho'][:]\n lat = nc1.variables['lat_rho'][:]\n mask = nc1.variables['mask_rho'][:]\n \n xroms = np.zeros_like(lon)\n yroms = np.zeros_like(lat)\n (y,x) = lon.shape\n for i in range(y):\n for j in range(x):\n (yroms[i,j],xroms[i,j])=utm.from_latlon(lat[i,j],lon[i,j])[0:2]\n \n #### subset ROMS grid for interpolation ####\n def findNearset(x,y,lon,lat):\n \"\"\"\n Return the J,I indices of the nearst grid cell to x,y\n \"\"\"\n \n dist = np.sqrt( (lon - x)**2 + (lat - y)**2)\n \n return np.argwhere(dist==dist.min())\n \n SW=utm.to_latlon(xi.min(),yi.min(),15,'R') ###(lat, lon)\n NE=utm.to_latlon(xi.max(),yi.max(),15,'R') \n \n #SW=utm.from_latlon(27.95,-95.2)[0:2]\n #NE=utm.from_latlon(30.0, -94.25)[0:2]\n \n #### step 4) searching for the index of the subset domain for interpolation\n #### Hard coded to narrow this domain\n ind = findNearset(SW[1], SW[0], lon, lat)\n J0=ind[0][0] + 15\n I0=ind[0][1] + 15\n \n ind = findNearset(NE[1], NE[0], lon, lat)\n J1=ind[0][0] + 25\n I1=ind[0][1] - 35\n \n yss = yroms[J0:J1,I0:I1] ##subset x,y\n xss = xroms[J0:J1,I0:I1]\n self.maskss = mask[J0:J1,I0:I1]\n \n #pdb.set_trace()\n #### Step 5) Prepare the grid variables for the SUNTANS interpolation class\n xy_sun = np.vstack((yi.ravel(),xi.ravel())).T ## SUNTANS grid, xi: latitude, yi: longitude \n xy_new = np.vstack((xss[self.maskss==1],yss[self.maskss==1])).T ## blended grid\n \n self.Fuv = interpXYZ(xy_sun,xy_new, method='idw')\n \n #### define spatial and length scales ####\n self.Nt = self.stime.shape[0]\n (self.X, self.Y) = xss.shape\n \n \n #### step 6) Prepare for interpolate original ROMS velocity\n SW0 = (lat0.min(),lon0.min())\n NE0 = (lat0.max(),lon0.max())\n ind0 = findNearset(SW0[1], SW0[0], lon, lat)\n JJ0=ind0[0][0] -40\n II0=ind0[0][1] \n \n ind0 = findNearset(NE0[1], NE0[0], lon, lat)\n JJ1=ind0[0][0] +25\n II1=ind0[0][1] \n \n yss0 = yroms[JJ0:JJ1,II0:II1] ##subset x,y for ROMS velocity\n xss0 = xroms[JJ0:JJ1,II0:II1]\n self.maskss0 = mask[JJ0:JJ1,II0:II1]\n \n #### step 7) Prepare the grid variables for the SUNTANS interpolation class\n xy_roms = np.vstack((xroms0[self.mask0==1],yroms0[self.mask0==1])).T\n xy_new0 = np.vstack((xss0[self.maskss0==1],yss0[self.maskss0==1])).T ## blended grid\n \n self.Fuv0 = interpXYZ(xy_roms,xy_new0, method='idw')\n \n #### define spatial and length scales ####\n self.Nt0 = rtime.shape[0]\n (self.X0, self.Y0) = xss0.shape\n self.lon0=lon[JJ0:JJ1,II0:II1]\n self.lat0=lat[JJ0:JJ1,II0:II1]\n \n #### step 8) define the index of SUNTANS in sub-domain 
of ROMS\n self.JJJ0 = J0-JJ0-1\n self.JJJ1 = J1-JJ0\n \n self.III0 = I0-II0\n self.III1 = I1-II0\n \n #### the new time is the ROMS output time ####\n self.time = ftime[:]\n \n \n #pdb.set_trace()", "def load_visualization (self, file_name=\"\"): \n debug (\"In MayaViTkGUI::load_visualization ()\")\n if not file_name:\n file_name = tk_fopen (title=\"Load MayaVi data file\", \n initialdir=Common.config.initial_dir,\n filetypes=[(\"MayaVi visualization files\",\n \"*.mv\"), \n (\"All files\", \"*\")])\n if check_file (file_name):\n self.mayavi.load_visualization (file_name)", "def ingest_cmorph_to_netcdf_full(work_dir,\r\n netcdf_file,\r\n raw=True):\r\n \r\n # create/initialize the NetCDF dataset, get back a data descriptor dictionary\r\n data_desc = _init_netcdf(netcdf_file, work_dir)\r\n\r\n with netCDF4.Dataset(netcdf_file, 'a') as output_dataset:\r\n \r\n # compute the time values \r\n total_years = 2017 - int(data_desc['start_date'].year) + 1 #FIXME replace this hard-coded value with an additional end_year entry in the data_desc\r\n output_dataset.variables['time'][:] = _compute_days(data_desc['start_date'].year,\r\n total_years * 12, \r\n initial_month=data_desc['start_date'].month,\r\n units_start_year=data_desc['units_since_year'])\r\n \r\n # get a handle to the precipitation variable, for convenience\r\n data_variable = output_dataset.variables['prcp']\r\n \r\n # loop over each year/month, reading binary data from CMORPH files and adding into the NetCDF variable\r\n for year in range(data_desc['start_date'].year, 2018): # from start year through 2017, replace the value 2018 here with some other method of determining this value from the dataset itself\r\n for month in range(1, 13):\r\n\r\n # get the files for the month\r\n downloaded_files = _download_daily_files(work_dir, year, month, raw)\r\n \r\n if len(downloaded_files) > 0:\r\n\r\n # read all the data for the month as a sum from the daily values, assign into the appropriate slice of the variable\r\n data = _read_daily_cmorph_to_monthly_sum(downloaded_files, data_desc, year, month)\r\n \r\n # assume values are in lat/lon orientation\r\n data = np.reshape(data, (1, data_desc['ydef_count'], data_desc['xdef_count']))\r\n \r\n # get the time index, which is actually the month's count from the start of the period of record \r\n time_index = ((year - data_desc['start_date'].year) * 12) + month - 1\r\n \r\n # assign into the appropriate slice for the monthly time step\r\n data_variable[time_index, :, :] = data\r\n \r\n # clean up\r\n for file in downloaded_files:\r\n os.remove(file)", "def load_data_from_nc():\n \n file_data = Dataset(\"air.mon.mean.nc\", \"r\")\n latitudes = file_data.variables[\"lat\"][:] \n longitudes = file_data.variables[\"lon\"][:]\n times = file_data.variables[\"time\"][:] \n air_temperatures = file_data.variables[\"air\"][:] \n file_data.close()\n \n return latitudes, longitudes, times, air_temperatures", "def load_data():\n\t\t# load the data\n\t\tDATPATH = \"../data/\"\n\t\tfnino = DATPATH + \"tas_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_nino3_tseries.nc\"\n\t\t#fnino = DATPATH + \"nino3_1871_2016.csv\"\n\t\t#fnino = DATPATH + \"nino34.long.data\"\n\t\tnc_data_nino3 = netCDF4.Dataset(fnino) \n\t\tnino3_load = nc_data_nino3.variables['tas'][:] \n\t\tdnino = nino3_load.flatten()\n\n\t\tfismr = DATPATH + \"psl_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_india_goswami_2002_tseries.nc\"\n\t\tnc_data_ismr = netCDF4.Dataset(fismr) \n\t\tismr_load = nc_data_ismr.variables['psl'][:] \n\t\tdismr = 
ismr_load.flatten()\n\n\t\tfvolc = DATPATH + \"sigl.txt\"\n\t\tdvolc = np.genfromtxt(fvolc, delimiter=\",\", dtype=float).flatten()\n\t\t# simple check for data consistency\n\t\tassert dnino.shape == dismr.shape, \"Data sets are unequal!\"\n\t\tassert int(dismr.shape[0]/12) == dvolc.shape[0], \"Data sets are unequal\"\n\t\treturn dnino, dismr, dvolc", "def loadncfile(path, variable):\n if os.path.exists(path):\n print \"loading \"+path\n nc = Dataset(path, 'r')\n data = nc.variables[variable][:]\n nc.close()\n return data\n else:\n print \"could not find \"+path+\" !!! returning None\"\n return None", "def load(self):\n self.__dicom_image = Parser.DicomParser().parse(self.__dicom_path)\n self.__contour_list = Parser.ContourParser().parse(self.__contour_path)\n self.__is_data_loaded = True", "def read_file(netcdf_file_name):\n\n dataset_object = netCDF4.Dataset(netcdf_file_name)\n\n saliency_dict = {\n EXAMPLE_IDS_KEY: [\n str(id) for id in\n netCDF4.chartostring(dataset_object.variables[EXAMPLE_IDS_KEY][:])\n ],\n MODEL_FILE_KEY: str(getattr(dataset_object, MODEL_FILE_KEY)),\n LAYER_NAME_KEY: str(getattr(dataset_object, LAYER_NAME_KEY)),\n NEURON_INDICES_KEY: numpy.array(\n getattr(dataset_object, NEURON_INDICES_KEY), dtype=int\n ),\n IDEAL_ACTIVATION_KEY: getattr(dataset_object, IDEAL_ACTIVATION_KEY),\n TARGET_FIELD_KEY: getattr(dataset_object, TARGET_FIELD_KEY),\n TARGET_HEIGHT_KEY: getattr(dataset_object, TARGET_HEIGHT_KEY)\n }\n\n if numpy.isnan(saliency_dict[IDEAL_ACTIVATION_KEY]):\n saliency_dict[IDEAL_ACTIVATION_KEY] = None\n if saliency_dict[TARGET_FIELD_KEY] == '':\n saliency_dict[TARGET_FIELD_KEY] = None\n if saliency_dict[TARGET_HEIGHT_KEY] < 0:\n saliency_dict[TARGET_HEIGHT_KEY] = None\n\n num_examples = dataset_object.dimensions[EXAMPLE_DIMENSION_KEY].size\n num_scalar_predictors = (\n dataset_object.dimensions[SCALAR_PREDICTOR_DIM_KEY].size\n )\n num_vector_predictors = (\n dataset_object.dimensions[VECTOR_PREDICTOR_DIM_KEY].size\n )\n num_heights = dataset_object.dimensions[HEIGHT_DIMENSION_KEY].size\n\n if SCALAR_SALIENCY_KEY in dataset_object.variables:\n saliency_dict[SCALAR_SALIENCY_KEY] = (\n dataset_object.variables[SCALAR_SALIENCY_KEY][:]\n )\n else:\n model_metafile_name = neural_net.find_metafile(\n model_dir_name=os.path.split(saliency_dict[MODEL_FILE_KEY])[0]\n )\n model_metadata_dict = neural_net.read_metafile(model_metafile_name)\n net_type_string = model_metadata_dict[neural_net.NET_TYPE_KEY]\n\n if net_type_string == neural_net.DENSE_NET_TYPE_STRING:\n these_dim = (num_examples, num_scalar_predictors)\n else:\n these_dim = (num_examples, num_heights, num_scalar_predictors)\n\n saliency_dict[SCALAR_SALIENCY_KEY] = numpy.full(these_dim, 0.)\n\n if VECTOR_SALIENCY_KEY in dataset_object.variables:\n saliency_dict[VECTOR_SALIENCY_KEY] = (\n dataset_object.variables[VECTOR_SALIENCY_KEY][:]\n )\n else:\n these_dim = (num_examples, num_heights, num_vector_predictors)\n saliency_dict[VECTOR_SALIENCY_KEY] = numpy.full(these_dim, 0.)\n\n dataset_object.close()\n return saliency_dict", "def loadFile(self, fileName,verbose=False):\n if (os.path.isabs(fileName)):\n self.fileName = os.path.basename(fileName)\n self.fullFileName = fileName\n else:\n self.fileName = fileName\n # make the full file name by joining the input name \n # to the MKID_RAW_PATH (or . 
if the environment variable \n # is not defined)\n dataDir = os.getenv('MKID_RAW_PATH', '/')\n self.fullFileName = os.path.join(dataDir, self.fileName)\n\n if (not os.path.exists(self.fullFileName)):\n msg='file does not exist: %s'%self.fullFileName\n if verbose:\n print msg\n raise Exception(msg)\n \n #open the hdf5 file\n self.file = tables.open_file(self.fullFileName, mode='r')\n\n ##### TO DO/DELETE #####\n # dark obs files have no header currently (SRM 2017-05-05)\n # can update later by foldingn log files into obs file generation somehow\n # header is currently not used anywhere else in the code anyways. Maybe can just trash this.\n '''\n self.header = self.file.root.header.header\n self.titles = self.header.colnames\n try:\n self.info = self.header[0] #header is a table with one row\n except IndexError as inst:\n if verbose:\n print 'Can\\'t read header for ',self.fullFileName\n raise inst\n '''\n\n # Useful information about data format set here.\n # For now, set all of these as constants.\n # If we get data taken with different parameters, straighten\n # that all out here.\n\n ##### TO DELETE? #####\n ## These parameters are for DARKNESS data\n # May be cleared out later if deprecated (SRM 2017-05-05)\n self.tickDuration = 1e-6 #s\n self.ticksPerSec = int(1.0 / self.tickDuration)\n self.intervalAll = interval[0.0, (1.0 / self.tickDuration) - 1]\n\n\n ##### TO DELETE #####\n # Did not do this in DARKNESS. nonAllocPixels were just flagged in beammap\n # but still assigned a unique location. Correct method will be with beam map flags \n #self.nonAllocPixelName = '/r0/p250/'\n\n\n #get the beam image.\n try:\n self.beamImage = self.file.get_node('/BeamMap/Map').read()\n self.beamMapFlags = self.file.get_node('/BeamMap/Flag').read()\n except Exception as inst:\n if verbose:\n print 'Can\\'t access beamimage for ',self.fullFileName\n raise inst\n\n ##### TO DELETE #####\n # dark obs files have pixels ID'd by resID now, not roach/pixel address\n # Do we need these beamImageRoaches or beamImagePixelNums later?\n '''\n #format for a pixelName in beamImage is /r#/p#/t# where r# is the roach number, p# is the pixel number\n # and t# is the starting timestamp\n self.beamImageRoaches = np.array([[int(s.split('r')[1].split('/')[0]) for s in row] for row in self.beamImage])\n self.beamImagePixelNums = np.array([[int(s.split('p')[1].split('/')[0]) for s in row] for row in self.beamImage])\n '''\n #instead of beamImagePixelNums, we alternatively use beamImagePixelIDs\n #simply the beamImage cast to integer data types from strings\n self.beamImagePixelIDs = np.array(self.beamImage, dtype=int)\n\n #get shape of array from beamImage\n beamShape = self.beamImage.shape\n self.nRow = beamShape[0]\n self.nCol = beamShape[1]\n\n #make pointer to data table\n self.data = self.file.root.Photons.data\n\n #easy way to check exactly how many seconds of data are supposedly recorded\n self.totalIntegrationTime = self.file.root.Images._g_getnchildren()", "def load(self, filename = 'xgc_profile.sav'):\n\n if 'npz' not in filename:\n filename += '.npz'\n nefile = np.load(filename)\n if 'Z1D' in nefile.files:\n dimension = 3\n if 'BR' in nefile.files:\n equilibrium_mesh = '2D'\n else:\n equilibrium_mesh = '3D'\n else:\n dimension = 2\n if(dimension != self.dimension):\n raise XGC_Loader_Error('Geometry incompatible! 
Trying to load {0}d data onto {1}d grid.\\nMake sure the geometry setup is the same as the data file.'.format(dimension,self.dimension))\n if(equilibrium_mesh != self.equilibrium_mesh):\n raise XGC_Loader_Error('Equilibrium mesh doesn\\'t match! {0} mesh is expected while {1} mesh is loaded.'.format(self.equilibrium_mesh,equilibrium_mesh))\n #======== NEED MORE DETAILED GEOMETRY CHECKING HERE! CURRENT VERSION DOESN'T GUARANTEE SAME GRID. ERRORS WILL OCCUR WHEN READ SAVED FILE WITH A DIFFERENT GRID.\n #=============================================#\n\n self.mesh = {'R':np.copy(nefile['X_origin']),'Z':np.copy(nefile['Y_origin'])}\n self.dne_ad = np.copy(nefile['dne_ad_org'])\n self.ne0_on_grid = np.copy(nefile['ne0'])\n self.dne_ad_on_grid = np.copy(nefile['dne_ad'])\n\n self.ne_on_grid = self.ne0_on_grid[np.newaxis,np.newaxis,:,:] + self.dne_ad_on_grid\n\n if 'nane' in nefile.files:\n self.HaveElectrons = True\n self.nane = np.copy(nefile['nane_org'])\n self.nane_on_grid = nefile['nane']\n self.ne_on_grid += self.nane_on_grid\n\n self.psi_on_grid = np.copy(nefile['psi'])\n self.te_on_grid = np.copy(nefile['Te0'])\n self.ti_on_grid = np.copy(nefile['Ti0'])\n\n if dimension == 2:\n self.B_on_grid = np.copy(nefile['B0'])\n elif equilibrium_mesh == '3D':\n self.BZ_on_grid = np.copy(nefile['B0'])\n self.BX_on_grid = np.copy(nefile['BX'])\n self.BY_on_grid = np.copy(nefile['BY'])\n self.B_on_grid = np.sqrt(self.BX_on_grid**2 + self.BY_on_grid**2 + self.BZ_on_grid**2)\n elif equilibrium_mesh == '2D':\n self.BPhi_on_grid = np.copy(nefile['B0'])\n self.BR_on_grid = np.copy(nefile['BR'])\n self.BZ_on_grid = np.copy(nefile['BZ'])\n self.B_on_grid = np.sqrt(self.BPhi_on_grid**2 + self.BR_on_grid**2 + self.BZ_on_grid**2)", "def main(_, infile, outdir, params):\n outname_template = join(outdir, params['suffix'] + '_%Y%m%d%H.nc')\n\n dimpairs = [('lev', # name in src\n 'level'), # name in dst\n ('lat',\n 'lat'),\n ('lon',\n 'lon'),\n ('ilev',\n 'ilev')]\n\n dim_copiers = [DimensionCopier(src_name, dst_name)\n for src_name, dst_name in dimpairs]\n\n varpairs_to_copy = [(['CH3CHO_VMR_inst', 'GLYALD_VMR_inst'],\n 'ALD'),\n ('CO_VMR_inst', # name in src, lists added toghether\n 'CO'), # name in dst\n ('CRESOL_VMR_inst',\n 'CSL'),\n ('C2H6_VMR_inst',\n 'ETH'),\n ('GLYOXAL_VMR_inst',\n 'GLY'),\n ('H2O2_VMR_inst',\n 'H2O2'),\n ('C3H8_VMR_inst',\n 'HC3'),\n ('HNO3_VMR_inst',\n 'HNO3'),\n ('BIGALK_VMR_inst',\n 'HC5'),\n ('CH2O_VMR_inst',\n 'HCHO'),\n ('HO2NO2_VMR_inst',\n 'HNO4'),\n ('HO2_VMR_inst',\n 'HO2'),\n ('ISOP_VMR_inst',\n 'ISO'),\n (['CH3COCH3_VMR_inst',\n 'HYAC_VMR_inst',\n 'MEK_VMR_inst'],\n 'KET'),\n (['MVK_VMR_inst', 'MACR_VMR_inst'],\n 'MACR'),\n ('CH3COCHO_VMR_inst',\n 'MGLY'),\n ('MPAN_VMR_inst',\n 'MPAN'),\n ('N2O5_VMR_inst',\n 'N2O5'),\n ('NH3_VMR_inst',\n 'NH3'),\n ('NO_VMR_inst',\n 'NO'),\n ('NO2_VMR_inst',\n 'NO2'),\n ('NO3_VMR_inst',\n 'NO3'),\n ('OH_VMR_inst',\n 'OH'),\n ('C2H4_VMR_inst',\n 'OL2'),\n ('ONIT_VMR_inst',\n 'ONIT'),\n ('CH3OOH_VMR_inst',\n 'OP1'),\n ('C2H5OOH_VMR_inst',\n 'OP2'),\n ('CH3COOH_VMR_inst',\n 'ORA2'),\n ('O3_VMR_inst',\n 'OZONE'),\n ('CH3COOOH_VMR_inst',\n 'PAA'),\n ('PAN_VMR_inst',\n 'PAN'),\n ('SO2_VMR_inst',\n 'SO2'),\n ('T',\n 'T'),\n ('TOLUENE_VMR_inst',\n 'TOL'),\n ('DUST1',\n 'VSOILA'),\n ('DUST2',\n 'VSOILB'),\n ('DUST3',\n 'VSOILC')]\n\n varpairs_to_copy_dimchange = [('NH4_VMR_inst',\n 'VNH4Jm'),\n (['OC1_VMR_inst', 'OC2_VMR_inst'],\n 'VORG1Jm'),\n ('SO4_VMR_inst',\n 'VSO4Jm'),\n (['CB1_VMR_inst', 'CB2_VMR_inst'],\n 'VSOOTJ')]\n\n for time_index in 
range(Dataset(infile).dimensions['time'].size):\n # Have to give dimensions explicitly because 'lev' changes to 'level'\n # Have to give var_val_indices explicitly because we only copy one\n # time index\n spacial_variable_options = {'var_args': {'dimensions': ('time',\n 'level',\n 'lat',\n 'lon')},\n 'var_val_indices': np.s_[time_index, :]}\n\n # 3D variables that simply get copied\n var_opts = [{'src_names': src,\n 'dst_name': dst,\n **spacial_variable_options}\n for src, dst in varpairs_to_copy]\n\n # 3D variables with dimchange to mol/mol\n var_opts += [{'src_names': src,\n 'dst_name': dst,\n 'var_attrs': {'units': 'mol/mol'},\n **spacial_variable_options}\n for src, dst in varpairs_to_copy_dimchange]\n\n # Others\n var_opts += [{'src_names': 'lat',\n 'dst_name': 'lat'},\n {'src_names': 'lev',\n 'dst_name': 'level',\n 'var_args': {'dimensions': ('level', )}},\n {'src_names': 'lon',\n 'dst_name': 'lon'},\n {'src_names': 'P0',\n 'dst_name': 'P0'},\n {'src_names': 'PS',\n 'dst_name': 'PSURF',\n 'var_args': {'dimensions': ('time', 'lat', 'lon')},\n 'var_val_indices': np.s_[time_index, :]},\n {'src_names': 'hyam',\n 'dst_name': 'hyam',\n 'var_args': {'dimensions': ('level', )}},\n {'src_names': 'hybm',\n 'dst_name': 'hybm',\n 'var_args': {'dimensions': ('level', )}},\n {'src_names': 'ilev',\n 'dst_name': 'ilev'}]\n\n var_copiers = [VariableCopier(**kwargs)\n for kwargs in var_opts]\n\n extract_data(infile,\n time_index,\n dim_copiers,\n var_copiers,\n outname_template)", "def __rd_xyz_nmol(self):\n filename = self.files['xyz']\n \n fpin = open(filename, \"r\")\n nmol = 0\n # read number of atom\n line = fpin.readline() \n while line.strip() != \"\":\n natom = int(line.split()[0]) \n line = fpin.readline()\n # read a mol\n for i in range(natom):\n line = fpin.readline() \n nmol = nmol + 1\n\n line = fpin.readline()\n fpin.close()\n\n self.dim['n_mol'] = nmol\n\n return", "def load_data_files(self, path, from_dicom_comments=True, files_start_with=None, files_end_with=None, exclude_files_end_with=['.dat','.txt','.py','.pyc','.nii','.gz','.png','.jpg','.jpeg','.eps','.hdr','.l'] ): \n\n self._n_time_points = 0 \n self._duration = [] \n self._motion = [] \n\n self._paths = []\n self._tx = []; self._ty = []; self._tz = []; \n self._rx = []; self._ry = []; self._rz = []; \n self._tx_comm = []; self._ty_comm = []; self._tz_comm = [];\n self._rx_comm = []; self._ry_comm = []; self._rz_comm = [];\n self._q0_comm = []; self._q1_comm = []; self._q2_comm = []; self._q3_comm = []; \n self._a0_comm = []; self._a1_comm = []; self._a2_comm = []; self._a3_comm = []; \n N=0 \n\n # pick the first dicom file found in path \n files = os.listdir(path)\n files.sort() \n\n # CASE 1: there exist files named with .dcm extension\n for file_name in files: \n file_valid = True\n if files_start_with is not None: \n if not file_name.startswith(files_start_with): \n file_valid = False\n if files_end_with is not None: \n if not file_name.endswith(files_end_with): \n file_valid = False \n for s in exclude_files_end_with: \n if file_name.endswith(s): \n file_valid = False \n if file_valid: \n full_path = path+os.sep+file_name\n # read moco information from files \n self._paths.append(full_path)\n try: \n f = dicom.read_file(full_path) \n except: \n print \"Could not read file \",full_path\n return \n t = f.get(0x00191025).value \n r = f.get(0x00191026).value \n self._tx.append(t[0]); self._ty.append(t[1]); self._tz.append(t[2]); \n self._rx.append(r[0]); self._ry.append(r[1]); self._rz.append(r[2]); \n motion_dicom_moco = 
[]\n \n # extract moco information stored in the dicom comment field\n if from_dicom_comments: \n s = f.get(0x00204000).value \n if N: \n a = numpy.float32(s.split(' ')[1:5])\n t = numpy.float32(s.split(' ')[6:9])\n freq = numpy.float32(s.split(' ')[10])\n r = angle_axis_to_rotation(a[0],a[1:4]) \n else: \n t = numpy.float32([0,0,0])\n r = numpy.float32([0,0,0]) \n a = numpy.float32([0,1,0,0]) #FIXME: is this right?\n \n q = angle_axis_to_quaternion(a.copy()[0],a.copy()[1:4]) \n self._a0_comm.append(a[0]); self._a1_comm.append(a[1]); self._a2_comm.append(a[2]); self._a3_comm.append(a[3]); \n self._tx_comm.append(t[0]); self._ty_comm.append(t[1]); self._tz_comm.append(t[2])\n self._q0_comm.append(q[0]); self._q1_comm.append(q[1]); self._q2_comm.append(q[2]); self._q3_comm.append(q[3]); \n self._rx_comm.append(r[0]); self._ry_comm.append(r[1]); self._rz_comm.append(r[2]); \n\n\n tra_mat = tr.translation_matrix(t) \n rot_mat = tr.quaternion_matrix(q)\n motion_dicom_comments = numpy.dot(tra_mat,rot_mat) \n\n #xaxis, yaxis, zaxis = [1, 0, 0], [0, 1, 0], [0, 0, 1]\n #Rx = tr.rotation_matrix(r[0], xaxis)\n #Ry = tr.rotation_matrix(r[1], yaxis)\n #Rz = tr.rotation_matrix(r[2], zaxis)\n #rot_mat = tr.concatenate_matrices(Rx, Ry, Rz)\n #rot_mat = Ry.copy()\n #motion_dicom_comments = numpy.dot(tra_mat,rot_mat) \n #motion_dicom_comments = rot_mat.copy()\n \n N += 1 \n if from_dicom_comments: \n self._motion.append(motion_dicom_comments) \n else: \n self._motion.append(motion_dicom_moco) \n acquisition_number = f.get(0x00200012).value \n creation_time = f.get(0x00080013).value\n# print \"Acquisition number: \", acquisition_number\n# print \"Creation time: \",creation_time\n self._n_time_points = N", "def load_lattice(filename):\n lattice = np.load(filename)\n print (\"SOM lattice loaded from %s\" %filename)\n return lattice", "def load_fluctuations_3D_fluc_only(self):\n #similar to the 2D case, we first read one file to determine the total toroidal plane number in the simulation\n flucf = self.xgc_path + 'xgc.3d.'+str(self.time_steps[0]).zfill(5)+'.h5'\n fluc_mesh = h5.File(flucf,'r')\n\n self.n_plane = fluc_mesh['dpot'].shape[1]\n dn = int(self.n_plane/self.n_cross_section)\n self.center_planes = np.arange(self.n_cross_section)*dn\n\n self.planes = np.unique(np.array([np.unique(self.prevplane),np.unique(self.nextplane)]))\n self.planeID = {self.planes[i]:i for i in range(len(self.planes))} #the dictionary contains the positions of each chosen plane, useful when we want to get the data on a given plane known only its plane number in xgc file.\n\n #initialize the arrays\n if(self.HaveElectron):\n self.nane = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n nane_all = np.zeros((self.n_plane,len(self.time_steps),len(self.mesh['R'])))\n if(self.load_ions):\n self.dni = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n dni_all = np.zeros((self.n_plane,len(self.time_steps),len(self.mesh['R'])))\n self.phi = np.zeros( (self.n_cross_section,len(self.time_steps),len(self.planes),len(self.mesh['R'])) )\n phi_all = np.zeros((self.n_plane,len(self.time_steps),len(self.mesh['R'])))\n\n #load all the rest of the files\n for i in range(1,len(self.time_steps)):\n flucf = self.xgc_path + 'xgc.3d.'+str(self.time_steps[i]).zfill(5)+'.h5'\n fluc_mesh = h5.File(flucf,'r')\n for j in range(self.n_plane):\n phi_all[j,i] += np.swapaxes(fluc_mesh['dpot'][...][:,j],0,1)\n if(self.HaveElectron):\n nane_all[j,i] += 
np.swapaxes(fluc_mesh['eden'][...][:,j],0,1)\n if(self.load_ions):\n dni_all[j,i] += np.swapaxes(fluc_mesh['iden'][...][:,j],0,1)\n fluc_mesh.close()\n\n\n #similar to the 2D case, we take care of the equilibrium relaxation contribution. See details in the comments in 2D loading function.\n\n phi_avg_tor = np.average(phi_all,axis = 0)\n if self.HaveElectron:\n nane_avg_tor = np.average(nane_all,axis=0)\n if self.load_ions:\n dni_avg_tor = np.average(dni_all,axis=0)\n\n for j in range(self.n_cross_section):\n self.phi[j,...] = np.swapaxes(phi_all[(self.center_planes[j] + self.planes)%self.n_plane,:,:],0,1) - phi_avg_tor[:,np.newaxis,:]\n if self.HaveElectron:\n self.nane[j,...] = np.swapaxes(nane_all[(self.center_planes[j] + self.planes)%self.n_plane,:,:],0,1) - nane_avg_tor[:,np.newaxis,:]\n if self.load_ions:\n self.dni[j,...] = np.swapaxes(dni_all[(self.center_planes[j] + self.planes)%self.n_plane,:,:],0,1) - dni_avg_tor[:,np.newaxis,:]\n\n self.ne0[:] += np.average(phi_avg_tor,axis=0)\n if self.HaveElectron:\n self.ne0[:] += np.average(nane_avg_tor,axis=0)\n self.ni0[:] += np.average(phi_avg_tor,axis=0)\n if self.load_ions:\n self.ni0[:] += np.average(dni_avg_tor,axis=0)\n\n return 0", "def loadNOE(self, filename, consDef):\n managerName = basename(filename)\n loader = ConstraintLoader(filename, managerName, consDef)\n self.ManagersList[managerName] = loader.loadConstraintsFromFile()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set zeros to nan for transparency
def filter_zeros(self, array_data):
    array_data[array_data == 0] = np.nan
    return array_data
[ "def nan2zero(array):\n for i in range(array.shape[0]):\n for j in range(array.shape[1]):\n if isNaN(array[i][j]):\n array[i][j] = 0\n return array", "def setZero(self):\n self.image.setZero()", "def nan_as_zero(v):\n return 0 if math.isnan(v) else v", "def zero_to_nan(self, values):\n return [float('nan') if x == 0 else x for x in values]", "def remove_nans(dataset):\n return dataset.fillna(0.0)", "def zero(self):\n self.set(0.0)", "def _fill_nans(dataset, val):\n for k in dataset.keys():\n dataset.values[np.isnan(dataset.values)] = val", "def mask_water(chlora,waterMask):\r\n chlora[waterMask==0] = np.nan\r\n return chlora", "def make_mask_from_NaNs(array, ghost_array=dsa.NoneArray, is_cell=False):\n from ..vtkCommonDataModel import vtkDataSetAttributes\n if is_cell:\n mask_value = vtkDataSetAttributes.HIDDENCELL\n else:\n mask_value = vtkDataSetAttributes.HIDDENPOINT\n\n return bitwise_or(isnan(array).astype(numpy.uint8) * mask_value,\n ghost_array)", "def prepend_nans(arr):\n a = arr.copy()\n for i in range(arr.shape[1]):\n a = np.insert(a.astype(float), 0, np.nan, 1)\n\n return a", "def _zeronan(self):\n self.rate[np.isnan(self.rate)] = 0\n self.error[np.isnan(self.error)] = 0", "def set_num_to_none(self):\n self.m.T0.value = None", "def nan(self, indices=None):\n if self.coordinates is None:\n return\n\n if (not isinstance(self.coordinates, np.ndarray)\n or self.coordinates.shape == ()):\n if self.unit is not None:\n self.coordinates = np.nan * self.unit\n else:\n self.coordinates = np.nan\n return\n\n if indices is None:\n self.coordinates.fill(np.nan)\n elif self.coordinates.ndim == 1:\n self.coordinates[indices] = np.nan\n else:\n self.coordinates[:, indices] = np.nan", "def fill_nan(array: np.ndarray) -> np.ndarray:\n try:\n import astropy.convolution\n except ImportError:\n raise ImportError('fill_nan calculation requires astropy to be installed')\n kernel = astropy.convolution.Gaussian2DKernel(x_stddev=3) # kernel x_size=8*stddev\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n array = astropy.convolution.interpolate_replace_nans(\n array, kernel, convolve=astropy.convolution.convolve\n )\n\n return array", "def set_zero(self):\n for y in range(self.length):\n for x in range(self.length):\n self.grid[x, y] = 0", "def fillzero(df):\n missing_traffic = (df == 0).all(axis=1)\n df[missing_traffic].replace(0, np.NaN)\n r_mean = df.rolling(min_periods=1, window=3, center=True).sum().shift(-1) / 2\n df.loc[missing_traffic] = r_mean[missing_traffic]\n return df", "def fill_internal_nans(array):\n print(\"a\")\n a = array[0].astype(\"float32\")\n shape = a.shape\n a[a == 0] = np.nan\n x, y = np.indices(shape)\n print(\"b\")\n interp = np.array(a)\n print(\"c\")\n # interp[np.isnan(interp)] = griddata(\n # (x[~np.isnan(a)], y[~np.isnan(a)]), # points we know\n # a[~np.isnan(a)], # values we know\n # (x[np.isnan(a)], y[np.isnan(a)]),\n # method='cubic'\n # )\n\n interp[np.isnan(interp)] = LinearNDInterpolator(\n (a[~np.isnan(a)], 2), a[~np.isnan(a)], (x[np.isnan(a)], y[np.isnan(a)])\n )\n print(\"d\")\n return interp[np.newaxis, :]", "def fill_nas(data):\n data = data.where(~np.isinf(data)).fillna(np.nan)\n data = data.where(data < 1).fillna(np.nan)\n data = data.where(data > 0).fillna(np.nan)\n\n return data", "def replace_start_ends_NaNs_with_zeros(x):\n N = len(x)\n istart = np.argwhere(~np.isnan(x))[0][0]\n iend = np.argwhere(~np.isnan(x))[-1][0]\n\n if istart.size > 0 and istart > 0:\n x[0 : istart + 1] = 0\n if iend.size > 0 and iend < N - 1:\n x[iend + 1 
: N + 1] = 0\n\n return x" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
loads and squeezes a 2D mncfile
def load_2D_mnc(self, filename):
    import pyminc.volumes.factory as pyminc
    mncfile = pyminc.volumeFromFile(filename)
    array_data = np.squeeze(np.array(mncfile.data))
    return array_data
[ "def parse_nd2_file(nd2_filepath):\n\n with ND2_Reader(nd2_filepath) as images:\n #print(\"The metadata is \", str(images.metadata).encode('utf-8'))\n \n num_fov = images.sizes['m']\n num_channels = images.sizes['c']\n num_rows = images.sizes['y']\n num_columns = images.sizes['x']\n\n fields_of_view = list(range(num_fov))\n channels = [images.metadata['plane_' + str(num)]['name'] for num in range(num_channels)]\n microns_per_pixel = images.metadata['calibration_um']\n\n try:\n images.iter_axes = 'mc'\n images.bundle_axes = 'zyx'\n except:\n images.iter_axes = 'c'\n images.bundle_axes = 'zyx'\n\n aggregated_images = []\n coordinate_pairs = []\n for z_stack in images:\n aggregated_image = np.max(z_stack, axis = 0)\n aggregated_images.append(aggregated_image)\n coordinate_pair = z_stack.metadata['y_um'], z_stack.metadata['x_um']\n coordinate_pairs.append(coordinate_pair)\n\n aggregated_images = np.reshape(aggregated_images,(num_fov, num_channels, num_rows, num_columns))\n coordinate_pairs = np.average(np.reshape(coordinate_pairs, (num_fov, num_channels, 2)), axis = 1)\n print(\"Coordinate pairs is \\n\" + str(coordinate_pairs))\n print(\"Shape is \" + str(aggregated_images.shape))\n \n data = {\n \"aggregated_images\": aggregated_images,\n \"coordinate_pairs\": coordinate_pairs,\n \"fields_of_view\": fields_of_view,\n \"channels\": channels,\n \"microns_per_pixel\": microns_per_pixel \n }\n\n return data", "def _binary_c2nc(file_in, file_out, quantity):\n\n columns = 3 # long, lat , depth\n mux_file = open(file_in, 'rb')\n\n # Number of points/stations\n (points_num,) = unpack('i', mux_file.read(4))\n\n # nt, int - Number of time steps\n (time_step_count,) = unpack('i', mux_file.read(4))\n\n #dt, float - time step, seconds\n (time_step,) = unpack('f', mux_file.read(4))\n\n msg = \"Bad data in the mux file.\"\n if points_num < 0:\n mux_file.close()\n raise ANUGAError(msg)\n if time_step_count < 0:\n mux_file.close()\n raise ANUGAError(msg)\n if time_step < 0:\n mux_file.close()\n raise ANUGAError(msg)\n\n lonlatdep = p_array.array('f')\n lonlatdep.read(mux_file, columns * points_num)\n lonlatdep = num.array(lonlatdep, dtype=float)\n lonlatdep = num.reshape(lonlatdep, (points_num, columns))\n\n lon, lat, depth = lon_lat2grid(lonlatdep)\n lon_sorted = list(lon)\n lon_sorted.sort()\n\n if not num.alltrue(lon == lon_sorted):\n msg = \"Longitudes in mux file are not in ascending order\"\n raise IOError(msg)\n\n lat_sorted = list(lat)\n lat_sorted.sort()\n\n nc_file = Write_nc(quantity,\n file_out,\n time_step_count,\n time_step,\n lon,\n lat)\n\n for i in range(time_step_count):\n #Read in a time slice from mux file\n hz_p_array = p_array.array('f')\n hz_p_array.read(mux_file, points_num)\n hz_p = num.array(hz_p_array, dtype=float)\n hz_p = num.reshape(hz_p, (len(lon), len(lat)))\n hz_p = num.transpose(hz_p) # mux has lat varying fastest, nc has long v.f.\n\n #write time slice to nc file\n nc_file.store_timestep(hz_p)\n\n mux_file.close()\n nc_file.close()\n\n return lonlatdep, lon, lat, depth", "def main(_, infile, outdir, params):\n outname_template = join(outdir, params['suffix'] + '_%Y%m%d%H.nc')\n\n dimpairs = [('lev', # name in src\n 'level'), # name in dst\n ('lat',\n 'lat'),\n ('lon',\n 'lon'),\n ('ilev',\n 'ilev')]\n\n dim_copiers = [DimensionCopier(src_name, dst_name)\n for src_name, dst_name in dimpairs]\n\n varpairs_to_copy = [(['CH3CHO_VMR_inst', 'GLYALD_VMR_inst'],\n 'ALD'),\n ('CO_VMR_inst', # name in src, lists added toghether\n 'CO'), # name in dst\n ('CRESOL_VMR_inst',\n 
'CSL'),\n ('C2H6_VMR_inst',\n 'ETH'),\n ('GLYOXAL_VMR_inst',\n 'GLY'),\n ('H2O2_VMR_inst',\n 'H2O2'),\n ('C3H8_VMR_inst',\n 'HC3'),\n ('HNO3_VMR_inst',\n 'HNO3'),\n ('BIGALK_VMR_inst',\n 'HC5'),\n ('CH2O_VMR_inst',\n 'HCHO'),\n ('HO2NO2_VMR_inst',\n 'HNO4'),\n ('HO2_VMR_inst',\n 'HO2'),\n ('ISOP_VMR_inst',\n 'ISO'),\n (['CH3COCH3_VMR_inst',\n 'HYAC_VMR_inst',\n 'MEK_VMR_inst'],\n 'KET'),\n (['MVK_VMR_inst', 'MACR_VMR_inst'],\n 'MACR'),\n ('CH3COCHO_VMR_inst',\n 'MGLY'),\n ('MPAN_VMR_inst',\n 'MPAN'),\n ('N2O5_VMR_inst',\n 'N2O5'),\n ('NH3_VMR_inst',\n 'NH3'),\n ('NO_VMR_inst',\n 'NO'),\n ('NO2_VMR_inst',\n 'NO2'),\n ('NO3_VMR_inst',\n 'NO3'),\n ('OH_VMR_inst',\n 'OH'),\n ('C2H4_VMR_inst',\n 'OL2'),\n ('ONIT_VMR_inst',\n 'ONIT'),\n ('CH3OOH_VMR_inst',\n 'OP1'),\n ('C2H5OOH_VMR_inst',\n 'OP2'),\n ('CH3COOH_VMR_inst',\n 'ORA2'),\n ('O3_VMR_inst',\n 'OZONE'),\n ('CH3COOOH_VMR_inst',\n 'PAA'),\n ('PAN_VMR_inst',\n 'PAN'),\n ('SO2_VMR_inst',\n 'SO2'),\n ('T',\n 'T'),\n ('TOLUENE_VMR_inst',\n 'TOL'),\n ('DUST1',\n 'VSOILA'),\n ('DUST2',\n 'VSOILB'),\n ('DUST3',\n 'VSOILC')]\n\n varpairs_to_copy_dimchange = [('NH4_VMR_inst',\n 'VNH4Jm'),\n (['OC1_VMR_inst', 'OC2_VMR_inst'],\n 'VORG1Jm'),\n ('SO4_VMR_inst',\n 'VSO4Jm'),\n (['CB1_VMR_inst', 'CB2_VMR_inst'],\n 'VSOOTJ')]\n\n for time_index in range(Dataset(infile).dimensions['time'].size):\n # Have to give dimensions explicitly because 'lev' changes to 'level'\n # Have to give var_val_indices explicitly because we only copy one\n # time index\n spacial_variable_options = {'var_args': {'dimensions': ('time',\n 'level',\n 'lat',\n 'lon')},\n 'var_val_indices': np.s_[time_index, :]}\n\n # 3D variables that simply get copied\n var_opts = [{'src_names': src,\n 'dst_name': dst,\n **spacial_variable_options}\n for src, dst in varpairs_to_copy]\n\n # 3D variables with dimchange to mol/mol\n var_opts += [{'src_names': src,\n 'dst_name': dst,\n 'var_attrs': {'units': 'mol/mol'},\n **spacial_variable_options}\n for src, dst in varpairs_to_copy_dimchange]\n\n # Others\n var_opts += [{'src_names': 'lat',\n 'dst_name': 'lat'},\n {'src_names': 'lev',\n 'dst_name': 'level',\n 'var_args': {'dimensions': ('level', )}},\n {'src_names': 'lon',\n 'dst_name': 'lon'},\n {'src_names': 'P0',\n 'dst_name': 'P0'},\n {'src_names': 'PS',\n 'dst_name': 'PSURF',\n 'var_args': {'dimensions': ('time', 'lat', 'lon')},\n 'var_val_indices': np.s_[time_index, :]},\n {'src_names': 'hyam',\n 'dst_name': 'hyam',\n 'var_args': {'dimensions': ('level', )}},\n {'src_names': 'hybm',\n 'dst_name': 'hybm',\n 'var_args': {'dimensions': ('level', )}},\n {'src_names': 'ilev',\n 'dst_name': 'ilev'}]\n\n var_copiers = [VariableCopier(**kwargs)\n for kwargs in var_opts]\n\n extract_data(infile,\n time_index,\n dim_copiers,\n var_copiers,\n outname_template)", "def readFile(self, blended_grid, suntans_output, roms_output):\n #### step 1) SUNTANS output\n #filename='GalvCoarse_0000.nc'\n nc = Dataset(suntans_output,'r')\n print \"#### Reading SUNTANS output file !!!! ####\\n\"\n #print nc\n\n self.uc=nc.variables['uc'][:][:,0,:]\n self.vc=nc.variables['vc'][:][:,0,:]\n xi=nc.variables['xv'][:]\n yi=nc.variables['yv'][:]\n timei=nc.variables['time']\n self.stime = num2date(timei[:],timei.units) ## SUNTANS time: seconds since 1990-01-01\n \n \n #### step 2) ROMS output\n nc2 = Dataset(roms_output,'r')\n print \"#### Reading ROMS output file !!!! 
####\\n\"\n lon0=nc2.variables['lon'][:]\n lat0=nc2.variables['lat'][:]\n self.mask0=nc2.variables['mask'][:]\n self.u_roms=nc2.variables['water_u'][:]\n self.v_roms=nc2.variables['water_v'][:]\n ftime=nc2.variables['time']\n rtime = num2date(ftime[:],ftime.units) ## ROMS time: seconds since 1970-01-01 \n (y0,x0)=lon0.shape\n \n #### subset roms time, note that SUNTANS time period is shorted than ROMS time period\n t0 = self.stime[0]\n t1 = self.stime[-1]\n self.ind0 = self.findNearest(t0,rtime)\n self.ind1 = self.findNearest(t1,rtime)\n self.time_ss = rtime[self.ind0:self.ind1+1] \n \n xroms0 = np.zeros_like(lon0)\n yroms0 = np.zeros_like(lat0)\n for i in range(y0):\n for j in range(x0):\n (yroms0[i,j],xroms0[i,j])=utm.from_latlon(lat0[i,j],lon0[i,j])[0:2]\n \n #### step 3) Blended grid\n #filename='blended_grid.nc'\n nc1 = Dataset(blended_grid,'r')\n print \"#### Reading curvilinear blended grid !!!!####\\n\"\n #print nc1\n \n xr = nc1.variables['xr'][:]\n yr = nc1.variables['yr'][:]\n lon = nc1.variables['lon_rho'][:]\n lat = nc1.variables['lat_rho'][:]\n mask = nc1.variables['mask_rho'][:]\n \n xroms = np.zeros_like(lon)\n yroms = np.zeros_like(lat)\n (y,x) = lon.shape\n for i in range(y):\n for j in range(x):\n (yroms[i,j],xroms[i,j])=utm.from_latlon(lat[i,j],lon[i,j])[0:2]\n \n #### subset ROMS grid for interpolation ####\n def findNearset(x,y,lon,lat):\n \"\"\"\n Return the J,I indices of the nearst grid cell to x,y\n \"\"\"\n \n dist = np.sqrt( (lon - x)**2 + (lat - y)**2)\n \n return np.argwhere(dist==dist.min())\n \n SW=utm.to_latlon(xi.min(),yi.min(),15,'R') ###(lat, lon)\n NE=utm.to_latlon(xi.max(),yi.max(),15,'R') \n \n #SW=utm.from_latlon(27.95,-95.2)[0:2]\n #NE=utm.from_latlon(30.0, -94.25)[0:2]\n \n #### step 4) searching for the index of the subset domain for interpolation\n #### Hard coded to narrow this domain\n ind = findNearset(SW[1], SW[0], lon, lat)\n J0=ind[0][0] + 15\n I0=ind[0][1] + 15\n \n ind = findNearset(NE[1], NE[0], lon, lat)\n J1=ind[0][0] + 25\n I1=ind[0][1] - 35\n \n yss = yroms[J0:J1,I0:I1] ##subset x,y\n xss = xroms[J0:J1,I0:I1]\n self.maskss = mask[J0:J1,I0:I1]\n \n #pdb.set_trace()\n #### Step 5) Prepare the grid variables for the SUNTANS interpolation class\n xy_sun = np.vstack((yi.ravel(),xi.ravel())).T ## SUNTANS grid, xi: latitude, yi: longitude \n xy_new = np.vstack((xss[self.maskss==1],yss[self.maskss==1])).T ## blended grid\n \n self.Fuv = interpXYZ(xy_sun,xy_new, method='idw')\n \n #### define spatial and length scales ####\n self.Nt = self.stime.shape[0]\n (self.X, self.Y) = xss.shape\n \n \n #### step 6) Prepare for interpolate original ROMS velocity\n SW0 = (lat0.min(),lon0.min())\n NE0 = (lat0.max(),lon0.max())\n ind0 = findNearset(SW0[1], SW0[0], lon, lat)\n JJ0=ind0[0][0] -40\n II0=ind0[0][1] \n \n ind0 = findNearset(NE0[1], NE0[0], lon, lat)\n JJ1=ind0[0][0] +25\n II1=ind0[0][1] \n \n yss0 = yroms[JJ0:JJ1,II0:II1] ##subset x,y for ROMS velocity\n xss0 = xroms[JJ0:JJ1,II0:II1]\n self.maskss0 = mask[JJ0:JJ1,II0:II1]\n \n #### step 7) Prepare the grid variables for the SUNTANS interpolation class\n xy_roms = np.vstack((xroms0[self.mask0==1],yroms0[self.mask0==1])).T\n xy_new0 = np.vstack((xss0[self.maskss0==1],yss0[self.maskss0==1])).T ## blended grid\n \n self.Fuv0 = interpXYZ(xy_roms,xy_new0, method='idw')\n \n #### define spatial and length scales ####\n self.Nt0 = rtime.shape[0]\n (self.X0, self.Y0) = xss0.shape\n self.lon0=lon[JJ0:JJ1,II0:II1]\n self.lat0=lat[JJ0:JJ1,II0:II1]\n \n #### step 8) define the index of SUNTANS in sub-domain 
of ROMS\n self.JJJ0 = J0-JJ0-1\n self.JJJ1 = J1-JJ0\n \n self.III0 = I0-II0\n self.III1 = I1-II0\n \n #### the new time is the ROMS output time ####\n self.time = ftime[:]\n \n \n #pdb.set_trace()", "def import_results_mncfile(self, filename):\n if not os.path.isfile(filename):\n print(\"Can't find {}\".format(filename))\n print(\"Consider generating one with the .generate_results_mncfile\")\n array_data = self.load_2D_mnc(filename)\n array_data = self.orient_local_mncfile(array_data)\n # array_data = self.filter_zeros(array_data)\n return array_data", "def cdf_output_2D(self,output_path,filehead='fluctuation'):\n file_start = output_path + filehead\n for i in range(self.n_cross_section):\n for j in range(len(self.time_steps)):\n\n fname = file_start + str(self.time_steps[j])+'_'+str(i) + '.cdf'\n f = nc.netcdf_file(fname,'w')\n f.createDimension('z_dim',self.grid.NZ)\n f.createDimension('r_dim',self.grid.NR)\n\n rr = f.createVariable('rr','d',('r_dim',))\n rr[:] = self.grid.R1D[:]\n zz = f.createVariable('zz','d',('z_dim',))\n zz[:] = self.grid.Z1D[:]\n rr.units = zz.units = 'Meter'\n\n bb = f.createVariable('bb','d',('z_dim','r_dim'))\n bb[:,:] = self.B_on_grid[:,:]\n bb.units = 'Tesla'\n\n dne = f.createVariable('dne','d',('z_dim','r_dim'))\n dne[:,:] = self.dne_ad_on_grid[i,j,:,:] + self.nane_on_grid[i,j,:,:]\n dne.units = 'per cubic meter'\n\n ne = f.createVariable('ne','d',('z_dim','r_dim'))\n ne[:,:] = self.ne0_on_grid[:,:] + dne[:,:]\n ne.units = 'per cubic meter'\n\n te = f.createVariable('te','d',('z_dim','r_dim'))\n te[:,:] = self.te_on_grid[:,:]/1000\n te.units = 'keV'\n\n ti = f.createVariable('ti','d',('z_dim','r_dim'))\n ti[:,:] = self.ti_on_grid[:,:]/1000\n ti.units = 'keV'\n\n f.close()", "def importDCAM(filename, dims, timepoints):\n with open(filename, 'rb') as fid:\n fid.seek(233)\n A = np.fromfile(fid, dtype='>u2')\n# A = np.fromfile(fid, dtype=np.uint16).byteswap()\n # TODO: consider using np.memmap here\n A = A[:dims[0]*dims[1]*timepoints]\n assert(len(A)==(dims[0]*dims[1]*timepoints))\n mov = np.fliplr(A.reshape([dims[0], dims[1], timepoints], order='F'))\n # hack to remove strange pixels with very high intensity\n mov[np.where(mov > 60000)] = 0\n return mov", "def read_mat_7_3(mat_file):\n import digitStruct #Use sarahrn/Py-Gsvhn-DigiStruct-Reader to decode file\n objectList = []\n x_pix = []\n y_pix = []\n for dsObj in digitStruct.yieldNextDigitStruct(mat_file): #Only call to digiStruct\n label = ''\n bounding = []\n for bbox in dsObj.bboxList:\n label += str(bbox.label)\n boundBox = (bbox.label, bbox.left, bbox.top, bbox.width, bbox.height)\n bounding.append(boundBox)\n try:\n image_name = mat_file.split('\\\\')[0] + '\\\\' + dsObj.name\n image = cv2.imread(image_name, 0)\n if isinstance(image, np.ndarray):\n y = len(image)\n x = len(image[0])\n x_pix.append(x)\n y_pix.append(y)\n data = (image_name, x, y, bounding, label) \n objectList.append(data)\n except IOError as e:\n print('Could not read:', image_name, ':', e, '- it\\'s ok, skipping.')\n data_len = len(objectList)\n x = max(x_pix)\n y = max(y_pix)\n print(data_len, x, y)\n dataset = np.ndarray((data_len, 2), dtype='|S16')\n bbox_set = np.ndarray((data_len, 6, 5), dtype=np.int16)\n sizes = np.ndarray((data_len, 2), dtype=np.int16)\n for s, sample in enumerate(objectList):\n dataset[s, 0] = sample[0]\n dataset[s, 1] = sample[4]\n sizes[s, 0] = sample[1]\n sizes[s, 1] = sample[2]\n for b, bbox in enumerate(sample[3]):\n bbox_set[s, b, :] = bbox\n return dataset, bbox_set, sizes", "def 
read_grid(filename):\n\n hd = read_header(filename)\n \n def maybe_swap(xs):\n endianness = sys.byteorder\n if endianness == \"little\" and hd.type.endianness_flag == -1:\n return\n elif endianness == \"big\" and hd.type.endianness_flag == 0:\n return\n xs.byteswap()\n\n n = 1\n for i in range(3):\n if i != hd.axis: n *= hd.dim[i]\n\n if hd.axis == 0: j, k = 1, 2\n if hd.axis == 1: j, k = 0, 2\n if hd.axis == 2: j, k = 0, 1\n\n if hd.type.is_vector_grid:\n xs, ys, zs = array.array(\"f\"), array.array(\"f\"), array.array(\"f\")\n with open(filename, \"rb\") as fp:\n fp.read(hd.sizes.header + 8)\n xs.fromfile(fp, n)\n ys.fromfile(fp, n)\n zs.fromfile(fp, n)\n\n maybe_swap(xs)\n maybe_swap(ys)\n maybe_swap(zs)\n \n if hd.axis == -1:\n xs = np.reshape(xs, (hd.dim[2], hd.dim[1], hd.dim[0]))\n ys = np.reshape(ys, (hd.dim[2], hd.dim[1], hd.dim[0]))\n zs = np.reshape(zs, (hd.dim[2], hd.dim[1], hd.dim[0]))\n else:\n xs = np.reshape(xs, (hd.dim[k], hd.dim[j]))\n ys = np.reshape(ys, (hd.dim[k], hd.dim[j]))\n zs = np.reshape(zs, (hd.dim[k], hd.dim[j]))\n\n return np.array([xs, ys, zs])\n else:\n xs = array.array(\"f\")\n with open(filename, \"rb\") as fp:\n fp.read(hd.sizes.header + 8)\n xs.fromfile(fp, n)\n\n maybe_swap(xs)\n if hd.axis == -1:\n xs = np.reshape(xs, (hd.dim[2], hd.dim[1], hd.dim[0]))\n else:\n xs = np.reshape(xs, (hd.dim[k], hd.dim[j]))\n return xs", "def extract_mesh2d(file_name):\n\n res = TelemacFile(file_name)\n header = ['X', 'Y']\n\n data = np.column_stack((res.meshx, res.meshy))\n\n return header, data", "def _nd2_to_tif(input_filename,meta=True):\n # add parse_filename function to get info from nd2 name and convert to tif filename\n info = ops.filenames.parse_filename(input_filename)\n \n file_description={}\n for k,v in sorted(info.items()):\n file_description[k] = v\n file_description['ext']='tif'\n file_description['subdir']=file_description['expt']+'_tif/'+file_description['mag']+'_'+file_description['cycle']\n\n\n with ND2Reader(input_filename) as images:\n images.iter_axes='v'\n axes = 'xy'\n if 'c' in images.axes:\n axes = 'c' + axes\n if 'z' in images.axes:\n axes = 'z' + axes\n images.bundle_axes = axes\n\n if 'z' in images.axes:\n for site,image in zip(images.metadata['fields_of_view'],images):\n image = image.max(axis=0)\n\n output_filename = ops.filenames.name_file(file_description,site=str(site)) \n save(output_filename,image[:])\n else:\n for site,image in zip(images.metadata['fields_of_view'],images):\n output_filename = ops.filenames.name_file(file_description,site=str(site)) \n save(output_filename,image[:])\n \n # METADATA EXTRACTION\n if meta==True:\n well_metadata = [{\n 'filename':ops.filenames.name_file(file_description,site=str(site)),\n 'field_of_view':site,\n 'x':images.metadata['x_data'][site],\n 'y':images.metadata['y_data'][site],\n 'z':images.metadata['z_data'][site],\n 'pfs_offset':images.metadata['pfs_offset'][0],\n 'pixel_size':images.metadata['pixel_microns']\n } for site in images.metadata['fields_of_view']]\n metadata_filename = ops.filenames.name_file(file_description,tag='metadata',ext='pkl')\n pd.DataFrame(well_metadata).to_pickle(metadata_filename)", "def load_and_track(directory,fileprefix,fileext=\".dat\",radius = 3,minmass=None,preprocess=False):\n# directory = r'C:\\data\\200203\\FSM-2D'\n# fileprefix = '200203FSM54 FSM-2D'\n# fileext = \".dat\"\n# filename = '200203FSM54 FSM-2D Z.dat'\n filename = fileprefix +' Z'+fileext\n # fileprefix = \n # also read from file name X Y Z\n usersDf = pd.read_csv(directory+'\\\\' + fileprefix + ' Z' 
+ fileext, skiprows=0,delim_whitespace=True,header=None).values\n numX,numY=usersDf.shape\n f = tp.locate(usersDf, radius, invert=False,minmass=minmass,preprocess=preprocess)\n\n usersDfX = pd.read_csv(directory+'\\\\' + fileprefix + ' X' + fileext, skiprows=0,delim_whitespace=True,header=None).values\n usersDfY = pd.read_csv(directory+'\\\\' + fileprefix + ' Y' + fileext, skiprows=0,delim_whitespace=True,header=None).values\n\n usersDfX = np.reshape(usersDfX,usersDfX.size)\n usersDfY = np.reshape(usersDfY,usersDfY.size)\n\n if usersDfY[0]<usersDfY[-1]:\n originPosition = 'lower'\n else:\n originPosition = 'upper'\n \n pltstyle = {\n \"origin\": originPosition\n }\n \n extent = [min(usersDfX[0], usersDfX[-1]),max(usersDfX[0], usersDfX[-1]), min(usersDfY[0], usersDfY[-1]),max(usersDfY[0], usersDfY[-1]) ]\n fig2,ax2 = plt.subplots(figsize=(6,6))\n\n plt.imshow(usersDf,origin=originPosition,extent=extent,cmap=black_blue_white1)\n # use lifeplot and take into account of the X,Y axis data\n# lp = LivePlot2DV2((6, 8), usersDfX, usersDfY, usersDf, 'x (um)', 'y (um)', 'APD (kct/s)')\n# lp = LivePlot2DV2( usersDfX, usersDfY, usersDf)\n \n # tp.annotate(f, usersDf,plot_style={'markersize':7},imshow_style=pltstyle)\n\n fnew = f\n fnew.index = list(range(len(f)))\n for i in list(range( len(f))):\n \n xcoor = usersDfX[0] + (usersDfX[-1]-usersDfX[0])*(fnew.iloc[i].x )/( numX )\n ycoor = usersDfY[0] + (usersDfY[-1]-usersDfY[0])*(fnew.iloc[i].y )/( numY )\n \n fnew.at[i,'x'] = xcoor\n fnew.at[i,'y'] = ycoor\n# print(i,len(f),xcoor,ycoor,f.iloc[i].x,f.iloc[i].y)\n\n plt.scatter(xcoor, ycoor, s=100, facecolors='none', edgecolors='r')\n ax2.annotate(str(i),(xcoor,ycoor),fontsize=12,color='black',\n bbox=dict(boxstyle='round,pad=0.2', fc='white', alpha=0.6))\n\n plt.xlabel('', labelpad=16, fontsize=16)\n plt.ylabel('', labelpad=16, fontsize=16) \n ax2.tick_params(axis='both', labelsize=16)\n plt.title(directory+\"\\\\\"+filename,fontsize=16)\n \n \n return fnew\n # # https://stackoverflow.com/questions/24108063/matplotlib-two-different-colors-in-the-same-annotate/49041502", "def Readmsh(filename):\n\t# Open file\n\tfid = open(filename, 'r')\n\tstop = 0 # flag to continue (0) or stop (1) reading\n\tnumber = 0 # initialize variable (recognize as global)\n\t# Search header file\n\twhile stop == 0:\n\t\tline = fid.readline()\n\t\tif '$Mesh' in line:\n\t\t\tline = fid.readline() # Gmsh information format\n\t\t\tstop = 1\n\t# Search nodes\n\tstop = 0\n\twhile stop == 0:\n\t\tline = fid.readline()\n\t\tif '$Nodes' in line:\n\t\t\tline = fid.readline()\n\t\t\tnumber = int(line) # Number-of-nodes\n\t\t\tstop = 1\n\tcount = 0\n\tcoords = np.zeros((number,3),dtype=float) #coord-x-y-z\n\twhile count < number:\n\t\tline = fid.readline()\n\t\tvalues = line.split()\n\t\tcoords[count][0:] = values[1:] # Omit Node-number\n\t\tcount = count + 1\n\t# Search elements\n\tstop = 0\n\twhile stop == 0:\n\t\tline = fid.readline()\n\t\tif '$Elements' in line:\n\t\t\tline = fid.readline()\n\t\t\tnumber = int(line) # Number-of-elm\n\t\t\tstop = 1\n\tcount = 0\n\tflag_lines = 0 # zero if line elements not added\n\tflag_triangles = 0 # zero if triangle elements not added \n\telm_lines = np.zeros((1,3),dtype=int) #phys_ent node1-2\n\telm_triangles = np.zeros((1,4),dtype=int) #phys_ent node1-2-3\n\twhile count < number:\n\t\tline = fid.readline()\n\t\tvalues = line.split()\n\t\tnumber_tags = int(values[2]) # Number-of-tags (tags still not used)\n\t\tnew_elm = [values[3]]\n\t\tnew_elm.extend(values[3+number_tags:])\n\t\tnew_elm = 
np.array(new_elm,dtype=int)\n\t\tif values[1] == '1':\n\t\t\tif flag_lines == 1:\n\t\t\t\telm_lines = np.vstack((elm_lines,new_elm))\n\t\t\telse:\n\t\t\t\telm_lines = new_elm\n\t\t\t\tflag_lines = 1\n\t\telif values[1] == '2':\n\t\t\tif flag_triangles == 1:\n\t\t\t\telm_triangles = np.vstack((elm_triangles,new_elm))\n\t\t\telse:\n\t\t\t\telm_triangles = new_elm\n\t\t\t\tflag_triangles = 1\n\t\telse:\n\t\t\tprint \"Type\", values[1], \"in element\", values[0], \"is not supported element type.\"\n\t\tcount = count + 1\n\tfid.close()\n\tif not flag_triangles:\n\t\treturn coords, elm_lines\n\telse:\n\t\treturn coords, elm_lines, elm_triangles", "def urs2nc(basename_in='o', basename_out='urs'):\n\n files_in = [basename_in + WAVEHEIGHT_MUX_LABEL,\n basename_in + EAST_VELOCITY_LABEL,\n basename_in + NORTH_VELOCITY_LABEL]\n files_out = [basename_out + '_ha.nc',\n basename_out + '_ua.nc',\n basename_out + '_va.nc']\n quantities = ['HA', 'UA', 'VA']\n\n #if os.access(files_in[0]+'.mux', os.F_OK) == 0 :\n for i, file_name in enumerate(files_in):\n if os.access(file_name, os.F_OK) == 0:\n if os.access(file_name + '.mux', os.F_OK) == 0 :\n msg = 'File %s does not exist or is not accessible' % file_name\n raise IOError(msg)\n else:\n files_in[i] += '.mux'\n log.critical(\"file_name %s\" % file_name)\n\n hashed_elevation = None\n for file_in, file_out, quantity in zip(files_in,\n files_out,\n quantities):\n lonlatdep, lon, lat, depth = _binary_c2nc(file_in,\n file_out,\n quantity)\n if hashed_elevation is None:\n elevation_file = basename_out + '_e.nc'\n write_elevation_nc(elevation_file,\n lon,\n lat,\n depth)\n hashed_elevation = myhash(lonlatdep)\n else:\n msg = \"The elevation information in the mux files is inconsistent\"\n assert hashed_elevation == myhash(lonlatdep), msg\n\n files_out.append(elevation_file)\n\n return files_out", "def prepare_nc(FileIn, FileRef, FileOut, x, y, stream_thres, relevant_T):\n # if projStr.lower() == 'epsg:4326':\n# if srs.IsProjected() == 0:\n# logger.info('Found lat-lon coordinate system, preparing lat, lon axis')\n# x_dim = 'lon'; y_dim = 'lat'\n# x_name = 'longitude'; y_name = 'latitude'\n# x_longname = 'Longitude values';y_longname = 'Latitude values'\n# x_unit = 'degrees_east'; y_unit = 'degrees_north'\n# gridmap = 'latitude_longitude'\n# else:\n# logger.info('Found a Cartesian projection coordinate system, preparing x, y axis')\n# x_dim = 'x'; y_dim = 'y'\n# x_name = 'projection_x_coordinate'; y_name = 'projection_x_coordinate'\n# x_longname = 'x-coordinate in Cartesian system';y_longname = 'y-coordinate in Cartesian system'\n# x_unit = 'm'; y_unit = 'm'\n# gridmap = ''\n y_dim = 'lat'\n x_dim = 'lon'\n y_unit = 'degrees_north'\n x_unit = 'degrees_east'\n y_name = 'latitude'\n x_name = 'longitude'\n y_longname = 'Latitude values'\n x_longname = 'Longitude values'\n gridmap = 'latitude_longitude'\n logger.info('Preparing ' + FileOut)\n nc_src = Dataset(FileIn,'r')\n nc_trg = Dataset(FileOut,'w') # format='NETCDF3_CLASSIC'\n # Create dimensions\n nc_trg.createDimension(\"time\", 0) #NrOfDays*8\n nc_trg.createDimension(y_dim, len(y))\n nc_trg.createDimension(x_dim, len(x))\n # create axes\n\n DateHour = nc_trg.createVariable('time','f8',('time',))\n DateHour.units = 'Years since 0001-01-01 00:00:00'\n DateHour.calendar = 'gregorian'\n DateHour.standard_name = 'time'\n DateHour.long_name = 'time'\n DateHour_src = nc_src.variables['time'][:]\n DateHour[:] = np.arange(0,len(DateHour_src))\n # DateHour[:] = 
nc4.date2num(datetimeObj,units=nc_src.variables['time'].units,calendar=DateHour.calendar)\n y_var = nc_trg.createVariable(y_dim,'f4',(y_dim,))\n y_var.standard_name = y_name\n y_var.long_name = y_longname\n y_var.units = y_unit\n x_var = nc_trg.createVariable(x_dim,'f4',(x_dim,))\n x_var.standard_name = x_name\n x_var.long_name = x_longname\n x_var.units = x_unit\n y_var[:] = y\n x_var[:] = x\n\n # Set attributes\n # Change some of the attributes, add some\n all_attrs = nc_src.ncattrs()\n for attr in all_attrs:\n try:\n attr_val = eval('nc_src.' + attr)\n exec(\"nc_trg.\" + attr + \" = '\" + attr_val + \"'\")\n except:\n logger.warning('Could not write attribute')\n nc_trg.institution = 'Deltares\\nPBL\\nUtrecht University'\n nc_trg.history = \"File generated from Deltares' GLOFRIS_downscale v1.0. Original file details given in global attributes\"\n nc_trg.source_case = FileIn\n nc_trg.reference_case = FileRef\n nc_trg.stream_threshold= str(stream_thres)\n nc_trg.return_period_threshold = str(relevant_T)\n nc_trg.disclaimer = 'The availability and quality of these data is in no way guaranteed by Deltares'\n # write projection info to file\n wgs84 = nc_trg.createVariable('wgs84','c')\n wgs84.long_name = 'wgs84'\n wgs84.EPSG_code = 'EPSG:4326'\n wgs84.proj4_params = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'\n wgs84.grid_mapping_name = 'latitude_longitude'\n\n # create water level variable\n variab = nc_trg.createVariable('water_level','f4',('time',y_dim,x_dim,),chunksizes=(1,len(y),len(x)),fill_value=-9999)\n variab.units = 'm'\n variab.standard_name = 'water_surface_height_above_reference_datum'\n variab.long_name = 'Water level above surface elevation'\n\n nc_trg.sync()\n nc_trg.close()", "def load_off(filename, size):\n\n # create 3D array (cube with edge = size)\n obj = np.zeros([size, size, size])\n\n # open filename.off\n with open(filename) as f:\n\n # read first line\n header = f.readline() # returns a string\n # set properties\n properties = f.readline().split(\" \") # returns a list of chars\n num_vertices = int(properties[0])\n num_faces = int(properties[1])\n num_edges = int(properties[2])\n print(\"Properties:\",\n \"\\nNumber of vertices:\", num_vertices,\n \"\\nNUmber of faces: \", num_faces,\n \"\\nNumber of edges: \", num_edges)\n\n # read everything else\n body = f.readlines() # returns a list of strings\n if num_vertices != 0:\n vertices = body[0:num_vertices]\n else:\n raise ValueError(\"No vertex found.\")\n if num_faces != 0:\n faces = body[num_vertices:num_vertices+num_faces]\n else:\n raise ValueError(\"No face found.\")\n if num_edges != 0:\n edges = body[num_faces:num_faces+num_edges]\n \n # set vertices\n for i in range(num_vertices):\n coords = vertices[i].split(\" \")\n if (int(float(coords[0])) < size) and (int(float(coords[1])) < size) and (int(float(coords[2])) < size):\n obj[int(float(coords[0])), int(float(coords[1])), int(float(coords[2]))] = 1\n else:\n print(\"Error at vertex\", i)\n\n return obj", "def convert_mf1(input_filename, output_filename, dimension1, dimension2,\r\n global_bool, progress_bar):\r\n #read the file\r\n datasize = datasize_finder(input_filename, global_bool)\r\n\r\n with open(input_filename,'rb') as fid:\r\n header=fid.read(2048)\r\n temp_hdf5 = h5py.File(output_filename+'temporary','w')\r\n list_xdata = read_into_cube(fid, temp_hdf5, global_bool, dimension1,\r\n dimension2, progress_bar)\r\n generate_output(output_filename, temp_hdf5, global_bool, header,\r\n list_xdata)\r\n temp_hdf5.close()\r\n 
os.remove(output_filename+'temporary')", "def create_sph_ncfile(filename,attribs,n,dim):\n nc_file = netCDF4.Dataset(filename,'w')\n\n # Miscellaneous attributes\n setattr(nc_file,'Date',1)\n setattr(nc_file,'Creator','ac')\n\n # sphvars file attributes\n for name,val in attribs.iteritems():\n setattr(nc_file,name,val)\n \n # Create netcdf dimensions\n # number of particles\n # spatial dimensions\n # timestep number\n nc_file.createDimension('timestep',None)\n nc_file.createDimension('particle',n)\n nc_file.createDimension('spatial',dim)\n\n # Create variables for the dimensions, and populate them\n tstep = nc_file.createVariable('timestep','d',('timestep',))\n part = nc_file.createVariable('particle','i',('particle',))\n space = nc_file.createVariable('spatial','i',('spatial',))\n \n part[:] = numpy.array(range(n))\n space[:] = numpy.array([0,1,2])\n\n dimnames = nc_file.dimensions.keys()\n\n # Set up variables\n # every particle property has a variable\n # and there are also variables for the box size\n # and the box dimensions.\n # a variable for 'time elapsed' at each step (for variable stepping)\n\n #each variable needs a \"units\" attribute\n\n #vector variables\n v_dims =('timestep','particle','spatial')\n\n #scalar variables\n sc_dims = ('timestep','particle')\n \n #histogram variables\n hist_dims = ('timestep')\n\n #total and average variables\n tot_dims = ('timestep')\n\n r = nc_file.createVariable('position','d',v_dims)\n v = nc_file.createVariable('velocity','d',v_dims)\n #a = nc_file.createVariable('acceleration','d',v_dims)\n #temp = nc_file.createVariable('temperature','d',sc_dims)\n energy = nc_file.createVariable('internal_energy','d',sc_dims)\n mass = nc_file.createVariable('mass','d',sc_dims)\n #rho = nc_file.createVariable('density','d',sc_dims)\n #press = nc_file.createVariable('pressure','d',sc_dims)\n #ss =nc_file.createVariable('sound_speed','d',sc_dims)\n #visc =nc_file.createVariable('viscosity','d',sc_dims)\n #h = nc_file.createVariable('smoothing_length','d',sc_dims)\n #hl = nc_file.createVariable('long_smoothing_length','d',sc_dims)\n #q = nc_file.createVariable('heat_flux','d',v_dims)\n #vsm= nc_file.createVariable('smoothed_velocity','d',v_dims)\n #psm =nc_file.createVariable('smoothed_pressure','d',sc_dims)\n #tmpsm =nc_file.createVariable('smoothed_temperature','d',sc_dims)\n #grad_rho = nc_file.createVariable('density_gradient','d',v_dims)\n #ptype = nc_file.createVariable('particle_type','u1',sc_dims)\n\n #now set up the non-particle averaged or total system variables\n # kinetic energy, internal energy, isolated Hamiltonian\n\n #V = nc_file.createVariable('total_kinetic_energy','d',tot_dims) \n #T = nc_file.createVariable('total_internal_energy','d',tot_dims)\n #tav = nc_file.createVariable('average_temp','d',tot_dims)\n #rhoav = nc_file.createVariable('rho_average','d',tot_dims)\n #tstat_energy = nc_file.createVariable('thermostat_energy','d',tot_dims)\n #TV = nc_file.createVariable('hamiltonian','d',tot_dims)\n #dti = nc_file.createVariable('dt','d',tot_dims)\n #sys_dt = nc_file.createVariable('systime','d',tot_dims)\n \n nc_file.sync()\n nc_file.close()", "def import_sgems_dat_file(file_path):\n with open(file_path, \"r\") as f:\n img = []\n for i, row in enumerate(f):\n if i == 0:\n row_vals = row.split(\" \")\n nx = int(row_vals[0])\n ny = int(row_vals[1])\n nz = int(row_vals[2])\n elif i >2:\n stripped_vals = [val.strip(\"\\n\") for val in row.split(\" \")]\n legit_values = [val for val in stripped_vals if val not in [\"\", \" \", \"\\\"\"]]\n 
node_value = int(legit_values[0])\n img.append(node_value)\n img = np.array(img).reshape(ny, nx).astype(np.int32)\n return img" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get string and index describing axis
def get_axis_info(self):
    axes = ['x', 'y', 'z']
    axis_strings = ['Sagittal', 'Coronal', 'Axial']
    self.axis_index = axes.index(self.axis)
    self.axis_str = axis_strings[self.axis_index]
[ "def Get_axis(node):\n\n node.plotting()\n\n if len(node) == 1:\n arg = node[0]\n\n if arg.cls == \"Matrix\" and len(arg[0]) == 4:\n a,b,c,d = arg[0]\n return \"_plot.axis(\" + str(a) + \", \" + str(b) + \", \" + str(c) + \", \" + str(d) + \")\"\n\n elif arg.cls != \"Matrix\" and arg.num and arg.dim>0:\n\n name1 = arg.name + \"(0)\";\n name2 = arg.name + \"(1)\"\n name3 = arg.name + \"(2)\";\n name4 = arg.name + \"(3)\"\n if arg.mem not in (2,3):\n name1 = \"static_cast<double>(\" + name1 + \")\"\n name2 = \"static_cast<double>(\" + name2 + \")\"\n name3 = \"static_cast<double>(\" + name3 + \")\"\n name4 = \"static_cast<double>(\" + name4 + \")\"\n\n return \"_plot.axis(\" + name1 + \", \" + name2 + \", \" + name3 + \", \" + name4 + \")\"\n\n node.error(\"argument array type\")\n return \"_plot.axis(\", \", \", \")\"", "def _get_axes_names(shape):\n names = _AXIS_COORDINATE_NAMES[-1 : -(len(shape) + 1) : -1]\n return names[::-1]", "def get_axes_labels(self) -> (str, str):\n units = self.units.get()\n\n # First, the x axes\n x_units = units.split('_')[-1]\n if x_units in ('nm', 'm'):\n x_label = 'Wavelength ({})'.format(x_units)\n elif x_units == 'hz':\n x_label = 'Frequency (hz)'\n else:\n x_label = 'Energy ({})'.format(x_units)\n\n # And now the y axes\n if units.split('_')[0] == 'power':\n y_label = 'Power density (W m$^{{-2}}$ {}$^{{-1}}$)'.format(x_units)\n else:\n y_label = 'Photon flux (photons m$^{{-2}}$ {}$^{{-1}}$)'.format(x_units)\n\n return x_label, y_label", "def get_axis(header):\n mywcs = wcs.WCS(header)\n specwcs = mywcs.sub([wcs.WCSSUB_SPECTRAL])\n return specwcs.wcs_pix2world(np.arange(header['NAXIS{0}'.format(mywcs.wcs.spec+1)]), 0)", "def axis(self, key):\n raise NotImplementedError(\"Axis depends on indexing convention of \"\n \"input file.\")", "def get_dimension_labels(self):\n\n axes_labels = []\n for dim, quantity in enumerate(self.axes_quantities):\n axes_labels.append('{} [{}]'.format(quantity, self.axes_units[dim]))\n return axes_labels", "def _dimShape(self):\n naxis = self.header['NAXIS']\n axes = naxis*[0]\n for j in range(naxis):\n axes[j] = self.header['NAXIS'+`j+1`]\n axes.reverse()\n return tuple(axes)", "def _GetAxis(self):\n XaxisCounter = 0\n YaxisCounter = 0\n \n for node in self.svg.iter(): \n if node.get(inkex.addNS(\"AxisType\",\"TimeAnalysis\")) == \"Yaxis\":\n Yaxis = self._ParseAxis(node)\n YaxisCounter += 1\n elif node.get(inkex.addNS(\"AxisType\",\"TimeAnalysis\")) == \"Xaxis\":\n Xaxis = self._ParseAxis(node)\n XaxisCounter += 1\n \n assert (XaxisCounter == 1 and YaxisCounter == 1), \"Wrong number of X or Y axis in document\"\n \n return Xaxis, Yaxis", "def index_of(self, axis):\n return _ffi_api.LayoutIndexOf(self, axis) # type: ignore", "def _ijdim_to_pydim(self, axis):\n if str(axis) in ['X', 'Y', 'Z', 'C', 'T']:\n return str(axis).lower()\n return str(axis)", "def get_axis_names(axes_metadata):\n\n def leaf_rewrite(x):\n return None if x is None else jax.sharding.PartitionSpec(*x)\n\n def rewrite(tree):\n return jax.tree_util.tree_map(leaf_rewrite, tree, is_leaf=_is_logical_spec)\n\n axes_metadata = unfreeze(axes_metadata) # pytype: disable=wrong-arg-types\n flat_dict = {\n re.sub(r'_axes$', '', '/'.join(k)): rewrite(v.names)\n for k, v in flatten_dict(axes_metadata).items()\n }\n return freeze(\n unflatten_dict({tuple(k.split('/')): v for k, v in flat_dict.items()})\n )", "def test_get_axis_info():\n # the output is a dictionary with the fields: pixel_origin, x_scale, y_scale, step, and units\n axis_info_dict = get_axis_info([1], [5], 
[20], [250], [10], [25], [30, 280], 30, ['volts', 'amps'])\n assert isinstance(axis_info_dict, dict), 'axis_info_dict is not a dictionary'\n for field in ['step', 'pixel_origin', 'x_scale', 'y_scale', 'units', 'y_pixel_range', 'x_pixel_range']:\n assert field in axis_info_dict.keys(), 'axis_info_dict is missing fields'\n return", "def entDimPos(ent):\n return entDimString(ent), entPosString(ent)", "def Get_caxis(node):\n\n node.plotting()\n\n if len(node) == 1:\n arg = node[0]\n\n if arg.cls == \"Matrix\" and len(arg[0]) == 2:\n a,b = arg[0]\n return \"_plot.caxis(\" + str(a) + \", \" + str(b) + \")\"\n\n elif arg.cls != \"Matrix\" and arg.num and arg.dim>0:\n\n name1 = arg.name + \"(0)\"\n name2 = arg.name + \"(1)\"\n if arg.mem not in (2,3):\n name1 = \"static_cast<double>(\" + name1 + \")\"\n name2 = \"static_cast<double>(\" + name2 + \")\"\n\n return \"_plot.caxis(\" + name1 + \", \" + name2 + \")\"\n\n node.error(\"argument array type\")\n return \"_plot.caxis(\", \", \", \")\"", "def _get_origin(self, axis):\n return axis.values[0]", "def get_x_name(self):\n return self['dim0'].name", "def _extract_x_name(line):\n if line.startswith(\"x axis\"):\n return line[line.index(\": \") + 2:].strip()\n return \"\"", "def getAxis(self, axis = None):\n if not axis in self.getAxesNames():\n logging.error(\"Cannot find \"+axis+\", it doesn't exist.\")\n return None\n return self.t._f_get_child(axis)", "def _get_axis_num(self, xarr, axis):\n py_axnum = xarr.get_axis_num(axis)\n if numpy.isfortran(xarr.values):\n return py_axnum\n\n if self._ends_with_channel_axis(xarr):\n if axis == len(xarr.dims) - 1:\n return axis\n else:\n return len(xarr.dims) - py_axnum - 2\n else:\n return len(xarr.dims) - py_axnum - 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a set of images and crops each to the mask.
def autocrop_to_mask(self, all_images, mask, thr=0):
    mask = mask > thr
    rows = np.any(mask, axis=1)
    cols = np.any(mask, axis=0)
    rmin, rmax = np.where(rows)[0][[0, -1]]
    cmin, cmax = np.where(cols)[0][[0, -1]]
    for image in all_images.keys():
        all_images[image] = all_images[image][rmin:rmax, cmin:cmax]
    return all_images
[ "def crop_images(ipath, width, height, opath, padding=0 , dataset=''): \n assert(width > 0 and height > 0 and padding >= 0)\n \n if not os.path.exists(opath):\n os.mkdir(opath);\n \n if ipath == opath:\n print 'Error : Input Path: (', ipath, ') == Output Path (', opath, ')'\n sys.exit(1)\n \n imlist = get_imlist(ipath)\n \n for image in imlist:\n im = Image.open(image) \n if im.mode == 'RGB':\n im.convert('L'); # convert to grayscale\n if dataset == 'FERET':\n # Rescale the image... \n print ' Rescaling the images for FERET Dataset'\n im.resize((im[0] * 0.7, im[1] * 0.7));\n im = np.array(im);\n x = np.floor((im.shape[1] - (width + 2 * padding)) / 2) - 1;\n y = np.floor ((im.shape[0] - (height + 2 * padding)) / 2) - 1;\n cr_image = Image.fromarray(im [y:y + height + 2 * padding, x:x + width + 2 * padding ]); \n cr_image.save(os.path.join(opath, os.path.basename(image)))", "def multi_crop(path_in, path_out, input_shape=(1292, 968), target_shape=(644, 644), bottom_right=False,\n random_crop=0):\n\n print('Starting multi_crop')\n # Create the folder that will hold all images:\n if os.path.exists(path_out):\n shutil.rmtree(path_out, ignore_errors=True)\n os.makedirs(path_out)\n\n # get the classes\n folders = os.listdir(path_in)\n\n # get center point\n x_c = np.int(input_shape[0] / 2.)\n\n # create dictionary to be used in cropping loop:\n # values define the cropping position\n new_imgs = {'tl': (0, 0, target_shape[0], target_shape[1]),\n 'tc': (x_c - np.int(target_shape[0] / 2.), 0,\n x_c + np.int(target_shape[0] / 2.), target_shape[1]),\n 'tr': (input_shape[0] - target_shape[0], 0,\n input_shape[0], target_shape[1]),\n 'bl': (0, input_shape[1] - target_shape[1],\n target_shape[0], input_shape[1]),\n 'bc': (x_c - np.int(target_shape[0] / 2.), input_shape[1] - target_shape[1],\n x_c + np.int(target_shape[0] / 2.), input_shape[1])}\n\n if bottom_right:\n # if user wants to keep bottom right crop, we add it to the dictionary\n new_imgs['br'] = (input_shape[0] - target_shape[0], input_shape[1] - target_shape[1],\n input_shape[0], input_shape[1])\n for i in range(0, random_crop):\n # if user wants extra randomly centered crops\n # starting point can range from 0 to size of the image - target size\n xi = np.random.randint(0, input_shape[0] - target_shape[0])\n yi = np.random.randint(0, input_shape[1] - target_shape[1])\n new_imgs['r{}'.format(i)] = (xi, yi,\n xi + target_shape[0], yi + target_shape[1])\n\n # uses the path_in and walks in folders to crop images\n for folder in folders:\n print('----{}'.format(folder))\n os.mkdir(path_out + os.sep + folder)\n lst = os.listdir(path_in + os.sep + folder)\n\n images = [item for item in lst if item.lower().endswith(('.png', '.jpg', '.jpeg', '.tif'))]\n\n for file in images:\n\n # open image\n ori = Image.open(path_in + os.sep + folder + os.sep + file)\n\n for k in new_imgs:\n new_name = '{}_{}{}'.format(os.path.splitext(file)[0], k, os.path.splitext(file)[1])\n # crop image\n cropped = ori.crop(new_imgs[k])\n # save cropped image with new resolution\n img = cropped.resize(target_shape, Image.ANTIALIAS)\n img.save(path_out + os.sep + folder + os.sep + new_name)\n print('multi_crop complete\\n')", "def cropImage():", "def crop_on_annotations():\n #if len(os.listdir(cropped_output)) == 0:\n annotations = load_annotations()\n image_list = create_image_list(annotations)\n crop_images(image_list, annotations)", "def crop_and_resize(imgs, shape=(32, 16, 3)):\n height, width, channels = shape\n imgs_resized = np.empty([len(imgs), height, width, channels])\n 
for i, img in enumerate(imgs):\n cropped = img[55:135, :, :]\n imgs_resized[i] = imresize(cropped, shape)\n #imgs_resized[i] = cv2.resize(img, (16, 32))\n\n return imgs_resized", "def crop_image_with_masks(image,\n masks,\n max_area=8000,\n min_area=500,\n width_height_ratio=0.9):\n cropped_images = []\n\n for mask_data in masks:\n # Extract mask and bounding box data\n bbox = mask_data['bbox']\n seg = mask_data['segmentation']\n x, y, w, h = bbox\n\n # Crop the image based on the bounding box\n cropped_image = image[y:y+h, x:x+w]\n\n # Create an 8-bit mask from the segmentation data\n mask = np.asarray(seg[y:y+h, x:x+w], dtype=np.uint8) * 255\n # Apply the mask to the cropped image\n cropped_image = cv2.bitwise_and(\n cropped_image, cropped_image, mask=mask)\n cropped_image = cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB)\n if (mask_data['area'] >= min_area and\n mask_data['area'] <= max_area and\n w/h >= width_height_ratio):\n cropped_images.append(cropped_image)\n\n return cropped_images", "def CropImageByShpMask(self):\n \n in_shp_file = os.path.join(self.base_path, 'InputFiles', 'ShapeMask', self.StudyAreaShp)\n # with rasterio.open(InputImage) as ImageObj:\n # out_image, out_transform = rasterio.mask.mask(ImageObj, gpd.GeoSeries(Polygon(CrownBuffer)), crop=True, filled=True, nodata = 0)\n\n with fiona.open(in_shp_file, 'r') as shapefile:\n ShapeMask = [feature[\"geometry\"] for feature in shapefile]\n # Crop dem\n RasterOperators.CropImage(os.path.join(self.base_path,'InputFiles',self.dem), ShapeMask, os.path.join(self.base_path,'InputFiles',self.dem))\n # Crop dsm\n RasterOperators.CropImage(os.path.join(self.base_path,'InputFiles',self.dsm), ShapeMask, os.path.join(self.base_path,'InputFiles',self.dsm))", "def _image_crop(self, crop_limits):\n n, rows, cols = self.images.shape\n [(col_min,row_min),(col_max,row_max)] = crop_limits\n #xmin, xmax, ymin, ymax = crop_limits\n self.images = self.images[:, row_min : row_max, col_min : col_max]", "def cropping(image):\n\n image = Image.open(image)\n x = image.width\n k = 1 # for cropped images naming\n for i in range(1, 4):\n for j in range(1, 4):\n cropped = image.crop(((x/3)*(j-1), (x/3)*(i-1), (x/3)*j, (x/3)*i))\n cropped.save(f\"cropped/im{k}.jpg\")\n k += 1", "def crop_images_wcs(self, ra, dec, size):\n topfile = re.sub(\".*/\", \"\", self.data_dir) # for file /a/b/c, extract c\n\n # crop_dir encodes the detector number, instrument, date\n crop_dir = f'{os.path.abspath(self.data_dir+\"/..\")}/cropped_{topfile}'\n run(f\"mkdir -p {crop_dir}\", shell=True) # make crop_dir\n \n crop_counter = 0\n for fi in self.files:\n hdr = fits.getheader(f\"{self.data_dir}/{fi}\")\n img = fits.getdata(f\"{self.data_dir}/{fi}\")\n y_size, x_size = img.shape # total image dims in pix \n w = wcs.WCS(hdr)\n \n # compute the bounds \n pix_scale = hdr[\"PIXSCAL1\"] # scale of image in arcsec per pix\n size_wcs = pix_scale*size/3600.0 # size of desired box in degrees\n pix_x1 = np.array(w.all_world2pix(ra-size_wcs/2.0, dec, 1))[0]\n pix_x2 = np.array(w.all_world2pix(ra+size_wcs/2.0, dec, 1))[0]\n pix_y1 = np.array(w.all_world2pix(ra, dec-size_wcs/2.0, 1))[1]\n pix_y2 = np.array(w.all_world2pix(ra, dec+size_wcs/2.0, 1))[1]\n x_bounds = np.array(sorted([pix_x1, pix_x2])) # sorted arrays of \n y_bounds = np.array(sorted([pix_y1, pix_y2])) # pixel boundaries\n # truncate bounds if needed\n x_bounds[x_bounds<0] = 0 \n x_bounds[x_bounds>x_size] = x_size\n y_bounds[y_bounds<0] = 0 \n y_bounds[y_bounds>y_size] = y_size\n # convert to horizontal & vertical fractions, 
pass to __get_crop()\n frac_hori = x_bounds/x_size\n frac_vert = y_bounds/y_size\n \n # if the crop does not contain the bounds, skip it\n # if the crop's aspect ratio is more skew than 4:1 or 1:4, skip\n # if the crop is < 50% the width/height of the desired box, skip\n if np.all(frac_hori==0) or np.all(frac_hori==1.0) or np.all(\n frac_vert==0.0) or np.all(frac_vert==1.0):\n continue \n if not(0.25 < ((frac_hori[1]-frac_hori[0])/\n (frac_vert[1]-frac_vert[0])) < 4.0):\n continue\n if not((x_bounds[1]-x_bounds[0] > size/2.0) and \n (y_bounds[1]-y_bounds[0] > size/2.0) ):\n continue\n \n crop_counter += 1\n cropped_hdu = self.__get_crop(f\"{self.data_dir}/{fi}\", \n frac_hori, frac_vert)\n new_f = fi.replace(\".fits\",\"_cropped.fits\")\n cropped_hdu.writeto(f\"{crop_dir}/{new_f}\", overwrite=True, \n output_verify=\"ignore\") # write them\n \n print(f\"{crop_counter}/{len(self.files)} images were cropped.\\n\", \n flush=True)", "def crop(masks, boxes, padding: int = 1):\n h, w, n = masks.shape\n x1, x2 = sanitize_coordinates(boxes[:, 0], boxes[:, 2], w, padding)\n y1, y2 = sanitize_coordinates(boxes[:, 1], boxes[:, 3], h, padding)\n\n np.arange(w, dtype=x1.dtype)\n rows = np.arange(w, dtype=x1.dtype).reshape((1, -1, 1)).repeat(repeats=h, axis=0).repeat(repeats=n, axis=2)\n cols = np.arange(h, dtype=x1.dtype).reshape((-1, 1, 1)).repeat(repeats=w, axis=1).repeat(repeats=n, axis=2)\n\n masks_left = rows >= x1.reshape((1, 1, -1))\n masks_right = rows < x2.reshape((1, 1, -1))\n masks_up = cols >= y1.reshape((1, 1, -1))\n masks_down = cols < y2.reshape((1, 1, -1))\n\n crop_mask = masks_left * masks_right * masks_up * masks_down\n\n return masks * crop_mask.astype(np.float32)", "def cut_images(self, cutting_function):\n # Can make this work conditionally based on which images are enabled later\n for d in range(len(self.images)):\n for s in range(len(self.images[d])):\n for f in range(len(self.images[d][s])):\n for i in range(len(self.images[d][s][f])):\n self.images[d][s][f][i].cut_image(cutting_function, (self.x(), self.y(), self.z(), d), self.paksize())", "def crop_and_align_images(self, images):\n if self._use_tensorrt:\n result = [\n self.detector(x, limit=self.limit, thresholds=self.thresholds,\n nms_thresholds=self.nms_thresholds) for x in images]\n else:\n result = [\n self.detector(x, limit=self.limit, min_face_size=self.min_face_size,\n thresholds=self.thresholds, nms_thresholds=self.nms_thresholds) for x in images]\n return result", "def crop_shifted_images(images, shift):\n\n x_shift, y_shift = shift\n\n # Calculate the minimum and maximum horizontal shift\n if x_shift < 0:\n images = images[:, :x_shift]\n elif x_shift > 0:\n images = images[:, x_shift:]\n if y_shift < 0:\n images = images[:, :, :y_shift]\n elif y_shift > 0:\n images = images[:, :, y_shift:]\n return images", "def crop_patches(images: torch.Tensor, size: int = 64, stride: int = 32) -> torch.Tensor:\n patches = images.data.unfold(1, 3, 3).unfold(2, size, stride).unfold(3, size, stride)\n patches = patches.reshape(-1, 3, size, size)\n return patches", "def preprocess_img_mask(img, mask, num_classes, img_size, augment=False, eval_resize=False):\n if augment:\n img_size_w = int(np.random.randint(img_size[0], img_size[0] * 1.5, 1))\n img_size_h = int(np.random.randint(img_size[1], img_size[1] * 1.5, 1))\n img = cv2.resize(img, (img_size_w, img_size_h))\n mask = cv2.resize(mask, (img_size_w, img_size_h))\n dw = int(np.random.randint(0, img_size_w - img_size[0] + 1, 1))\n dh = int(np.random.randint(0, img_size_h - 
img_size[1] + 1, 1))\n img = img[dh:dh+img_size[1], dw:dw+img_size[0], :]\n mask = mask[dh:dh+img_size[1], dw:dw+img_size[0]]\n if np.random.random() > 0.5:\n flip_code = int(np.random.randint(-1, 2, 1))\n img = cv2.flip(img, flip_code)\n mask = cv2.flip(mask, flip_code)\n else:\n img = cv2.resize(img, img_size)\n if not eval_resize:\n mask = cv2.resize(mask, img_size)\n img = (img.astype(np.float32) - 127.5) / 127.5\n img = img.transpose(2, 0, 1)\n if num_classes == 2:\n mask = mask.astype(np.float32) / mask.max()\n mask = (mask > 0.5).astype(np.int)\n else:\n mask = mask.astype(np.int)\n mask = (np.arange(num_classes) == mask[..., None]).astype(int)\n mask = mask.transpose(2, 0, 1).astype(np.float32)\n return img, mask", "def process_images(input_path):\n logging.info('Processing faces')\n cropped_images = []\n # TODO iterate over all the .jpg files\n for path in glob.glob(os.path.join(input_path, '*.jpg')):\n image = cv2.imread(path)\n face_locations = locate_faces(image)\n\n for face in face_locations:\n cropped_images.append(crop_image(image, face))\n\n return cropped_images", "def paste_instance_masks(masks, detected_boxes, image_height, image_width):\n\n def expand_boxes(boxes, scale):\n \"\"\"Expands an array of boxes by a given scale.\"\"\"\n # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/boxes.py#L227\n # The `boxes` in the reference implementation is in [x1, y1, x2, y2] form,\n # whereas `boxes` here is in [x1, y1, w, h] form\n w_half = boxes[:, 2] * 0.5\n h_half = boxes[:, 3] * 0.5\n x_c = boxes[:, 0] + w_half\n y_c = boxes[:, 1] + h_half\n\n w_half *= scale\n h_half *= scale\n\n boxes_exp = np.zeros(boxes.shape)\n boxes_exp[:, 0] = x_c - w_half\n boxes_exp[:, 2] = x_c + w_half\n boxes_exp[:, 1] = y_c - h_half\n boxes_exp[:, 3] = y_c + h_half\n\n return boxes_exp\n\n # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/core/test.py#L812\n # To work around an issue with cv2.resize (it seems to automatically pad\n # with repeated border values), we manually zero-pad the masks by 1 pixel\n # prior to resizing back to the original image resolution. This prevents\n # \"top hat\" artifacts. 
We therefore need to expand the reference boxes by an\n # appropriate factor.\n\n _, mask_height, mask_width = masks.shape\n scale = max((mask_width + 2.0) / mask_width, (mask_height + 2.0) / mask_height)\n\n ref_boxes = expand_boxes(detected_boxes, scale)\n ref_boxes = ref_boxes.astype(np.int32)\n padded_mask = np.zeros((mask_height + 2, mask_width + 2), dtype=np.float32)\n segms = []\n\n for mask_ind, mask in enumerate(masks):\n im_mask = np.zeros((image_height, image_width), dtype=np.uint8)\n # Process mask inside bounding boxes.\n padded_mask[1:-1, 1:-1] = mask[:, :]\n\n ref_box = ref_boxes[mask_ind, :]\n w = ref_box[2] - ref_box[0] + 1\n h = ref_box[3] - ref_box[1] + 1\n w = np.maximum(w, 1)\n h = np.maximum(h, 1)\n\n mask = cv2.resize(padded_mask, (w, h)) # pylint: disable=E1101\n mask = np.array(mask > 0.5, dtype=np.uint8)\n\n x_0 = min(max(ref_box[0], 0), image_width)\n x_1 = min(max(ref_box[2] + 1, 0), image_width)\n y_0 = min(max(ref_box[1], 0), image_height)\n y_1 = min(max(ref_box[3] + 1, 0), image_height)\n\n im_mask[y_0:y_1, x_0:x_1] = mask[\n (y_0 - ref_box[1]) : (y_1 - ref_box[1]), (x_0 - ref_box[0]) : (x_1 - ref_box[0])\n ]\n segms.append(im_mask)\n\n segms = np.array(segms)\n assert masks.shape[0] == segms.shape[0]\n\n return segms", "def get_crops(image, xs, ys, win):\n batch_size = len(xs)\n data = np.ones((batch_size,win*win*3),dtype=np.single)\n count = 0\n for x, y in zip(xs,ys):\n xmin = x-win/2\n xmax = x+win/2\n ymin = y-win/2\n ymax = y+win/2\n temp = image[xmin:xmax,ymin:ymax,:]\n temp2 = misc.imresize(temp,(224,224))\n data[count,:] = vectorize(temp2)\n count += 1\n return np.transpose(data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> [x for x in gen_all_n_length_bitsrings(3)]
['000', '001', '010', '011', '100', '101', '110', '111']
def gen_all_n_length_bitsrings(n):
    for i in range(1 << n):
        yield '{:0{}b}'.format(i, n)
[ "def generate_n_bit_strings(start, n, graph_size, bit_string_length=10):\r\n bit_strings = [\r\n f'{i:0{bit_string_length}b}' for i in range(start, start+n) if i <= 2**graph_size\r\n ]\r\n data = {\r\n 'bit_strings': bit_strings,\r\n 'count': len(bit_strings),\r\n 'graph_size': graph_size,\r\n 'bit_string_length': bit_string_length\r\n }\r\n return data", "def generate_wsr(num_bits: int) -> List[int]:\n return list(np.random.randint(2, size=num_bits))", "def gen_permutations(n):\n max_int = '0b' + '1' * n\n for i in range(0, int(max_int, 2)+1):\n yield str(format(i, 'b').zfill(n))", "def intToBits(n, length):\n bits = []\n for i in range(length):\n bits.append(n%2)\n n = n//2\n return bits", "def bitstrings_generator(iterable=None, n=None):\n if n is None:\n assert not iterable is None, ValueError(\"Must provide iterable or n\")\n n = len(iterable)\n else:\n assert isinstance(n, int), TypeError(\"n must be an integer\")\n l = bitstr_len(n) # make all bitstrings identical length\n bit_range = range(2**l)\n for b in bit_range:\n bs = Bits(uint=b, length=l)\n yield bs", "def binary_strings(max_length=10):\n yield ''\n for size in range(1, 1+max_length):\n for i in range(2**size):\n yield '{:b}'.format(i).rjust(size, '0')", "def binary_combinations(n, r):\n return [sum(1<<x for x in c) for c in it.combinations(range(n), r)]", "def bitmasks(n,m):\n if m < n:\n if m > 0:\n for x in bitmasks(n-1,m-1):\n yield bitarray([1]) + x\n for x in bitmasks(n-1,m):\n yield bitarray([0]) + x\n else:\n yield n * bitarray('0')\n else:\n yield n * bitarray('1')", "def generate_array(n):\n\treturn [True] * (n+1)", "def yieldAllCombos(items):\n # Your code here\n N = len(items)\n bags=[]\n # enumerate the 2**N possible combinations\n for i in range(3**N):\n bag1 = []\n bag2 = []\n for j in range(N):\n # test bit jth of integer i\n if (i >> j) % 3 == 0:\n bag1.append(items[j])\n elif (i >> j)%3 == 1:\n bag2.append(items[j])\n bags.append((bag1, bag2))\n #yield bags\n return bags", "def generate_binary_grid(gridsize):\n results = []\n\n grid = [0 for x in range(gridsize**2)]\n\n for x in range((2**(gridsize)**2)-1):\n grid[0] += 1\n\n for index, entry in enumerate(grid):\n\n if entry > 1:\n grid[index] = 0\n grid[index+1] += 1\n else:\n break\n new_grid = [grid[i:i+gridsize] for i in range(0, len(grid), gridsize)]\n results.append(new_grid)\n return(results)", "def convert_intList_to_bit(l):\n result = []\n for a in l:\n result += [int(n) for n in bin(a)[2:].zfill(8)]\n return result", "def generate_palette_RGB(numBits):\n print \"there are %d possible combination with %d bits\" %(pow(pow(2, numBits),3),numBits)\n return [combination for combination in itertools.product(xrange(pow(2, numBits)), repeat=3)]", "def print_binary_sequences(n):\n if n <= 0:\n return print(EMPTY_STRING)\n zero_sequences_list = print_binary_sequences_with_prefix(0, n)\n one_sequences_list = print_binary_sequences_with_prefix(1, n)\n final_list = zero_sequences_list + one_sequences_list\n print_list_sequces(final_list)", "def gen_bin(length:int, prefix=\"\"):\n if length == 0:\n print(prefix)\n return\n\n gen_bin(length - 1, prefix + \"0\")\n gen_bin(length - 1, prefix + \"1\")", "def create_ones_list(length):\n return 0", "def generate_permutations(n):\n return list(itertools.permutations(range(1, n + 1)))", "def allPandigitals(N, base=1):\n\tret = []\n\tfor i in range(base,base+N):\n\t\tret += pandigitals(N, base)\n\treturn ret", "def print_binary_sequences(n):\n # Print all sequences of size n with an empty prefix\n 
print_binary_sequences_with_prefix('', n)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> data = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
>>> take_path(data, "001") == 3 + 7 + 2 + 5
True
>>> take_path(data, "101") == 3 + 4 + 4 + 9
True
>>> take_path(data, "011") == 3 + 7 + 4 + 9
True
def take_path(data, directions):
    assert(len(directions) == len(data) - 1)
    cur_pos = 0
    vals = []
    for i in xrange(0, len(data)):
        if i == 0:
            d = 0
        else:
            d = int(directions[i - 1])
        cur_pos += d
        vals.append(data[i][cur_pos])
    # print vals
    return sum(vals)
[ "def _traverse_path(cursor, path, start=0):\n\n for pathIndex in range(start, len(path)):\n if isinstance(path[pathIndex], str):\n cursor_goto(cursor, path[pathIndex])\n else:\n if isinstance(path[pathIndex], int):\n arrayIndex = [path[pathIndex]]\n elif isinstance(path[pathIndex], (list, tuple)):\n arrayIndex = path[pathIndex]\n else:\n raise ValueError(\"path specification (%s) should be a string or (list of) integers\" %\n (path[pathIndex],))\n\n # get the shape of the array from the cursor. the size of all\n # dynamic dimensions are computed by the coda library.\n arrayShape = cursor_get_array_dim(cursor)\n\n # handle a rank-0 array by (virtually) converting it to\n # a 1-dimensional array of size 1.\n rankZeroArray = False\n if len(arrayShape) == 0:\n rankZeroArray = True\n arrayShape.append(1)\n\n # check if the number of indices specified match the\n # dimensionality of the array.\n if len(arrayIndex) != len(arrayShape):\n raise ValueError(\"number of specified indices does not match the dimensionality of the array\")\n\n # check for variable indices and perform range checks on all\n # non-variable indices.\n intermediateArray = False\n for i in range(0, len(arrayIndex)):\n if arrayIndex[i] == -1:\n intermediateArray = True\n elif (arrayIndex[i] < 0) or (arrayIndex[i] >= arrayShape[i]):\n raise ValueError(\"array index (%i) exceeds array range [0:%i)\" % (arrayIndex[i], arrayShape[i]))\n\n if intermediateArray:\n return (True, pathIndex)\n else:\n # if all indices are non-variable, just move the cursor\n # to the indicated element.\n if rankZeroArray:\n cursor_goto_array_element(cursor, [])\n else:\n cursor_goto_array_element(cursor, arrayIndex)\n\n # we've arrived at the end of the path.\n return (False, len(path) - 1)", "def find_path(grid):\n n = len(grid)\n m = len(grid[0])\n\n def helper(row,col,path):\n if row == n:\n return path\n for i in range(col-1,col+2):\n if 0 <= i < m and grid[row][i]:\n result = helper(row+1,i,path + [(row,i)])\n if result is not None:\n return result\n return None\n\n for c in range(0,m):\n if grid[0][c]:\n result = helper(1,c,[(0,c)])\n if result is not None:\n return result\n return None", "def path_cost(path):\n if len(path) < 3:\n return 0\n else:\n action, total_cost = path[-2]\n return total_cost", "def find_paths(root,needed_sum, path = []):\n\tif root is None:\n\t\treturn \n\t# Checks if current path till current node leads to needed sum\n\t# subtracting root.data is required to make sure of current data\n\t# else it will be printed only when chekcing for either children\n\t# and will be printed twice\t\n\tif (needed_sum - root.data) == 0:\n\t\tprint(path + [root.data])\n\n\t# Check across left and right subtree\n\tfind_paths(root.left,(needed_sum-root.data),path+[root.data])\n\tfind_paths(root.right,(needed_sum-root.data),path+[root.data])", "def findPathsDown(graph={}, startnode='', operator='==', pathlength=None): \r\n pathsDownGraph ={}\r\n # Define operators that can be used for pathlength comparisons\r\n operatorDict = {'<': '<', '<=': '<=',\r\n '>' : '>', '>=': '>=',\r\n '==': '==', '!=': '!=',\r\n '<>' : '<>'}\r\n # Check that only valid operators are input by user\r\n if not operatorDict.has_key(operator):\r\n return \"Operator must be one of -- '<', '<=', '>', '>=', '==', '!=', '<>'\"\r\n if pathlength <> None:\r\n # Check that pathlength is an integer\r\n try:\r\n int(pathlength)\r\n except ValueError:\r\n return \"Pathlength must be an integer\"\r\n # Create a list of unique endnodes (for iteration and will become keys in output 
graph)\r\n endnodesList = graph.values()\r\n uniqueendnodesList = [ ]\r\n for nodeList in endnodesList:\r\n for node in nodeList:\r\n if node not in uniqueendnodesList:\r\n uniqueendnodesList.append(node)\r\n # For each unique endnode find all paths between the startnode and endnodes\r\n for endnode in uniqueendnodesList:\r\n # Ignore case where endnode == startnode -- path to self\r\n if endnode == startnode:\r\n continue\r\n pathsList = findAllPathsAsLists(graph, startnode, endnode)\r\n # Check that resulting list of paths is not empty\r\n if len (pathsList) <> 0:\r\n # If pathlength == None -- no Filter is applied and path entered\r\n # in output graph for key(startnode)\r\n if pathlength == None:\r\n pathsDownGraph[endnode] = pathsList\r\n else:\r\n # apply the user input operator and pathlength info to filter paths\r\n validpathsList = [path for path in\r\n pathsList if\r\n eval('len(path)' + operatorDict[operator] + 'pathlength')]\r\n # Check that remaining valid paths results in a non-empty list.\r\n if len(validpathsList) > 0:\r\n # Apply valid non-empty paths to output graph for key (startnode)\r\n pathsDownGraph[endnode] = validpathsList\r\n else:\r\n continue\r\n else:\r\n continue\r\n return pathsDownGraph", "def _append_path_including_subpaths(paths, path) -> typing.List[typing.Tuple[EntityLink]]:\n for i in range(len(path)):\n if path[:i + 1] not in paths:\n paths.append(path[:i + 1])\n return paths", "def find_paths_with_forward_slash(self, input, count=0):\n # output variable\n result = []\n\n # loop variable\n counter = 0\n\n while len(input) > 0:\n # return empty string if input does not contain a \"/\" character\n path_start_index = string.find(input, \"/\")\n\n if path_start_index < 0:\n return result\n\n # get the index of first space after the \"/\" character\n space_index = string.find(input, \" \", path_start_index)\n\n # if no space is found, path must end till end of string\n if space_index < 0:\n result.append(input[path_start_index:])\n input = \"\"\n else:\n result.append(input[path_start_index:space_index])\n input = input[space_index + 1:]\n\n # increment counter\n counter = counter + 1\n\n # return result if requested number of paths have been found\n if (count > 0 and count == counter):\n return result\n\n # end while\n\n # return output\n return result", "def c_path(path: List[Union[str, int]]) -> str:\n res = \"\".join(\n ((\".\" + elem) if isinstance(elem, str) else (\"[\" + str(elem) + \"]\")) for elem in path\n )\n return res[1:] # drop the first dot", "def get_values_by_path(data, path: list) -> list:\n if len(path) == 0:\n return [data]\n elif isinstance(data, (list, tuple)):\n # Concatenate the results returned by each item in the list.\n return list(itertools.chain.from_iterable(get_values_by_path(item, path) for item in data))\n elif isinstance(data, dict):\n # Descend further down the tree.\n if path[0] in data:\n return get_values_by_path(data[path[0]], path[1:])\n else:\n return []\n else:\n # The path has not been resolved and we've reached a\n # data type we can't key into, so return no results.\n return []", "def getPathValues(d, path):\n pos = path.find('.')\n currentpath = path[0:pos] if pos > 0 else path\n nextpath = path[pos+1:] if pos > 0 else None\n lbracket = path.find('[')\n itemnum= None\n if lbracket >= 0 and (pos < 0 or lbracket < pos):\n rbracket = path.find(']')\n itemnum = int(path[lbracket + 1:rbracket])\n currentpath = path[0:lbracket]\n # keep the bracket for the next recurive depth\n nextpath = path[lbracket:] if lbracket > 
0 else nextpath\n if type(d) is list:\n result = []\n if itemnum is not None:\n result.extend(getPathValues(d[itemnum], nextpath))\n else:\n for item in d:\n #still on the current path node\n result.extend(getPathValues(item, path))\n return result\n if pos < 0:\n if currentpath == '*':\n result = []\n for k, v in d.iteritems():\n result.append(v)\n return result\n return [d[currentpath]] if currentpath in d and d[currentpath] else []\n else:\n if currentpath == '*':\n result = []\n for k,v in d.iteritems():\n result.extend(getPathValues(v, nextpath))\n return result\n return getPathValues(d[currentpath], nextpath) if currentpath in d else []", "def paths_len_2(graph):\n paths = []\n nodes_completed = []\n for node in graph:\n for n in graph[node]:\n #if not n in nodes_completed:\n paths.append([node, n])\n nodes_completed.append(node)\n return paths", "def getPath(path_initial, path_last):\n def makeItem(item):\n \"\"\"\n Constructs a list containing either just the single item\n (if it's non-None) or an empty list.\n :param object item:\n :return list:\n \"\"\"\n if item is None:\n return []\n else:\n return [item]\n\n path_elements = list(path_initial)\n addendum = makeItem(path_last)\n path_elements.extend(addendum)\n #\n path = path_elements[0]\n if len(path_elements) > 1:\n for ele in path_elements[1:]:\n path = os.path.join(path, ele)\n return path", "def indexes_from_path(path: str) -> Tuple[Sequence[int], bool]:\n\n steps = path.split('/')\n if steps[0] == 'm':\n absolute = True\n elif steps[0] == '.':\n absolute = False\n else:\n raise ValueError(f'Invalid derivation path: {path}')\n\n indexes: List[int] = list()\n for step in steps[1:]:\n hardened = False\n if step[-1] in (\"'\", \"H\", \"h\"):\n hardened = True\n step = step[:-1]\n index = int(step)\n index += 0x80000000 if hardened else 0\n indexes.append(index)\n\n return indexes, absolute", "def find_best_path(lattice):\n # FIXME *** IMPLEMENT ME ***\n\n best_path = []\n start = lattice[len(lattice) - 1]['<s>'][1]\n best_path.append(start)\n count = 0\n for num in range(len(lattice) - 2, 1, -1):\n best_path.insert(0,lattice[num][best_path[count]][1])\n count += 1\n \n return best_path", "def paths(G, curr, visited):\n if curr == 'end':\n return 1\n if curr.islower() and curr in visited:\n return 0\n\n visited = visited | {curr}\n\n paths_sum = 0\n for neighbor in G.neighbors(curr):\n paths_sum += paths(G, neighbor, visited)\n\n return paths_sum", "def find_path(self, T):\n if T.ntype == 'exNode':\n if T.size <= 1: return self.e\n else:\n self.e = self.e + c_factor(T.size)\n return self.e\n else:\n # Threshold for the hyperplane for splitting data at a given node.\n q = T.q \n # Direction curve for the hyperplane for splitting data at a given node.\n d = T.d \n self.e += 1\n \n if (self.alpha != 1):\n if self.innerproduct(self.x, d, self.deriv_x, self.deriv_D[T.dd]) - q < 0:\n self.path_list.append('L')\n return self.find_path(T.left)\n else:\n self.path_list.append('R')\n return self.find_path(T.right)\n else:\n if self.innerproduct(self.x, d, self.step) - q < 0:\n self.path_list.append('L')\n return self.find_path(T.left)\n else:\n self.path_list.append('R')\n return self.find_path(T.right)", "def getPathDistance(self,path_list):\n temp_list = copy.copy(path_list)\n\n for i in range(len(temp_list)):\n if temp_list[i] == 0:\n temp_list[i] = 1\n temp_list.append(1)\n temp_list.insert(0,1)\n\n total_distance = 0\n\n for i in range(1,len(temp_list)):\n distance = self.getDistanceBt2Nodes(temp_list[i],temp_list[i-1])\n 
total_distance += distance\n\n # print total_distance\n return total_distance", "def has_path(data, path) -> bool:\n walk = data\n for key in path:\n if isinstance(walk, dict):\n if key in walk:\n walk = walk[key]\n else:\n return False\n elif isinstance(walk, list) or isinstance(walk, tuple):\n if isinstance(key, int) and key < len(walk):\n walk = walk[key]\n else:\n return False\n else:\n return False\n return True", "def find_path(self, id1, id2):\n parents, _ = self.evaluate_costs(id1)\n node_id = id2\n path = []\n while parents[node_id] is not None:\n node_id = parents[node_id]\n path = [node_id] + path\n return path + [id2]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses a Windows Restore Point (rp.log) log file-like object.
def ParseFileObject(self, parser_mediator, file_object): file_size = file_object.get_size() file_header_map = self._GetDataTypeMap('rp_log_file_header') try: file_header, _ = self._ReadStructureFromFileObject( file_object, 0, file_header_map) except (ValueError, errors.ParseError) as exception: raise errors.WrongParser( 'Unable to parse file header with error: {0!s}'.format(exception)) file_footer_map = self._GetDataTypeMap('rp_log_file_footer') file_footer_offset = file_size - 8 try: file_footer, _ = self._ReadStructureFromFileObject( file_object, file_footer_offset, file_footer_map) except (ValueError, errors.ParseError) as exception: parser_mediator.ProduceExtractionWarning( 'unable to parse file footer with error: {0!s}'.format(exception)) return event_data = RestorePointEventData() # The description in the file header includes the end-of-string character # that we need to strip off. event_data.description = file_header.description.rstrip('\0') event_data.restore_point_event_type = file_header.event_type event_data.restore_point_type = file_header.restore_point_type event_data.sequence_number = file_header.sequence_number if file_footer.creation_time: event_data.creation_time = dfdatetime_filetime.Filetime( timestamp=file_footer.creation_time) parser_mediator.ProduceEventData(event_data)
[ "def ParseFileObject(self, parser_mediator, file_object):\r\n event_data = FileHistoryRestoreLogEventData()\r\n encoding = self._ENCODING or parser_mediator.codepage\r\n\r\n text_file_object = text_parser.text_file.TextFile(file_object, encoding=encoding)\r\n line = ''\r\n try:\r\n line = text_file_object.readline(400)\r\n\r\n except UnicodeDecodeError:\r\n errors.UnableToParseFile('Not a text file or encoding not supported')\r\n\r\n if not line:\r\n raise errors.UnableToParseFile('Not a text file.')\r\n\r\n if not line.startswith(\"<\"):\r\n raise errors.UnableToParseFile('Not an Windows FileHistory Restore.log file.')\r\n\r\n split_line = line.split(' ')\r\n len_split_line = len(split_line)\r\n if len_split_line is not 8:\r\n raise errors.UnableToParseFile('Not an Windows FileHistory Restore.log file.')\r\n\r\n event_data.file_record_id = int(split_line[2].replace(\"\\x00\", \"\"), 16)\r\n event_data.restored_file = split_line[3].replace(\"\\x00\", \"\")\r\n event_data.usn = int(split_line[4].replace(\"\\x00\", \"\"), 16)\r\n temp_creation_date = int(split_line[6].replace(\"\\x00\", \"\"), 16)\r\n event_data.creation_date = dfdatetime_filetime.\\\r\n Filetime(timestamp=temp_creation_date).CopyToDateTimeString()\r\n temp_modification_date = int(split_line[7].replace(\"\\x00\", \"\")[:-2], 16)\r\n event_data.modification_date = dfdatetime_filetime.\\\r\n Filetime(timestamp=temp_modification_date).CopyToDateTimeString()\r\n\r\n date_time = dfdatetime_posix_time.PosixTime(timestamp=0)\r\n event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)\r\n parser_mediator.ProduceEventWithEventData(event, event_data)", "def parse_log(self, log_entry: str) -> Optional[dict]:\n match = self.log_grok.match(log_entry)\n\n if match is None:\n return None\n\n if \"timestamp\" in match:\n match[\"timestamp\"] = datetime.strptime(\n match[\"timestamp\"], self.strptime_pattern\n ).isoformat()\n\n # Rename for elasticsearch\n match[\"@timestamp\"] = match.pop(\"timestamp\")\n\n match[\"type\"] = self.type\n\n return match", "def parse_log(cls, log_file):\n if isinstance(log_file, basestring):\n infile = open(log_file, 'r')\n else:\n infile = log_file\n\n try:\n listener, timestamp = cls._read_header(infile)\n return Log(listener, timestamp, infile)\n finally:\n infile.close()", "def get_recon_log(self):\n input = str(self.mainwindow.ui.lineEditInput.text())\n\n # Get the folder name and path name\n # path, folder_name = os.path.split(input)\n\n try:\n # # I think some times the log file is .txt and sometimes .log, a check for both types and other\n # # old formats of log files is checked as follows:\n # if os.path.exists(os.path.join(path,folder_name,folder_name+\".txt\")):\n # recon_log_path = os.path.join(path,folder_name,folder_name+\".txt\")\n # elif os.path.exists(os.path.join(path,folder_name,folder_name+\"_rec.txt\")):\n # recon_log_path = os.path.join(path,folder_name,folder_name+\"_rec.txt\")\n # elif os.path.exists(os.path.join(path,folder_name,folder_name+\".log\")):\n # recon_log_path = os.path.join(path,folder_name,folder_name+\".log\")\n # elif os.path.exists(os.path.join(path,folder_name,folder_name+\"_rec.log\")):\n # recon_log_path = os.path.join(path,folder_name,folder_name+\"_rec.log\")\n # else:\n # raise Exception('No log file')\n\n log_paths = [f for f in os.listdir(input) if f.endswith(\"_rec.log\")]\n\n if len(log_paths) == 1:\n recon_log_path = os.path.join(input, log_paths[0])\n\n # To make sure the path is in the correct format (prob not 
necessary..)\n self.mainwindow.recon_log_path = os.path.abspath(recon_log_path)\n\n # Open the log file as read only\n recon_log_file = open(self.mainwindow.recon_log_path, 'r')\n\n # get pixel size from get_pixel method\n self.mainwindow.pixel_size = self.get_pixel(self.mainwindow.modality, recon_log_file)\n\n # Display the number on the lcd display\n self.mainwindow.ui.lcdNumberPixel.display(self.mainwindow.pixel_size)\n\n # Set recon log text\n self.mainwindow.ui.lineEditCTRecon.setText(str(self.mainwindow.recon_log_path))\n\n except IOError as e:\n # Python standard exception identifies recon file not found\n print(\"exception re\")\n self.mainwindow.ui.lineEditCTRecon.setText(\"Not found\")\n self.mainwindow.pixel_size = \"\"\n self.mainwindow.ui.lcdNumberPixel.display(self.mainwindow.pixel_size)\n except Exception as inst:\n # Custom exception identifies recon file not found\n self.mainwindow.pixel_size = \"\"\n self.mainwindow.ui.lcdNumberPixel.display(self.mainwindow.pixel_size)\n self.mainwindow.ui.lineEditCTRecon.setText(\"Not found\")\n except:\n self.mainwindow.pixel_size = \"\"\n self.mainwindow.ui.lcdNumberPixel.display(self.mainwindow.pixel_size)\n QtWidgets.QMessageBox.warning(self.mainwindow, 'Message', 'Warning: Unexpected error getting recon log file',sys.exc_info()[0])\n self.mainwindow.ui.lineEditCTRecon.setText(\"Not found\")", "def process_log(self):\n self.logfile.seek(self.pos)\n line = self.logfile.readline()\n while line:\n self.parse_next_line(line)\n line = self.logfile.readline()\n self.pos = self.logfile.tell()", "def read():\n return log_object.read_from_log()", "def parse_log_line(line: str) -> LogEntry:\n match = LOGPAT.match(line)\n if not match:\n # we could catch that error and skip the line\n raise ValueError(f'incorrect log format: {line}')\n\n entry = match.groups()\n parsed_time = parse(entry[3][:11] + ' ' + entry[3][12:])\n size = int(entry[8]) if entry[8] != '-' else 0\n return LogEntry(\n entry[0], entry[1], entry[2], parsed_time, entry[4], entry[5],\n entry[6], int(entry[7]), size\n )", "def load_log(log_name):\n\n with open(log_name, 'rb+') as book_file:\n try:\n data = pickle.load(book_file) # loading data to look what it has inside\n except EOFError:\n add_live_log_entry(\"Plik logu jest pusty (\"+log_name+\")\")\n data = []\n except Exception:\n add_live_log_entry(\"Uwaga! Coś poszło nie tak! 
(\"+log_name+\")\")\n data = []\n return data", "def parse_lsr(prod, text):\n lines = text.split(\"\\n\")\n if len(lines) < 2:\n prod.warnings.append(\n (\"LSR text is too short |%s|\\n%s\")\n % (text.replace(\"\\n\", \"<NL>\"), text)\n )\n return None\n lsr = LSR()\n lsr.product = prod\n lsr.text = text\n tokens = lines[0].split()\n h12 = tokens[0][:-2]\n mm = tokens[0][-2:]\n ampm = tokens[1]\n dstr = f\"{h12}:{mm} {ampm} {lines[1][:10]}\"\n lsr.valid = datetime.datetime.strptime(dstr, \"%I:%M %p %m/%d/%Y\")\n lsr.assign_timezone(prod.tz, prod.z)\n # Check that we are within bounds\n if lsr.utcvalid > (prod.valid + FUTURE_THRESHOLD) or lsr.utcvalid > (\n utc() + FUTURE_THRESHOLD\n ):\n prod.warnings.append(\n \"LSR is from the future!\\n\"\n f\"prod.valid: {prod.valid} lsr.valid: {lsr.valid}\\n\"\n f\"{text}\\n\"\n )\n return None\n\n lsr.wfo = prod.source[1:]\n\n lsr.typetext = lines[0][12:29].strip()\n if lsr.typetext.upper() not in reference.lsr_events:\n prod.warnings.append(f\"Unknown lsr.typetext |{lsr.typetext}|\\n{text}\")\n return None\n\n lsr.city = lines[0][29:53].strip()\n\n tokens = lines[0][53:].strip().split()\n lat = float(tokens[0][:-1])\n lon = 0 - float(tokens[1][:-1])\n if lon <= -180 or lon >= 180 or lat >= 90 or lat <= -90:\n prod.warnings.append(f\"Invalid Geometry Lat: {lat} Lon: {lon}\\n{text}\")\n return None\n lsr.geometry = ShapelyPoint((lon, lat))\n\n lsr.consume_magnitude(lines[1][12:29].strip())\n if lsr.magnitude_f is not None and math.isnan(lsr.magnitude_f):\n prod.warnings.append(f\"LSR has NAN magnitude\\n{text}\")\n return None\n lsr.county = lines[1][29:48].strip()\n if lsr.county == \"\":\n prod.warnings.append(f\"LSR has empty county\\n{text}\")\n lsr.state = lines[1][48:50].strip()\n if lsr.state == \"\":\n prod.warnings.append(f\"LSR has empty state\\n{text}\")\n lsr.source = lines[1][53:].strip()\n if lsr.source == \"\":\n prod.warnings.append(f\"LSR has empty source\\n{text}\")\n if len(lines) > 2:\n meat = \" \".join(lines[2:]).strip()\n if meat.strip() != \"\":\n lsr.remark = \" \".join(meat.split())\n if lsr.typetext.upper() == \"ICE STORM\" and lsr.magnitude_f is None:\n val = _icestorm_remark(lsr.remark)\n if val is not None:\n lsr.magnitude_f = val\n lsr.magnitude_qualifier = \"U\"\n lsr.magnitude_units = \"INCH\"\n return lsr", "def parseApacheLogLine(logline):\n match = logline.split(\"::\")\n #if match is None:\n # return (logline, 0)\n\n return (Row(\n idPartido=int(match[0]),\n temporada=match[1],\n jornada=int(match[2]),\n equipoLocal=match[3],\n equipoVisitante=match[4],\n golesLocal=int(match[5]),\n golesVisitante=int(match[6]),\n fecha=match[7],\n timestamp=match[8]\n ))", "def _ReadChangeLogEntry(self, file_object):\n file_offset = file_object.tell()\n data_type_map = self._GetDataTypeMap('rp_change_log_entry')\n\n change_log_entry_record, data_size = self._ReadStructureFromFileObject(\n file_object, file_offset, data_type_map, 'change log entry record')\n\n if self._debug:\n self._DebugPrintChangeLogEntryRecord(change_log_entry_record)\n\n if change_log_entry_record.record_type != 1:\n raise errors.ParseError(\n f'Unsupported record type: {change_log_entry_record.record_type:d}')\n\n signature = change_log_entry_record.signature\n if signature != self._RECORD_SIGNATURE:\n raise errors.ParseError('Unsupported change.log file signature')\n\n # TODO: refactor to use size hints\n record_size = (\n change_log_entry_record.record_size - data_size)\n record_data = file_object.read(record_size)\n file_offset += data_size\n\n if 
self._debug:\n self._DebugPrintData('Record data', record_data)\n\n context = dtfabric_data_maps.DataTypeMapContext(values={\n 'rp_change_log_entry': change_log_entry_record})\n\n data_type_map = self._GetDataTypeMap('rp_change_log_entry2')\n\n try:\n change_log_entry_record2 = self._ReadStructureFromByteStream(\n record_data, file_offset, data_type_map, 'change log entry record',\n context=context)\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError(\n f'Unable to parse change log entry record with error: {exception!s}')\n\n if self._debug:\n self._DebugPrintValue(\n 'Process name', change_log_entry_record2.process_name[:-1])\n\n self._DebugPrintText('\\n')\n\n change_log_entry = ChangeLogEntry()\n change_log_entry.entry_type = change_log_entry_record.entry_type\n change_log_entry.entry_flags = change_log_entry_record.entry_flags\n change_log_entry.file_attribute_flags = (\n change_log_entry_record.file_attribute_flags)\n change_log_entry.sequence_number = change_log_entry_record.sequence_number\n change_log_entry.process_name = change_log_entry_record2.process_name[:-1]\n\n sub_record_data_offset = context.byte_size\n sub_record_data_size = record_size - 4\n if self._debug:\n self._DebugPrintValue(\n 'Sub record data offset', f'{sub_record_data_offset:d}')\n\n value_size = sub_record_data_size - sub_record_data_offset\n self._DebugPrintValue('Sub record data size', f'{value_size:d}')\n\n if sub_record_data_offset < sub_record_data_size:\n self._DebugPrintText('\\n')\n\n while sub_record_data_offset < sub_record_data_size:\n read_size = self._ReadRecord(record_data, sub_record_data_offset)\n if read_size == 0:\n break\n sub_record_data_offset += read_size\n\n data_type_map = self._GetDataTypeMap('uint32le')\n\n try:\n copy_of_record_size = self._ReadStructureFromByteStream(\n record_data[-4:], sub_record_data_offset, data_type_map,\n 'copy of record size')\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError(\n f'Unable to parse copy of record size with error: {exception!s}')\n\n if change_log_entry_record.record_size != copy_of_record_size:\n raise errors.ParseError((\n f'Record size mismatch ({change_log_entry_record.record_size:d} != '\n f'{copy_of_record_size:d})'))\n\n if self._debug:\n self._DebugPrintValue('Copy of record size', f'{copy_of_record_size:d}')\n\n self._DebugPrintText('\\n')\n\n return change_log_entry", "def log_scan(log_path):\n with open(log_path, 'r') as log_file:\n log_buffer = log_file.read()\n\n pattern = r\"Shield! 
in state: \\n(.+?\\]\\])\"\n prog = re.compile(pattern, flags=re.DOTALL)\n result = prog.findall(log_buffer)\n\n ret_list = []\n for i in result:\n s = np.fromstring(i.replace(\"[\", \"\").replace(\"]\", \"\"), dtype=float, sep=' ')\n s = s.reshape((len(s), 1)) \n ret_list.append(s)\n\n with open(log_path+\"_ret.pkl\", \"wb\") as fp:\n pickle.dump(ret_list, fp)\n\n return ret_list", "def parse_log_file(self, log_file):\n msg = {}\n events = {}\n print \"Parsing %s ...\" % log_file\n for line in open(log_file, 'r'):\n log_entry = [entry.strip() for entry in line.split(\"|\")]\n log_time = parse_datetime(log_entry[0])\n if log_entry[3].find(\"Event: \") != -1:\n event_dict_string = log_entry[3][len(\"Event: \"):]\n event_dict = ast.literal_eval(event_dict_string)\n events[log_time] = event_dict\n else:\n msg[log_time] = log_entry[3]\n return (msg, events)", "def parse_log_file(self):\n # Open the log file\n log_file_data = utils.open_file(self.log_file)\n for line in log_file_data:\n found = re.findall(self.INVALID_USER, line)\n if (found is not None and found != []):\n date = found[0][0]\n month = date.split(\" \")[0]\n day = date.split(\" \")[1]\n last_time = found[0][1]\n username = found[0][2]\n ip = found[0][3]\n\n # convert date, time to epoch time\n epoch_time = utils.get_epoch_time(month, day, last_time)\n self.update_username_dict(username, ip, date, epoch_time)", "def _read_log(self):\n path = self.path_log()\n log_file = open(path, \"r\", encoding='latin-1')\n return log_file.readlines()", "def loadLogResults(self):\n try:\n logPicklePath = os.path.join(self.getCurrentCsvReportFolder(), 'LogResults.pkl')\n try:\n with open(logPicklePath, 'rb') as f:\n self.logDict = pickle.load(f)\n except:\n print('Exception for loadLogResults')\n except:\n print(traceback.format_exc())", "def load_log(log_file):\n file_lines = {}\n for line in open(log_file, 'r'):\n parts = line.split(\" \")\n log_time = datetime.strptime(parts[0] + \" \" + parts[1],\n '%Y-%m-%d %H:%M:%S,%f')\n # Assume that the last part of a log line is the data part\n log_query = parts[-1]\n file_lines[log_time] = log_query\n return file_lines", "def parse_log():\n fh = open(log_file, 'r')\n lines = fh.readlines()\n fh.close()\n line_counts = len(lines)\n # print(f'counts {line_counts}')\n stopped_cik_file = lines[line_counts - 3].strip()\n stopped_cik_file_index = cik_file_list.index(stopped_cik_file)\n stopped_cik_row_index = lines[line_counts - 1].strip()\n return int(stopped_cik_file_index), int(stopped_cik_row_index)", "def _process_pyscf_log(self, logfile: str) -> None:\n with open(logfile, \"r\", encoding=\"utf8\") as file:\n content = file.readlines()\n\n for i, _ in enumerate(content):\n if content[i].startswith(\"System:\"):\n content = content[i:]\n break\n\n logger.debug(\"PySCF processing messages log:\\n%s\", \"\".join(content))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Map raw rwc beat data to beat position in bar (e.g. 1, 2, 3, 4).
def _position_in_bar(beat_positions, beat_times): # Remove -1 _beat_positions = np.delete(beat_positions, np.where(beat_positions == -1)) beat_times_corrected = np.delete(beat_times, np.where(beat_positions == -1)) # Create corrected array with downbeat positions beat_positions_corrected = np.zeros((len(_beat_positions),)) downbeat_positions = np.where(_beat_positions == 384)[0] _beat_positions[downbeat_positions] = 1 beat_positions_corrected[downbeat_positions] = 1 # Propagate positions for b in range(0, len(_beat_positions)): if _beat_positions[b] > _beat_positions[b - 1]: beat_positions_corrected[b] = beat_positions_corrected[b - 1] + 1 if not downbeat_positions[0] == 0: timesig_next_bar = beat_positions_corrected[downbeat_positions[1] - 1] for b in range(1, downbeat_positions[0] + 1): beat_positions_corrected[downbeat_positions[0] - b] = ( timesig_next_bar - b + 1 ) return beat_positions_corrected, beat_times_corrected
[ "def _extract_bars(self, data):", "def _map_velocity_to_bar_location(self, velocity):\n # (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min --> map one spectrum to another\n if velocity < self._velocity_bar_min_velocity:\n return self._velocity_bar_min_pixel\n if velocity > self._velocity_bar_max_velocity:\n return self._velocity_bar_max_pixel\n pixel = (velocity - self._velocity_bar_min_velocity) * (self._velocity_bar_max_pixel - self._velocity_bar_min_pixel) \\\n / (self._velocity_bar_max_velocity - self._velocity_bar_min_velocity) \\\n + self._velocity_bar_min_pixel\n self._logger.debug(\"mapping location to {}\".format(pixel))\n return int(pixel)", "def bar_oscillator(self):\n # update tempo and oscillator\n # print(self._tempo.get_delay_s())\n if self.bpm_beat_now():\n self.beat_counter = (self.beat_counter + 1) % 4\n self.beat_period = self._tempo.get_period_s()\n # print(\"beat at:\", self._tempo.get_delay_s())\n self.beat_timestamp = time.time()\n oscillator = self.beat_counter\n else:\n time_since_beat = time.time() - self.beat_timestamp\n oscillator = (\n 1 - (self.beat_period - time_since_beat) / self.beat_period\n ) + self.beat_counter\n # ensure it's between 0 and 1. useful when audio cuts\n oscillator = min(4, oscillator)\n oscillator = max(0, oscillator)\n return oscillator", "def _map_index(r, c):\n return ((r)*(r)+(r))/2+c", "def _movementcounter(self, *, mfdata):\n return mfdata[15] & 0xFF", "def bar_offset(self):\n if self._bar_offset is None:\n bar_offset, _ = self.offset_bar(self._filter, self.mask)\n else:\n bar_offset = self._bar_offset\n return bar_offset", "def add_pos(coverage, quality, chr, pos, str_call, snps, sample):\n\n if coverage >= 8 and '_' not in chr and 'M' not in chr and quality > float(100):\n ref_freq = get_ref_freq(coverage, str_call)\n snps[sample][chr+':'+pos] = [ref_freq, float(1)-ref_freq]", "def posToRainbow(pos):\n if pos < 85:\n return Color(pos * 3, 255 - pos * 3, 0)\n elif pos < 170:\n pos -= 85\n return Color(255 - pos * 3, 0, pos * 3)\n else:\n pos -= 170\n return Color(0, pos * 3, 255 - pos * 3)", "def _movementcounter(self, *, mfdata):\n return (mfdata[9] & 0xFF) << 8 | mfdata[10] & 0xFF", "def wilight_to_hass_position(value: int) -> int:\n return min(100, round((value * 100) / 255))", "def get_piezo_response_map_separate_z(self):\n tdTomato_file=self.tdTomato_registered_path\n gcamp_file=self.gcamp_registered_path\n piezo_data_file=self.piezo_data_path\n min_range3 = self.min_range3\n max_range3 = self.max_range3\n gcamp_threshold_ratio = self.gcamp_threshold_ratio\n tdTomato_threshold = self.tdTomato_threshold\n ratio_threshold = self.ratio_threshold\n\n response_range = self.response_range\n base_range = self.base_range\n n_of_z = self.n_of_z\n\n #load the info on piezo start frame and get the tdTomato and gcamp data (filtered and registered)\n with open(piezo_data_file, \"rb\") as f:\n [first_piezo_start,second_piezo_start]=pickle.load(f)\n\n with open(tdTomato_file,\"rb\") as f:\n tdTomato_registered=pickle.load(f)\n\n with open(gcamp_file,\"rb\") as f:\n gcamp_registered=pickle.load(f)\n\n #tdTomato_registered and gcamp_registered may contain negative pixel values.\n #image brightness should always be positive (or zero), so subtract the min\n #value to make all values above zero.\n tdTomato_registered=tdTomato_registered-np.min(tdTomato_registered)\n gcamp_registered=gcamp_registered-np.min(gcamp_registered)\n\n #initialize the data array\n 
average_tdTomato_all=np.zeros((n_of_z,tdTomato_registered.shape[2],tdTomato_registered.shape[3]))\n average_gcamp_all=np.zeros_like(average_tdTomato_all)\n base_tdTomato_all=np.zeros_like(average_tdTomato_all)\n base_gcamp_all=np.zeros_like(average_tdTomato_all)\n ratio_response_all=np.zeros_like(average_tdTomato_all)\n ratio_baseline_all=np.zeros_like(average_tdTomato_all)\n DF_F_map_all=np.zeros_like(average_tdTomato_all)\n DR_R_map_all=np.zeros_like(average_tdTomato_all)\n\n #calculate the threshold pixel value\n flattened_array=np.ravel(base_gcamp_all)\n gcamp_sorted=np.sort(flattened_array)\n threshold_index=np.round(gcamp_sorted.shape[0]*gcamp_threshold_ratio)\n threshold_index=threshold_index.astype(int)\n gcamp_threshold=gcamp_sorted[threshold_index]\n\n\n #calcuate the baseline and the response images for each z-level\n for z_level in range(n_of_z):\n #\n average_tdTomato=np.average(tdTomato_registered[z_level,first_piezo_start:first_piezo_start+response_range,:,:],axis=0)\n average_tdTomato2=np.average(tdTomato_registered[z_level,second_piezo_start:second_piezo_start+response_range,:,:],axis=0)\n average_tdTomato=(average_tdTomato+average_tdTomato2)/2\n\n average_gcamp=np.average(gcamp_registered[z_level,first_piezo_start:first_piezo_start+response_range,:,:], axis=0)\n average_gcamp2=np.average(gcamp_registered[z_level,second_piezo_start:second_piezo_start+response_range,:,:], axis=0)\n average_gcamp=(average_gcamp+average_gcamp2)/2\n\n base_tdTomato=np.average(tdTomato_registered[z_level,first_piezo_start-base_range:first_piezo_start,:,:],axis=0)\n base_tdTomato2=np.average(tdTomato_registered[z_level,second_piezo_start-base_range:second_piezo_start,:,:],axis=0)\n base_tdTomato=(base_tdTomato+base_tdTomato2)/2\n\n base_gcamp=np.average(gcamp_registered[z_level,first_piezo_start-base_range:first_piezo_start,:,:],axis=0)\n base_gcamp2=np.average(gcamp_registered[z_level,second_piezo_start-base_range:second_piezo_start,:,:],axis=0)\n base_gcamp=(base_gcamp+base_gcamp2)/2\n\n #initialize the response map to zero\n ratio_response=np.zeros_like(base_gcamp)\n ratio_baseline=np.zeros_like(base_gcamp)\n DF_F_map=np.zeros_like(base_gcamp)\n DR_R_map=np.zeros_like(base_gcamp)\n\n #calculate ratio, but we need to exclude pixels with very low tdTomato value to avoid high noise\n ratio_response=np.divide(average_gcamp,average_tdTomato, where=((average_tdTomato>=tdTomato_threshold)&(base_tdTomato>=tdTomato_threshold)))\n ratio_baseline=np.divide(base_gcamp,base_tdTomato, where=((average_tdTomato>=tdTomato_threshold)&(base_tdTomato>=tdTomato_threshold)))\n\n #plot in a figure\n fig, axs = plt.subplots(1,3, figsize=(12,5),tight_layout = True)\n\n axs[0].imshow(base_gcamp)\n axs[0].set_yticks([])\n axs[0].set_xticks([])\n axs[0].set_title('gcamp baseline', fontsize=20)\n\n #DF/F calculated only for pixels whose base_gcamp value is above the threshold\n DF_F_map=np.divide((average_gcamp-base_gcamp),base_gcamp,where=(base_gcamp>=gcamp_threshold))\n # where=() in np.divide seems to give inconsitent results in google colab.\n #Force it with the following line for now.\n DF_F_map[base_gcamp<=gcamp_threshold]=0\n axs[1].imshow(DF_F_map,vmin=min_range3,vmax=max_range3)\n axs[1].set_yticks([])\n axs[1].set_xticks([])\n axs[1].set_title('DF/F map', fontsize=20)\n\n #DR/R calculated only for pixels whose ratio_baseline is above the threshold and we have certain level of baseline gcamp\n 
DR_R_map=np.divide((ratio_response-ratio_baseline),ratio_baseline,where=((ratio_baseline>=ratio_threshold)&(base_gcamp>=gcamp_threshold)))\n axs[2].imshow(DR_R_map,vmin=min_range3,vmax=max_range3)\n axs[2].set_yticks([])\n axs[2].set_xticks([])\n axs[2].set_title('DR/R map', fontsize=20)\n\n #Place in the appropriate data array.\n average_tdTomato_all[z_level,:,:]=average_tdTomato\n average_gcamp_all[z_level,:,:]=average_gcamp\n base_tdTomato_all[z_level,:,:]=base_tdTomato\n base_gcamp_all[z_level,:,:]=base_gcamp\n ratio_response_all[z_level,:,:]=ratio_response\n ratio_baseline_all[z_level,:,:]=ratio_baseline\n DF_F_map_all[z_level,:,:]=DF_F_map\n DR_R_map_all[z_level,:,:]=DR_R_map\n\n #Save the data array.\n outfile_name=gcamp_file+'_maps'\n\n with open(outfile_name, \"wb\") as f:\n pickle.dump([average_tdTomato_all,average_gcamp_all,base_tdTomato_all, base_gcamp_all, ratio_response_all, ratio_baseline_all, DF_F_map_all, DR_R_map_all], f)\n print(outfile_name)\n\n self.map_data_path = outfile_name\n\n return self.map_data_path", "def update_history(history, index, mus):\n\n # pull arm i\n x_it = get_sample(mus[index])\n history[index][0] += x_it\n history[index][1] += 1.0\n return history", "def bar_offset(self, value):\n # Only update if the value changes\n if self.mask is None:\n self._bar_offset = 0 #None\n elif self.mask[-2:]=='WB':\n # Value limits between -10 and 10\n if (value is not None) and np.abs(value)>10:\n value = 10 if value>0 else -10\n msg1 = 'bar_offset value must be between -10 and 10 arcsec.'\n msg2 = ' Setting to {}.'.format(value)\n _log.warning('{}\\n{}'.format(msg1,msg2))\n \n self._bar_offset = value\n else:\n self._bar_offset = 0", "def positions_feed(self):", "def pitch(self):\r\n # The idea is this method will cover all shots with a wedge not considered a chip.\r\n # This will make use of strokes gained approach the green and other parameters\r\n # with regards to short finesse shots into the green. 
\r\n # I need to make some feature that helps me determine how close the player will hit the ball\r\n # to the hole\r\n \r\n import numpy as np\r\n \r\n pitch = []\r\n \r\n if self.sg_apr > 0:\r\n second_cut = .05\r\n first_cut = .1\r\n on_green = 1 - second_cut - first_cut\r\n pitch.append(on_green)\r\n pitch.append(first_cut)\r\n pitch.append(second_cut)\r\n elif self.sg_apr <= 0:\r\n second_cut = .025\r\n first_cut = .15\r\n on_green = 1 - second_cut - first_cut\r\n pitch.append(on_green)\r\n pitch.append(first_cut)\r\n pitch.append(second_cut)\r\n return pitch", "def mbar(self):\n return 1000 * (self._press / 1.013e5)", "def range_process( instrument, raw, max_range, constants\n ,rs_cal, rs_Cxx, corr_adjusts ,processing_defaults):\n\n\n \n\n assert(rs_Cxx is not None)\n rs = hau.Time_Z_Group(like=raw)\n\n if 0:\n import matplotlib.pylab as plt\n \n mol = np.nanmean(raw.molecular_counts,0)\n wfov = np.nanmean(raw.molecular_wfov_counts,0)\n bin_vec = 7.5 * np.arange(len(wfov))\n mol = mol - np.nanmean(mol[0:40])\n wfov = wfov - np.nanmean(wfov[0:40])\n mol *= (bin_vec-45*7.5)**2\n wfov *= (bin_vec-45*7.5)**2\n wfov *= np.exp(-2*bin_vec *1e-5)\n #wfov = wfov - bin_vec*wfov[900]/(900 *0.0001)\n wfov *= mol[900]/wfov[900]\n plt.figure(99999)\n plt.plot(bin_vec,wfov,'c',bin_vec,mol,'r')\n ax=plt.gca()\n ax.set_yscale('log')\n plt.grid(True)\n plt.show()\n print j\n #copy corrected raw into rs \n for field in ['transmitted_1064_energy','transmitted_energy','seeded_shots','molecular_counts'\n ,'combined_lo_counts','combined_hi_counts','cross_pol_counts',\n 'combined_wfov_counts','molecular_wfov_counts',\n 'molecular_i2a_counts','combined_1064_counts','telescope_pointing']:\n if hasattr(raw,field):\n setattr(rs,field,getattr(raw,field).copy())\n setattr(rs,'raw_'+field,getattr(raw,field).copy())\n \n # compute bin number of laser pulse\n [dark_interval_end_time, laser_pulse_time, cal_pulse_end_time] = \\\n constants['apd_pulse_timing']\n bin_duration = constants['binwidth']\n s_bin = int(laser_pulse_time / bin_duration) # laser pulse bin number\n #dark_interval_end_bin = int(dark_interval_end_time / bin_duration)- 1\n\n nalts = raw.molecular_counts.shape[1]\n\n #save geo corrected raw counts as 'var_xxx' in rs so that they get averaged without\n #other range processing for use in compute_photon_statistics. 
We also multiply\n #by the square of the geocorrection to account for the geocorrection in the\n #signal used compute_phothon_statistics()\n if processing_defaults.enabled('compute_stats'):\n ones_array = np.ones(raw.molecular_counts.shape)\n # bin 0 of geo_correction is defined as occurring at the laser pulse\n geocorr = ones_array.copy()\n geocorr[:,s_bin:] = rs_cal.geo.data[:nalts-s_bin, 1] * ones_array[:,s_bin:]\n \n for field in ('molecular_counts','combined_lo_counts'\n ,'combined_hi_counts','cross_pol_counts','combined_wfov_counts'\n ,'molecular_wfov_counts','molecular_i2a_counts','combined_1064_counts'):\n if hasattr(raw,field):\n setattr(rs,'var_raw_'+field,getattr(raw,field)*geocorr*geocorr) \n \n #counts arrays are the average number of counts in a data raw acquistion interval\n #of raw.times[2]-raw.times[1] while seeded_shots is the total number of laser pulses\n #the acquisition interval prior to preaveraging in preprocess_raw.py\n\n #note: this does not compensate for the pileup correction--in very high count areas this\n #will under estimate the varience because actual counts are multipled by a pileup correction\n #in the preprocess_raw.py routine \n\n #counts have been pileup corrected in preprocessing\n #do dark correction for all channels\n \n s_time =datetime.utcnow()\n dark_count_correction(instrument,raw,rs,rs_Cxx,corr_adjusts,processing_defaults,constants)\n print 'time for dark correction = ',datetime.utcnow() - s_time\n\n # gain correction for nadir pointing in airborne operation\n # this is expected to be a very small correction with little\n # impact on signal statitics\n if 'installation' in constants and constants['installation'] == 'airborne' \\\n and constants['nadir_comb_gain_adjustment'] != 1.0:\n print 'Apply nadir gain adjustment'\n print 'nadir gain adj= ', constants['nadir_comb_gain_adjustment']\n ix = np.arange(rs.telescope_pointing.shape[0])\n indices = ix[rs.telescope_pointing[:] < 0.1]\n nadir_gain_adj = constants['nadir_comb_gain_adjustment']\n rs.combined_lo_counts[indices, :] *= nadir_gain_adj\n rs.combined_hi_counts[indices, :] *= nadir_gain_adj\n \n #np.set_printoptions(threshold='nan')\n \n #do baseline correction\n rs = baseline_correction(rs,rs_cal,nalts,corr_adjusts,constants)\n \n # correct for differential geometry between 1064 and 532 nm channels\n rs = diff_1064_532_geometry_correction(rs,rs_cal,nalts,processing_defaults\n ,corr_adjusts)\n if 0:\n import matplotlib.pylab as plt\n plt.figure(67)\n plt.plot(np.nanmean(rs.combined_hi_counts,0),np.arange(len(rs.combined_hi_counts[0,:])),'r'\n ,np.nanmean(rs.molecular_counts,0),np.arange(len(rs.molecular_counts[0,:])),'b')\n ax=plt.gca()\n ax.set_xscale('log') \n #do combined-molecular differential geo correction if available\n rs = diff_geometry_correction(rs,rs_cal,nalts,processing_defaults\n ,corr_adjusts)\n if 0:\n import matplotlib.pylab as plt\n plt.figure(68)\n plt.plot(np.nanmean(rs.combined_hi_counts,0),np.arange(len(rs.combined_hi_counts[0,:])),'r'\n ,np.nanmean(rs.molecular_counts,0),np.arange(len(rs.molecular_counts[0,:])),'b')\n ax=plt.gca()\n ax.set_xscale('log') \n \n # Matt Add: do cross polarization differential geometry correction\n rs = diff_cp_geometry_correction(rs,rs_cal,nalts,processing_defaults\n ,corr_adjusts)\n \n # do i2a differential geo correction if present and relavent to instrument\n if hasattr(rs,'molecular_i2a_counts') and corr_adjusts['i2a_dif_geo_corr'] > 0:\n rs = i2a_diff_geo_correction(rs,rs_cal,corr_adjusts)\n\n #create combined_counts from combined_hi 
and combined_lo profiles\n rs = merge_combined_hi_and_lo(rs,constants)\n if 0:\n import matplotlib.pylab as plt\n plt.figure(69)\n plt.plot(np.nanmean(rs.combined_hi_counts,0),np.arange(len(rs.combined_hi_counts[0,:])),'r'\n ,np.nanmean(rs.molecular_counts,0),np.arange(len(rs.molecular_counts[0,:])),'b'\n ,np.nanmean(rs.combined_lo_counts,0),np.arange(len(rs.combined_lo_counts[0,:])),'c'\n ,np.nanmean(rs.cross_pol_counts,0),np.arange(len(rs.cross_pol_counts[0,:])),'g'\n ,np.nanmean(rs.combined_counts,0),np.arange(len(rs.combined_counts[0,:])),'k')\n ax=plt.gca()\n ax.set_xscale('log')\n #plt.show()\n\n print 'cp/mol'\n \"\"\"\n if processing_defaults.enabled('wfov_geo_corr') and hasattr(rs,'molecular_wfov_counts'):\n #do geometry correction after adjusting geo_corr with wide-field-of-view data.\n geo_corr = rs_cal.geo.data[:4000,1]\n s_bin = np.int(constants['apd_pulse_timing'][1]/constants['binwidth'])\n wfov_ratios = np.zeros(rs.molecular_wfov_counts.shape[1])\n wfov_ratios[:-s_bin] = nanmean(rs.molecular_wfov_counts[:,s_bin:],0)\\\n / nanmean(rs.molecular_counts[:,s_bin:],0) \n wfov_geometry_correction(rs,wfov_ratios,geo_corr,processing_defaults,constants,corr_adjusts)\n \"\"\"\n #does wfov corr exist?\n if processing_defaults.enabled('wfov_corr') and hasattr(rs,'molecular_wfov_counts')\\\n and hasattr(rs_cal,'geo')\\\n and hasattr(rs_cal.geo,'wfov_mol_ratio'):\n \n \n #add pre-trigger bins to wfov_mol_ratio array provided in geofile_default_file\n #and add to structure for use in extinction processing\n calibration_wfov_mol_ratio = np.zeros(rs.molecular_counts.shape[1])\n calibration_wfov_mol_ratio[s_bin:] = \\\n rs_cal.geo.wfov_mol_ratio[:(rs.molecular_counts.shape[1]-s_bin)]\n rs.calibration_wfov_mol_ratio = hau.Z_Array(calibration_wfov_mol_ratio)\n \n # do the normal geometric correction on the following variables\n select = ['molecular_counts','combined_lo_counts','combined_hi_counts'\n ,'molecular_i2a_counts','combined_1064_counts','molecular_wfov_counts'\n ,'combined_counts','cross_pol_counts']\n rs = lu.geometry_correction(select,rs,rs_cal,nalts,s_bin,corr_adjusts['geo_corr'])\n \n #mask close range bin counts\n first_bin_to_process = processing_defaults.get_value('first_bin_to_process','bin_number')\n for field in ['combined_hi_counts','combined_lo_counts','combined_wfov_counts','molecular_wfov_counts'\n 'molecular_i2a_counts','molecular_counts','cross_pol_counts','combined_counts'\\\n 'combined_1064_counts']:\n if hasattr(rs,field):\n getattr(rs,field)[:, :(s_bin+first_bin_to_process)] = np.NaN\n \n return rs", "def on_beat(self):\n print self.beat_time", "def maps_from_echse(conf):\n # Read sub-catchment rainfall from file\n fromfile = np.loadtxt(conf[\"f_data\"], dtype=\"string\", delimiter=\"\\t\")\n if len(fromfile)==2: \n rowix = 1\n elif len(fromfile)>2:\n rowix = slice(1,len(fromfile))\n else:\n raise Exception(\"Data file is empty: %s\" % conf[\"f_data\"])\n \n var = fromfile[rowix,1:].astype(\"f4\")\n dtimes = fromfile[rowix,0]\n dtimes = np.array([wradlib.util.iso2datetime(dtime) for dtime in dtimes])\n dtimesfromconf = wradlib.util.from_to(conf[\"tstart\"], conf[\"tend\"], conf[\"interval\"])\n dtimes = np.intersect1d(dtimes, dtimesfromconf)\n if len(dtimes)==0:\n print \"No datetimes for mapping based on intersection of data file and config info.\"\n return(0)\n \n# objects = fromfile[0,1:]\n\n cats = plt.genfromtxt(conf[\"f_coords\"], delimiter=\"\\t\", names=True,\n dtype=[('id', '|S20'), ('lat', 'f4'), ('lon', 'f4'), \n ('x', 'f4'), ('y', 'f4')])\n mapx, mapy = 
wradlib.georef.reproject(cats[\"x\"],cats[\"y\"], \n projection_source=conf[\"trg_proj\"], \n projection_target=conf[\"map_proj\"])\n\n # Read shapefile\n dataset, inLayer = wradlib.io.open_shape(conf[\"f_cats_shp\"])\n polys, keys = wradlib.georef.get_shape_coordinates(inLayer, key='DN')\n keys = np.array(keys)\n # Preprocess polygons (remove minors, sort in same order as in coords file)\n polys2 = []\n for i, id in enumerate(cats[\"id\"]):\n keyix = np.where( keys==eval(id.strip(\"cats_\")) )[0]\n if len(keyix) > 1:\n # More than one key matching? Find largest matching polygon\n keyix = keyix[np.argmax([len(polys[key]) for key in keyix])]\n else:\n keyix = keyix[0] \n poly = polys[keyix].copy()\n if poly.ndim==1:\n # Multi-Polygons - keep only the largest polygon \n # (just for plotting - no harm done)\n poly2 = poly[np.argmax([len(subpoly) for subpoly in poly])].copy()\n else:\n poly2 = poly.copy()\n polys2.append ( wradlib.georef.reproject(poly2, \n projection_source=conf[\"trg_proj\"], \n projection_target=conf[\"map_proj\"]) )\n \n colors = plt.cm.spectral(np.linspace(0,1,len(conf[\"levels\"]))) \n mycmap, mynorm = from_levels_and_colors(conf[\"levels\"], colors, extend=\"max\")\n \n plt.interactive(False)\n for i, dtime in enumerate(dtimes):\n datestr = (dtime-dt.timedelta(seconds=conf[\"interval\"])).strftime(\"%Y%m%d.png\")\n print datestr\n figpath = os.path.join(conf[\"savefigs\"], datestr)\n fig = plt.figure(figsize=(6,6))\n ax = fig.add_subplot(111, aspect=\"equal\")\n ax, coll = plot_cats(polys2, var[i], ax=ax, bbox=conf[\"bbox\"], cmap=mycmap, \n norm=mynorm, edgecolors='none')\n cb = plt.colorbar(coll, ax=ax, ticks=conf[\"levels\"], shrink=0.6)\n cb.ax.tick_params(labelsize=\"small\")\n cb.set_label(\"(mm)\")\n plt.xlabel(\"Longitude\")\n plt.ylabel(\"Latitude\")\n plot_trmm_grid_lines(ax)\n plt.text(conf[\"bbox\"][\"left\"]+0.25, conf[\"bbox\"][\"top\"]-0.25, \n \"%s\\n%s to\\n%s\" % (conf[\"figtxtbody\"], \n (dtime-dt.timedelta(seconds=conf[\"interval\"])).isoformat(\" \"),\n dtime.isoformat(\" \") ),\n color=\"red\", fontsize=\"small\", verticalalignment=\"top\")\n plt.tight_layout()\n plt.savefig(figpath)\n plt.close()\n plt.interactive(True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the prediction (highest scoring class id) of a log-linear classifier with given parameters on input x.
def predict(x, params): return np.argmax(classifier_output(x, params))
[ "def predict_logit(self, x):\n self.model.train()\n with torch.no_grad():\n y_ = self.model(x)\n return y_", "def lasso_predict(self, x: np.array) -> np.array:\r\n if self.LassoModel is None:\r\n print('Lasso Model not trained, please run lasso_fit first!')\r\n return None\r\n else:\r\n return self.LassoModel.predict(x)", "def logits(self, x):", "def predict_log_likelihood_ratio(self, X, index = 0):\n params = self.store[index][1:]\n log_ratio = params[0] + np.sum(np.multiply(params[1:], X))\n return np.maximum(log_ratio, np.log(self.class_min/(1-self.class_min)))", "def features_logits(\n self,\n x: torch.Tensor,\n ) -> Tuple[Tuple[torch.Tensor, ...], torch.Tensor]:\n\n features = self.features(x)\n logits = self.classifier(features[-1])\n return features, logits", "def predict_log_likelihood_ratio(self, X, index = 0):\n model = self.store[index]\n class_probs = np.maximum(model.predict(X)[0], self.class_min)\n return np.log(class_probs / (1 - class_probs))", "def _predict_from_logits(logits):\n # Get softmax probabilities for the tensors.\n y_class_prob = tf.squeeze(tf.nn.softmax(logits))\n\n # Get maximum arg val for the class probabilities.\n y_pred = tf.argmax(y_class_prob, axis=-1, output_type=tf.int32)\n\n return y_pred", "def prior_predict(self, x_test, logits=False):\n raise NotImplementedError(\"Priors not supported for one vs. all gaussian process classification.\")", "def predict_log_proba(self, X):\n use_proba = self.postprocessor_._get_tags()['requires_proba']\n y_score = (self.estimator_.predict_proba(X) if use_proba else\n self.estimator_.predict(X))\n y_score = pd.DataFrame(y_score, index=X.index).squeeze('columns')\n return self.postprocessor_.predict_log_proba(y_score)", "def predict(self, x_data):\n \n # Filtering data if required\n if self.filter_variables is not None:\n x_data = x_data[:,self.filter_variables]\n \n # Initialization of predictions\n predictions = np.zeros(x_data.shape[0])\n \n # Prediction for each subject\n for subject_index in range(x_data.shape[0]):\n \n # Computing the log likelihoods\n log_likelihood = np.array(\n [\n self.kde[0].score(x_data[subject_index,:].reshape(1, -1)),\n self.kde[1].score(x_data[subject_index,:].reshape(1, -1))\n ]\n )\n \n # Computing the log posteriori unnormalized\n log_posteriori_unnormalized = log_likelihood + self.classes_log_distribution\n predictions[subject_index] = 1 if log_posteriori_unnormalized[1] > log_posteriori_unnormalized[0] else 0\n \n # Return the predictions made by the model\n return predictions", "def get_score(self, logits):\n return tf.nn.log_softmax(logits)", "def supervised_predict(self, x):\n\n z_ik = self.get_posterior(x)\n N = x.shape[0]\n y_hat = np.zeros((1,N)).reshape(N,)\n\n cluster_index = np.argmax(z_ik, axis=1)\n\n n_components = self._n_components\n print('cluster_index', cluster_index)\n print('cluster map', self.cluster_label_map)\n for i in range(N):\n y_hat[i] = self.cluster_label_map[cluster_index[i]]\n\n\n return np.array(y_hat)", "def prediction(theta, x):\n\n return theta[0] + theta[1]*x", "def predict(self, x):\n res = 0\n for arbre in self.arbres:\n res += arbre.predict(x)\n if res >= 0:\n return 1\n return -1", "def classify1(self,X):\n prediction = self.classify.predict(X)\n \n return prediction", "def eval(self,y,x):\n self.forward(x)\n L = np.log(self.x3_out) * y\n loss = -np.mean(L.sum(1))\n\n accuracy = (np.argmax(self.x3_out, axis=1) == np.argmax(y, axis=1)).sum()\n return loss, accuracy/y.shape[0]", "def predictions_linear(input_data,weights,threshold):\n prediction 
= input_data@weights\n labels_predicted = [1 if x > threshold else -1 for x in prediction]\n return labels_predicted", "def _log_prob(self, x):\n return tf.math.log(tf.cast(self.prob(x), dtype=tf.float32))", "def log_predictive_likelihood(data_id, cluster_id, state):\n D = state['D']\n x = np.array(state['data_'][data_id])\n x = x.reshape(D,1)\n ss = state['suffstats'][cluster_id]\n return _log_predictive_likelihood(ss, x, D)", "def Logistic_Regression(x, y):\n\n model = LogisticRegression()\n model.fit(X=x, y=y)\n\n Predict_y = model.predict(x)\n\n Results = FT.MachineLearning.Metrics.ModelEvaluation(y, Predict_y)\n\n return model, Results" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the dataframe to the HDFStore. Nonpure.
def writer(df, name, store_path, settings, start_time, overwrite=True): with pd.get_store(store_path) as store: store.append(name, df, append=False) logname = name.lstrip('m') with open(settings["make_hdf_store_completed"], 'a') as f: f.write('{},{},{}\n'.format(logname, start_time, arrow.utcnow()))
[ "def store(self, station_id: int, df: pd.DataFrame) -> None:\n\n # Make sure that there is data that can be stored\n if df.empty:\n return\n\n # Replace IntegerArrays by float64\n for column in df:\n if column in QUALITY_FIELDS or column in INTEGER_FIELDS:\n df[column] = df[column].astype(\"float64\")\n\n log.info(f\"Storing HDF5 data to {self.filepath}\")\n self.filepath.parent.mkdir(parents=True, exist_ok=True)\n df.to_hdf(path_or_buf=self.filepath, key=self.hdf5_key(station_id))", "def write_dataframe(db, dataframe, measurement):\n DataFrameClient(url, port, user, password, db).write_points(\n batch_size=batch_size,\n dataframe=dataframe,\n protocol=default_protocol,\n measurement=measurement\n )", "def save_dataframe(self):\n if not self.dataset_path.parent.exists():\n self.dataset_path.parent.mkdir(parents=True)\n logger.info(f\"Saving dataframe to {self.dataset_path}\")\n self.dataframe.to_csv(self.dataset_path, index=False)", "def save_dataframe(self, df, fileformat):\n\n coalesce = int(self.args.get(\"coalesce\", 1) or 0)\n if coalesce > 0:\n df = df.coalesce(coalesce)\n\n if fileformat == \"text\":\n df.write.text(\n self.args[\"path\"],\n compression=\"gzip\" if self.args.get(\"gzip\") else \"none\"\n )\n\n elif fileformat == \"json\":\n df.write.json(\n self.args[\"path\"],\n compression=\"gzip\" if self.args.get(\"gzip\") else \"none\"\n )\n\n elif fileformat == \"parquet\":\n df.write.parquet(self.args[\"path\"])\n\n else:\n raise Exception(\"Unknown format %s\" % fileformat)", "def write_df(self):\n self.ds = pd.Series(self.values, index=self.ts_activation)\n self.ds.index = pd.to_datetime(self.ds.index, unit='s').tz_localize('UTC').tz_convert('Europe/Berlin')", "def save(X, Y, prices, filename):\n with pd.HDFStore(filename, 'w') as store:\n X.to_hdf(store, 'X')\n Y.to_hdf(store, 'Y')\n prices.to_hdf(store, 'prices')", "def save_as_native(self, filepath: Union[str, Path]) -> None:\n with pd.HDFStore(filepath) as store:\n store[\"table\"] = self.df\n store.root._v_attrs.latitude = self.lat\n store.root._v_attrs.longitude = self.long", "def write_data_frame_to_disk(data_frame: pd.DataFrame, output_fn: str):\n\n data_frame.to_pickle(output_fn)", "def save_to_hdf(key, item, path):\n df = pd.DataFrame(item)\n with pd.HDFStore(path) as hdf:\n hdf.put(key, df)\n print(f'Item {key} saved to {path}')", "def write_summary_file(self):\n self._summaryfile = Trace.get_summary_filename(self._tracefile)\n\n packed_metadata = self.metadata.pack_dataframe()\n\n packed_metadata[Trace._formatversionkey] = pandas.Series(\n data=Trace._formatversion, dtype=numpy.int32\n )\n\n with HDFStoreContext(self._summaryfile, mode=\"w\") as hdfstore:\n hdfstore.put(Trace._metadatakey, packed_metadata, format=\"t\")\n hdfstore.put(Trace._statisticskey, self.statistics, format=\"t\")", "def write_frame(self, data):\r\n raise NotImplemented()", "def write_to_online_store(\n self,\n feature_view_name: str,\n df: pd.DataFrame,\n allow_registry_cache: bool = True,\n ):\n # TODO: restrict this to work with online StreamFeatureViews and validate the FeatureView type\n try:\n feature_view = self.get_stream_feature_view(\n feature_view_name, allow_registry_cache=allow_registry_cache\n )\n except FeatureViewNotFoundException:\n feature_view = self.get_feature_view(\n feature_view_name, allow_registry_cache=allow_registry_cache\n )\n provider = self._get_provider()\n provider.ingest_df(feature_view, df)", "def save_dataframe(df, filename):\n \n df.to_pickle(\"./datasets/processed_pickle_files/\" + filename)", "def 
write_dataframe(self, result, dst_paths, nodata=None, compress='lzw'):\n result = self._convert_to_ndarray(result)\n self.write_ndarray(result, dst_paths, nodata=nodata, compress=compress)", "def save_dataframe_to_file(df: vaex.dataframe.DataFrame, filename: str) -> bool:\n import pathlib\n\n path_to_processed_data = os.path.join(get_base_data_path(), \"processed\")\n\n try:\n path = os.path.realpath(os.path.join(path_to_processed_data, filename))\n dir = os.path.dirname(path)\n pathlib.Path(dir).mkdir(parents=True, exist_ok=True)\n\n if os.path.exists(f\"{path}.arrow\"):\n os.remove(f\"{path}.arrow\")\n\n df.export(f\"{path}.arrow\")\n return True\n\n except OSError as err:\n print(err)\n return False", "def save_data(name: str, dataframe: pd.DataFrame, location: str = SAVE_LOCATION) -> None:\n if dataframe.index.dtype.kind == 'M':\n dataframe = dataframe.reset_index()\n try:\n dataframe.to_feather(location + name + '.feather')\n except FileNotFoundError:\n initialization.create_folder(location)\n dataframe.to_feather(location + name + '.feather')", "def save_df(df, path, file_format='.pkl', pkg='pandas', *args, **kwargs):\n\n methods_dict = {\n 'pandas': _get_pd_io_methods,\n 'dask': _get_dd_io_methods\n }[pkg]()\n\n if file_format not in path:\n path = path + file_format\n \n utils.make_dir(path)\n\n save_func = methods_dict[file_format]['write']\n save_func(df, path, *args, **kwargs)\n\n print(\"Saved to %s \\n\" % path)", "def save(self, filename):\n if (filename[-5:] != '.hmat'):\n filename += '.hmat'\n h5f = h5py.File(filename, 'w')\n h5f.create_dataset('matrix', data=self.matrix, compression = 'gzip', compression_opts=9)\n h5f.create_dataset('idx', data=self.idx, compression = 'gzip', compression_opts=9)\n h5f.create_dataset('applyedMethods', data=cPickle.dumps(self._applyedMethods))\n if hasattr(self,\"genome\") and hasattr(self,\"resolution\"):\n h5f.create_dataset('genome',data = cPickle.dumps(self.genome))\n h5f.create_dataset('resolution',data = cPickle.dumps(self.resolution))\n else:\n warnings.warn(\"No genome and resolution is specified, attributes are recommended for matrix.\")\n \n h5f.close()", "def _save(self, data: pd.DataFrame) -> None:\n # using get_filepath_str ensures that the protocol and path are appended correctly for different filesystems\n save_path = self._get_save_path()\n data.to_parquet(save_path, compression='gzip')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Drops columns that are duplicated. Have to transpose for unknown reason. I'm hitting multiple PADDING's, that should be it.
def dedup_cols(df): if not df.columns.is_unique: dupes = df.columns.get_duplicates() print("Duplicates: {}".format(dupes)) return df.drop(dupes, axis=1) else: return df
[ "def _deduplicate_columns(df):\n to_check = df.columns.tolist()\n leading_columns = []\n to_merge = collections.defaultdict(collections.deque)\n for column in to_check:\n for leading_column in leading_columns:\n if column.lower() == leading_column.lower() and column != leading_column:\n to_merge[leading_column].append(column)\n break\n else:\n leading_columns.append(column)\n for leading_column, columns_to_merge in to_merge.items():\n new_column = df[leading_column]\n for column_to_merge in columns_to_merge:\n new_column = new_column.combine_first(df[column_to_merge])\n del df[column_to_merge]\n del df[leading_column]\n df[leading_column] = new_column\n return df", "def clean(self):\n for column in self.columns:\n column.change_misc_values()\n column.drop_greater_than()", "def stripCols(self):\n for frame in self.files.values():\n for col in frame.columns:\n frame[col] = frame[col].str.strip()", "def remove_duplicates(cls, X):\r\n X = X.drop('m8', axis=1)\r\n cls.num_cols.remove('m8')\r\n\r\n return X, cls.num_cols", "def _reduceColumns(self,pitches):\n while len(self.schedule.columns) > pitches:\n # pitches sorted by count of matches\n # hriste s nejmene zapasy\n pitch_from = self.schedule.count().sort_values().index[0]\n # matches to move\n matches_to_move = self.schedule[pitch_from].dropna()\n # drop old column\n self.schedule.drop(columns = [pitch_from], inplace = True)\n # rename columns\n self.schedule.columns = [i for i in range(len(self.schedule.columns))]\n # reverse loop through matches\n for match_ind in matches_to_move.index.sort_values(ascending=False):\n# chybi test zda je to mozne\n self.DfEditor._insert_match(matches_to_move[match_ind], match_ind)\n matches_to_move[match_ind] = None\n\n # make same lengths again\n self._makeSameLength()", "def _all_fields_no_dupes_columns(self):\n if self._extra_field_identifiers:\n # '\"imei_norm\", \"first\"(\"reporting_date\")'\n aggregate_field_names = [sql.SQL('first({ex})').format(ex=ex) for ex in self._extra_field_identifiers]\n return sql.SQL(', ').join(self._pk_field_identifiers + aggregate_field_names)\n return self._pk_field_columns", "def removeSkips(data: np.ndarray, columns: np.ndarray, colmeta: dict) -> None:\n\n for col in colmeta:\n if colmeta[col]['type'] == 'skip':\n colidxs = np.argwhere(columns!=col).flatten()\n columns = columns[colidxs]\n data = data[:, colidxs]", "def drop_cols(df: pd.DataFrame) -> pd.DataFrame:\n print(\n f\"Drop column if only < {int(len(df) * (1-Config.col_uniq_cutoff))} rows are different from most common\"\n )\n for i in df.columns:\n _ratio = df.groupby(i)[Config.primary_key].count().max() / len(df)\n if _ratio > Config.col_uniq_cutoff:\n print(f\"column = {i}, ratio= {_ratio:.5f}\")\n df = df.drop(columns=[i])\n return df", "def clear_over_correlated_columns(self):\n removed_cols = []\n corr_list = []\n col_list = list(combinations(self.cont_cols,2))#Gets all combinations of all continuous columns in group sizes of two\n for col1,col2 in col_list:\n print(f\"OVER CORR TEST FOR {col1} {col2}\")\n corr_list.append(self.df[col1].corr(self.df[col2]))\n for corr, cols in zip(corr_list, col_list):\n if cols[0] in removed_cols:\n continue\n if corr > .9 :\n removed_cols.append(cols[0])\n self.dropped_cols_stats.update({cols[0]:1})\n self.df.drop(columns = removed_cols, inplace=True)\n [self.cont_cols.remove(item) for item in removed_cols]", "def test_omitGapCols(self):\n aln = self.end_gaps\n \n #first, check behavior when we're just acting on the cols (and not\n #trying to delete the naughty 
rows).\n \n #default should strip out cols that are 100% gaps\n self.assertEqual(aln.omitGapCols(row_constructor=''.join), \\\n {'a':'-abc', 'b':'cba-', 'c':'-def'})\n #if allowed_gap_frac is 1, shouldn't delete anything\n self.assertEqual(aln.omitGapCols(1, row_constructor=''.join), \\\n {'a':'--a-bc-', 'b':'-cb-a--', 'c':'--d-ef-'})\n #if allowed_gap_frac is 0, should strip out any cols containing gaps\n self.assertEqual(aln.omitGapCols(0, row_constructor=''.join), \\\n {'a':'ab', 'b':'ba', 'c':'de'})\n #intermediate numbers should work as expected\n self.assertEqual(aln.omitGapCols(0.4, row_constructor=''.join), \\\n {'a':'abc', 'b':'ba-', 'c':'def'})\n self.assertEqual(aln.omitGapCols(0.7, row_constructor=''.join), \\\n {'a':'-abc', 'b':'cba-', 'c':'-def'})\n #check that it doesn't fail on an empty alignment\n self.assertEqual(self.empty.omitGapCols(), {})\n\n #second, need to check behavior when the naughty rows should be\n #deleted as well.\n\n #default should strip out cols that are 100% gaps\n self.assertEqual(aln.omitGapCols(row_constructor=''.join, \\\n del_rows=True), {'a':'-abc', 'b':'cba-', 'c':'-def'})\n #if allowed_gap_frac is 1, shouldn't delete anything\n self.assertEqual(aln.omitGapCols(1, row_constructor=''.join, \\\n del_rows=True), {'a':'--a-bc-', 'b':'-cb-a--', 'c':'--d-ef-'})\n #if allowed_gap_frac is 0, should strip out any cols containing gaps\n self.assertEqual(aln.omitGapCols(0, row_constructor=''.join, \\\n del_rows=True), {}) #everything has at least one naughty non-gap\n #intermediate numbers should work as expected\n self.assertEqual(aln.omitGapCols(0.4, row_constructor=''.join,\n del_rows=True), {'a':'abc', 'c':'def'}) #b has a naughty non-gap\n #check that does not delete b if allowed_frac_bad_calls higher than 0.14\n self.assertEqual(aln.omitGapCols(0.4, row_constructor=''.join,\n del_rows=True, allowed_frac_bad_cols=0.2), \\\n {'a':'abc', 'b':'ba-','c':'def'})\n self.assertEqual(aln.omitGapCols(0.4, row_constructor=''.join,\n del_rows=True), {'a':'abc', 'c':'def'}) #b has a naughty non-gap\n \n self.assertEqual(aln.omitGapCols(0.7, row_constructor=''.join,\n del_rows=True), {'a':'-abc', 'b':'cba-', 'c':'-def'}) #all ok\n #check that it doesn't fail on an empty alignment\n self.assertEqual(self.empty.omitGapCols(del_rows=True), {})\n\n #when we increase the number of sequences to 6, more differences\n #start to appear.\n aln['d'] = '-------'\n aln['e'] = 'xyzxyzx'\n aln['f'] = 'ab-cdef'\n #if no gaps are allowed, everything is deleted...\n self.assertEqual(aln.omitGapCols(0, del_rows=False), \\\n {'a':[], 'b':[], 'c':[], 'd':[], 'e':[], 'f':[]})\n #...though not a sequence that's all gaps, since it has no positions\n #that are not gaps. 
This 'feature' should possibly be considered a bug.\n self.assertEqual(aln.omitGapCols(0, del_rows=True), {'d':[]})\n #if we're deleting only full columns of gaps, del_rows does nothing.\n self.assertEqual(aln.omitGapCols(del_rows=True, \\\n row_constructor=''.join), aln)\n #at 50%, should delete a bunch of minority sequences\n self.assertEqual(aln.omitGapCols(0.5, del_rows=True, \\\n row_constructor=''.join), \\\n {'a':'-abc','b':'cba-','c':'-def','d':'----'})\n #shouldn't depend on order of rows\n aln.RowOrder = 'fadbec'\n self.assertEqual(aln.omitGapCols(0.5, del_rows=True, \\\n row_constructor=''.join), \\\n {'a':'-abc','b':'cba-','c':'-def','d':'----'})", "def rename_duplicated_columns(self):\n duplicated_columns = [\n col\n for col in self.catalog_columns\n if list(self.catalog_columns).count(col) > 1\n ]\n ran = 0\n for col in duplicated_columns:\n for idx in range(ran, len(self.catalog_columns)):\n if self.catalog_columns[idx] == col:\n self.catalog_columns[idx] = f\"{col}_{self.column_units[idx]}\"\n ran = idx\n break\n return self.catalog_columns", "def cleaning_columns_white_space(self, df):\n return df.rename(columns=lambda x: self.cleaning_some_white_space(x))", "def clear_over_correlated_columns(self):\n removed_cols = []\n corr_list = []\n col_list = list(combinations(self.cont_cols,2))\n for col1,col2 in col_list:\n print(f\"OVER CORR TEST FOR {col1} {col2}\")\n corr_list.append(self.df[col1].corr(self.df[col2]))\n for corr, cols in zip(corr_list, col_list):\n if cols[0] in removed_cols:\n continue\n if corr > .9 :\n removed_cols.append(cols[0])\n self.dropped_cols_stats.update({cols[0]:1})\n self.df.drop(columns = removed_cols, inplace=True)\n [self.cont_cols.remove(item) for item in removed_cols]", "def pad_time_series_columns(self, columns, data_frame):\n old_columns = data_frame.columns\n\n common_columns = [val for val in columns if val in old_columns]\n uncommon_columns = [val for val in columns if val not in old_columns]\n\n data_frame = data_frame[common_columns]\n\n for x in uncommon_columns: data_frame[x] = np.nan\n\n return data_frame", "def _check_duplicated_columns(prep_cols, sample_cols):\n prep_cols.extend(sample_cols)\n dups = set(duplicates(prep_cols))\n if dups:\n raise qdb.exceptions.QiitaDBColumnError(\n 'Duplicated column names in the sample and prep info '\n 'files: %s. 
You need to delete that duplicated field' %\n ','.join(dups))", "def drop_constant_columns(dataframe):\n keep_columns = [col for col in dataframe.columns if len(dataframe[col].unique()) > 1]\n return dataframe[keep_columns].copy()", "def remove_initial_space(df):\r\n\r\n column_name = []\r\n for i in range(len(df.columns)):\r\n column_name.append(\"\")\r\n if i == 0:\r\n column_name[i] = df.columns[i]\r\n else:\r\n column_name[i] = df.columns[i][1:]\r\n df.columns = column_name\r\n\r\n return df", "def filter_measurement_columns(self, columns):\n columns = [\n x\n for x in columns\n if not self.ignore_feature(x[0], x[1], True, wanttime=True)\n ]\n\n #\n # put Image ahead of any other object\n # put Number_ObjectNumber ahead of any other column\n #\n def cmpfn(x, y):\n if x[0] != y[0]:\n if x[0] == \"Image\":\n return -1\n elif y[0] == \"Image\":\n return 1\n else:\n return cellprofiler_core.utilities.legacy.cmp(x[0], y[0])\n if x[1] == M_NUMBER_OBJECT_NUMBER:\n return -1\n if y[1] == M_NUMBER_OBJECT_NUMBER:\n return 1\n return cellprofiler_core.utilities.legacy.cmp(x[1], y[1])\n\n columns = sorted(columns, key=functools.cmp_to_key(cmpfn))\n #\n # Remove all but the last duplicate\n #\n duplicate = [\n c0[0] == c1[0] and c0[1] == c1[1]\n for c0, c1 in zip(columns[:-1], columns[1:])\n ] + [False]\n columns = [x for x, y in zip(columns, duplicate) if not y]\n return columns", "def drop_unnecessaries(df):\n df_copy = df.copy()\n df_pruned = df_copy[df_copy['is_closed']==False]\n df_pruned.drop(columns=['_id', 'categories', 'coordinates', 'display_phone', 'is_closed', 'phone', 'price', 'name', 'display_phone', 'distance', 'lats', 'longs'], inplace=True)\n return df_pruned" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the definition for a code. Maybe add option to pass dd_name with a lookup from settings as a convenience.
def get_definition(code, dd_path=None, style=None): regex = make_regex(style) def dropper(line): maybe_match = regex.match(line) try: line_code = maybe_match.groups()[0] return line_code except AttributeError: return None def get_def(dd): """ Next of dd is the line you start with. Now consume up to next match. """ top_line = [''.join(list(next(dd)))] rest = it.takewhile(lambda x: regex.match(x) is None, dd) rest = [''.join(x) for x in rest] top_line.append(rest) return top_line with open(dd_path) as dd: gen = it.dropwhile(lambda x: dropper(x) != code, dd) definition = get_def(gen) return definition
[ "def get_abbrev(self, code):\r\n return AbbrevDecl(code, self._abbrev_map[code])", "def definition(self):\n return self._bound_context.get_function_def(self.name)", "def get_definition(self) -> LabwareDefinitionDict:\n return cast(LabwareDefinitionDict, self._definition.dict(exclude_none=True))", "def findDefinition(self, identifier):\n while self.hasAlias(identifier):\n identifier = str(self.getAlias(identifier));\n\n return self.getDefinition(identifier);", "def by_code_or_alias_or_none(self, code):\r\n try:\r\n return self.by_code_or_alias(code)\r\n except Language.DoesNotExist:\r\n return None", "def get_script_definition(id):\n\tfor cardset in CARD_SETS:\n\t\tmodule = import_module(\"fireplace.cards.%s\" % (cardset))\n\t\tif hasattr(module, id):\n\t\t\treturn getattr(module, id)", "def get_wrapper_by_name(self, name):\n return self.codegens[name]", "def get_dewar_by_facilitycode(fc):\n result = None\n\n # Facility codes are reused, so we want the most recent version \n # We work that out based on the newest (highest) dewarId\n # Could also specify that its a dewar on site, at-facility perhaps?\n d = Dewar.query.filter_by(FACILITYCODE = fc).order_by(desc(Dewar.dewarId)).first()\n\n if d:\n result = {'barcode': d.barCode, 'storageLocation': d.storageLocation}\n else:\n logging.getLogger('ispyb-logistics').warn(\"Could not find dewar with FacilityCode {}\".format(fc))\n\n return result", "def get(cls, code):\n try:\n return cls.lookupTable[code]\n except KeyError:\n return ApiFrameType.UNKNOWN", "def get_dptype(\n self, dpcode: DPCode | None, prefer_function: bool = False\n ) -> DPType | None:\n if dpcode is None:\n return None\n\n order = [\"status_range\", \"function\"]\n if prefer_function:\n order = [\"function\", \"status_range\"]\n for key in order:\n if dpcode in getattr(self.device, key):\n return DPType(getattr(self.device, key)[dpcode].type)\n\n return None", "def _getname(self, decl):\n return decl", "def get_dsc(self):\n for item in self.get_files():\n if item.endswith('.dsc'):\n return item\n\n return None", "def find_definition(project, code, offset, resource=None, maxfixes=1):\n fixer = fixsyntax.FixSyntax(project, code, resource, maxfixes)\n pyname = fixer.pyname_at(offset)\n if pyname is not None:\n module, lineno = pyname.get_definition_location()\n name = rope.base.worder.Worder(code).get_word_at(offset)\n if lineno is not None:\n start = module.lines.get_line_start(lineno)\n\n def check_offset(occurrence):\n if occurrence.offset < start:\n return False\n pyname_filter = occurrences.PyNameFilter(pyname)\n finder = occurrences.Finder(project, name,\n [check_offset, pyname_filter])\n for occurrence in finder.find_occurrences(pymodule=module):\n return Location(occurrence)", "def get_type_by_code(code):\n\n for asset_type in ASSET_TYPES:\n if asset_type.code == code:\n return asset_type\n\n return NONE_TYPE", "def code_example(self):\n if self._code_example is not None:\n return self._code_example\n return getattr(self.nb.metadata, 'code_example', None)", "def from_doecode(klass, record):\n if not isinstance(record, dict):\n raise TypeError('`record` must be a dict')\n\n project = klass()\n\n # -- REQUIRED FIELDS --\n\n project['name'] = record['software_title']\n logger.debug('DOE CODE: software_title=\"%s\"', record['software_title'])\n\n link = record.get('repository_link', '')\n if not link:\n link = record.get('landing_page')\n logger.warning('DOE CODE: No repositoryURL, using landing_page: %s', link)\n\n project['repositoryURL'] = link\n\n project['description'] = 
record['description']\n\n licenses = set(record['licenses'])\n licenses.discard(None)\n logger.debug('DOE CODE: licenses=%s', licenses)\n\n license_objects = []\n if 'Other' in licenses:\n licenses.remove('Other')\n license_objects = [{\n 'name': 'Other',\n 'URL': record['proprietary_url']\n }]\n\n if licenses:\n license_objects.extend([_license_obj(license) for license in licenses])\n\n project['permissions']['licenses'] = license_objects\n\n if record['open_source']:\n usage_type = 'openSource'\n else:\n usage_type = 'exemptByLaw'\n project['permissions']['exemptionText'] = 'This source code is restricted by patent and / or intellectual property law.'\n\n project['permissions']['usageType'] = usage_type\n\n # TODO: Compute from git repo\n project['laborHours'] = 0\n\n project['tags'] = ['DOE CODE']\n lab_name = record.get('lab_display_name')\n if lab_name is not None:\n project['tags'].append(lab_name)\n\n project['contact']['email'] = record['owner']\n # project['contact']['URL'] = ''\n # project['contact']['name'] = ''\n # project['contact']['phone'] = ''\n\n # -- OPTIONAL FIELDS --\n\n if 'version_number' in record and record['version_number']:\n project['version'] = record['version_number']\n\n if lab_name is not None:\n project['organization'] = lab_name\n\n # Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370\n status = record.get('ever_announced')\n if status is None:\n raise ValueError('DOE CODE: Unable to determine \"ever_announced\" value!')\n elif status:\n status = 'Production'\n else:\n status = 'Development'\n\n project['status'] = status\n\n vcs = None\n link = project['repositoryURL']\n if 'github.com' in link:\n vcs = 'git'\n if vcs is None:\n logger.debug('DOE CODE: Unable to determine vcs for: name=\"%s\", repositoryURL=%s', project['name'], link)\n vcs = ''\n if vcs:\n project['vcs'] = vcs\n\n url = record.get('landing_page', '')\n if url:\n project['homepageURL'] = url\n\n # record['downloadURL'] = ''\n\n # self['disclaimerText'] = ''\n\n # self['disclaimerURL'] = ''\n\n if 'programming_languages' in record:\n project['languages'] = record['programming_languages']\n\n # self['partners'] = []\n # TODO: Look into using record['contributing_organizations']\n\n # self['relatedCode'] = []\n\n # self['reusedCode'] = []\n\n # date: [object] A date object describing the release.\n # created: [string] The date the release was originally created, in YYYY-MM-DD or ISO 8601 format.\n # lastModified: [string] The date the release was modified, in YYYY-MM-DD or ISO 8601 format.\n # metadataLastUpdated: [string] The date the metadata of the release was last updated, in YYYY-MM-DD or ISO 8601 format.\n project['date'] = {\n 'created': record['date_record_added'],\n # 'lastModified': '',\n 'metadataLastUpdated': record['date_record_updated']\n }\n\n return project", "def getDemandByName(self,name):\n return self.__l_dict[name]['d']", "def get_code(self, key):\n if key in self._code:\n return self._code[key]\n else:\n return None", "def icd_to_root(code: str) -> Optional[str]:\n node = tree.find(code)\n if node:\n parents = node.parents\n if parents and len(parents) > 2:\n root = parents[1].code\n if root[0] == \"V\":\n return V_CODE\n return root\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select only those columns specified under settings. Optionally
def get_subset(df, settings, dd_name, quiet=False): cols = {x for x in flatten(settings["dd_to_vars"][dd_name].values())} good_cols = {x for x in flatten(settings["dd_to_vars"]["jan2013"].values())} all_cols = cols.union(good_cols) subset = df.columns.intersection(all_cols) if not quiet: print("Implicitly dropping {}".format(cols.symmetric_difference(subset))) return df[subset]
[ "def selectColumns(df, constraint):\n columns = [c for c in df.columns if constraint(c)]\n return df[columns].copy()", "def select_cols(df,list_col):\n df = df[list_col] ##loading data using read_csv from pandas\n return df #returning the data structure ", "def test_select_column_step_must_subset_columns_using_column_names(data):\n step = SelectColumnsStep(columns=['year', 'seasons'])\n baked_df = step.prepare(data).bake(data)\n\n assert len(baked_df.columns) == 2\n assert 'year' in baked_df.columns\n assert 'seasons' in baked_df.columns", "def selectedColumns(self, int_row=0): # real signature unknown; restored from __doc__\r\n pass", "def test_select_column_step_must_allow_selectors_and_column_names(data):\n step = SelectColumnsStep(columns=[AllMatching('air'), 'seasons'])\n baked_df = step.prepare(data).bake(data)\n\n assert len(baked_df.columns) == 2\n assert 'aired' in baked_df.columns\n assert 'seasons' in baked_df.columns", "def test_select_column_step_must_allow_one_column_name(data):\n step = SelectColumnsStep(columns='seasons')\n baked_df = step.prepare(data).bake(data)\n\n assert len(baked_df.columns) == 1\n assert 'seasons' in baked_df.columns", "def slice(self, columns):\n pass", "def setup_column_prefs( self ):\n\n\t\tpass", "def select(df, *args):\n df = df[list(args)]\n return df", "def columns_to_ignore(self) -> list:\n pass", "def retrieve(self, table, cols, col_rules):\n # todo: add string comp support\n cursor = self.conn.cursor()\n num_cols = len(col_rules)\n\n # from the table select all the columns to filter for\n sql_cmd = \"select \" + \", \".join([key for key in col_rules]) + \" from \\\"\" + table + \"\\\"\"\n cursor.execute(sql_cmd)\n filter_sets = cursor.fetchall()\n\n # repeat every argument number of times it appears in the selection\n mult = [len(re.findall(\"{}\", col_rules[key])) for key in col_rules]\n\n def _repeat_vals(vals, repeats):\n rep_vals = []\n [[rep_vals.append(vals[i]) for _ in range(repeats[i])] for i in range(num_cols)]\n return rep_vals\n\n filter_sets = [_repeat_vals(set, mult) for set in filter_sets]\n\n # evaluate every row to get a boolean mask of examples\n rule_tmp = \"(\" + \") and (\".join([col_rules[key] for key in col_rules]) + \")\"\n sel_mask = [eval(rule_tmp.format(*val_set)) for val_set in filter_sets]\n\n # from the table get all the columns to retrieve\n sql_cmd = \"select \" + \" ,\".join(cols) + \" from \\\"\" + table + \"\\\"\"\n cursor.execute(sql_cmd)\n sel_sets = cursor.fetchall()\n\n # apply a boolean mask to take only entries that fit the selection rule\n sel_sets = list(compress(sel_sets, sel_mask))\n sel_vals = [list(x) for x in zip(*sel_sets)]\n return sel_vals", "def filter_unused_columns(df):\r\n df = df[['TIME', 'ACTC', 'RPM', 'CHKP', 'SPPA', 'HKLD', 'ROP', 'SWOB', 'TQA', 'MWTI',\r\n 'TVCA', 'TFLO', 'MDOA', 'CPPA', 'CFIA', 'nameWellbore', 'DMEA']]\r\n return df", "def _select_fields(self):\n return []", "def columns(self, column_list: list[str], documents: list[Document], ):\n filtered_list = []\n for document in documents:\n filtered_dict = {k: v for k, v in document.items() if k in column_list}\n filtered_list.append(Document(filtered_dict, document.doc_id))\n return filtered_list", "def apply_select(where_table, column_list):\n output_table = {}\n\n #It doesn't need to alter the table at all if all columns are\n #selected (denoted by '*')\n if '*' in column_list:\n return where_table\n else:\n #Populates a new table with the selected tables.\n for column in column_list:\n output_table[column] = 
where_table[column]\n return output_table", "def list_columns(self, series=\"/.*/\"):\n res = self.client.query(\"SELECT * FROM \\\"{0}\\\" LIMIT 1\".format(series))\n for series in res:\n del series['points']\n series['columns'].remove('time')\n series['columns'].remove('sequence_number')\n\n return res", "def exclude_columns(self, exclude_keys):\n result = type(self)()\n for key, value in self._columns.items():\n if key not in exclude_keys:\n result[key] = value\n return result", "def columns(self):\n\t\treturn super().columns+[\"subsystems\"]", "def _subset_columns(\n self,\n adata: AnnData,\n n_cols: int,\n from_obs: bool = True,\n from_var: bool = True,\n ):\n\n columns = []\n if from_obs:\n columns += list(adata.obs.columns)\n if from_var:\n columns += list(adata.var.columns)\n return random.sample(columns, min(len(columns), n_cols))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rename cols in df according to the spec in settings for that year.
def standardize_cols(df, dd_name, settings): renamer = settings["col_rename_by_dd"][dd_name] df = df.rename(columns=renamer) common = {"PRTAGE", "HRMIS", "HRYEAR4", "PESEX", "HRMONTH", "PTDTRACE", "PEMLR", "PRERNWA", "PTWK", "PEMARITL", "PRDISC", "HEFAMINC", "PTDTRACE", "HWHHWGT", "PEERNHRY", "HRMIS"} cols = set(df.columns.tolist()) extra = cols - common missing = common - cols if missing: name = str(df.HRYEAR4.iloc[0]) + str(df.HRMONTH.iloc[0]) key = ' '.join([str(arrow.utcnow()), name, 'missing']) d = {key: list(missing)} with open('make_hdf_store_log.json', 'a') as f: json.dump(d, f, indent=2) if extra: name = str(df.HRYEAR4.iloc[0]) + str(df.HRMONTH.iloc[0]) key = ' '.join([str(arrow.utcnow()), name, 'extra']) d = {key: list(extra)} with open('make_hdf_store_log.json', 'a') as f: json.dump(d, f, indent=2) return df
[ "def __rename_used_cols(df, train_cols: list):\n return df.rename(columns={col: C.USED_COL_FORMAT.format(col) for col in train_cols})", "def rename_columns(df, pheno, pop):\n\n if \"LOG(OR)_SE\" in df.columns:\n df.rename(columns={\"LOG(OR)_SE\": \"SE\"}, inplace=True)\n columns_to_rename = [\"BETA\", \"SE\", \"P\"]\n renamed_columns = [(x + \"_\" + pheno + \"_\" + pop) for x in columns_to_rename]\n df.rename(columns=dict(zip(columns_to_rename, renamed_columns)), inplace=True)\n return df", "def rename_cols_with_feat_name_prefix(feat_code, colnames, df, idxcol=\"SK_ID_CURR\"):\n df.set_index(idxcol, drop=True, inplace=True)\n\n # FEAT_CODE = \"CCB\"\n rename_cols = {}\n for colname in colnames:\n rename_cols[colname] = \"{}_{}\".format(feat_code, colname)\n\n df.rename(index=int, inplace=True, columns=rename_cols)\n\n df.reset_index(inplace=True)\n\n return list(rename_cols.values())", "def rename_columns(self):\r\n self.columns = [self._date, self._net_purchase, self._gross_sale, self._tax, self._margin]\r\n self.all_data.columns = self.columns", "def test_load_soruces_renamed_columns():\n filename = 'tests/test_files/1904_comp_renamed_cols.fits'\n colnames = {'ra_col': 'RAJ2000',\n 'dec_col': 'DEJ2000',\n 'peak_col': 'S',\n 'a_col': 'bmaj',\n 'b_col': 'bmin',\n 'pa_col': 'bpa'}\n cat = ar.load_sources(filename, **colnames)\n if cat is None:\n raise AssertionError(\"load_sources failed with renamed columns\")\n return", "def rename_columns(df_data, new_col):\n df_data.rename(columns=new_col, inplace=True)", "def indexing_column_year(list_):\n for i in list_:\n i.index = i['Year']", "def rename(df, **kwargs):\n for name, value in kwargs.items():\n df = df.rename(columns={'%s' % name: '%s' % value})\n return df", "def __rename_col(self,df): \r\n df.rename(columns={'id':'movie_id'}, inplace=True)", "def name_columns_rolling(self, df, format_string, func=lambda i: i):\n cols= []\n \n for i, col in enumerate(df.columns):\n col = format_string.format(i=func(i))\n cols.append(col)\n\n # Rename the columns\n df.columns = cols\n \n return df", "def year_id_switcher(df):\n df_cols = df.columns\n good_years = ['year_start', 'year_end', 'year_id']\n \n year_cols = [y for y in df_cols if y in good_years]\n \n\n if 'year_id' in year_cols:\n drop_cols = [y for y in year_cols if y != 'year_id']\n df.drop(drop_cols, axis=1, inplace=True)\n return df\n\n if not year_cols:\n print(\"We can't recognize any potential year columns in the data, possible types are {}\".\\\n format(good_years))\n return df\n\n \n \n year_cols_present = set(year_cols).intersection({'year_start', 'year_end'})\n if not set(year_cols).symmetric_difference({'year_start', 'year_end'}):\n if (df['year_end'] != df['year_start']).all():\n print(\"Start and end values do not match. The data is aggregated in some way, \"\\\n \"switch failed..\")\n return df\n else: \n df['year_id'] = df['year_start']\n else:\n df['year_id'] = df[list(year_cols_present)]\n df.drop(year_cols, axis=1, inplace=True)\n\n return df", "def rename_columns(df):\n df = df.rename(columns={'Sample Name':'Sample','Gene Name': 'Target', 'Condition Name': 'Treatment'})\n return df", "def rename_field(dataframes, original, new):\n for df in dataframes:\n if original in df:\n df.rename(columns={original: new}, inplace=True)\n return dataframes", "def format_ukhls_columns(year):\n # TODO probably worth splitting these by dataset source. 
indresp/hhresp etc.\n # Converted these into one dict because its annoying to edit two data frames.\n attribute_dict = {'birthy': \"birth_year\", # birth year.\n 'cduse5': 'fridge_freezer', # has fridge\n 'cduse6': 'washing_machine', # has washing machine\n 'cduse7': 'tumble_dryer', # has tumble dryer\n 'cduse8': 'dishwasher', # has dishwasher\n 'cduse9': 'microwave', # has microwave\n 'crburg': 'burglaries', # neighbourhood burglaries\n 'crcar': 'car_crime', # neighbourhood car crime\n 'crdrnk': 'drunks', # neighbourhood drunks\n 'crmugg': 'muggings', # neighbourhood muggings\n 'crrace': 'racial_abuse', # neighbourhood racial abuse\n 'crteen': 'teenagers', # neighbourhood teenager issues\n 'crvand': 'vandalism', # neighbourhood vandalism issues\n 'ctband_dv': 'council_tax', # council tax derived.\n 'dvage': 'age', # age derived.\n 'fihhmnnet1_dv': 'hh_netinc', # household net income derived\n 'gor_dv': 'region', # government region\n 'hheat': 'heating', # household heating\n 'hidp': 'hidp', # household id\n 'ieqmoecd_dv': 'oecd_equiv', # Modified OECD equivalence scale\n 'intdatem': 'hh_int_m', # household interview month\n 'intdatey': 'hh_int_y', # household interview year\n 'jbbgm': 'job_duration_m', # what month started job.\n 'jbbgy': 'job_duration_y', # what year started job\n 'jbft_dv': 'emp_type', # part or full time employment\n 'jbnssec8_dv': 'job_sec', # job nssec code\n 'jbsic07_cc': 'job_industry', # Standard Industry SIC 2007 codes.\n # Note SIC/SOC are updated every decade but have been consistently mapped for all 13 waves.\n 'jbsoc10_cc': 'job_occupation', # Standard Occupation SOC 2010 codes.\n 'jbstat': 'labour_state', # labour state\n 'smoker': 'smoker',\n 'ncigs': 'ncigs', # typical daily cigarettes smoked.\n # TODO no ncigs data for waves 1, 3, 4. There is 'smofrq' variable for 3 and 4 but uses binned ordinal values.\n # not really applicable without random generation.\n 'pidp': 'pidp', # personal identifier\n 'qfhigh_dv': 'education_state', # highest education state\n 'nqfhigh_dv': 'newest_education_state', # has any new qualification been achieved.\n 'racel_dv': 'ethnicity', # ethnicity derived.\n 'rentgrs_dv': 'hh_rent', # household monthly rent.\n #'scghqi': 'depression_change', # depression change GHQ.\n 'sclonely': 'loneliness', # is lonely.\n # sclonely only available in waves 9-11. scsf7 may be a good substitute.\n 'sex': 'sex', # biological sex.\n 'sf12mcs_dv': 'SF_12', # SF12 mental component summary\n 'sf12pcs_dv': 'SF_12p', # SF12 physical component summary\n 'smoker': 'smoker', # Currently smokes.\n # TODO waves present roughly matches ncigs. no data for waves 1-5.\n # for waves 2 and 5 similar variable 'smnow' could be used.\n 'xpmg_dv': 'hh_mortgage', # household monthly mortgage payments.\n 'xpaltob_g3': \"alcohol_spending\", # monthly household spending on alcohol.\n 'indscub_xw': \"weight\", # TESTING: Cross-sectional analysis weight (waves 2-11)\n 'nkids_dv': 'nkids', # number of children\n 'ypdklm': 'ndrinks', # last month number of drinks. 
audit scores probably better.\n 'xpelecy': 'yearly_electric', # yearly electricty expenditure\n 'xpgasy': 'yearly_gas', # yearly gas expenditure\n 'xpduely': 'yearly_gas_electric', # yearly both expenditure.\n 'xpoily': 'yearly_oil', # yearly oil expenditure.\n 'xpsfly': 'yearly_other_fuel', # yearly other fuel (wood?)\n 'fuelhave1': 'has_electric', # spends money on electrictiy\n 'fuelhave2': 'has_gas', # spends money on gas\n 'fuelhave3': 'has_oil', # spends money on oil\n 'fuelhave4': 'has_other', # has some other fuel source.\n 'fuelhave5': 'has_none', # has no fuel source.\n 'fuelduel': 'gas_electric_combined', # are gas and electric bills separate or combined?\n # Nutrition vars\n 'wkfruit': 'fruit_days', # number of days respondent eats fruit per week\n 'fruitamt':'fruit_per_day', # amount of fruit eaten on days when eating fruit\n 'wkvege': 'veg_days', # no. days respondent eats veg per week\n 'vegeamt': 'veg_per_day', # amt. veg eaten on veg eating days\n # hourly wage stuff (Keeping self-employed and small business vars just in case)\n 'basrate': 'hourly_rate', # basic pay hourly rate\n 'paygu_dv': 'gross_paypm', # usual gross pay per month: current job\n 'jspayg': 'gross_pay_se', # Monthly self-employed gross pay\n 'jbhrs': 'job_hours', # no. of hours normally worked in a week\n 'jshrs': 'job_hours_se', # s/emp: hours normally worked in a week\n 'jspayu': 'job_inc', # average income from job/business\n 'jspayw': 'jb_inc_per', # job/business income: pay period (weeks)\n # Private/Public sector var for living wage intervention\n 'jbsect': 'job_sector', # Whether employee of private or non-private organisation\n # SF12 MICE vars\n 'rentinc2': 'energy_in_rent', # is it combined into rent?\n 'xphsdba': 'behind_on_bills', # behind on energy bills?\n 'finnow': 'financial_situation', # financial situation\n 'finfut': 'future_financial_situation', # expected near future financial situation.\n 'lkmove': \"likely_move\", # likelihood of moving house\n 'scghqi': 'ghq_depression', # ghq depression\n 'scghql': 'ghq_happiness', # ghq general happiness\n # 'sf1': 'sf1', # sf1 score\n 'hcondn17': 'clinical_depression', # has clinical depression.\n 'scsf1': 'scsf1', # sf1 score including proxy surveys\n 'scsf2a': 'phealth_limits_modact', # physical health limits moderate activities\n 'scsf2b': 'phealth_limits_stairs', # physical health limits several flights of stairs\n 'scsf3a': 'phealth_limits_work', # physical health limits work.\n 'scsf3b': 'phealth_limits_work_type', # physical health limits kind of work\n 'scsf4a': 'mhealth_limits_work', # mental health limits work.\n 'scsf5': 'pain_interfere_work', # pain interfered with work\n 'scsf7': 'health_limits_social', # health limits social life.\n 'hhtype_dv': 'hh_composition', # household composition\n 'mastat_dv': 'marstat' # marital status\n ''\n }\n\n # Some variables change names halfway through UKHLS.\n # Assign different keys to variable names depending on year.\n\n # clinical depression changes in wave 10.\n if year < 2017:\n attribute_dict[\"hcond17\"] = \"depression\"\n else:\n attribute_dict[\"hcondcode38\"] = \"depression\"\n\n # All attributes have a wave dependent suffix apart from identifiersb (pidp, hidp etc.).\n # Adjust attribute_columns as necessary.\n # E.g age -> a_age, age -> b_age ... 
for waves of ukhls.\n attribute_columns = list(attribute_dict.keys())\n attribute_columns = US_utils.wave_prefix(attribute_columns, year)\n\n # Attribute names are consistent over all waves.\n # Future work may give these prefixes as well for flat (opposed to tall) data structure.\n column_names = list(attribute_dict.values())\n return attribute_columns, column_names", "def reindex_year(df):\n dfs = []\n colyearmap = dates.find_year(df)\n for colname in df.columns:\n dfs.append(dask.delayed(_reindex_col(df, colname, colyearmap)))\n\n dfs = dask.compute(*dfs)\n dfs = [x for x in dfs if x is not None]\n # merge all series into one dataframe, concat doesn't quite do the job\n res = reduce(\n lambda left, right: pd.merge(\n left, right, left_index=True, right_index=True, how=\"outer\"\n ),\n dfs,\n )\n res = res.dropna(how=\"all\") # drop uneeded columns out into future\n res = pandasutil.fillna_downbet(\n res\n ) # use this as above ffills incorrectly at end of timeseries\n\n return res", "def rename_columns(self, mapping):\n for k in list(mapping.keys()):\n if k != mapping[k]:\n self.df[k] = self.df[mapping[k]]\n self.df[mapping[k]] = None", "def get_varscan_names(df, pooldir):\n print('renaming varscan columns ...')\n # get order of samps used to create varscan cmds (same order as datatable)\n pool = op.basename(pooldir)\n samps = pklload(op.join(op.dirname(pooldir), 'poolsamps.pkl'))[pool]\n # create a list of names that varscan gives by default\n generic = ['Sample%s' % (i+1) for i in range(len(samps))]\n # create a map between generic and true samp names\n dic = dict((gen, samp) for (gen, samp) in zip(generic, samps))\n # rename the columns in df\n cols = []\n for col in df:\n if '.' in col:\n gen, rest = col.split(\".\")\n samp = dic[gen]\n col = '.'.join([samp, rest])\n cols.append(col)\n df.columns = cols\n return df", "def change_cols(data, params):\n # Drop bad columns first\n good_cols = [col for col,val in params.items() if (val and col in data.columns)]\n new_data = data[good_cols].copy()\n \n # Re-map column names\n col_mapper = {col: new_col for col,new_col in params.items() if isinstance(new_col, str)}\n new_data.rename(columns=col_mapper, inplace=True)\n \n return new_data", "def rename_column(self):\n\n self.stats_df.rename(columns={'site_id': 'Site ID', 'facility_name': 'Site Name'}, inplace=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For 89 and 92; "AdHRS1", "AdHRS2" combine to form "PEHRACTT"
def combine_hours(df, dd_name): fst = df['AdHRS1'] snd = df['AdHRS2'] df['PEHRACTT'] = fst * 10 + snd df = df.drop(["AdHRS1", "AdHRS2"], axis=1) return df
[ "def build_term_code(year_semester: str, abbr: str) -> str:\n if abbr != \"GV\" and abbr != \"QT\":\n return year_semester + \"1\"\n else:\n if abbr == \"GV\":\n return year_semester + \"2\"\n else:\n return year_semester + \"3\"", "def correct_name(hour):\n hour_str = '0123456789abcdefghijklmnop';\n correspondance = {str(xx):hour_str[xx] for xx in range(10,24)}\n for xx in range(0,10): correspondance['0'+str(xx)]=str(xx)\n return correspondance[hour[0:2]]+hour[2:4]", "def candidate_name(ra, dec):\n # RS 2014/02/05: Fiddled with it to get all the zeroes right.\n # ephem doesn't give the most standardized output by default. :/\n coo = ephem.Equatorial(ra*ephem.pi/180.0, abs(dec*ephem.pi/180.0))\n ra_str = re.sub('[:.]','',str(coo.ra))\n dec_str = re.sub('[:.]','',str(coo.dec))\n if ra < 150.0: ra_str = \"0\" + ra_str\n if abs(dec) < 10.0: dec_str = \"0\" + dec_str\n if dec >= 0.0: dec_str = \"+\" + dec_str\n if dec < 0.0: dec_str = \"-\" + dec_str\n return \"SMTJ{0}{1}\".format(ra_str,dec_str)", "def build_anthology_id(collection_id, volume_id, paper_id):\n if collection_id.startswith('W') or collection_id == 'C69':\n return '{}-{:02d}{:02d}'.format(collection_id, int(volume_id), int(paper_id))\n else:\n return '{}-{:02d}{:02d}'.format(collection_id, int(volume_id), int(paper_id))", "def get_correct_pdb_number_string(self, resnum):\n pdb_num = self.reference_pose.pdb_info().pose2pdb(resnum)\n SP = pdb_num.split()\n pdb_num_string = SP[0]+SP[1]; #Now it it resnumchain like 10A 11B etc.\n return pdb_num_string", "def get_fmt_str(direction):\n if direction == 'costh':\n return r'{0:.3f}--{1:.3f} & {2:.4f} & {3:.4f} & ${4}$ \\\\'\n return r'{0:.0f}--{1:.0f} & {2:.2f} & {3:.4f} & ${4}$ \\\\'", "def part_hair_flat():\n a1 = r\"012345678901234567\"\n a2 = r\" TTTTTTTTTTTTTTTT \"\n return a2", "def _season_code(season):\n season = str(season)\n pat1 = re.compile(r'^[0-9]{4}$') # 1994 | 9495\n pat2 = re.compile(r'^[0-9]{2}$') # 94\n pat3 = re.compile(r'^[0-9]{4}-[0-9]{4}$') # 1994-1995\n pat4 = re.compile(r'^[0-9]{4}-[0-9]{2}$') # 1994-95\n pat5 = re.compile(r'^[0-9]{2}-[0-9]{2}$') # 94-95\n\n if re.match(pat1, season):\n if int(season[2:]) == int(season[:2]) + 1:\n if season == '1920' or season == '2021':\n msg = ('Season id \"{}\" is ambiguous: interpreting as \"{}-{}\"'\n .format(season, season[:2], season[-2:]))\n warnings.warn(msg)\n return season # 9495\n elif season[2:] == '99':\n return ''.join([season[2:], '00']) # 1999\n else:\n return ''.join([season[-2:], '{:02d}'.format(int(season[-2:]) + 1)]) # 1994\n elif re.match(pat2, season):\n if season == '99':\n return ''.join([season, '00']) # 99\n else:\n return ''.join([season, '{:02d}'.format(int(season) + 1)]) # 94\n elif re.match(pat3, season):\n return ''.join([season[2:4], season[-2:]]) # 1994-1995\n elif re.match(pat4, season):\n return ''.join([season[2:4], season[-2:]]) # 1994-95\n elif re.match(pat5, season):\n return ''.join([season[:2], season[-2:]]) # 94-95\n else:\n return season", "def coding_strand_to_AA_unit_tests():\n \n sequence='AGTCTTGAT'\n print 'input:'+str(sequence)+', expected output: '+'SLD'\n print 'actual output:'+str(coding_strand_to_AA(sequence))\n sequence='ATGCCCGCTTT'\n print 'input:'+str(sequence)+', expected output: '+'MPA'\n print 'actual output:'+str(coding_strand_to_AA(sequence))\n sequence='CCGCGTTCA'\n print 'input:'+str(sequence)+', expected output: '+'PRS'\n print 'actual output:'+str(coding_strand_to_AA(sequence))", "def build_conflicting_scores_string(conflicting_scores):\n string_builder = \"\"\n 
for i in conflicting_scores:\n string_builder += f\"\\n\\\"{i[0]}\\\" match score: {i[1]}\"\n\n return string_builder", "def ConvertCnsProtonNames(residueName, atomName):\n #I. get a clean three-letter code and strip & uppercase the atomName\n threeLetter = AminoAcid.AminoAcid(residueName)[1]\n if threeLetter[2] == '':\n print 'WARNING: residue name', residueName, 'not understood'\n return atomName\n atomName = string.upper(string.strip(atomName))\n \n #II. methylenes\n #1. GLY HA:\n if threeLetter == 'GLY' and atomName == 'HA1':\n atomName = 'HA2'\n elif threeLetter == 'GLY' and atomName == 'HA2':\n atomName = 'HA1'\n \n #2. ARG, ASN, ASP, CYS, GLN, GLU, HIS, LEU, LYS, MET, PHE, PRO, SER, TRP, TYR HB%:\n elif threeLetter in ('ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'HIS', 'LEU', 'LYS',\\\n 'MET', 'PHE', 'PRO', 'SER', 'TRP', 'TYR') and \\\n atomName == 'HB3':\n atomName = 'HB1'\n elif threeLetter in ('ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'HIS', 'LEU', 'LYS',\\\n 'MET', 'PHE', 'PRO', 'SER', 'TRP', 'TYR') and \\\n atomName == 'HB1':\n atomName = 'HB3'\n\n #3. ARG, GLN, GLU, LYS, MET, PRO HG%:\n elif threeLetter in ('ARG', 'GLN', 'GLU', 'LYS', 'MET', 'PRO') and\\\n atomName == 'HG1':\n atomName = 'HG3'\n elif threeLetter in ('ARG', 'GLN', 'GLU', 'LYS', 'MET', 'PRO') and\\\n atomName == 'HG3':\n atomName = 'HG1'\n #4. ILE HG1%:\n elif threeLetter == 'ILE' and atomName == 'HG13':\n atomName = 'HG11'\n elif threeLetter == 'ILE' and atomName == 'HG11':\n atomName = 'HG13' \n #5. ARG, ASN, LYS, PRO HD:\n elif threeLetter in ('ARG', 'ASN', 'LYS', 'PRO') and atomName == 'HD1':\n atomName = 'HD3'\n elif threeLetter in ('ARG', 'ASN', 'LYS', 'PRO') and atomName == 'HD3':\n atomName = 'HD1'\n #6. LYS HE:\n elif threeLetter == 'LYS' and atomName == 'HE3':\n atomName = 'HE1'\n elif threeLetter == 'LYS' and atomName == 'HE1':\n atomName = 'HE3'\n \n #III. methyls:\n #1. ALA beta:\n elif threeLetter == 'ALA' and atomName == 'HB2':\n atomName = 'HB1'\n elif threeLetter == 'ALA' and atomName == 'HB1':\n atomName = 'HB2'\n #2. VAL gamma1:\n elif threeLetter == 'VAL' and atomName == 'HG11':\n atomName = 'HG12'\n elif threeLetter == 'VAL' and atomName == 'HG12':\n atomName = 'HG11'\n #3. ILE, VAL gamma2:\n elif threeLetter in ('ILE', 'VAL') and atomName == 'HG21':\n atomName = 'HG22'\n elif threeLetter in ('ILE', 'VAL') and atomName == 'HG22':\n atomName = 'HG21'\n #4. ILE, LEU delta1:\n elif threeLetter in ('ILE', 'LEU') and atomName == 'HD11':\n atomName = 'HD12'\n elif threeLetter in ('ILE', 'LEU') and atomName == 'HD12':\n atomName = 'HD11' \n #5. LEU delta2:\n elif threeLetter == 'LEU' and atomName == 'HD21':\n atomName = 'HD22'\n elif threeLetter == 'LEU' and atomName == 'HD22':\n atomName = 'HD21' \n #6. MET epsilon:\n elif threeLetter == 'MET' and atomName == 'HE1':\n atomName = 'HE2'\n elif threeLetter == 'MET' and atomName == 'HE2':\n atomName = 'HE1'\n #7. zeta:\n elif atomName == 'HZ1':\n atomName = 'HZ2'\n elif atomName == 'HZ2':\n atomName = 'HZ1' \n \n #IV. 
ARG NHs:\n elif threeLetter == 'ARG' and atomName == 'HH11':\n atomName = 'HH12'\n elif threeLetter == 'ARG' and atomName == 'HH12':\n atomName = 'HH11'\n elif threeLetter == 'ARG' and atomName == 'HH21':\n atomName = 'HH22'\n elif threeLetter == 'ARG' and atomName == 'HH22':\n atomName = 'HH21' \n\n return atomName", "def get_aqi_no2_1h(no2_1h: float) -> (int, str, str):\n cp = round(no2_1h * 1000)\n return __get_aqi_general_formula_texts(cp, US_NO2_1H, US_NO2_EFFECTS, US_NO2_CAUTIONS, US_AQI)", "def rotclass(val):\n # if we have seven years of either corn and soy\n if val.count(\"B\") + val.count(\"C\") > 6:\n return \"Ag\"\n return \"Non Ag\"", "def _lead_time_to_string(lead_time_hours):\n\n return '{0:03d}'.format(lead_time_hours)", "def isolate_header(coursework) -> str:\n header = coursework.split(\":\")[0]\n\n if(\"Final exam\" in header):\n return header\n\n try:\n \"\"\"\n This part is for the extra assignments as the only difference between\n it and regular assignments is the + symbol. So we take out the number\n and put the + symbol right after assignment.\n \"\"\"\n\n header, number = header.split(\" \")\n\n return header + \"+\"*number.count(\"+\")\n\n except ValueError:\n return header", "def map_6to9(letter):\n if letter in mapped_6:\n return str(6)\n elif letter in mapped_7:\n return str(7)\n elif letter in mapped_8:\n return str(8)\n else: # else: alpha_number in \"WXYZ\"\n return str(9)", "def coding_strand_to_AA_unit_tests():\n print \"input: ATGCGA, expected output: MR, actual output: \" + coding_strand_to_AA(\"ATGCGA\")\n print \"input: ATGCCCGCTTT, expected output: MPA, actual output: \" + coding_strand_to_AA(\"ATGCCCGCTTT\")", "def map_2to5(letter):\n if letter in mapped_2:\n return str(2)\n elif letter in mapped_3:\n return str(3)\n elif letter in mapped_4:\n return str(4)\n else: # else: alpha_number in \"JKL\"\n return str(5)", "def get_sample_1940_hh():\n hh_line = \"H19400200024278096700000001000009100000000001198632410100102100000009999000260300026007000840199990012200020999999901223233100110101000000001000900000000100090\"\n return hh_line" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Onboard the OMSAgent to the specified OMS workspace. This includes enabling the OMS process on the machine. This call will return nonzero if the settings provided are incomplete or incorrect.
def enable(hutil): exit_if_vm_not_supported(hutil, 'Enable') public_settings, protected_settings = get_settings(hutil) if public_settings is None: raise OmsAgentParameterMissingError('Public configuration must be ' \ 'provided') if protected_settings is None: raise OmsAgentParameterMissingError('Private configuration must be ' \ 'provided') workspaceId = public_settings.get('workspaceId') workspaceKey = protected_settings.get('workspaceKey') proxy = protected_settings.get('proxy') vmResourceId = protected_settings.get('vmResourceId') stopOnMultipleConnections = public_settings.get('stopOnMultipleConnections') if workspaceId is None: raise OmsAgentParameterMissingError('Workspace ID must be provided') if workspaceKey is None: raise OmsAgentParameterMissingError('Workspace key must be provided') check_workspace_id_and_key(workspaceId, workspaceKey) if (stopOnMultipleConnections is not None and stopOnMultipleConnections is True): check_wkspc_cmd = WorkspaceCheckCommandTemplate.format(OmsAdminPath) list_exit_code, output = run_get_output(check_wkspc_cmd, chk_err = False) # If this enable was called a workspace already saved on the machine, # then we should continue; if this workspace is not saved on the # machine, but another workspace service is running, then we should # stop and warn this_wksp_saved = False connection_exists = False for line in output.split('\n'): if workspaceId in line: this_wksp_saved = True if 'Onboarded(OMSAgent Running)' in line: connection_exists = True if not this_wksp_saved and connection_exists: err_msg = ('This machine is already connected to some other Log ' \ 'Analytics workspace, please set ' \ 'stopOnMultipleConnections to false in public ' \ 'settings or remove this property, so this machine ' \ 'can connect to new workspaces, also it means this ' \ 'machine will get billed multiple times for each ' \ 'workspace it report to. ' \ '(LINUXOMSAGENTEXTENSION_ERROR_MULTIPLECONNECTIONS)') # This exception will get caught by the main method raise OmsAgentUnwantedMultipleConnectionsException(err_msg) # Check if omsadmin script is available if not os.path.exists(OmsAdminPath): log_and_exit(hutil, 'Enable', 1, 'OMSAgent onboarding script {0} not ' \ 'exist. Enable cannot be called ' \ 'before install.'.format(OmsAdminPath)) proxyParam = '' if proxy is not None: proxyParam = '-p {0}'.format(proxy) vmResourceIdParam = '' if vmResourceId is not None: vmResourceIdParam = '-a {0}'.format(vmResourceId) optionalParams = '{0} {1}'.format(proxyParam, vmResourceIdParam) onboard_cmd = OnboardCommandWithOptionalParamsTemplate.format(OmsAdminPath, workspaceId, workspaceKey, optionalParams) hutil_log_info(hutil, 'Handler initiating onboarding.') exit_code, output = run_get_output(onboard_cmd) # To avoid exposing the shared key, print output separately hutil_log_info(hutil, 'Output of onboarding command: \n{0}'.format(output)) return exit_code
[ "def setup_omsagent(configurator, run_command, logger_log, logger_error):\n # Remember whether OMI (not omsagent) needs to be freshly installed.\n # This is needed later to determine whether to reconfigure the omiserver.conf or not for security purpose.\n need_fresh_install_omi = not os.path.exists('/opt/omi/bin/omiserver')\n\n logger_log(\"Begin omsagent setup.\")\n\n # 1. Install omsagent, onboard to LAD workspace\n # We now try to install/setup all the time. If it's already installed. Any additional install is a no-op.\n is_omsagent_setup_correctly = False\n maxTries = 5 # Try up to 5 times to install omsagent\n for trialNum in range(1, maxTries + 1):\n cmd_exit_code, cmd_output = setup_omsagent_for_lad(run_command)\n if cmd_exit_code == 0: # Successfully set up\n is_omsagent_setup_correctly = True\n break\n logger_error(\"omsagent setup failed (trial #\" + str(trialNum) + \").\")\n if trialNum < maxTries:\n logger_error(\"Retrying in 30 seconds...\")\n time.sleep(30)\n if not is_omsagent_setup_correctly:\n logger_error(\"omsagent setup failed \" + str(maxTries) + \" times. Giving up...\")\n return 1, \"omsagent setup failed {0} times. \" \\\n \"Last exit code={1}, Output={2}\".format(maxTries, cmd_exit_code, cmd_output)\n\n # Issue #265. OMI httpsport shouldn't be reconfigured when LAD is re-enabled or just upgraded.\n # In other words, OMI httpsport config should be updated only on a fresh OMI install.\n if need_fresh_install_omi:\n # Check if OMI is configured to listen to any non-zero port and reconfigure if so.\n omi_listens_to_nonzero_port = run_command(r\"grep '^\\s*httpsport\\s*=' /etc/opt/omi/conf/omiserver.conf \"\n r\"| grep -v '^\\s*httpsport\\s*=\\s*0\\s*$'\")[0] is 0\n if omi_listens_to_nonzero_port:\n run_command(\"/opt/omi/bin/omiconfigeditor httpsport -s 0 < /etc/opt/omi/conf/omiserver.conf \"\n \"> /etc/opt/omi/conf/omiserver.conf_temp\")\n run_command(\"mv /etc/opt/omi/conf/omiserver.conf_temp /etc/opt/omi/conf/omiserver.conf\")\n\n # 2. Configure all fluentd plugins (in_syslog, in_tail, out_mdsd)\n # 2.1. First get a free TCP/UDP port for fluentd in_syslog plugin.\n port = get_fluentd_syslog_src_port()\n if port < 0:\n return 3, 'setup_omsagent(): Failed at getting a free TCP/UDP port for fluentd in_syslog'\n # 2.2. Configure syslog\n cmd_exit_code, cmd_output = configure_syslog(run_command, port,\n configurator.get_fluentd_syslog_src_config(),\n configurator.get_rsyslog_config(),\n configurator.get_syslog_ng_config())\n if cmd_exit_code != 0:\n return 4, 'setup_omsagent(): Failed at configuring in_syslog. Exit code={0}, Output={1}'.format(cmd_exit_code,\n cmd_output)\n # 2.3. Configure filelog\n cmd_exit_code, cmd_output = configure_filelog(configurator.get_fluentd_tail_src_config())\n if cmd_exit_code != 0:\n return 5, 'setup_omsagent(): Failed at configuring in_tail. Exit code={0}, Output={1}'.format(cmd_exit_code,\n cmd_output)\n # 2.4. Configure out_mdsd\n cmd_exit_code, cmd_output = configure_out_mdsd(configurator.get_fluentd_out_mdsd_config())\n if cmd_exit_code != 0:\n return 6, 'setup_omsagent(): Failed at configuring out_mdsd. Exit code={0}, Output={1}'.format(cmd_exit_code,\n cmd_output)\n\n # 3. Restart omsagent\n cmd_exit_code, cmd_output = control_omsagent('restart', run_command)\n if cmd_exit_code != 0:\n return 8, 'setup_omsagent(): Failed at restarting omsagent (fluentd). 
' \\\n 'Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output)\n\n # All done...\n return 0, \"setup_omsagent(): Succeeded\"", "def setup_omsagent_for_lad(run_command):\n # 1. Install omsagent. It's a noop if it's already installed.\n cmd_exit_code, cmd_output = run_command(omsagent_universal_sh_cmd_template.format(op='--upgrade'))\n if cmd_exit_code != 0:\n return 1, 'setup_omsagent_for_lad(): omsagent universal installer shell execution failed. ' \\\n 'Output: {0}'.format(cmd_output)\n\n # 2. Onboard to LAD workspace. Should be a noop if it's already done.\n if not os.path.isdir(omsagent_lad_dir):\n cmd_exit_code, cmd_output = run_command(omsagent_lad_workspace_cmd_template.format(args='-w LAD'))\n if cmd_exit_code != 0:\n return 2, 'setup_omsagent_for_lad(): LAD workspace onboarding failed. Output: {0}'.format(cmd_output)\n\n # All succeeded\n return 0, 'setup_omsagent_for_lad() succeeded'", "def test_oobm_issue_power_on(self):\n self.configureAndStartMgmtServer()\n self.assertIssueCommandState('ON', 'On')\n global apiRequests\n self.assertTrue('startVirtualMachine' in apiRequests)", "def enable_arms(self):\n\n rospy.loginfo(\"Attempting to enabling robot.\")\n rs = baxter_interface.RobotEnable(baxter_interface.CHECK_VERSION)\n\n try:\n rs.enable()\n except Exception, e:\n rospy.logerr(e.strerror)\n rospy.logerr(\"Failed to enable arms.\")\n return False\n\n rospy.loginfo(\"Successfully enabled robot.\")\n return True", "def configureAndStartMgmtServer(self):\n self.configureAndEnableOobm()\n self.startMgmtServer()", "def enable(self):\n hoomd.util.print_status_line()\n\n hoomd.util.quiet_status()\n _compute.enable(self)\n hoomd.util.unquiet_status()\n\n hoomd.context.current.thermo.append(self)", "async def _ams_enable(self, ctx):\n try:\n self.c.execute('UPDATE servers SET UseAMS = ? 
WHERE ID = ?', (True, ctx.message.guild.id))\n except Exception as e:\n await ctx.send(embed=discord.Embed(\n title=\"Error\", \n description=str(e),\n color=self.EMBED_COLOR\n ))\n else:\n self.connection.commit()\n await ctx.send(embed=discord.Embed(\n title=\"AMS - Automated Moderation System\",\n description='AMS is now active.\\nPlease use {}ams level to set the AMS level.'.format(\n PREFIX\n ),\n color=self.EMBED_COLOR\n ))", "def create_openmm_system(sim_openmm, model, anchor):\n building_directory = os.path.join(\n model.anchor_rootdir, anchor.directory, anchor.building_directory)\n box_vectors = None\n if anchor.amber_params is not None:\n prmtop_filename = os.path.join(\n building_directory, anchor.amber_params.prmtop_filename)\n prmtop = openmm_app.AmberPrmtopFile(prmtop_filename)\n assert anchor.amber_params.pdb_coordinates_filename is not None\n pdb_coordinates_filename = os.path.join(\n building_directory, \n anchor.amber_params.pdb_coordinates_filename)\n positions = openmm_app.PDBFile(pdb_coordinates_filename)\n #assert anchor.amber_params.box_vectors is not None\n box_vectors = anchor.amber_params.box_vectors\n topology = prmtop\n \n elif anchor.forcefield_params is not None:\n forcefield_filenames = []\n for forcefield_filename in \\\n anchor.forcefield_params.built_in_forcefield_filenames:\n forcefield_filenames.append(forcefield_filename)\n for forcefield_filename in \\\n anchor.forcefield_params.custom_forcefield_filenames:\n forcefield_filenames.append(os.path.join(\n building_directory, forcefield_filename))\n pdb_filename = os.path.join(building_directory, \n anchor.forcefield_params.pdb_filename)\n pdb = openmm_app.PDBFile(pdb_filename)\n forcefield = openmm_app.ForceField(\n *forcefield_filenames)\n box_vectors = anchor.forcefield_params.box_vectors\n \n topology = pdb\n positions = pdb\n \n elif anchor.charmm_params is not None:\n raise Exception(\"Charmm systems not yet implemented\")\n \n else:\n raise Exception(\"No Amber or Charmm input settings detected.\")\n \n #assert box_vectors is not None, \"No source of box vectors provided.\"\n nonbonded_method = model.openmm_settings.nonbonded_method.lower()\n if nonbonded_method == \"pme\":\n nonbondedMethod = openmm_app.PME\n \n elif nonbonded_method == \"nocutoff\":\n nonbondedMethod = openmm_app.NoCutoff\n \n elif nonbonded_method == \"cutoffnonperiodic\":\n nonbondedMethod = openmm_app.CutoffNonPeriodic\n \n elif nonbonded_method == \"cutoffperiodic\":\n nonbondedMethod = openmm_app.CutoffPeriodic\n \n elif nonbonded_method == \"ewald\":\n nonbondedMethod = openmm_app.Ewald\n \n else:\n raise Exception(\"nonbonded method not found: %s\", \n model.openmm_settings.nonbonded_method)\n \n if model.openmm_settings.constraints is None:\n constraints_str = None\n else:\n constraints_str = model.openmm_settings.constraints\n \n if constraints_str is None:\n constraints = None\n \n elif constraints_str.lower() == \"none\":\n constraints = None\n \n elif constraints_str.lower() == \"hbonds\":\n constraints = openmm_app.HBonds\n \n elif constraints_str.lower() == \"allbonds\":\n constraints = openmm_app.AllBonds\n \n elif constraints_str.lower() == \"hangles\":\n constraints = openmm_app.HAngles\n \n else:\n raise Exception(\"constraints not found: %s\", \n model.openmm_settings.constraints)\n \n if model.openmm_settings.hydrogenMass:\n hydrogenMass = model.openmm_settings.hydrogenMass*openmm.unit.amu\n else:\n hydrogenMass = model.openmm_settings.hydrogenMass\n rigidWater = model.openmm_settings.rigidWater\n \n if 
anchor.amber_params is not None:\n system = prmtop.createSystem(\n nonbondedMethod=nonbondedMethod, \n nonbondedCutoff=model.openmm_settings.nonbonded_cutoff, \n constraints=constraints, hydrogenMass=hydrogenMass, \n rigidWater=rigidWater)\n \n elif anchor.forcefield_params is not None:\n system = forcefield.createSystem(\n pdb.topology, nonbondedMethod=nonbondedMethod, \n nonbondedCutoff=model.openmm_settings.nonbonded_cutoff, \n constraints=constraints, hydrogenMass=hydrogenMass, \n rigidWater=rigidWater)\n \n elif anchor.charmm_params is not None:\n raise Exception(\"Charmm input settings not yet implemented\")\n \n else:\n print(\"Settings for Amber or Charmm simulations not found\")\n \n return system, topology, positions, box_vectors", "def poweron(self):\n LOG.info('Power on nodes: %s', self)\n self.power_management.poweron(self.get_macs())", "def runOpenMM(parm, topology, system, positions, rad, K, Indices, solvate):\n\n def newIntegrator():\n integrator = mm.LangevinIntegrator(\n 300.0 * u.kelvin,\n 10.0 / u.picosecond,\n 1.0 * u.femtosecond)\n return integrator\n\n\n # harmonically restrain dihedral angle\n # see units, http://docs.openmm.org/6.3.0/userguide/theory.html\n pi = np.pi\n harmonic = mm.CustomTorsionForce(\"k*min(dtheta, 2*pi-dtheta)^2; dtheta = abs(theta-theta0); pi = %.3f\" % pi);\n harmonic.addPerTorsionParameter(\"theta0\");\n harmonic.addPerTorsionParameter(\"k\");\n system.addForce(harmonic)\n harmonic.addTorsion(Indices[0], Indices[1], Indices[2], Indices[3], (rad, K))\n\n #Restrain backbone atoms\n force = mm.CustomExternalForce(\"k*((x-x0)^2+(y-y0)^2+(z-z0)^2)\")\n force.addGlobalParameter(\"k\", 5.0*kilocalories_per_mole/angstroms**2)\n force.addPerParticleParameter(\"x0\")\n force.addPerParticleParameter(\"y0\")\n force.addPerParticleParameter(\"z0\")\n for i, atom_crd in enumerate(parm.positions):\n if parm.atoms[i].name in ('CA', 'C', 'N'):\n force.addParticle(i, atom_crd.value_in_unit(u.nanometers))\n system.addForce(force)\n\n\n # build simulaion\n #platform = mm.Platform.getPlatformByName('CPU')\n platform = mm.Platform.getPlatformByName('CUDA')\n integ1 = newIntegrator()\n simulation = app.Simulation(topology, system, integ1)\n simulation.context.setPositions(positions)\n\n # perform minimization\n print('Minimizing...')\n simulation.minimizeEnergy()\n\n # NVT equilibration\n simulation.context.setVelocitiesToTemperature(300*u.kelvin)\n simulation.reporters.append(app.DCDReporter('nvt01.dcd', 1000)) # write every 1000 steps\n simulation.reporters.append(app.StateDataReporter('data01.csv', 1000, step=True, potentialEnergy=True, volume=True,temperature=True, separator='\\t'))\n print('Equilibrating at NVT...')\n simulation.step(10000) # 10 ps\n\n if solvate==True:\n positionsNVT = simulation.context.getState(getPositions=True).getPositions()\n velocitiesNVT = simulation.context.getState(getVelocities=True).getVelocities()\n\n # NPT equilibration\n barostat = mm.MonteCarloBarostat(1.0*u.bar, 300.0*u.kelvin)\n system.addForce(barostat)\n ### bc barostat, need new simulation and associated properties\n integ2 = newIntegrator()\n simulation = app.Simulation(topology, system, integ2) \n simulation.context.setPositions(positionsNVT)\n simulation.context.setVelocities(velocitiesNVT)\n simulation.reporters.append(app.DCDReporter('npt01.dcd', 1000))\n simulation.reporters.append(app.StateDataReporter('data02.csv', 1000, step=True, potentialEnergy=True, volume=True,temperature=True, separator='\\t'))\n print('Equilibrating at NPT...')\n simulation.step(10000) # 
10 ps\n \n # NPT production\n print('Production run at NPT...')\n simulation.step(3000000) # 100 ps\n\n else:\n print('Production run at NVT...')\n simulation.step(3000000) # 100 ps\n\n\n topology.positions = simulation.context.getState(getPositions=True).getPositions(asNumpy=True)\n return topology.positions", "def install_openvswitch(args):\n # Install and configure OpenVSwitch\n logger.info(\"Install and configure OpenVSwitch\")\n\n # Install package\n for name in ['openvswitch-switch','openvswitch-datapath-dkms']:\n pkg = apt_cache[name]\n if pkg.is_installed:\n logger.debug('Package: %s already installed',name)\n else:\n pkg.mark_install()\n\n try:\n apt_cache.commit()\n except Exception, err:\n logger.error('Package installation failed: %s', str(err))\n return False\n \n # Start openvswitch\n logger.debug(subprocess.check_output('service openvswitch-switch start',shell=True))\n\n if args.node_type == 'controller':\n logger.info(\"Configure networking\")\n\n # Check network parameter\n if args.external_interface == None:\n if args.internal_interface != None:\n args.external_interface = args.internal_interface\n logger.warning(\"Set internal interface as external interface.\")\n else:\n logger.error(\"No any interface has been assigned.\")\n return False\n\n if args.external_ipaddr == None:\n if args.internal_ipaddr != None:\n args.external_ipaddr = args.internal_ipaddr\n logger.warning(\"Set internal IP address as external IP address.\")\n else:\n logger.error(\"No any IP address has been assigned.\")\n return False\n\n if args.external_netmask == None:\n if args.internal_netmask != None:\n args.external_netmask = args.internal_netmask\n logger.warning(\"Set internal netmask as external netmask.\")\n else:\n logger.error(\"No any netmask has been assigned.\")\n return False\n\n if args.default_gateway == None:\n logger.error(\"Gateway does not be assigned.\")\n return False\n\n # Create OVS\n try:\n cmd = 'ovs-vsctl br-exists br-ex'\n subprocess.check_output(cmd, shell=True)\n logger.info('The openvswitch: br-ex is exist.')\n except subprocess.CalledProcessError:\n cmd = 'ovs-vsctl add-br br-ex'\n subprocess.check_output(cmd, shell=True)\n logger.debug('Create openvswitch: br-ex')\n\n # Add physical NIC to OVS\n try:\n cmd = 'ovs-vsctl list-ports br-ex | grep %s' % args.external_interface\n subprocess.check_output(cmd, shell=True)\n logger.info('Check ovs port')\n except subprocess.CalledProcessError:\n cmd = 'ovs-vsctl add-port br-ex %s' % args.external_interface\n subprocess.check_output(cmd, shell=True)\n logger.debug('Add port to openvswitch')\n\n # Setting up networking\n try:\n cmd = 'ifconfig %s 0.0.0.0 promisc up' % (args.external_interface) \n subprocess.check_output(cmd, shell=True)\n logger.info('Setting up external physical interface')\n\n cmd = 'ifconfig br-ex %s netmask %s up' % (args.external_ipaddr, args.external_netmask)\n subprocess.check_output(cmd, shell=True)\n logger.info('Setting up openvswitch')\n\n except subprocess.CalledProcessError,err:\n logger.error(\"Setting up network failed: %s\", str(err))\n cmd = 'ifconfig %s %s netmask %s up' % (args.external_interface, args.external_ipaddr, args.external_netmask) \n subprocess.check_output(cmd, shell=True)\n logger.debug('Resume networking')\n\n if args.external_interface == args.internal_interface and args.external_ipaddr != args.internal_ipaddr :\n try:\n cmd = \"ip address add %s/%s dev br-ex\" % (args.internal_ipaddr,args.internal_netmask)\n subprocess.check_output(cmd, shell=True)\n logger.info('Bind internal 
network to openvswitch.')\n except:\n logger.error('Bind internal network to openvswitch failed.')\n\n try:\n cmd = \"route -n | grep 'UG' | grep '%s'\" % (args.default_gateway)\n subprocess.check_output(cmd, shell=True)\n logger.info('Check routing table')\n except:\n cmd = \"route add default gw %s\" % (args.default_gateway)\n subprocess.check_output(cmd, shell=True)\n logger.debug('Setting network gateway')\n\n return True", "def enable_ownership(self):\n # Enable stock location ownership\n\n InvenTreeSetting.set_setting('STOCK_OWNERSHIP_CONTROL', True, self.user)\n self.assertEqual(True, InvenTreeSetting.get_setting('STOCK_OWNERSHIP_CONTROL'))", "def turn_odoo_connection_on(self):\n res = super(WkSkeleton, self).turn_odoo_connection_on()\n ctx = dict(self._context or {})\n if 'magento' in ctx:\n instanceId = ctx.get('instance_id')\n self.change_connection_state('enable', instanceId)\n return res", "def turn_on(self, **kwargs):\r\n self._device.set_status(True, self._switch_id)", "def OnosEnvSetup(self, handle):\n self.Gensshkey(handle)\n self.home = self.GetEnvValue(handle, 'HOME')\n self.AddKnownHost(handle, self.OC1, \"karaf\", \"karaf\")\n self.AddKnownHost(handle, self.OC2, \"karaf\", \"karaf\")\n self.AddKnownHost(handle, self.OC3, \"karaf\", \"karaf\")\n self.DownLoadCode(handle,\n 'https://github.com/wuwenbin2/OnosSystemTest.git')\n # self.DownLoadCode(handle, 'https://gerrit.onosproject.org/onos')\n if self.masterusername == 'root':\n filepath = '/root/'\n else:\n filepath = '/home/' + self.masterusername + '/'\n self.OnosRootPathChange(filepath)\n self.CopyOnostoTestbin()\n self.ChangeOnosName(self.agentusername, self.agentpassword)\n self.InstallDefaultSoftware(handle)\n self.SetOnosEnvVar(handle, self.masterpassword, self.agentpassword)", "def turn_on(self, **kwargs: Any) -> None:\n # Stop other zones first\n self.turn_off()\n\n # Start this zone\n manual_run_time = timedelta(\n minutes=self._person.config_entry.options.get(\n CONF_MANUAL_RUN_MINS, DEFAULT_MANUAL_RUN_MINS\n )\n )\n # The API limit is 3 hours, and requires an int be passed\n self._controller.rachio.zone.start(self.zone_id, manual_run_time.seconds)\n _LOGGER.debug(\n \"Watering %s on %s for %s\",\n self.name,\n self._controller.name,\n str(manual_run_time),\n )", "def testxwuappswitch(self):\n if not self._hasxls:\n self.fail_noexcel()\n return\n app = self._app\n xwu.appswitch(app, True)\n os = xwu.appswitch(app)\n self.assertFalse(bool(os), \"no changes need to be made\")\n os = xwu.appswitch(app, False)\n self.assertTrue(len(os) > 0)\n xwu.appswitch(app, os)\n app.visible = False\n os = xwu.appswitch(app, {\"visible\": True})\n self.assertFalse(os[\"visible\"])\n app.api.enableevents = True\n os = xwu.appswitch(app, {\"visible\": True, \"enableevents\": False})\n self.assertEqual(1, len(os))\n self.assertTrue(os[\"enableevents\"])", "def switch_to_EMMA_solenoid(self):\n log.log_info(\"Switched to EMMA solenoids mode\")\n self.__control_schema = _EMMA_SOLENOIDS\n self.initialize_solenoids()", "def open_hadware_manager():\n os.system(\"devmgmt.msc\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the VM this extension is running on is supported by OMSAgent. Returns for platform.linux_distribution() vary widely in format, such as '7.3.1611' returned for a machine with CentOS 7, so the first provided digits must match. The supported distros of the OMSAgentforLinux, as well as Ubuntu 16.10, are allowed to utilize this VM extension. All other distros will get error code 51.
def is_vm_supported_for_extension():
    supported_dists = {'redhat' : ('5', '6', '7'), # CentOS
                       'centos' : ('5', '6', '7'), # CentOS
                       'red hat' : ('5', '6', '7'), # Oracle, RHEL
                       'oracle' : ('5', '6', '7'), # Oracle
                       'debian' : ('6', '7', '8'), # Debian
                       'ubuntu' : ('12.04', '14.04', '15.04', '15.10', '16.04', '16.10'), # Ubuntu
                       'suse' : ('11', '12') #SLES
    }

    try:
        vm_dist, vm_ver, vm_id = platform.linux_distribution()
    except AttributeError:
        vm_dist, vm_ver, vm_id = platform.dist()

    vm_supported = False

    # Find this VM distribution in the supported list
    for supported_dist in supported_dists.keys():
        if not vm_dist.lower().startswith(supported_dist):
            continue

        # Check if this VM distribution version is supported
        vm_ver_split = vm_ver.split('.')
        for supported_ver in supported_dists[supported_dist]:
            supported_ver_split = supported_ver.split('.')

            # If vm_ver is at least as precise (at least as many digits) as
            # supported_ver and matches all the supported_ver digits, then
            # this VM is guaranteed to be supported
            vm_ver_match = True
            for idx, supported_ver_num in enumerate(supported_ver_split):
                try:
                    supported_ver_num = int(supported_ver_num)
                    vm_ver_num = int(vm_ver_split[idx])
                except IndexError:
                    vm_ver_match = False
                    break
                if vm_ver_num is not supported_ver_num:
                    vm_ver_match = False
                    break

            if vm_ver_match:
                vm_supported = True
                break

        if vm_supported:
            break

    return vm_supported, vm_dist, vm_ver
[ "def os_version_check():\n with hide('running', 'stdout'):\n version = run('cat /etc/issue')\n return True if 'Ubuntu 10.04' in versio else False", "def test_os_detection():\n host = ipaddress.ip_address(u\"92.222.10.88\")\n scanner = Scanner(host, mock=True, sudo=True)\n scanner.perform_scan()\n\n report = scanner.extract_host_report()\n assert report.operating_system == \"linux 3.7 - 3.10\"\n assert report.operating_system_accuracy == \"100\"", "def test_get_operating_system(self):\n pass", "def __verify__(cls):\n\n check_prior_ubuntu = True\n if 'Ubuntu' in aj.platform_string:\n ubuntu_version = int(aj.platform_string[7:9])\n check_prior_ubuntu = ubuntu_version < 18\n return aj.platform in ['debian'] and check_prior_ubuntu", "def test_get_platforms_usage(self):\n pass", "def check_os():\n if '_fd_checked_os_' in env:\n return env._fd_checked_os_\n print_green('INFO: Check your OS...')\n remote_os_issue = sudo('cat /etc/issue', quiet=True)\n if remote_os_issue.failed:\n remote_os_issue = ''\n remote_os_issue = remote_os_issue.replace('\\\\n', '').replace('\\\\l', '').strip()\n remote_os_name = allow_versions = ok = None\n if remote_os_issue:\n for os_issue, versions, os_name in SUPPORT_OS:\n if os_issue in remote_os_issue:\n remote_os_name = os_name\n allow_versions = versions\n ok = True\n break\n if not ok:\n abort('Your OS \"{}\" is not supported :('.format(remote_os_issue))\n remote_os_ver = sudo('cat /etc/debian_version', quiet=True)\n if remote_os_ver.failed:\n remote_os_ver = ''\n remote_os_ver = remote_os_ver.split('.', 1)[0].strip()\n if remote_os_ver not in allow_versions:\n abort('Your OS \"{}\" version \"{}\" is not supported :('.format(remote_os_issue, remote_os_ver))\n print_green('INFO: Check your OS... OK')\n env._fd_checked_os_ = remote_os_name, remote_os_ver\n return env._fd_checked_os_", "def validate_jvm_support():\n try:\n raw_output = subprocess.getoutput('java -version')\n lines = raw_output.splitlines()\n if lines:\n first_line = lines[0]\n parts = first_line.split(' ')\n if len(parts) == 3:\n jvm_vendor = parts[0]\n\n if jvm_vendor in SUPPORTED_JVM_VENDOR:\n jvm_version = parts[2].replace('\"', '')\n jvm_version_parts = jvm_version.split('.')\n if len(jvm_version_parts) >= 2:\n jvm_version_major = int(jvm_version_parts[0])\n jvm_version_minor = int(jvm_version_parts[1])\n jvm_version_tuple = (jvm_version_major, jvm_version_minor)\n\n if jvm_version_tuple >= SUPPORTED_JVM_VERSION:\n return\n else:\n raise JavaUnsupportedVersionError(jvm_version)\n else:\n raise JavaUnsupportedVendorError(jvm_vendor)\n\n raise JavaVersionParseError(raw_output)\n except CalledProcessError:\n raise JavaCallError('Failure calling `java -version`')", "def test_num_linux_os(self):\n # create computers\n create_watchman_computer(self.customer, os_type='linux', date_reported=date(2018, 12, 1), date_last_reported=date(2019, 1, 15))\n create_watchman_computer(self.customer, os_type='linux', date_reported=date(2019, 1, 10), date_last_reported=date(2019, 1, 15))\n create_watchman_computer(self.customer, os_type='mac', date_reported=date(2018, 12, 1), date_last_reported=date(2019, 1, 15))\n create_watchman_computer(self.customer, os_type='windows', date_reported=date(2018, 12, 1), date_last_reported=date(2019, 1, 15))\n # request\n request_body = {\n 'customer': self.customer.id,\n 'start_date': '2019-01-01',\n 'end_date': '2019-01-31'\n }\n self.client.post(reverse(self.view_name), request_body)\n # test database\n self.assertEqual(models.Report.objects.first().num_linux_os, 2)", "def 
os_ubuntu2104():\n yield AutoinstallMachineModel.OperatingSystem(\n 'ubuntu21', 'debian', 2104, 0, 'Ubuntu 21.04 LTS', None)", "def _check_kernel_version():\n full_version = platform.uname()[2]\n\n # Make sure version number makes sense and captures version numbers\n regex = re.compile(\"^(\\d+(?:\\.\\d+){1,3})(?:\\D.*)?$\")\n\n try:\n version_string = regex.match(full_version).group(1)\n except AttributeError:\n print \"ERROR: The kernel version does not match expected semantic \" \\\n \"versioning.\"\n return False\n\n version_list = [int(num) for num in version_string.split(\".\")]\n if version_list < MIN_KERNEL_VERSION:\n print \"Minimum kernel version to run Calico is %s.\" \\\n \"\\nDetected kernel version: %s\" % \\\n (MIN_KERNEL_VERSION_STR, version_string)\n return False\n\n return True", "def check_architecture():\n if not architecture.matches_architecture(*architecture.ARCH_SUPPORTED):\n inhibit_upgrade()", "def linux_detect_installer():\n global INSTALLER\n INSTALLER = None\n if vm_supported and (vm_dist.startswith('Ubuntu') or vm_dist.startswith('debian')):\n INSTALLER = 'APT'\n elif vm_supported and (vm_dist.startswith('CentOS') or vm_dist.startswith('Oracle') or vm_dist.startswith('Red Hat')):\n INSTALLER = 'YUM'\n elif vm_supported and vm_dist.startswith('SUSE Linux'):\n INSTALLER = 'ZYPPER'", "def test_create_script_extension_for_linux_os(self, operating_system_types):\n compute_client = mock.MagicMock()\n vm_extension_model = mock.MagicMock()\n group_name = \"testgroupname\"\n vm_name = \"testvmname\"\n self.vm_extension_service._prepare_linux_vm_script_extension = mock.MagicMock(\n return_value=vm_extension_model)\n\n # Act\n self.vm_extension_service.create_script_extension(\n compute_client=compute_client,\n location=self.location,\n group_name=group_name,\n vm_name=vm_name,\n image_os_type=operating_system_types.linux,\n script_file=self.script_file,\n script_configurations=self.script_configurations,\n tags=self.tags)\n\n # Verify\n compute_client.virtual_machine_extensions.create_or_update.assert_called_once_with(\n extension_parameters=vm_extension_model,\n resource_group_name=group_name,\n vm_extension_name=vm_name,\n vm_name=vm_name)", "def test_get_operating_systems(self):\n pass", "def test_debian_10_os_grains():\n # /etc/os-release data taken from base-files 10.3+deb10u11\n _os_release_data = {\n \"PRETTY_NAME\": \"Debian GNU/Linux 10 (buster)\",\n \"NAME\": \"Debian GNU/Linux\",\n \"VERSION_ID\": \"10\",\n \"VERSION\": \"10 (buster)\",\n \"VERSION_CODENAME\": \"buster\",\n \"ID\": \"debian\",\n \"HOME_URL\": \"https://www.debian.org/\",\n \"SUPPORT_URL\": \"https://www.debian.org/support\",\n \"BUG_REPORT_URL\": \"https://bugs.debian.org/\",\n }\n expectation = {\n \"os\": \"Debian\",\n \"os_family\": \"Debian\",\n \"oscodename\": \"buster\",\n \"osfullname\": \"Debian GNU/Linux\",\n \"osrelease\": \"10\",\n \"osrelease_info\": (10,),\n \"osmajorrelease\": 10,\n \"osfinger\": \"Debian-10\",\n }\n _run_os_grains_tests(_os_release_data, {}, expectation)", "def _check_python_and_os(self, test_name):\n if (test_name in self.MIN_VERSION_OR_OS and\n not self.MIN_VERSION_OR_OS[test_name]):\n return False\n return True", "def which_os(self):\n logger.info(\"entering which_os()\")\n junos = False\n evo = False\n bsd_version = float()\n sshd_version = float()\n result, stdout = self.ssh_cmd(\"uname\")\n if not result:\n err = \"cmd 'uname' failed on remote host, it must be *nix based\"\n self.close(err_str=err)\n if self.use_shell:\n host_os = 
stdout.split(\"\\n\")[1].rstrip()\n else:\n host_os = stdout\n if host_os == \"Linux\" and self.evo_os():\n evo = True\n elif host_os == \"JUNOS\":\n junos = True\n bsd_version = 6.3\n sshd_version = self.which_sshd()\n elif host_os == \"FreeBSD\" and self.junos_os():\n junos = True\n bsd_version = self.which_bsd()\n sshd_version = self.which_sshd()\n logger.info(\n f\"evo = {evo}, \"\n f\"junos = {junos}, \"\n f\"bsd_version = {bsd_version}, \"\n f\"sshd_version = {sshd_version}\"\n )\n return junos, evo, bsd_version, sshd_version", "def check_hwids():\n compatibles = get_compatibles()\n hwids = []\n messages = []\n for compatible in compatibles:\n hwid = compatible.get('hwidmatch').strip('^.*-').split(' ')[0]\n if hwid not in hwids:\n hwids.append(hwid)\n\n for item in CHROMEOS_RECOVERY_ARM_HWIDS:\n if item not in hwids:\n messages.append('%s is not available, please remove it from inputstreamhelper config' % item)\n for item in hwids:\n if item not in CHROMEOS_RECOVERY_ARM_HWIDS:\n messages.append('%s is missing, please add it to inputstreamhelper config' % item)\n if messages:\n raise Exception(messages)\n\n smallest = get_smallest()\n hwid = smallest.get('hwidmatch').strip('^.*-').split(' ')[0]\n print('Chrome OS hardware id\\'s are up to date, current smallest recovery image is %s' % hwid)", "def test_debian_11_os_grains():\n # /etc/os-release data taken from base-files 11.1+deb11u2\n _os_release_data = {\n \"PRETTY_NAME\": \"Debian GNU/Linux 11 (bullseye)\",\n \"NAME\": \"Debian GNU/Linux\",\n \"VERSION_ID\": \"11\",\n \"VERSION\": \"11 (bullseye)\",\n \"VERSION_CODENAME\": \"bullseye\",\n \"ID\": \"debian\",\n \"HOME_URL\": \"https://www.debian.org/\",\n \"SUPPORT_URL\": \"https://www.debian.org/support\",\n \"BUG_REPORT_URL\": \"https://bugs.debian.org/\",\n }\n expectation = {\n \"os\": \"Debian\",\n \"os_family\": \"Debian\",\n \"oscodename\": \"bullseye\",\n \"osfullname\": \"Debian GNU/Linux\",\n \"osrelease\": \"11\",\n \"osrelease_info\": (11,),\n \"osmajorrelease\": 11,\n \"osfinger\": \"Debian-11\",\n }\n _run_os_grains_tests(_os_release_data, {}, expectation)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate formats of workspace_id and workspace_key
def check_workspace_id_and_key(workspace_id, workspace_key):
    # Validate that workspace_id matches the GUID regex
    guid_regex = r'^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$'
    search = re.compile(guid_regex, re.M)
    if not search.match(workspace_id):
        raise OmsAgentInvalidParameterError('Workspace ID is invalid')

    # Validate that workspace_key is of the correct format (base64-encoded)
    try:
        encoded_key = base64.b64encode(base64.b64decode(workspace_key))
        if encoded_key != workspace_key:
            raise OmsAgentInvalidParameterError('Workspace key is invalid')
    except TypeError:
        raise OmsAgentInvalidParameterError('Workspace key is invalid')
[ "def project_id_validator(s: str):\n value: Optional[str]\n is_valid, value, msg = False, None, None\n\n if not project_helper.project_id_is_valid(s):\n msg = ('The project id is invalid.\\n'\n 'A project id can only contain letters, numbers, or underscores with at most'\n f'{MAX_PROJECT_ID_CHARS} characters.')\n elif project_helper.project_exists(course_code, s):\n msg = f'The project with id \"{s}\" already exists in \"{course_code}\".'\n else:\n is_valid, value = True, s\n\n return is_valid, value, msg", "def _validate_project_and_api_key(self, request, context):\n if not request.project_id:\n context.abort(code_pb2.UNAUTHENTICATED,\n 'No project ID set in the request.')\n api_key = request_metadata.extract_metadata_value(\n context, _API_METADATA_KEY)\n if not api_key:\n context.abort(code_pb2.UNAUTHENTICATED,\n 'No API key found in the metadata.')\n project_id = request.project_id\n api_key_for_project = self.data_store.read_by_proto_ids(\n project_id=project_id).api_key\n if api_key_for_project != api_key:\n context.abort(\n code_pb2.UNAUTHENTICATED,\n f'Project ID {project_id} and API key {api_key} does not match.')", "def validate_synapse_id(cls, value: str) -> str:\n if not re.search(\"^syn[0-9]+\", value):\n raise ValueError(f\"{value} is not a valid Synapse id\")\n return value", "def validate_key_id(key_id):\n if key_id:\n try:\n k_id = long(key_id)\n except ValueError:\n raise ValueError(\"Incorrect value %r. String must include \"\n \"only digits chars.\" % (key_id,))\n if k_id < 0:\n raise ValueError(\"Incorrect value %r. Only positive \"\n \"numbers are allowed.\" % (key_id,))", "def validate_token_format(self):\n token_slices = self.token.split(';')\n if len(token_slices) == 3:\n # We need to check if any of the token slices is empty\n for token_slice in token_slices:\n if len(token_slice) == 0:\n return False\n return True\n return False", "def _validate_apikey(api):\r\n if api.isalnum() and len(api) == 32:\r\n return api\r\n raise APIError(\"API key should be 32 alphanum char. 
long.\")", "def _validate_pipeline_id(pipeline_id):\n if pipeline_id is None or len(pipeline_id) == 0:\n error_and_quit(u'Empty pipeline id provided')\n if not set(pipeline_id) <= PIPELINE_ID_PERMITTED_CHARACTERS:\n message = u'Pipeline id {} has invalid character(s)\\n'.format(pipeline_id)\n message += u'Valid characters are: _ - a-z A-Z 0-9'\n error_and_quit(message)", "def detect_workspace_id():\n global workspace_id\n x = subprocess.check_output('/opt/microsoft/omsagent/bin/omsadmin.sh -l', shell=True)\n try:\n workspace_id = re.search('[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}', x).group(0)\n except AttributeError:\n workspace_id = None", "def _get_project_id_validator(course_code: str):\n def project_id_validator(s: str):\n \"\"\"Validator for project id prompt.\"\"\"\n value: Optional[str]\n is_valid, value, msg = False, None, None\n\n if not project_helper.project_id_is_valid(s):\n msg = ('The project id is invalid.\\n'\n 'A project id can only contain letters, numbers, or underscores with at most'\n f'{MAX_PROJECT_ID_CHARS} characters.')\n elif project_helper.project_exists(course_code, s):\n msg = f'The project with id \"{s}\" already exists in \"{course_code}\".'\n else:\n is_valid, value = True, s\n\n return is_valid, value, msg\n\n return project_id_validator", "def test_input_validation(self):\n with self.assertRaises(ValueError):\n get_repo_data(\" \")\n with self.assertRaises(ValueError):\n get_repo_data(\"nonExistentUserID\")\n\n self.assertTrue(get_repo_data(\"derobertsw\"))", "def validate_threat_report_command_args(report_id: str, report_format: str):\n if not report_id.isnumeric():\n raise DemistoException(\"Argument 'report_id' accepts only numeric literals.\")\n if report_format not in [\"html\", \"pdf\"]:\n raise DemistoException(\"Argument 'report_format' accepts only 'html' or 'pdf' as input.\")", "def validate_google_id(self, value):\n if value:\n if re.match(r\"^([a-z\\d])([a-z\\d_\\-]{10,})$\", value, re.I):\n return value\n else:\n raise exceptions.ValidationError(\"Invalid Google Drive id.\")", "def _ValidateSpreadSheetId(self, spreadsheet: str) -> str:\n spreadsheet_match = re.search(r'.*?([01][0-9A-Za-z_-]{20,}).*', spreadsheet)\n if not spreadsheet_match:\n self.ModuleError(\n f'spreadsheet ID is not in the correct format {spreadsheet}.',\n critical=True)\n return \"\" #return is required otherwise mypy will complain\n\n return spreadsheet_match.group(1)", "def process_workspace_identifiers(id_or_ref, workspace=None):\n objspec = {}\n if workspace is None:\n objspec[\"ref\"] = id_or_ref\n else:\n if isinstance(workspace, int):\n objspec[\"wsid\"] = workspace\n else:\n objspec[\"workspace\"] = workspace\n if isinstance(id_or_ref, int):\n objspec[\"objid\"] = id_or_ref\n else:\n objspec[\"name\"] = id_or_ref\n return objspec", "def _project_name_validator(s: str):\n value: Optional[str]\n is_valid, value, msg = False, None, None\n\n if not project_helper.project_name_is_valid(s):\n msg = ('The project name is invalid.\\n'\n f'A project name must have between 1 and {MAX_PROJECT_NAME_CHARS} characters.')\n else:\n is_valid, value = True, s\n\n return is_valid, value, msg", "def create_workspace(workspace_name, auth_domain_name, project=\"anvil-datastorage\"):\n\n # check if workspace already exists\n ws_exists, ws_exists_response = check_workspace_exists(workspace_name, project)\n\n if ws_exists is None:\n return False, ws_exists_response\n\n if not ws_exists: # workspace doesn't exist (404), create workspace\n # create 
request JSON\n create_ws_json = make_create_workspace_request(workspace_name, auth_domain_name, project) # json for API request\n\n # request URL for createWorkspace\n uri = f\"https://api.firecloud.org/api/workspaces\"\n\n # Get access token and and add to headers for requests.\n # -H \"accept: application/json\" -H \"Authorization: Bearer [token] -H \"Content-Type: application/json\"\n headers = {\"Authorization\": \"Bearer \" + get_access_token(), \"accept\": \"application/json\", \"Content-Type\": \"application/json\"}\n\n # capture response from API and parse out status code\n response = requests.post(uri, headers=headers, data=json.dumps(create_ws_json))\n status_code = response.status_code\n\n if status_code != 201: # ws creation fail\n print(f\"WARNING: Failed to create workspace with name: {workspace_name}. Check output file for error details.\")\n return False, response.text\n # workspace creation success\n print(f\"Successfully created workspace with name: {workspace_name}.\")\n return True, None\n\n # workspace already exists\n print(f\"Workspace already exists with name: {project}/{workspace_name}.\")\n print(f\"Existing workspace details: {json.dumps(json.loads(ws_exists_response), indent=2)}\")\n # make user decide if they want to update/overwrite existing workspace\n while True: # try until user inputs valid response\n update_existing_ws = input(\"Would you like to continue modifying the existing workspace? (Y/N)\" + \"\\n\")\n if update_existing_ws.upper() in [\"Y\", \"N\"]:\n break\n else:\n print(\"Not a valid option. Choose: Y/N\")\n if update_existing_ws.upper() == \"N\": # don't overwrite existing workspace\n deny_overwrite_message = f\"{project}/{workspace_name} already exists. User selected not to overwrite. Try again with unique workspace name.\"\n return None, deny_overwrite_message\n\n accept_overwrite_message = f\"{project}/{workspace_name} already exists. User selected to overwrite.\"\n return True, accept_overwrite_message # overwrite existing workspace - 200 status code for \"Y\"", "def clean_team_id(self):\n team_id = self.cleaned_data['team_id'].strip()\n\n if '/' in team_id:\n raise forms.ValidationError(\n gettext('Team ID cannot contain slashes.'))\n\n return team_id", "def workspace_id(self) -> Optional[str]:\n return pulumi.get(self, \"workspace_id\")", "def workspace(string, projectPath=\"string\", updateAll=bool, fileRuleList=bool, fileRuleEntry=\"string\", renderTypeEntry=\"string\", renderType=\"string\", active=bool, expandName=\"string\", objectType=\"string\", saveWorkspace=bool, shortName=bool, objectTypeList=bool, fileRule=\"string\", filter=bool, newWorkspace=bool, listFullWorkspaces=bool, listWorkspaces=bool, fullName=bool, objectTypeEntry=\"string\", variableEntry=\"string\", rootDirectory=bool, update=bool, list=bool, renderTypeList=bool, variableList=bool, removeVariableEntry=\"string\", create=\"string\", baseWorkspace=\"string\", directory=\"string\", variable=\"string\", removeFileRuleEntry=\"string\", openWorkspace=bool):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Some commands fail because the package manager is locked (apt-get/dpkg only); this will allow retries on failing commands.
def run_command_with_retries(hutil, cmd, retries, check_error = True, log_cmd = True, initial_sleep_time = 30, sleep_increase_factor = 1):
    try_count = 0
    sleep_time = initial_sleep_time # seconds
    dpkg_locked_search = r'^.*dpkg.+lock.*$'
    dpkg_locked_re = re.compile(dpkg_locked_search, re.M)

    while try_count <= retries:
        exit_code, output = run_command_and_log(hutil, cmd, check_error, log_cmd)
        if exit_code is 0:
            break
        if not dpkg_locked_re.search(output):
            break
        try_count += 1
        hutil_log_info(hutil, 'Retrying command "{0}" because package manager ' \
                              'is locked. Command failed with exit code ' \
                              '{1}'.format(cmd, exit_code))
        time.sleep(sleep_time)
        sleep_time *= sleep_increase_factor

    if exit_code is not 0 and dpkg_locked_re.search(output):
        hutil_log_info(hutil, 'The package manager still appears to be ' \
                              'locked, so installation cannot completed.')
        exit_code = DPKGLockedErrorCode

    return exit_code
[ "def _retry_command(self, command_list, text, prefix, verbose=None):\n\n run_command = check_output\n if self.verbose or verbose:\n run_command = call\n\n for retry in range(self.max_retries):\n retry += 1\n try:\n output = run_command(command_list, env=self.env)\n except Exception:\n print(\"\"\"\n*******************************************************************************\nThere was a failure in constructing the conda environment.\nAttempt {retry} of {max_retries} will start {retry} minute(s) from {t}.\n*******************************************************************************\n\"\"\".format(retry=retry, max_retries=self.max_retries, t=time.asctime()))\n time.sleep(retry*60)\n else:\n break\n if retry == self.max_retries:\n raise RuntimeError(\"\"\"\nThe conda environment could not be constructed. Please check that there is a\nworking network connection for downloading conda packages.\n\"\"\")\n print('Completed {text}:\\n {prefix}'.format(text=text, prefix=prefix),\n file=self.log)\n\n # check that environment file is updated\n self.environments = self.update_environments()\n if prefix not in self.environments:\n raise RuntimeError(\"\"\"\nThe newly installed environment cannot be found in\n${HOME}/.conda/environments.txt.\n\"\"\")", "def testRetryCommands(self):\r\n\r\n class MockBusyDevice(MockDevice):\r\n def __init__(self):\r\n MockDevice.__init__(self)\r\n self.last_cmd = None\r\n self.retried = []\r\n\r\n # this command is special (and totally made up)\r\n # it does not return 515 errors like the others\r\n def at_test(self, one):\r\n return True\r\n\r\n def process(self, cmd):\r\n\r\n # if this is the first time we've seen\r\n # this command, return a BUSY error to\r\n # (hopefully) prompt a retry\r\n if self.last_cmd != cmd:\r\n self._output(\"+CMS ERROR: 515\")\r\n self.last_cmd = cmd\r\n return None\r\n\r\n # the second time, note that this command was\r\n # retried, then fail. 
kind of anticlimatic\r\n self.retried.append(cmd)\r\n return False\r\n\r\n device = MockBusyDevice()\r\n gsm = pygsm.GsmModem(device=device)\r\n n = len(device.retried)\r\n\r\n # override the usual retry delay, to run the tests fast\r\n gsm.retry_delay = 0.01\r\n\r\n # boot the modem, and make sure that\r\n # some commands were retried (i won't\r\n # check _exactly_ how many, since we\r\n # change the boot sequence often)\r\n gsm.boot()\r\n self.assert_(len(device.retried) > n)\r\n\r\n # try the special AT+TEST command, which doesn't\r\n # fail - the number of retries shouldn't change\r\n n = len(device.retried)\r\n gsm.command(\"AT+TEST=1\")\r\n self.assertEqual(len(device.retried), n)", "def test_ubuntu_no_arguments(self):\n distribution = 'ubuntu-14.04'\n commands = task_install_flocker(distribution=distribution)\n self.assertEqual(commands, sequence([\n run(command='apt-get -y install software-properties-common'),\n run(command='add-apt-repository -y ppa:james-page/docker'),\n run(command=\"add-apt-repository -y \"\n \"'deb https://s3.amazonaws.com/clusterhq-archive/ubuntu 14.04/amd64/'\"), # noqa\n run(command='apt-get update'),\n run(command='apt-get -y --force-yes install clusterhq-flocker-node'), # noqa\n ]))", "def try_rep(n, cmd):\n for i in xrange(n):\n if i > 0:\n print >> sys.stderr, \"retrying \" + cmd\n ret = spc.Popen(cmd, shell=True).wait()\n if ret == 0:\n return ret\n print >> sys.stderr, \"***** FAILED ***** (with ret %d): %s\" % (ret, cmd)\n return ret", "def aptUpdate():\n logging.debugv(\"functions/linux.py->aptUpdate()\", [])\n cmd = \"apt-get -qqy update 2>/dev/null\"\n try:\n apt = os.popen(cmd)\n except excepts.RunException, msg:\n logging.error(\"APT update error: %s\" % str(msg))", "def BackfillCommandAttempts():\n _GetAPIClient().coordinator().backfillCommandAttempts().execute()", "async def on_command_error(self, ctx, error: errors):\r\n if isinstance(error, errors.CommandNotFound):\r\n await ctx.send(f'Invalid command. Please type `{repo.get_prefix}help` to see a list of commands.')\r\n\r\n elif isinstance(error, errors.MissingRequiredArgument) or isinstance(error, errors.BadArgument):\r\n await send_command_help(ctx)\r\n\r\n elif isinstance(error, errors.CommandOnCooldown):\r\n await ctx.send(f'Woah, slow down there! Retry again in {error.retry_after:.0f} seconds.')\r\n\r\n elif isinstance(error, errors.CommandInvokeError):\r\n\r\n _traceback = traceback.format_list(traceback.extract_tb(error.__traceback__))\r\n _traceback = ''.join(_traceback)\r\n\r\n error_message = f'```Python\\n{_traceback}{type(error).__name__}: {error}```'\r\n\r\n await ctx.send(f'There was an error processing the command ;w; {error_message}')\r\n\r\n elif isinstance(error, errors.MissingPermissions):\r\n await ctx.send('You do not have the required permissions to run this command.')\r\n\r\n elif isinstance(error, errors.BotMissingPermissions):\r\n await ctx.send('I do not have permission to run this command ;w;')\r\n\r\n elif isinstance(error, HTTPException):\r\n # if HTTPResponse == 413:\r\n ctx.send('The file size is too large for me to send over Discord ;o;')", "def install_ubuntu10_packages():\r\n sudo_as('aptitude clean && aptitude update')\r\n try:\r\n sudo_as('aptitude -y install %s' % ' '.join(env.packages.get('ubuntu10').get('required')))\r\n except Exception, e:\r\n print \"Required packages installation process failed. 
Cannot proceed!\"\r\n raise\r\n \r\n try:\r\n sudo_as('aptitude -y install %s' % ' '.join(env.packages.get('ubuntu10').get('optional')))\r\n except Exception, e:\r\n print \"Optional packages installation process failed. But proceeding nevertheless. Assuming it'll be fixed manually!\"\r\n print \"Error was %s\", e \r\n\r\n for action in env.packages.get('ubuntu10').get('additional_commands'):\r\n try:\r\n sudo_as(action)\r\n except Exception, e:\r\n print \"Additional_command %s failed. Continuing anyway!\" % action\r\n print \"Error was %s\", e\r\n continue", "def test_invalid_commands(self):\n self.assertEqual(\n False,\n self.command_runner.MaybeCheckForAndOfferSoftwareUpdate('update', 0))", "def spawn_commands_and_wait_forever(*cmds, **kwargs):\n import time\n error_msg = kwargs.get('error_msg', '')\n\n processes = []\n for cmd in cmds:\n processes.append(spawn_command(cmd))\n\n def poll_processes():\n for process in processes:\n if process.poll() is not None:\n print(error_msg)\n return process.returncode\n return None\n\n try:\n failure_code = None\n while failure_code is None:\n failure_code = poll_processes()\n time.sleep(0.1)\n return failure_code\n except KeyboardInterrupt:\n return 0\n finally:\n for process in processes:\n process.terminate()", "def _try_command(self, mask_error, func, *args, **kwargs):\n from miio import DeviceException\n try:\n result = yield from self.hass.async_add_job(\n partial(func, *args, **kwargs))\n\n _LOGGER.debug(\"Response received from air purifier: %s\", result)\n\n return result == SUCCESS\n except DeviceException as exc:\n _LOGGER.error(mask_error, exc)\n return False", "async def on_command_error(self, ctx: Context, _error):\n await self.handle_error(ctx, _error)", "async def test_cluster_down_overreaches_retry_attempts(\n self,\n error: Union[Type[TimeoutError], Type[ClusterDownError], Type[ConnectionError]],\n ) -> None:\n with mock.patch.object(RedisCluster, \"_execute_command\") as execute_command:\n\n def raise_error(target_node, *args, **kwargs):\n execute_command.failed_calls += 1\n raise error(\"mocked error\")\n\n execute_command.side_effect = raise_error\n\n rc = await get_mocked_redis_client(host=default_host, port=default_port)\n\n with pytest.raises(error):\n await rc.get(\"bar\")\n assert execute_command.failed_calls == rc.cluster_error_retry_attempts\n\n await rc.close()", "def fail_on_npm_install():\n return 1", "def check_results(self):\n try:\n while True:\n item = self._pop_completed() # will throw Empty\n if not item.get_results().wasSuccessful():\n raise ExecutionError(\"Error Executing Command: \", item)\n except Empty:\n return", "def test_exec_fail_no_distro(self): # suppress(no-self-use)\n with SafeTempDir() as container_dir:\n with ExpectedException(RuntimeError):\n cmd = PLATFORM_PROGRAM_MAPPINGS[platform.system()][\"0\"]\n run_use_container_on_dir(container_dir, cmd=cmd)", "async def test_subprocess_exceptions(\n caplog: pytest.LogCaptureFixture, hass: HomeAssistant, load_yaml_integration: None\n) -> None:\n\n with patch(\n \"homeassistant.components.command_line.notify.subprocess.Popen\"\n ) as check_output:\n check_output.return_value.__enter__ = check_output\n check_output.return_value.communicate.side_effect = [\n subprocess.TimeoutExpired(\"cmd\", 10),\n None,\n subprocess.SubprocessError(),\n ]\n\n await hass.services.async_call(\n NOTIFY_DOMAIN, \"test6\", {\"message\": \"error\"}, blocking=True\n )\n assert check_output.call_count == 2\n assert \"Timeout for command\" in caplog.text\n\n await 
hass.services.async_call(\n NOTIFY_DOMAIN, \"test6\", {\"message\": \"error\"}, blocking=True\n )\n assert check_output.call_count == 4\n assert \"Error trying to exec command\" in caplog.text", "def prepare():\n packager = get_packager()\n if packager == APT:\n sudo('apt-get update')\n elif package == YUM:\n sudo('yum update')\n else:\n raise Exception, 'Unknown packager: %s' % (packager,)", "def test_101_cinder_failback(self):\n cinder_rbd_mirroring_mode = get_cinder_rbd_mirroring_mode(\n self.cinder_ceph_app_name)\n if cinder_rbd_mirroring_mode != 'image':\n logging.warning(\n \"Skipping 'test_101_cinder_failback' since Cinder RBD \"\n \"mirroring mode is {}.\".format(cinder_rbd_mirroring_mode))\n return\n\n session = openstack.get_overcloud_keystone_session()\n cinder = openstack.get_cinder_session_client(session, version=3)\n\n # Check if the Cinder volume host is already failed-over\n host = 'cinder@{}'.format(self.cinder_ceph_app_name)\n svc = cinder.services.list(host=host, binary='cinder-volume')[0]\n self.assertEqual(svc.replication_status, 'failed-over')\n self.assertEqual(svc.status, 'disabled')\n\n # Check if the test Cinder volume is already present. The method\n # 'cinder.volumes.find' raises 404 if the volume is not found.\n volume = cinder.volumes.find(name=self.test_cinder_volume_name)\n\n # Execute the Cinder volume failback\n openstack.failover_cinder_volume_host(\n cinder=cinder,\n backend_name=self.cinder_ceph_app_name,\n target_backend_id='default',\n target_status='enabled',\n target_replication_status='enabled')\n\n # Check if the test volume is still available after failback\n self.assertEqual(cinder.volumes.get(volume.id).status, 'available')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set and retrieve the contents of HandlerEnvironment.json as JSON
def get_handler_env():
    global HandlerEnvironment
    if HandlerEnvironment is None:
        handler_env_path = os.path.join(os.getcwd(), 'HandlerEnvironment.json')
        try:
            with open(handler_env_path, 'r') as handler_env_file:
                handler_env_txt = handler_env_file.read()
            handler_env=json.loads(handler_env_txt)
            if type(handler_env) == list:
                handler_env = handler_env[0]
            HandlerEnvironment = handler_env
        except Exception as e:
            waagent_log_error(e.message)
    return HandlerEnvironment
[ "def load_backend() -> json:\n\treturn _load_config(\"env.json\")", "def load():\n flask_env = os.environ[\"FLASK_ENV\"]\n\n with open(f\"settings.{flask_env}.json\", \"r\") as f:\n return Dict(json.load(f))", "def get_setup_json():\n with open(FILEPATH_SETUP_JSON, \"r\") as handle:\n setup_json = json.load(handle) # , object_pairs_hook=OrderedDict)\n\n return setup_json", "def load_secrets_to_env():\n with open('secrets.json', 'r') as f:\n for env_name, env_value in json.loads(f.read()).items():\n os.environ[env_name] = env_value", "def load_environment_variables():\n config_json = json.load(open('settings/config.json'))\n\n for key in config_json.keys():\n if key not in os.environ:\n os.environ[key] = config_json[key]", "def _get_environment_data(self):\n if not self.env_file:\n return None\n f, url_parts = self._open(self.env_file, self.basedir)\n return f.read().decode('utf-8')", "def environment_info(self):\n\n return {\n \"application_environment\": {\n \"framework\": \"pylons\",\n \"env\": dict(os.environ),\n \"language\": \"python\",\n \"language_version\": sys.version.replace('\\n', ''),\n \"application_root_directory\": self.project_root()\n },\n \"client\": {\n \"name\": \"pylons-exceptional\",\n \"version\": __version__,\n \"protocol_version\": EXCEPTIONAL_PROTOCOL_VERSION\n }\n }", "def generate_env(self):\n for key in sorted(list(self.spec.keys())):\n if self.spec[key]['type'] in (dict, list):\n value = f\"\\'{json.dumps(self.spec[key].get('example', ''))}\\'\"\n else:\n value = f\"{self.spec[key].get('example', '')}\"\n print(f\"export {self.env_prefix}_{key.upper()}={value}\")", "def _file_environment(self, module_environment):\n js_env = self.state.document.settings.env.app.config.js_environment\n\n file_id = module_environment[\"file_id\"]\n file_environment = js_env[\"file\"][file_id]\n return file_environment", "def build_environment_from_json(self):\n jsondata = JsonData.load_json_file(filename)\n # Create a instance of JsonData to store object that\n # needs to be sent to UI\n self.render = JsonData()\n self.render.objects = {}\n\n for name in jsondata.keys():\n obj = eval(name.capitalize())\n self.render.objects[name] = self.create_environment_object(\n jsondata, obj)\n\n self.hub = self.render.objects['hub'][0]\n try:\n self.foods = []\n for site in self.render.objects['sites']:\n self.site = site # self.render.objects['sites'][0]\n\n for i in range(self.num_agents):\n f = Food(\n i, location=self.site.location,\n radius=self.site.radius)\n f.agent_name = None\n self.grid.add_object_to_grid(f.location, f)\n self.foods.append(f)\n except KeyError:\n pass\n\n if self.viewer:\n self.ui = UI(\n (self.width, self.height), [self.hub], self.agents,\n [self.site], food=self.foods)", "def setup_namespace(json_filenm):\n if json_filenm:\n tns = json.load(open(json_filenm))\n # Need to change Unicode keys into str keys.\n ns = {}\n for k, v in tns.items():\n ns[str(k)] = v\n else:\n ns = {}\n # Now add in the environment variables...\n for k, v in os.environ.items():\n # Don't override an JSON parameter with an environment variable.\n if k not in ns:\n ns[k] = v\n return ns", "def load_env(state, eid, socket, env_path=DEFAULT_ENV_PATH):\n env = {}\n if eid in state:\n env = state.get(eid)\n elif env_path is not None:\n p = os.path.join(env_path, eid.strip(), \".json\")\n if os.path.exists(p):\n with open(p, \"r\") as fn:\n env = tornado.escape.json_decode(fn.read())\n state[eid] = env\n\n if \"reload\" in env:\n socket.write_message(json.dumps({\"command\": \"reload\", 
\"data\": env[\"reload\"]}))\n\n jsons = list(env.get(\"jsons\", {}).values())\n windows = sorted(jsons, key=lambda k: (\"i\" not in k, k.get(\"i\", None)))\n for v in windows:\n socket.write_message(v)\n\n socket.write_message(json.dumps({\"command\": \"layout\"}))\n socket.eid = eid", "def load(self):\r\n try:\r\n if not path.isfile(self.json_name()):\r\n self.values = self.default_json_values()\r\n with open(self.json_name(), \"r\") as json_file:\r\n self.values = loads(json_file.read())\r\n except:\r\n print(\"Failed to load from json file\")\r\n self.values = self.default_json_values()", "def setup(self):\n with open(self.file_name, \"w\") as f:\n json.dump(obj=self.j_config, fp=f, indent=4, sort_keys=True)", "def get_environment_from_request(self):\n environment = Environment.objects.get(api_key=self.kwargs['environment_api_key'])\n return environment", "def write_json(file, env=None):\n build_conf = build_dict(env=env)\n if isinstance(file, basestring):\n with open(file, \"w\") as f:\n json.dump(build_conf, f)\n else:\n json.dump(build_conf, file)", "def populate_jinja_environment(self, env):\n env.filters['registry'] = self.registry\n env.globals['flattened_url'] = self.flattened_url\n env.globals['new_etcd_discovery_token'] = self.new_etcd_discovery_token\n env.globals['load_coreos_ami'] = self.load_coreos_ami_id\n env.globals['dockersystemd'] = self._dockersystemd_template", "def setup_environment():", "def get_environment(id=None, name=None):\n data = get_environment_raw(id, name)\n if data:\n return utils.format_json(data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mimic waagent method RunGetOutput in case waagent is not available. Run shell command and return exit code and output.
def run_get_output(cmd, chk_err = False, log_cmd = True):
    if 'Utils.WAAgentUtil' in sys.modules:
        # WALinuxAgent-2.0.14 allows only 2 parameters for RunGetOutput
        # If checking the number of parameters fails, pass 2
        try:
            sig = inspect.signature(waagent.RunGetOutput)
            params = sig.parameters
            waagent_params = len(params)
        except:
            try:
                spec = inspect.getargspec(waagent.RunGetOutput)
                params = spec.args
                waagent_params = len(params)
            except:
                waagent_params = 2
        if waagent_params >= 3:
            exit_code, output = waagent.RunGetOutput(cmd, chk_err, log_cmd)
        else:
            exit_code, output = waagent.RunGetOutput(cmd, chk_err)
    else:
        try:
            output = subprocess.check_output(cmd, stderr = subprocess.STDOUT, shell = True)
            exit_code = 0
        except subprocess.CalledProcessError as e:
            exit_code = e.returncode
            output = e.output
    return exit_code, output.encode('utf-8').strip()
[ "def test_shell_with_return_code(self):\n response, code = self.device.shell_capability.shell(\n self.test_config[\"shell_cmd\"], include_return_code=True)\n self.assertTrue(response)\n self.assertIsInstance(response, str)\n self.assertEqual(code, _SUCCESS_RETURN_CODE)", "def _run_command(\n self,\n argv,\n expected_stdout=None,\n expected_stderr='',\n expected_status=0,\n format_vars=None,\n remove_version=False,\n expected_json_in_stdout: Optional[dict] = None,\n expected_part_of_stdout=None,\n unexpected_part_of_stdout=None,\n ):\n expected_stderr = self._normalize_expected_output(expected_stderr, format_vars)\n stdout, stderr = self._get_stdouterr()\n console_tool = ConsoleTool(self.b2_api, stdout, stderr)\n try:\n actual_status = console_tool.run_command(['b2'] + argv)\n except SystemExit as e:\n actual_status = e.code\n\n actual_stdout = self._trim_trailing_spaces(stdout.getvalue())\n actual_stderr = self._trim_trailing_spaces(stderr.getvalue())\n\n # ignore any references to specific api version\n if remove_version:\n actual_stdout = self._remove_api_version_number(actual_stdout)\n actual_stderr = self._remove_api_version_number(actual_stderr)\n\n if expected_stdout is not None and expected_stdout != actual_stdout:\n expected_stdout = self._normalize_expected_output(expected_stdout, format_vars)\n print('EXPECTED STDOUT:', repr(expected_stdout))\n print('ACTUAL STDOUT: ', repr(actual_stdout))\n print(actual_stdout)\n if expected_part_of_stdout is not None and expected_part_of_stdout not in actual_stdout:\n expected_part_of_stdout = self._normalize_expected_output(\n expected_part_of_stdout, format_vars\n )\n print('EXPECTED TO FIND IN STDOUT:', repr(expected_part_of_stdout))\n print('ACTUAL STDOUT: ', repr(actual_stdout))\n if expected_stderr != actual_stderr:\n print('EXPECTED STDERR:', repr(expected_stderr))\n print('ACTUAL STDERR: ', repr(actual_stderr))\n print(actual_stderr)\n\n if expected_json_in_stdout is not None:\n json_match = self.json_pattern.match(actual_stdout)\n if not json_match:\n self.fail('EXPECTED TO FIND A JSON IN: ' + repr(actual_stdout))\n\n found_json = json.loads(json_match.group('dict_json') or json_match.group('list_json'))\n if json_match.group('dict_json'):\n self.assertDictIsContained(expected_json_in_stdout, found_json)\n else:\n self.assertListOfDictsIsContained(expected_json_in_stdout, found_json)\n\n if expected_stdout is not None:\n self.assertEqual(expected_stdout, actual_stdout, 'stdout')\n if expected_part_of_stdout is not None:\n self.assertIn(expected_part_of_stdout, actual_stdout)\n if unexpected_part_of_stdout is not None:\n self.assertNotIn(unexpected_part_of_stdout, actual_stdout)\n self.assertEqual(expected_stderr, actual_stderr, 'stderr')\n self.assertEqual(expected_status, actual_status, 'exit status code')", "def test_check_output_failure(self):\n self.check_output.side_effect = actions.actions.CalledProcessError(\n 1, \"Failure\")\n\n actions.actions.diskusage([])\n self.check_output.assert_called_once_with(['swift-recon', '-d'])\n\n self.action_set.assert_called()\n self.action_fail.assert_called()", "def call_tools(self, cmd):\n return_value = collections.namedtuple('return_value', ['status', 'out'])\n sys.stdout.flush()\n p = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = p.communicate()\n if self.args.verbose:\n print(\"\\n\" + out)\n\n if err:\n return return_value(status=-1, out=out)\n\n # Check if we there is an error in the output\n m = re.search(r'dfuERROR.*?status\\(([0-9]*)', 
out)\n if m:\n return return_value(status=int(m.group(1)), out=out)\n\n return return_value(status=0, out=out)", "def syscall(*args):\n return subprocess.run(args, capture_output=True, check=True, text=True).stdout", "def check_output(self, args, dry_run=False, env=None):\n if dry_run:\n print \"Would have run '%s'\" % ' '.join(args)\n return ''\n\n output = subprocess.check_output(args, env=env)\n\n if self.verbose:\n print \"check_output(%s) returned '%s'\" % (args, output)\n return output", "def check_output(self, args, dry_run=False, env=None):\n if dry_run:\n print(\"Would have run '%s'\" % ' '.join(args))\n return ''\n\n output = subprocess.check_output(args, env=env)\n\n if self.verbose:\n print(\"check_output(%s) returned '%s'\" % (args, output))\n return output", "def test__run(self, mock_popen_args):\n wrapper = sadf.SADFWrapper()\n mock_popen_args.return_value = ['echo', 'hello world']\n generator = wrapper._run(1, 2, 3)\n self.assertIn(b'hello world', list(generator)[0])", "def test_o_exec_command_io(self):\n\t\ttheResult = False\n\t\ttry:\n\t\t\ttheResult = check_exec_command_has_output(self, [\n\t\t\t\tstr(\"-m\"),\n\t\t\t\tstr(\"piaplib.pocket\"),\n\t\t\t\tstr(\"lint\"),\n\t\t\t\tstr(\"execve\"),\n\t\t\t\tstr(\"\"\"--out\"\"\").format(str(sys.executable)),\n\t\t\t\tstr(\"\"\"--cmd={}\"\"\").format(str(sys.executable)),\n\t\t\t\tstr(\"\"\"--args={}\"\"\").format(str(\"piaplib.pocket\")),\n\t\t\t\tstr(\"\"\"--args={}\"\"\").format(str(\"--help\"))\n\t\t\t])\n\t\texcept Exception as err:\n\t\t\tprint(str(\"\"))\n\t\t\tprint(str(type(err)))\n\t\t\tprint(str(err))\n\t\t\tprint(str((err.args)))\n\t\t\tprint(str(\"\"))\n\t\t\tothererr = None\n\t\t\tdel othererr\n\t\t\ttheResult = False\n\t\tassert theResult", "def test_ncanda_help_output(create_indiv_script_call):\n args = ['ncanda', '-h']\n completed_process = subprocess.run([create_indiv_script_call] + args)\n assert completed_process.returncode == 0", "def test_invoke_shell(self):\n self.client.invoke_shell()\n self.p_client.invoke_shell.assert_called_with()\n return", "def test_run_this(self):\n run_this_return, run_this_output = service_utils.run_this('fake-command')\n self.assertTrue(run_this_return != 0)\n\n run_this_return, run_this_output = service_utils.run_this('type bash')\n self.assertEqual(run_this_return, 0)", "def Run(cmd):\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out = proc.stdout.read()\n err = proc.stderr.read()\n code = proc.wait()\n return int(code), out.decode('UTF-8'), err.decode('UTF-8')", "def test_lldbmi_process_output(self):\n\n self.spawnLldbMi(args=None)\n\n # Load executable\n self.runCmd(\"-file-exec-and-symbols %s\" % self.myexe)\n self.expect(\"\\^done\")\n\n # Run\n self.runCmd(\"-exec-run\")\n self.expect(\"\\^running\")\n\n # Test that a process output is wrapped correctly\n self.expect(\"\\@\\\"'\\\\\\\\r\\\\\\\\n\\\"\")\n self.expect(\"\\@\\\"` - it's \\\\\\\\\\\\\\\\n\\\\\\\\x12\\\\\\\\\\\"\\\\\\\\\\\\\\\\\\\\\\\\\\\"\")", "def test_watchlist_get_command(on_cloud, hr_output, requests_mock) -> None:\n watchlist_mock_response = load_mock_response(\"watchlist_get.json\")\n requests_mock.get(\"/api/v1/watchlist/devices\", json=watchlist_mock_response)\n\n network_mock_response = load_mock_response(\"network_get.json\")\n requests_mock.get(\"/api/v1/networks\", json=network_mock_response)\n client = init_mock_client(requests_mock, on_cloud)\n result = ExtraHop_v2.watchlist_get_command(client, on_cloud)\n\n assert len(result.outputs) == 3 # type: ignore\n 
assert result.outputs_prefix == EXTRAHOP_DEVICE\n assert result.readable_output == hr_output", "def mock_which(*args, **kwargs):\n return subprocess.CompletedProcess(\n args=None, returncode=0, stdout=b\"/my/conda/env/bin/Rscript\\n\"\n )", "def subprocessCheckedOutput(args, addToErrorInfo=''):\r\n try:\r\n return subprocess.check_output(args)\r\n except Exception as err:\r\n if addToErrorInfo:\r\n message = ('''Command <{0}> failed:\\n ERROR: {1}\\n {2}'''.format(' '.join(args), err, addToErrorInfo))\r\n else:\r\n message = ('''Command <{0}> failed:\\n ERROR: {1}'''.format(' '.join(args), err))\r\n sys.exit(message)", "def run(self, _input):\n cmd = self.get_cmd()\n self.log.info('command: %s', \" \".join(cmd))\n self.log.debug('input: %s', _input)\n with subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE,\n # only since 3.6: encoding='utf-8',\n ) as p:\n output, err = p.communicate(input=_input.encode('utf-8'))\n self.log_stderr(err)\n output = output.decode('utf-8')\n self.log.debug(\"got output\")\n retval = p.wait()\n self.log.debug(\"waited\")\n self.log.debug(\"%s: %i\", \" \".join(cmd), retval)\n if retval != 0:\n raise ValueError(\n \"cannot execute {}: {}\\n{}\"\n .format(\" \".join(cmd), retval, err.decode('utf-8')))\n # self.log.info('output: %s', output)\n return output", "def test_run_positive(self, *mocks):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"--skip-consistency-check\",\n \"get-wealth\",\n FetchAICrypto.identifier,\n ],\n standalone_mode=False,\n )\n self.assertEqual(result.exit_code, 0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize waagent logger. If waagent has not been imported, catch the exception.
def init_waagent_logger():
    try:
        waagent.LoggerInit('/var/log/waagent.log','/dev/stdout', True)
    except Exception as e:
        print('Unable to initialize waagent log because of exception ' \
              '{0}'.format(e))
[ "def _init_logger(self):\n #self._logger = logger_factory.make_logger(__name__)", "def __init__(self):\n self._logger = logging.getLogger(__name__)", "def _init_log():\n global log\n\n orig_logger_cls = logging.getLoggerClass()\n logging.setLoggerClass(EyeLogger)\n try:\n log = logging.getLogger('eye')\n log._set_defaults()\n finally:\n logging.setLoggerClass(orig_logger_cls)\n\n return log", "def setup(self):\n from Utilities.movoto.logger import MLogger\n self._mlogger = MLogger().getLogger(*self._args, **self._kw)", "def __init__(self, device_name, msg):\n super(LoggingAgentNotEnabledError, self).__init__(\n device_name, msg, reason=\"agent not installed\")", "def _init_logger(self, ensemble=False):\n if not self.use_logger:\n return\n\n run_name = f\"{self.name}-fold-{self.fold}\"\n if ensemble:\n run_name = self.name + \"-ensemble\"\n\n wandb.init(\n project=self.project,\n group=f\"{self.name}-eval\",\n name=run_name,\n config=self.model_config()\n )", "def setup_logging():\n client = logging.Client()\n client.get_default_handler()\n client.setup_logging()", "def __init__(self):\n self.logger = logging.getLogger('TwitterLogger')\n self.fh = logging.FileHandler('scraper_logs.log')\n self.set_level()\n self.set_formatter()\n self.add_handler()", "def logging_init():\n # Default logging levels. These can be overridden when the config file is loaded.\n logging.getLogger().setLevel(logging.WARNING)\n logging.getLogger('neocommon').setLevel(logging.INFO)\n logging.getLogger('fetch').setLevel(logging.INFO)\n\n # Add logging handlers\n logging.getLogger().addHandler(_LOG_HANDLER)", "def setLogger():\n import inspect\n s = inspect.stack()\n global logit\n for c in s: # for each context in the call stack...\n if \"logit\" in c[0].f_locals: # first in tuple is frame\n logit = c[0].f_locals[\"logit\"]\n try:\n if logit is None:\n logit = local.defaultLogit\n #print >>open(\"/tmp/foo1\",\"a\"), dir(local)\n except:\n logit = local.defaultLogit\n logit(\"logger initialized\")", "def create_logger(self):\n try:\n lg.basicConfig(filename='pre_processing_logger.log', level = lg.INFO ,format='%(asctime)s - %(levelname)s - %(message)s')\n except Exception as e:\n print(e)", "def init_logger(self):\n logger.Reinitialize(level=self.log_level, logToFileAtSpecifiedPath=self.log_file)", "def initialize_logging():\n # General python logging\n root = logging.getLogger()\n root.setLevel(logging.NOTSET)\n hdlr = HubstorageLogHandler()\n hdlr.setLevel(logging.INFO)\n hdlr.setFormatter(logging.Formatter('[%(name)s] %(message)s'))\n root.addHandler(hdlr)\n\n # Silence commonly used noisy libraries\n try:\n import boto # boto overrides its logger at import time\n except ImportError:\n pass\n\n nh = logging.NullHandler()\n for ln in ('boto', 'requests', 'hubstorage'):\n lg = logging.getLogger(ln)\n lg.propagate = 0\n lg.addHandler(nh)\n\n # Redirect standard output and error to HS log\n sys.stdout = StdoutLogger(0, 'utf-8')\n sys.stderr = StdoutLogger(1, 'utf-8')\n\n # Twisted specifics (includes Scrapy)\n obs = HubstorageLogObserver(hdlr)\n _oldshowwarning = warnings.showwarning\n txlog.startLoggingWithObserver(obs.emit, setStdout=False)\n warnings.showwarning = _oldshowwarning\n return hdlr", "def test_logging_01(self):\n # reset the logger\n actual = CrawlConfig.log(close=True)\n self.expected(None, actual)\n\n # now create a logger\n trg_logpath = self.tmpdir('CrawlConfig.log')\n exp_logpath = U.abspath(trg_logpath)\n CrawlConfig.log(logpath=trg_logpath)\n\n # now retrieving the logger should get the one just 
set\n actual = CrawlConfig.log()\n self.assertTrue(isinstance(actual, logging.Logger),\n \"Expected logging.Logger, got %s\" % (actual))\n self.expected(exp_logpath, actual.handlers[0].baseFilename)", "def test_logging_nocfg(self):\n self.dbgfunc()\n with ctx.nested(U.Chdir(self.tmpdir()), U.tmpenv('CRAWL_CONF', None)):\n # reset any logger and config that has been initialized\n CrawlConfig.get_config(reset=True, soft=True)\n CrawlConfig.log(close=True)\n\n # now ask for a default logger\n l = CrawlConfig.log(\"test log message\")\n\n # and check that it has the right handler\n self.expected(1, len(l.handlers))\n self.expected(U.default_logpath(), l.handlers[0].stream.name)\n self.expected(10*1024*1024, l.handlers[0].maxBytes)\n self.expected(5, l.handlers[0].backupCount)", "def init_logger():\n logger = logging.getLogger('animethemes-dl')\n if logger.handlers:\n return False\n \n logger_handler = logging.StreamHandler()\n logger.addHandler(logger_handler)\n \n logger_handler.setFormatter(ColorFormatter())\n \n logger.propagate = False\n\n return True", "def start_logger(self):\n # Logger just Works for Python3, this will be updated \n self.logger = logging.getLogger('Otto-CT-v0.0.1.beta') # Change logger\n self.logger.info('Otto Logger is been activated.')", "def test_connection_made_PyLogger(self):\n self._run_connection_made()\n bot.loggers.PyLogger.assert_called_once_with()", "def __init_loggers(self):\n loggers = ['monitor', 'system']\n\n for logger in loggers:\n self.__init_logger(logger)\n\n self.__init_logger('monitor', 'stdout')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log informational message, being cautious of possibility that waagent may not be imported
def waagent_log_info(message):
    if 'Utils.WAAgentUtil' in sys.modules:
        waagent.Log(message)
    else:
        print('Info: {0}'.format(message))
[ "def info(self,msg):\n self.logger.info(msg)", "def log_message(self) -> global___LogMessage:", "def logger_info(self,text):\n logging.info(self.log_my_name()+' '+text)", "def info(msg):\n message(msg, flag='i')", "def logger_debug(self,text):\n logging.debug(self.log_my_name()+' '+text)", "def info(self, msg, *args, **kwargs):\n self.write(msg, level='INFO', *args, **kwargs)", "def info(self, message: str, **extra: t.Any):\n self.log(logging.INFO, message, extra)", "def hutil_log_info(hutil, message):\n if hutil is not None:\n hutil.log(message)\n else:\n print('Info: {0}'.format(message))", "def logDetails(self):\n for k,v in self._parser.getDetailsDict().items():\n self._log.debug(\"> %11s : %s\" % (k, str(v)[:50]))", "def info(self, message, farg=None):\n self.write(message, farg=farg, level=u\"INFO\")", "def cmd_info(self):\r\n self.log.setLevel(logging.INFO)\r\n self.log.info('Switching to INFO threshold')", "def handle_inform(self, msg):\n print msg", "def info(self, *args: Any, **kwargs: Any) -> None:\n\n self.client.logger.info(*args, **kwargs)", "def logger_warning(self,text):\n logging.warning(self.log_my_name()+' '+text)", "def debug(self,msg):\n self.logger.debug(msg)", "def info(self, *messages):\n self.log(LOGLEVELS[\"info\"], \"\\n[Info]\", *messages)", "def log_script_result(self, message):\n\n testlog.wtl_log(\"!*** %s\" %(message), force=True)", "def info(self,msg):\n\tentry = self._entry()\n entry['msg'] = msg\n self.log_collection.insert(entry)", "def system_message(self,msg,ukey):\n self.log(source=\"system\",destination=ukey,message=msg,types=[\"info\"])", "def logData(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log error message, being cautious of possibility that waagent may not be imported
def waagent_log_error(message):
    if 'Utils.WAAgentUtil' in sys.modules:
        waagent.Error(message)
    else:
        print('Error: {0}'.format(message))
[ "def log_error(e):\r\n\tprint(e)", "def logger_error(self,text):\n logging.error(self.log_my_name()+' '+text)", "def error(msg):\n global logger\n logger.error(msg)", "def error(self, message):\n pass", "def error(self, message: str, **extra: t.Any):\n self.log(logging.ERROR, message, extra)", "def _reportErrors(self, msg) :\n self.help()\n print msg\n print self._line(\"-\")\n if not self.inhibitExceptions :\n raise ScriptInputError, msg", "def error(msg):\n message(msg, flag='e')", "def error(self, message, tenant=None):\n self.logger.error(message, extra={'tenant': tenant})", "def log_error(self, message: str):\n self.logger.error(message)", "def error(what,say):\n print 'ERROR: ', what, say", "def log_error(self):\n return \"\"\"--log-error=file_name\"\"\"", "def __init__(self, device_name, msg):\n super(LoggingAgentNotEnabledError, self).__init__(\n device_name, msg, reason=\"agent not installed\")", "def logException(self,message):\n self.wLogger.exception(message)", "def error(self, message, farg=None):\n self.write(message, farg=farg, level=u\"ERROR\")", "def add_detected_error(self,e):\n exc_tb = sys.exc_info()[2]\n exc_type = sys.exc_info()[0]\n exc_line = exc_tb.tb_lineno\n f_name = traceback.extract_tb(exc_tb,1)[0][2]\n t_err_msg = \"{} | Exception Type: {} | At Function: {} | Line No: {} | Error Message: {}\"\n t_err_msg = t_err_msg.format(self.host, exc_type, f_name, exc_line, e)\n self.add_cmnt_msg(t_err_msg, \"Error\")", "def init_waagent_logger():\n try:\n waagent.LoggerInit('/var/log/waagent.log','/dev/stdout', True)\n except Exception as e:\n print('Unable to initialize waagent log because of exception ' \\\n '{0}'.format(e))", "def hutil_log_error(hutil, message):\n if hutil is not None:\n hutil.error(message)\n else:\n print('Error: {0}'.format(message))", "def cmd_error(self):\n self.log.setLevel(logging.ERROR)\n self.log.error('Switching to ERROR threshold')", "def log_message(self) -> global___LogMessage:", "def errReceived(self, data):\n self.log.error(data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log informational message, being cautious of possibility that hutil may not be imported and configured
def hutil_log_info(hutil, message): if hutil is not None: hutil.log(message) else: print('Info: {0}'.format(message))
[ "def hutil_log_error(hutil, message):\n if hutil is not None:\n hutil.error(message)\n else:\n print('Error: {0}'.format(message))", "def info(self,msg):\n self.logger.info(msg)", "def handle_info(self, message, info_name=None):\n\n msg = ''\n if info_name is not None:\n info_name = info_name.strip()\n if info_name:\n msg = info_name + ': '\n msg += str(message).strip()\n\n root_log = logging.getLogger()\n has_handlers = False\n if root_log.handlers:\n has_handlers = True\n\n if has_handlers:\n log.info(msg)\n\n if self.use_stderr or not has_handlers:\n curdate = datetime.datetime.now()\n curdate_str = \"[\" + curdate.isoformat(' ') + \"]: \"\n msg = curdate_str + msg + \"\\n\"\n sys.stderr.write(msg)\n\n return", "def logger_info(self,text):\n logging.info(self.log_my_name()+' '+text)", "def log_info(self, fmt, *args, end=os.linesep): \n self.log(fmt, *args, levels='info', end=end)", "def subheading(message):\n # get the appropriate logger\n logger = AdmitLogging.findLogger()\n if logger is None:\n return\n logger.info(\"\")\n logger.info(\" \" + message)\n logger.info(\"\")", "def header(self):\n self.head_formatter = logging.Formatter('%(message)s')\n self.head_logger = setup_logger(\"Header\",\n cfg.log.root,\n self.head_formatter,\n level=logging.INFO)\n self.head_logger.info(\"\\n\\n\" + \"*\" * 30)\n current_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n self.head_logger.info(current_time)\n if cfg.model.restart:\n self.head_logger.info(\"Using past training on: {}\".format(cfg.model.savepath)) \n self.head_logger.info(\"Using GPU: {}\".format(cfg.CUDA_VISIBLE_DEVICES))\n self.head_logger.info(\"-\" * 30)", "def cmd_info(self):\r\n self.log.setLevel(logging.INFO)\r\n self.log.info('Switching to INFO threshold')", "def create_logger(self):\n try:\n lg.basicConfig(filename='pre_processing_logger.log', level = lg.INFO ,format='%(asctime)s - %(levelname)s - %(message)s')\n except Exception as e:\n print(e)", "def info(self, message, farg=None):\n self.write(message, farg=farg, level=u\"INFO\")", "def logSystemInfo(log):\n log.info(\"-\" * 11 + ' System Information Summary ' + '-' * 11)\n #log.info('Machine Type = '+platform.machine())\n #log.info('Machine Version = '+platform.version())\n log.info('OS type = ' + platform.uname()[0])\n log.info('OS Version = ' + platform.uname()[2])\n log.info('Machine UserName = ' + platform.uname()[1])\n log.info('Machine Processor Type = ' + platform.processor())\n log.info('Number of cores = ' + str(psutil.NUM_CPUS))\n totMem = psutil.virtual_memory()[0] / 1073741824.0\n percentMem = psutil.virtual_memory()[2]\n log.info('Total RAM [GB] = ' + str(totMem) + ', % used = ' + str(percentMem))\n log.info('Python Version = ' + repr(platform.python_version()))\n log.info('-' * 50)", "def info(self, msg, *args, **kwargs):\n self.write(msg, level='INFO', *args, **kwargs)", "def info(self,msg):\n\tentry = self._entry()\n entry['msg'] = msg\n self.log_collection.insert(entry)", "def info(self, *args: Any, **kwargs: Any) -> None:\n\n self.client.logger.info(*args, **kwargs)", "def info(self, message: str, **extra: t.Any):\n self.log(logging.INFO, message, extra)", "def info(self, *messages):\n self.log(LOGLEVELS[\"info\"], \"\\n[Info]\", *messages)", "def hazard(msg, label=True):\n\n if not should_print_insecure_log_msgs:\n return\n\n final_msg = None\n\n if label:\n final_msg = '***** hazardous log: ' + str(msg)\n else:\n final_msg = str(msg)\n\n print term_red + final_msg + term_reset", "def test_filelog():\n\tlgr = simplelog.make_logger 
(handler='test/out/test.log')\n\tlgr.info (\"what?\")", "def logDetails(self):\n for k,v in self._parser.getDetailsDict().items():\n self._log.debug(\"> %11s : %s\" % (k, str(v)[:50]))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log error message, being cautious of possibility that hutil may not be imported and configured
def hutil_log_error(hutil, message): if hutil is not None: hutil.error(message) else: print('Error: {0}'.format(message))
[ "def log_error(e):\r\n\tprint(e)", "def logger_error(self,text):\n logging.error(self.log_my_name()+' '+text)", "def error(msg):\n global logger\n logger.error(msg)", "def log_error(self):\n return \"\"\"--log-error=file_name\"\"\"", "def log_error(self, message: str):\n self.logger.error(message)", "def errln(line):\n print('HashMe.py: error:', line, file = sys.stderr, flush = True)", "def log_and_exit(error_msg):\n logging.info(error_msg)\n sys.exit(error_msg)", "def error(self, message: str, **extra: t.Any):\n self.log(logging.ERROR, message, extra)", "def hutil_log_info(hutil, message):\n if hutil is not None:\n hutil.log(message)\n else:\n print('Info: {0}'.format(message))", "def _log_exception():\n exc = traceback.format_exception(*sys.exc_info())\n rospy.logerr(\"\".join(exc))", "def log_traceback():\r\n\r\n logging.error(get_log_traceback())", "def error_args(error_msg):\n log(\"\\n\")\n log(\"Error : Arguments provided is invalid\")\n log(error_msg)\n log(\"use -h for more details\")\n exit(usage())", "def log_error(err, data):\n with open(ERROR_FILE,'a', encoding=\"utf-8\") as fo:\n fo.write('Error: {} \\nData:{}\\n\\n'.format(err,data))", "def _reportErrors(self, msg) :\n self.help()\n print msg\n print self._line(\"-\")\n if not self.inhibitExceptions :\n raise ScriptInputError, msg", "def error(self, message, farg=None):\n self.write(message, farg=farg, level=u\"ERROR\")", "def log_exception(error, message):\n log_entry = \"{}:\\n{}\".format(str(error), str(message))\n kivy.logger.Logger.error(log_entry)", "def cmd_error(self):\n self.log.setLevel(logging.ERROR)\n self.log.error('Switching to ERROR threshold')", "def error(what,say):\n print 'ERROR: ', what, say", "def ConfigurationError(msg):\n\n if not helpMode:\n print(msg)\n Exit(1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create Shipping in db
def create(session, params): shipment = models.Shipping() shipment.shippingName = params["shipping_name"] shipment.bltimeStamp = datetime.datetime.now() shipment.creationDate = datetime.datetime.now() # FK to proposal shipment.proposalId = params["proposal_id"] shipment.sendingLabContactId = params["labcontact_id"] shipment.returnLabContactId = params["labcontact_id"] session.add(shipment) session.commit() return shipment.shippingId
[ "def create_shipping(info: ShippingModel):\n data = storage.all('Shipping')\n id_ = 1 if data is None else max((one.id_ for one in data)) + 1\n obj = Shipping(id_, **info.__dict__)\n storage.new(obj)\n msg = storage.save()\n return msg if msg else {\n 'shipping': 'shipping created',\n \"info\": obj.to_dict()\n }", "def test_orders_create_shipping(self):\n accept_language = 'es'\n rq = conekta.ShippingRequest(\n amount=100\n )\n response = self.api.orders_create_shipping('ord_2tUigJ8DgBhbp6w5D', rq, accept_language)\n self.assertIsNotNone(response)", "def db_pop_shipments(db):\n\n vehicle_ids = [v.vehicle_id for v in Vehicle.query.all()]\n drivers = [p for p in Personnel.query.all() if p.category == 3]\n companies = readCompanyFile()\n n = 20\n for n in range(n):\n shipment = Shipment()\n shipment.destination_company = sample(companies, 1)[0]\n shipment.destination_address = fake.address()\n shipment.source_address = fake.address()\n shipment.source_company = sample(companies, 1)[0]\n shipment.shipment_type = randint(0, 1)\n shipment.confirmation = randint(0, 1)\n shipment.payment_status = randint(0, 1)\n shipment.vehicle_id = sample(vehicle_ids, 1)[0]\n shipment.shipment_personnel = sample(drivers, 3)\n shipment.depart_time = fake.date_time_between(start_date='-5y', end_date='-1d', tzinfo=None)\n shipment.est_time_arrival = shipment.depart_time + timedelta(days=7)\n shipment.arrival_status = shipment.est_time_arrival\n if shipment.shipment_type == '1':\n shipment.shipment_type = True\n manifest = Manifest()\n db.session.add(manifest)\n db.session.commit()\n shipment.manifest_id = manifest.manifest_id\n else:\n shipment.shipment_type = False\n purchase_order = PurchaseOrder()\n db.session.add(purchase_order)\n db.session.commit()\n shipment.po_id = purchase_order.po_id\n db.session.add(shipment)\n db.session.commit()\n\n return True", "def save(self):\n shipment = self.context.get('shipment', None)\n\n if not shipment:\n return\n\n data = self.validated_data\n\n request = self.context['request']\n user = request.user\n\n # Extract shipping date (defaults to today's date)\n shipment_date = data.get('shipment_date', datetime.now())\n if shipment_date is None:\n # Shipment date should not be None - check above only\n # checks if shipment_date exists in data\n shipment_date = datetime.now()\n\n shipment.complete_shipment(\n user,\n tracking_number=data.get('tracking_number', shipment.tracking_number),\n invoice_number=data.get('invoice_number', shipment.invoice_number),\n link=data.get('link', shipment.link),\n shipment_date=shipment_date,\n delivery_date=data.get('delivery_date', shipment.delivery_date),\n )", "def createDeliveryEntity(title, addressedTo, deliveryMethod, sg):\n project = sg.find(\n \"Project\", [[\"name\", \"is\", CONFIG_DATA[\"shotgun\"][\"settings\"][\"project_name\"]]]\n )\n data = {\n \"title\": title,\n \"addressings_to\": addressedTo,\n \"sg_delivery_method\": deliveryMethod,\n \"project\": {\"type\": \"Project\", \"id\": project[0][\"id\"]},\n }\n entity = sg.create(\"Delivery\", data)\n return entity", "def post(self, id):\n spaceship_data = json.loads(request.get_data())\n response = add_spaceship(spaceship_data, id)\n\n return response", "def createShipDesigns(self):\n for designID, designInfo in self.shipDesigns.iteritems():\n myDesign = self.getShipDesign(designID, designInfo[1],designInfo[2],designInfo[3],designInfo[0])\n self.shipDesignObjects[designID] = myDesign", "def update_deliveries():\n with 
db.session.connection(execution_options={\"schema_translate_map\":{\"tenant\":session['schema']}}):\n \n shift_id = request.form.get(\"shift\")\n shift = Shift.query.get(shift_id)\n document= request.form.get(\"document\")\n supplier_id=request.form.get(\"suppliers\")\n cost_price= float(request.form.get(\"cost_price\"))\n product_id = request.form.get(\"product\")\n qty= float(request.form.get(\"qty\"))\n product = Product.query.get(product_id)\n price= Price.query.filter(and_(Price.product_id == product.id,Price.shift_id ==shift_id)).first()\n inventory_acc = Account.query.get(product.account_id)\n amount = qty * cost_price\n amount = round(amount,2)\n new_cost = ((product.cost_price*product.qty) + amount)/(qty +product.qty)\n product.cost_price = cost_price\n product.avg_price = round(new_cost,2)\n price.avg_price = round(new_cost,2)\n price.cost_price = cost_price\n supplier = Supplier.query.get(supplier_id)\n post_balance = supplier_txn_opening_balance(shift.date,supplier.id) + amount\n txn = SupplierTxn(date=shift.date,txn_type=\"Delivery\",supplier_id=supplier.id,amount=amount,post_balance=post_balance)\n db.session.add(txn)\n db.session.flush()\n update_supplier_balances(shift.date,amount,supplier.id,txn.txn_type)\n if product.product_type ==\"Fuels\":\n tank_id= request.form.get(\"tank\")\n try:\n tank = Tank.query.get(tank_id)\n tank_id = tank.id\n except:\n flash('Please select valid tank','warning')\n return redirect(url_for('readings_entry'))\n \n delivery = Delivery(date=shift.date,shift_id=shift_id,tank_id=tank.id,qty=qty,product_id=product_id,document_number=document,supplier=supplier_id,cost_price=cost_price,supplier_txn_id=txn.id)\n else:\n delivery = Delivery(date=shift.date,shift_id=shift_id,qty=qty,product_id=product_id,document_number=document,supplier=supplier_id,cost_price=cost_price,supplier_txn_id=txn.id)\n \n db.session.add(delivery)\n db.session.flush()\n details = \"Delivery {}\".format(delivery.id)\n delivery_journal=Journal(date=shift.date,details=details,amount=amount,dr=inventory_acc.id,cr=supplier.account_id,created_by=session['user_id'],updated=False)\n db.session.add(delivery_journal)\n db.session.commit()\n \n return redirect(url_for('readings_entry'))", "def create_opportunity():", "def test_create_construct_shipment_minimal(self):\n test_shipment = ConstructShipment(ship_date = \"2010-01-01\", recipient = Recipient.objects.get(pk=1))\n test_shipment.save()\n test_shipment.constructs.add(Construct.objects.get(pk=1))\n self.assertEquals(test_shipment.__unicode__(), \"Fixture Laboratory (2010-01-01)\")", "def test_create_construct_shipment_all_fields(self):\n test_shipment = ConstructShipment(\n ship_date = \"2010-01-01\", \n recieved_date = \"2010-02-01\",\n recipient = Recipient.objects.get(pk=1),\n notes = \"here are some notes on the shipment\")\n test_shipment.save()\n test_shipment.constructs.add(Construct.objects.get(pk=1))\n self.assertEquals(test_shipment.__unicode__(), \"Fixture Laboratory (2010-01-01)\")", "def make_ship(self):\n self.is_ship = True", "def freeze_create(request):\n\n\trequest.user.kingdom.freeze_set.create()", "def create_shipping_event(self, order, lines):\n with transaction.commit_on_success():\n event_type = ShippingEventType._default_manager.get(code=self.request.POST['shipping_event'])\n event = ShippingEvent._default_manager.create(order=order, event_type=event_type)\n for line in lines:\n try:\n event_quantity = int(self.request.POST['order_line_quantity_%d' % line.id])\n except KeyError:\n event_quantity = 
line.quantity\n ShippingEventQuantity._default_manager.create(event=event, line=line, \n quantity=event_quantity)", "def fill_shipping(self):\n pass", "def _create_ship(self):\n random_loc = self.__random_coordinates()\n return Ship(random_loc, self.SHIP_INIT_SPEED, self.SHIP_INIT_HEADING)", "def create(self, context=None):\n values = self.obj_get_changes()\n db_bay = self.dbapi.create_bay(values)\n self._from_db_object(self, db_bay)", "def create_table(self):\r\n\r\n col = ', '.join(\"'{}' {}\".format(key, val) for key, val in self.columns.items())\r\n\r\n print(\"Creating GeoIP a table if one doesn't exist yet.\")\r\n\r\n self.db.execute('''CREATE TABLE IF NOT EXISTS GeoIP\r\n ({})'''.format(col))", "def shipping_update(self, shipping_id):\n\n finance = self.cart['finance']\n try:\n # verify shipping_id\n shipping_id = int(shipping_id)\n\t # throws error if it doesn't exist.\n shipping = Statics.shippings.get_id(shipping_id)\n\n c = get_cursor()\n c.execute(\"\"\"\n update cart\n set shipping_id = %s\n where cart_id = %s\"\"\",\n (shipping_id,\n self.cart['cart_id']))\n finance['shipping_id'] = shipping_id\n self.recompute()\n except Exception as e:\n import traceback\n traceback.print_exc()\n print e.__class__.__name__ + \": \" + str(e)\n raise DbError(\"Internal error\")\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Breaks the glyphname into a two item list
def breakSuffix(glyphname): if glyphname.find('.') != -1: split = glyphname.split('.') return split else: return None
[ "def controls(glyphname):\n\tcontrolslist =\t[]\n\tfor value in controldict.values():\n\t\tfor v in value:\n\t\t\tfor i in v.split('/'):\n\t\t\t\tif len(i) > 0:\n\t\t\t\t\tif i not in controlslist:\n\t\t\t\t\t\tcontrolslist.append(i)\t\n\tcs = ''\n\tif glyphname in controlslist:\n\t\tfor key in controldict.keys():\n\t\t\tfor v in controldict[key]:\n\t\t\t\tif glyphname in v.split('/'):\n\t\t\t\t\tcon = controldict[key]\n\t\tstriptriple = []\n\t\thold1 = ''\n\t\thold2 = ''\n\t\tfor i in ''.join(con).split('/'):\n\t\t\tif len(i) != 0:\n\t\t\t\tif i == hold1 and i == hold2:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tstriptriple.append(i)\n\t\t\thold1 = hold2\n\t\t\thold2 = i\n\t\tconstr = '/' + '/'.join(striptriple)\n\t\t# this is a bit of a hack since FL seems to have trouble \n\t\t# when it encounters the same string more than once.\n\t\t# so, let's stick the glyph at the end to differentiate it.\n\t\t# for example: HHOHOOH and HHOHOOO\n\t\tcs = constr + '/' + glyphname\n\telse:\n\t\tsuffix = ''\n\t\tbS = breakSuffix(glyphname)\n\t\tif bS is not None:\n\t\t\tsuffix = bS[1]\n\t\t\tglyphname = bS[0]\n\t\tif suffix[:2] == 'sc':\n\t\t\tcontrols = controldict['SC']\n\t\telif glyphname in uppercase:\n\t\t\tcontrols = controldict['UC']\n\t\telif glyphname in lowercase:\n\t\t\tcontrols = controldict['LC']\n\t\telif glyphname in digits:\n\t\t\tcontrols = controldict['DIGITS']\n\t\telse:\n\t\t\tcontrols = controldict['UC']\n\t\tif len(suffix) != 0:\n\t\t\tglyphname = '.'.join([glyphname, suffix])\n\t\tcs = controls[0] + '/' + glyphname + controls[1] + '/' + glyphname + controls[2]\n\treturn cs", "def get_expanded_glyph_list(\n unicodes: List[int], ui: Optional[\"UniInfo\"] = None\n) -> List[Tuple[int, Optional[str]]]:\n glyphs = []\n if ui is None:\n ui = UniInfo(0)\n for ch in unicodes:\n ui.unicode = ch\n glyphs.append((ch, ui.glyphname))\n if ui.lc_mapping is not None:\n ui.unicode = ui.lc_mapping\n glyphs.append((ui.unicode, ui.glyphname))\n elif ui.uc_mapping is not None:\n ui.unicode = ui.uc_mapping\n glyphs.append((ui.unicode, ui.glyphname))\n return sorted(list(set(glyphs)))", "def glyphs(self, text):\r\n # fix: hackish\r\n text = re.sub(r'\"\\Z', '\\\" ', text)\r\n\r\n glyph_search = (\r\n # apostrophe's\r\n re.compile(r\"(\\w)\\'(\\w)\"),\r\n # back in '88\r\n re.compile(r'(\\s)\\'(\\d+\\w?)\\b(?!\\')'),\r\n # single closing\r\n re.compile(r'(\\S)\\'(?=\\s|' + self.pnct + '|<|$)'),\r\n # single opening\r\n re.compile(r'\\'/'),\r\n # double closing\r\n re.compile(r'(\\S)\\\"(?=\\s|' + self.pnct + '|<|$)'),\r\n # double opening\r\n re.compile(r'\"'),\r\n # 3+ uppercase acronym\r\n re.compile(r'\\b([A-Z][A-Z0-9]{2,})\\b(?:[(]([^)]*)[)])'),\r\n # 3+ uppercase\r\n re.compile(r'\\b([A-Z][A-Z\\'\\-]+[A-Z])(?=[\\s.,\\)>])'),\r\n # ellipsis\r\n re.compile(r'\\b(\\s{0,1})?\\.{3}'),\r\n # em dash\r\n re.compile(r'(\\s?)--(\\s?)'),\r\n # en dash\r\n re.compile(r'\\s-(?:\\s|$)'),\r\n # dimension sign\r\n re.compile(r'(\\d+)( ?)x( ?)(?=\\d+)'),\r\n # trademark\r\n re.compile(r'\\b ?[([]TM[])]', re.I),\r\n # registered\r\n re.compile(r'\\b ?[([]R[])]', re.I),\r\n # copyright\r\n re.compile(r'\\b ?[([]C[])]', re.I),\r\n )\r\n\r\n glyph_replace = [x % dict(self.glyph_defaults) for x in (\r\n r'\\1%(txt_apostrophe)s\\2', # apostrophe's\r\n r'\\1%(txt_apostrophe)s\\2', # back in '88\r\n r'\\1%(txt_quote_single_close)s', # single closing\r\n r'%(txt_quote_single_open)s', # single opening\r\n r'\\1%(txt_quote_double_close)s', # double closing\r\n r'%(txt_quote_double_open)s', # double opening\r\n r'<acronym 
title=\"\\2\">\\1</acronym>', # 3+ uppercase acronym\r\n r'<span class=\"caps\">\\1</span>', # 3+ uppercase\r\n r'\\1%(txt_ellipsis)s', # ellipsis\r\n r'\\1%(txt_emdash)s\\2', # em dash\r\n r' %(txt_endash)s ', # en dash\r\n r'\\1\\2%(txt_dimension)s\\3', # dimension sign\r\n r'%(txt_trademark)s', # trademark\r\n r'%(txt_registered)s', # registered\r\n r'%(txt_copyright)s', # copyright\r\n )]\r\n\r\n result = []\r\n for line in re.compile(r'(<.*?>)', re.U).split(text):\r\n if not re.search(r'<.*>', line):\r\n for s, r in zip(glyph_search, glyph_replace):\r\n line = s.sub(r, line)\r\n result.append(line)\r\n return ''.join(result)", "def buildKernStrings( listOfLeftGlyphNames, listOfRightGlyphNames, thisFont=None, linePrefix=\"nonn\", linePostfix=\"noon\" ):\n\tif thisFont is None:\n\t\tprint(\"No font detected.\")\n\t\treturn None\n\telse:\n\t\tkernStrings = []\n\t\n\t\t# collect left names/groups:\n\t\tleftGroups = []\n\t\tfor leftName in listOfLeftGlyphNames:\n\t\t\n\t\t\t# Hardcoded changes to prevent Æ/æ from appearing instead of E/e:\n\t\t\tif leftName == \"ae\" and thisFont.glyphs[\"ae\"].rightKerningGroup == thisFont.glyphs[\"e\"].rightKerningGroup:\n\t\t\t\tleftName = \"e\"\n\t\t\tif leftName == \"ae.sc\" and thisFont.glyphs[\"ae.sc\"].rightKerningGroup == thisFont.glyphs[\"e.sc\"].rightKerningGroup:\n\t\t\t\tleftName = \"e.sc\"\n\t\t\tif leftName == \"AE\" and thisFont.glyphs[\"AE\"].rightKerningGroup == thisFont.glyphs[\"E\"].rightKerningGroup:\n\t\t\t\tleftName = \"E\"\n\t\t\n\t\t\tleftGroup = thisFont.glyphs[leftName].rightKerningGroup\n\t\t\tif (leftGroup is not None) and (not leftGroup in leftGroups):\n\t\t\t\tleftGroups.append( leftGroup )\n\t\t\t\n\t\t\t\t# collect right names/groups:\n\t\t\t\trightGroups = []\n\t\t\t\tfor rightName in listOfRightGlyphNames:\n\t\t\t\t\n\t\t\t\t\t# Hardcoded changes:\n\t\t\t\t\tif rightName == \"idotless\" and thisFont.glyphs[\"idotless\"].leftKerningGroup == thisFont.glyphs[\"n\"].leftKerningGroup:\n\t\t\t\t\t\trightName = \"n\"\n\t\t\t\t\tif rightName == \"idotless\" and thisFont.glyphs[\"idotless\"].leftKerningGroup == thisFont.glyphs[\"i\"].leftKerningGroup:\n\t\t\t\t\t\trightName = \"i\"\n\t\t\t\t\tif rightName == \"jdotless\" and thisFont.glyphs[\"jdotless\"].leftKerningGroup == thisFont.glyphs[\"j\"].leftKerningGroup:\n\t\t\t\t\t\trightName = \"j\"\n\t\t\t\t\t\n\t\t\t\t\trightGroup = thisFont.glyphs[rightName].leftKerningGroup\n\t\t\t\t\tif (rightGroup is not None) and (not rightGroup in rightGroups):\n\t\t\t\t\t\trightGroups.append( rightGroup )\n\t\t\t\t\t\tkernString = \"%s/%s/%s %s\" % ( linePrefix, leftName, rightName, linePostfix )\n\t\t\t\t\t\tkernStrings.append( kernString )\n\t\treturn kernStrings", "def getMarkerName(index):", "def emb(item):\n\treturn '```' + str(item) + '```'", "def format_menu_item(entry):\n if len(entry) > 58:\n entry = entry[:56]\n while len(entry) < 56:\n entry += ' '\n entry += ' #'\n return entry", "def _legend_text(name, max_length=20):\n if len(name) > max_length:\n temp = []\n new = []\n for part in name.split(' '):\n if len(' '.join(temp + [part])) > max_length:\n new.append(' '.join(temp))\n temp = [part]\n else:\n temp.append(part)\n if temp:\n new.append(' '.join(temp))\n return '<br>'.join(new)\n else:\n return name", "def filter_glyph_names( alist, filter ):\n\n count = 0\n extras = []\n\n for name in alist:\n try:\n filtered_index = filter.index( name )\n except:\n extras.append( name )\n\n return extras", "def _get_name_doublelist(self):\n name = ''\n if self.parent_institution:\n name += 
self.parent_institution.name + '::'\n name += self.name\n return name", "def process_long_names(long_names):\n names = []\n tex_names = []\n # First pass, to remove the leading scales\n for name in long_names:\n # This can happen in the background file\n if name.startswith('(.)', 0):\n temp_name = name[3:]\n names.append(temp_name)\n tex_names.append(replace_scale(name))\n # Otherwise, we simply\n else:\n names.append(name)\n tex_names.append(name)\n # Second pass, to remove from the short names the indication of scale,\n # which should look like something between parenthesis, or square brackets,\n # and located at the end of the string\n for index, name in enumerate(names):\n if name.find('(') != -1:\n names[index] = name[:name.index('(')]\n elif name.find('[') != -1:\n names[index] = name[:name.index('[')]\n\n # Finally, remove any extra spacing\n names = [''.join(elem.split()) for elem in names]\n return names, tex_names", "def draw_name():\n dist_from_top = 35\n label1 = pyglet.text.Label(\"Chess\", font_name='Courier New', font_size=16, bold=True,\n x=label_calib, y=w_height - dist_from_top,\n anchor_x='center', anchor_y='center', color=side_label_color)\n label2 = pyglet.text.Label(\"II\", font_name='Courier New', font_size=16, bold=True,\n x=label_calib, y=w_height - dist_from_top - 20,\n anchor_x='center', anchor_y='center', color=side_label_color)\n label1.draw()\n label2.draw()", "def _parse_labels_for_repr(self) -> str:\n if self.labels:\n label_names = [label[\"name\"] for label in self.labels]\n all_names = \", \".join(label_names)\n if all_names:\n return str(click.style(f\"({all_names})\", fg=LABELS_COLOR, dim=True))\n return \"\"", "def genenames_from10x(genelist):\n genesymbol=[]\n #ensemblid=[]\n for i in range(len(genelist)):\n curgene=genelist[i]\n starts=[]\n for x in re.finditer('_',curgene):\n starts.append(x.start()+1)\n genesymbol.append(curgene[starts[-1]:])\n \n return genesymbol#,ensemblid", "def getLabelFromSurfName(surfName, args):\n label = surfName.split(\"-\")\n for l in args.label2Title:\n try:\n label.remove(l)\n except RuntimeError:\n args.label2Title.remove(l) # Nothing to remove, nothing to move to title !\n return \"-\".join(label)", "def genenames_from10x_mod(genelist):\n genesymbol=[]\n #ensemblid=[]\n for i in range(len(genelist)):\n curgene=genelist[i]\n starts=[]\n for x in re.finditer('_',curgene):\n starts.append(x.start()+1)\n genesymbol.append(curgene[starts[0]:])\n \n return genesymbol#,ensemblid", "def convertMsg(message, glyphList=[], charList=[], maxChar=maxCustomChar):\n new_msg = ''\n offset_glyph_list= len(glyphList) - len(charList)\n\n for c in message:\n if c in dictGlyph:\n if c in charList: # glyph has already been added to the list, so use it!\n new_msg += chr(offset_glyph_list + charList.index(c))\n else:\n glyphTuple = dictGlyph[c]\n if len(glyphList) < maxChar and glyphTuple[1] is not None: # is there still a free place? 
is a glyph defined ?\n glyphList.append(glyphTuple[1])\n charList.append(c)\n new_msg += chr(len(glyphList)-1)\n else:\n new_msg += glyphTuple[0] # use replacement char (because there is no glyph or because there is no more place for custom char)\n else:\n new_msg += c # add normal char to the message\n return (new_msg, glyphList, charList)", "def bullets(elements):\n for name in elements:\n print(\"*\", name)", "def split_name_ucsb(name):\n sans_prefix = name[::-1][:name[::-1].index('ScanImage_'[::-1])][::-1]\n sans_suffix = sans_prefix[:sans_prefix.index('_Ablation')]\n spot_name = sans_suffix[::-1][:sans_suffix[::-1].index('-')][::-1]\n sample_name = sans_suffix[::-1][sans_suffix[::-1].index('-')+1:][::-1]\n return sample_name, spot_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the base glyph of an accented glyph
def findAccentBase(accentglyph): base = splitAccent(accentglyph)[0] return base
[ "def get_complementary_base(b):\n if b=='A':\n return 'T'\n elif b=='T':\n return 'A'\n elif b=='C':\n return 'G'\n elif b=='G':\n return 'C'\n else:\n print 'ERROR: Check String Input'", "def getGlyph(self, char):\n return FontGlyph(char, self, self.cairoContext)", "def base_pair(c):\n\n c = c.lower()\n\n to_return = 'unknown'\n\n if(c == 'a'):\n to_return = 't'\n elif(c == 't'):\n to_return = 'a'\n elif(c == 'g'):\n to_return = 'c'\n elif(c == 'c'):\n to_return = 'g'\n\n return to_return", "def current_char(self) -> str:", "def current_letter():\n return next_letter_data.current_letter", "def get_ascii_representation(char):\r\n\treturn ord(char)", "def getindex(self, char):\n return ord(char) - 97", "def get_ascii_character(index):\n\n return characters.characters[index]", "def get_unicode_alt(value):\n\n return value['code_points']['output']", "def text_image(self, char):\r\n if char in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':\r\n _index = ord(char)\r\n if _index >= 97:\r\n _index -= 97\r\n else:\r\n _index -= 38\r\n else:\r\n _index = 26\r\n\r\n return alpha_image[_index]", "def letter_at(self, index):\n if (index >= 1) and (index <= len(self)):\n return self[index-1]\n else:\n return u'?'", "def matchingBase(base):\n if base == 'A':\n return 'T'\n elif base == 'T':\n return 'A'\n elif base == 'G':\n return 'C'\n elif base == 'C':\n return 'G'\n else:\n raise ValueError(\"{} is not a valid DNA base!\".format(repr(base)))", "def g(self):\r\n return 'A'", "def SoGlyph_getGlyph(*args) -> \"SoGlyph const *\":\n return _coin.SoGlyph_getGlyph(*args)", "def test_alt_char():\n assert current_weather.alt_char(7, 1) == '\\ue9a6'\n assert current_weather.alt_char(7, 2) == '\\ue9a7'", "def base(self, literal):\n literal = literal.strip()\n if not literal.startswith(u\"0\") or re.match(ur\"0+$\", literal):\n return 10\n elif literal[1] not in u\"box\":\n return 0\n return baseMAPPING[literal[1]]", "def getUnicodeChar(code: int) -> str:\n return chr(code)", "def getCharPositionInAlpha(char):\n ALPHABET = \"abcdefghijklmnopqrstuvwxyz\"\n return ALPHABET.find(char.lower()) + 1", "def glyphs(self, text):\r\n # fix: hackish\r\n text = re.sub(r'\"\\Z', '\\\" ', text)\r\n\r\n glyph_search = (\r\n # apostrophe's\r\n re.compile(r\"(\\w)\\'(\\w)\"),\r\n # back in '88\r\n re.compile(r'(\\s)\\'(\\d+\\w?)\\b(?!\\')'),\r\n # single closing\r\n re.compile(r'(\\S)\\'(?=\\s|' + self.pnct + '|<|$)'),\r\n # single opening\r\n re.compile(r'\\'/'),\r\n # double closing\r\n re.compile(r'(\\S)\\\"(?=\\s|' + self.pnct + '|<|$)'),\r\n # double opening\r\n re.compile(r'\"'),\r\n # 3+ uppercase acronym\r\n re.compile(r'\\b([A-Z][A-Z0-9]{2,})\\b(?:[(]([^)]*)[)])'),\r\n # 3+ uppercase\r\n re.compile(r'\\b([A-Z][A-Z\\'\\-]+[A-Z])(?=[\\s.,\\)>])'),\r\n # ellipsis\r\n re.compile(r'\\b(\\s{0,1})?\\.{3}'),\r\n # em dash\r\n re.compile(r'(\\s?)--(\\s?)'),\r\n # en dash\r\n re.compile(r'\\s-(?:\\s|$)'),\r\n # dimension sign\r\n re.compile(r'(\\d+)( ?)x( ?)(?=\\d+)'),\r\n # trademark\r\n re.compile(r'\\b ?[([]TM[])]', re.I),\r\n # registered\r\n re.compile(r'\\b ?[([]R[])]', re.I),\r\n # copyright\r\n re.compile(r'\\b ?[([]C[])]', re.I),\r\n )\r\n\r\n glyph_replace = [x % dict(self.glyph_defaults) for x in (\r\n r'\\1%(txt_apostrophe)s\\2', # apostrophe's\r\n r'\\1%(txt_apostrophe)s\\2', # back in '88\r\n r'\\1%(txt_quote_single_close)s', # single closing\r\n r'%(txt_quote_single_open)s', # single opening\r\n r'\\1%(txt_quote_double_close)s', # double closing\r\n r'%(txt_quote_double_open)s', # double opening\r\n 
r'<acronym title=\"\\2\">\\1</acronym>', # 3+ uppercase acronym\r\n r'<span class=\"caps\">\\1</span>', # 3+ uppercase\r\n r'\\1%(txt_ellipsis)s', # ellipsis\r\n r'\\1%(txt_emdash)s\\2', # em dash\r\n r' %(txt_endash)s ', # en dash\r\n r'\\1\\2%(txt_dimension)s\\3', # dimension sign\r\n r'%(txt_trademark)s', # trademark\r\n r'%(txt_registered)s', # registered\r\n r'%(txt_copyright)s', # copyright\r\n )]\r\n\r\n result = []\r\n for line in re.compile(r'(<.*?>)', re.U).split(text):\r\n if not re.search(r'<.*>', line):\r\n for s, r in zip(glyph_search, glyph_replace):\r\n line = s.sub(r, line)\r\n result.append(line)\r\n return ''.join(result)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split an accented glyph into a two items
def splitAccent(accentglyph): base = None suffix = '' accentList=[] broken = breakSuffix(accentglyph) if broken is not None: suffix = broken[1] base = broken[0] else: base=accentglyph ogbase=base temp_special = lowercase_special_accents + uppercase_special_accents if base in lowercase_plain + uppercase_plain + smallcaps_plain: pass elif base not in temp_special: for accent in accents: if base.find(accent) != -1: base = base.replace(accent, '') accentList.append(accent) counter={} for accent in accentList: counter[ogbase.find(accent)] = accent counterList = counter.keys() counterList.sort() finalAccents = [] for i in counterList: finalAccents.append(counter[i]) accentList = finalAccents if len(suffix) != 0: base = '.'.join([base, suffix]) return base, accentList
[ "def accent_letters(self, letters=latin):\n if self.isaccent():\n for l in letters:\n a = l + self.combining_accent\n b = unicodedata.normalize(\"NFC\", a)\n if len(b) == 1:\n yield (l, b)", "def glyphs(self, text):\r\n # fix: hackish\r\n text = re.sub(r'\"\\Z', '\\\" ', text)\r\n\r\n glyph_search = (\r\n # apostrophe's\r\n re.compile(r\"(\\w)\\'(\\w)\"),\r\n # back in '88\r\n re.compile(r'(\\s)\\'(\\d+\\w?)\\b(?!\\')'),\r\n # single closing\r\n re.compile(r'(\\S)\\'(?=\\s|' + self.pnct + '|<|$)'),\r\n # single opening\r\n re.compile(r'\\'/'),\r\n # double closing\r\n re.compile(r'(\\S)\\\"(?=\\s|' + self.pnct + '|<|$)'),\r\n # double opening\r\n re.compile(r'\"'),\r\n # 3+ uppercase acronym\r\n re.compile(r'\\b([A-Z][A-Z0-9]{2,})\\b(?:[(]([^)]*)[)])'),\r\n # 3+ uppercase\r\n re.compile(r'\\b([A-Z][A-Z\\'\\-]+[A-Z])(?=[\\s.,\\)>])'),\r\n # ellipsis\r\n re.compile(r'\\b(\\s{0,1})?\\.{3}'),\r\n # em dash\r\n re.compile(r'(\\s?)--(\\s?)'),\r\n # en dash\r\n re.compile(r'\\s-(?:\\s|$)'),\r\n # dimension sign\r\n re.compile(r'(\\d+)( ?)x( ?)(?=\\d+)'),\r\n # trademark\r\n re.compile(r'\\b ?[([]TM[])]', re.I),\r\n # registered\r\n re.compile(r'\\b ?[([]R[])]', re.I),\r\n # copyright\r\n re.compile(r'\\b ?[([]C[])]', re.I),\r\n )\r\n\r\n glyph_replace = [x % dict(self.glyph_defaults) for x in (\r\n r'\\1%(txt_apostrophe)s\\2', # apostrophe's\r\n r'\\1%(txt_apostrophe)s\\2', # back in '88\r\n r'\\1%(txt_quote_single_close)s', # single closing\r\n r'%(txt_quote_single_open)s', # single opening\r\n r'\\1%(txt_quote_double_close)s', # double closing\r\n r'%(txt_quote_double_open)s', # double opening\r\n r'<acronym title=\"\\2\">\\1</acronym>', # 3+ uppercase acronym\r\n r'<span class=\"caps\">\\1</span>', # 3+ uppercase\r\n r'\\1%(txt_ellipsis)s', # ellipsis\r\n r'\\1%(txt_emdash)s\\2', # em dash\r\n r' %(txt_endash)s ', # en dash\r\n r'\\1\\2%(txt_dimension)s\\3', # dimension sign\r\n r'%(txt_trademark)s', # trademark\r\n r'%(txt_registered)s', # registered\r\n r'%(txt_copyright)s', # copyright\r\n )]\r\n\r\n result = []\r\n for line in re.compile(r'(<.*?>)', re.U).split(text):\r\n if not re.search(r'<.*>', line):\r\n for s, r in zip(glyph_search, glyph_replace):\r\n line = s.sub(r, line)\r\n result.append(line)\r\n return ''.join(result)", "def _split_chars(self, begin, end):\n\n s = self.document.gettext(begin, end)\n\n def get_category(c):\n return unicodedata.category(c)[0]\n\n for key, chars in itertools.groupby(s, get_category):\n chars = ''.join(chars)\n end = begin + len(chars)\n yield begin, end, chars, key\n begin = end", "def split_pair_into_letters(pair):\n first = None\n second = None\n\n if len(pair) == 2:\n first = pair[0]\n second = pair[1]\n else:\n # if the accent (\\W) is on the first letter\n m = re.match('(\\w\\W)(\\w)', pair)\n # if the accent (\\W) is on the second letter\n n = re.match('(\\w)(\\w\\W)', pair)\n\n if m is not None:\n first = m.group(1)\n second = m.group(2)\n\n elif n is not None:\n first = n.group(1)\n second = n.group(2)\n\n return first, second", "def grapheme_clusters(text):\n for c in normalize('NFC', text):\n yield c", "def test_alt_char():\n assert current_weather.alt_char(7, 1) == '\\ue9a6'\n assert current_weather.alt_char(7, 2) == '\\ue9a7'", "def testEscapedSplit(self):\n self.assertEquals((\"Раз,Два\", \"Три,Четыре\", \"Пять,Шесть\"), pytils.utils.split_values(\"Раз\\,Два,Три\\,Четыре,Пять\\,Шесть\"))\n self.assertEquals((\"Раз, Два\", \"Три\", \"Четыре\"), pytils.utils.split_values(\"Раз\\, Два, Три, Четыре\"))", "def 
substitute_accents(text):\r\n return unidecode(text).encode(\"ascii\")", "def test_qamats_gadol_next_accent():\n word = r\"אָז֩\" # az (Leviticus 26:34)\n parts = [\"alef\", \"qamats-gadol\", \"zayin\"]\n assert parts == Parser().parse(word).flat()", "def preprocess_char(self, text, lang=None):\n if lang == \"ron\":\n text = text.replace(\"ț\", \"ţ\")\n print(f\"{lang} (ț -> ţ): {text}\")\n return text", "def findAccentBase(accentglyph):\n\tbase = splitAccent(accentglyph)[0]\t\t\n\treturn base", "def _consume_alpha_u(self,text,offset):\r\n assert offset < len(text)\r\n incr = 0\r\n if text[offset].isalpha():\r\n incr = 1\r\n while offset + incr < len(text):\r\n if unicodedata.category(text[offset+incr])[0] != \"M\":\r\n break\r\n incr += 1\r\n return incr", "def text_image(self, char):\r\n if char in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':\r\n _index = ord(char)\r\n if _index >= 97:\r\n _index -= 97\r\n else:\r\n _index -= 38\r\n else:\r\n _index = 26\r\n\r\n return alpha_image[_index]", "def get_glyphs(self, text):\n glyph_renderer = None\n glyphs = [] # glyphs that are committed.\n for c in get_grapheme_clusters(str(text)):\n # Get the glyph for 'c'. Hide tabs (Windows and Linux render\n # boxes)\n if c == '\\t':\n c = ' '\n if c not in self.glyphs:\n if not glyph_renderer:\n glyph_renderer = self.glyph_renderer_class(self)\n self.glyphs[c] = glyph_renderer.render(c)\n glyphs.append(self.glyphs[c])\n return glyphs", "def decomposition(chr: str) -> str:\n idx = ord(chr)\n try:\n return unicode_data_to_decomposition_start[idx]\n except KeyError:\n return \"\"", "def test_qamats_gadol_accent():\n word = r\"נָ֤ע\" # na (Genesis 4:14)\n parts = [\"nun\", \"qamats-gadol\", \"ayin\"]\n assert parts == Parser().parse(word).flat()", "def letter_range(start, stop=\"{\", step=1):# this function is for the animation \r\n for ord_ in range(ord(start.lower()), ord(stop.lower()), step):\r\n yield chr(ord_)", "def _new_letter(self):\n i=0\n done=False\n\n if self._type=='abc':\n while i<26 and not done:\n e=\"%c\"%(i+97)\n if e not in self.positive_letters():\n done=True\n result=[e,\"%c\"%(i+65)]\n i+=1\n elif self._type=='x0':\n i=0\n done=False\n while not done:\n e=\"x%s\"%i\n if e not in self.positive_letters():\n done=True\n result=[e,\"X%i\"%i]\n i+=1\n i=0\n while not done:\n e=\"a%s\"%i\n if e not in self.positive_letters():\n done=True\n result=[e,\"A%i\"%i]\n i+=1\n\n\n return result", "def _split_icon_address(address: str) -> (str, str):\n return address[:2], address[2:]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert all possible characters to uppercase in a glyph string.
def upper(glyphstring): _InternalCaseFunctions().expandcasedict() uc = [] for i in glyphstring.split('/'): if i.find('.sc') != -1: if i[-3] != '.sc': x = i.replace('.sc', '.') else: x = i.replace('.sc', '') i = x suffix = '' bS = breakSuffix(i) if bS is not None: suffix = bS[1] i = bS[0] if i in casedict.keys(): i = casedict[i] if len(suffix) != 0: i = '.'.join([i, suffix]) uc.append(i) return '/'.join(uc)
[ "def convert_pybites_chars(text):\r\n text = text.capitalize()\r\n for char in PYBITES:\r\n text = text.replace(char, char.upper())\r\n if text[0].lower() in PYBITES:\r\n text = text[0].lower() + text[1:]\r\n return text", "def str_to_ascii_upper_case(s):\n return ''.join([c.upper() if 'a' <= c <= 'z' else c for c in s])", "def uppercase(str):\n print(\"{}\".format(str.translate(\n {(c | 32): c for c in range(ord('A'), ord('Z') + 1)}\n )))", "def toUpper(self, p_str): # real signature unknown; restored from __doc__\n return \"\"", "def makeUpperCase(self, obj):\n obj.touch = None\n o = ''\n for n, x in enumerate(self.operationText()):\n if x == VC.BlankLetter:\n o += self.rawText[n]\n else:\n o += x.upper()\n self.resetRawText(o)", "def LetterChanges(str):\n # code goes here\n vowels = ['a', 'i', 'o', 'e', 'u']\n result = ''\n for c in str:\n if c.isalpha():\n if c == 'z' or c == 'Z':\n c = 'A'\n else:\n c = chr(ord(c) + 1)\n if c in vowels:\n c = c.upper()\n result += c\n\n return result", "def uppercase(self, string):\n return string.upper()", "def toUpper(self):\n self.name.toUpper()\n self.ext.toUpper()", "def upper_char(index):\n upper = upper_list()\n upper += upper[0:13]\n return upper[index]", "def upper(value):\n return translate(value, CZ_UPPER).upper()", "def convert_to_uppercase(item_in_dict):\n \n try:\n for key in item_in_dict.keys():\n item_in_dict[key.upper()] = convert_to_uppercase(item_in_dict.pop(key))\n except AttributeError:\n try:\n return item_in_dict.upper()\n except AttributeError:\n return item_in_dict\n return item_in_dict", "def convert_pybites_chars(text):\r\n char_table = defaultdict(str)\r\n for letter in ascii_letters:\r\n char_table[letter] = letter.swapcase() if letter.lower() in 'pybites' else letter\r\n trans_table = str.maketrans(dict(char_table))\r\n return text.translate(trans_table)", "def convert_uppercase(string1, index, number):\n i = 1\n j = index\n while i <= number:\n if j <= len(string1) - 1:\n string1 = string1[:j] + string1[j].upper() + string1[j+1:]\n i += 1\n j += 1\n return string1", "def myupper(value, arg):\n return value.replace(value[:arg], value[:arg].upper())", "def replace_with_uppercase(string, names, precompiled):\n for name in names:\n for result in precompiled[name].findall(string):\n string = string.replace(result, name)\n return string", "def _cmd_help_upper(self, ident, _from, to, msg, cmd):\n cinfo = self.init_cmd(ident, _from, to, msg)\n access = \"all\"\n\n if cmds[cmd][CMD_LEVEL] == 4:\n access = \"root\"\n elif cmds[cmd][CMD_LEVEL] == irc.LEVEL_MASKS['o']:\n access = \"op\"\n elif cmds[cmd][CMD_LEVEL] == irc.LEVEL_MASKS['v']:\n access = \"voice\"\n\n usage = '\\x02' + \"Usage\" + COLOR[\"rewind\"] + \": upper <string>.\"\n desc = '\\x02' + \"Description\" + COLOR[\"rewind\"] + \": Return a uppercased string.\"\n aliases = '\\x02' + \"Aliases\" + COLOR[\"rewind\"] + ': ' + \", \".join(cmds[cmd][CMD_ALIASES]) + '.'\n access = '\\x02' + \"Access\" + COLOR[\"rewind\"] + \": %s.\" %access\n\n self.privmsg(cinfo[1], usage + ' ' + desc + ' ' + aliases + ' ' + access)\n return None", "def upper_at_positon(my_string: str, n: int) -> str:\n\n tmp = list(my_string)\n tmp[n] = tmp[n].upper()\n return ''.join(tmp)", "def uppercase_words(string):\n string_parts = string.split()\n string_final = []\n for word in string_parts:\n id = 0\n for letter in word:\n if letter.isalpha():\n string_final.append(word[:id] + word[id].upper() + word[id + 1:])\n break\n id += 1\n\n\n # print(string_final) #Debug\n return \" \".join(string_final)", 
"def upper_case(self, columns):\n func = lambda cell: cell.upper() if cell is not None else cell\n self.set_col(columns, func, 'string')\n return self" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert all possible characters to smallcaps in a glyph string.
def small(glyphstring): _InternalCaseFunctions().expandcasedict() _InternalCaseFunctions().expandsmallcapscasedict() sc = [] for i in glyphstring.split('/'): suffix = '' bS = breakSuffix(i) if bS is not None: suffix = bS[1] if suffix == 'sc': suffix = '' i = bS[0] if i in lowercase: if i not in smallcapscasedict.keys(): i = casedict[i] if i in smallcapscasedict.keys(): i = smallcapscasedict[i] if i != 'S.sc/S.sc': if len(suffix) != 0: if i[-3:] == '.sc': i = ''.join([i, suffix]) else: i = '.'.join([i, suffix]) sc.append(i) return '/'.join(sc)
[ "def convert_pybites_chars(text):\r\n text = text.capitalize()\r\n for char in PYBITES:\r\n text = text.replace(char, char.upper())\r\n if text[0].lower() in PYBITES:\r\n text = text[0].lower() + text[1:]\r\n return text", "def smallcapName( glyphName=\"scGlyph\", suffix=\".sc\", lowercase=True ):\n\ttry:\n\t\treturnName = glyphName\n\t\t\n\t\t# make lowercase if requested:\n\t\tif lowercase:\n\t\t\tsuffixOffset = returnName.find(\".\")\n\t\t\tif suffixOffset > 0:\n\t\t\t\treturnName = returnName[:suffixOffset].lower() + returnName[suffixOffset:]\n\t\t\telse:\n\t\t\t\treturnName = returnName.lower()\n\t\t\t\n\t\t# add suffix:\n\t\treturnName = returnName + suffix\n\t\treturn returnName\n\texcept Exception as e:\n\t\tprint(\"Cannot compute smallcap name for: %s\" % glyphName)\n\t\tprint(\"Error: %s\" % e)\n\t\treturn None", "def str_to_ascii_lower_case(s):\n return ''.join([c.lower() if 'A' <= c <= 'Z' else c for c in s])", "def small_caps(self, small_caps):\n self._small_caps = small_caps", "def addSmallCapsLookups(self):\n smcpLookup = None\n c2scLookup = None\n for lookup in self.font.gsub_lookups:\n info = self.font.getLookupInfo(lookup)\n if info[0] == 'gsub_single':\n if len(info[2]) > 0:\n if info[2][0][0] == 'smcp' and smcpLookup is None:\n smcpLookup = lookup\n elif info[2][0][0] == 'c2sc' and c2scLookup is None:\n c2scLookup = lookup\n\n if smcpLookup is None:\n lastLookup = self.font.gsub_lookups[len(self.font.gsub_lookups) - 1]\n self.addLookup(SMCP_LOOKUP, 'gsub_single', 'smcp', lastLookup)\n smcpLookup = SMCP_LOOKUP\n\n if c2scLookup is None:\n self.addLookup(C2SC_LOOKUP, 'gsub_single', 'c2sc', smcpLookup)\n c2scLookup = C2SC_LOOKUP\n\n self.addLookupSubtable(smcpLookup, SMCP_SUBT)\n self.addLookupSubtable(c2scLookup, C2SC_SUBT)", "def _normalise_letter(self, letter: str) -> str: # pylint: disable=no-self-use\n return letter.lower()", "def lower_case(string):\n new_str = \"\"\n for letter in string:\n letter = ord(letter)\n if letter > 64 and letter < 91:\n letter += 32\n letter = chr(letter)\n new_str += letter\n return new_str", "def ascii_lower(text: str) -> str:\n return text.translate(ASCII_TABLE)", "def _lower(s):\n return s.translate(_lower_table)", "def LetterChanges(str):\n # code goes here\n vowels = ['a', 'i', 'o', 'e', 'u']\n result = ''\n for c in str:\n if c.isalpha():\n if c == 'z' or c == 'Z':\n c = 'A'\n else:\n c = chr(ord(c) + 1)\n if c in vowels:\n c = c.upper()\n result += c\n\n return result", "def _new_letter(self):\n i=0\n done=False\n\n if self._type=='abc':\n while i<26 and not done:\n e=\"%c\"%(i+97)\n if e not in self.positive_letters():\n done=True\n result=[e,\"%c\"%(i+65)]\n i+=1\n elif self._type=='x0':\n i=0\n done=False\n while not done:\n e=\"x%s\"%i\n if e not in self.positive_letters():\n done=True\n result=[e,\"X%i\"%i]\n i+=1\n i=0\n while not done:\n e=\"a%s\"%i\n if e not in self.positive_letters():\n done=True\n result=[e,\"A%i\"%i]\n i+=1\n\n\n return result", "def RNA_to_caps(RNA):\n \n # First uses is_RNA() method to check if input sequence is RNA;\n # this prevents proceeding on to use other methods (and wasting time\n # & resources) when the input sequence is not an RNA sequence.\n if Ribosome.is_RNA(RNA):\n return RNA.upper()\n \n return RNA.upper()", "def _encode_chars(self, sentence):\n chars_ids = [self._word_to_char_ids(cur_word)\n for cur_word in sentence]\n return self._wrap_in_s_char(chars_ids)", "def to_lower_alphanumeric(s):\n s = s.encode('ascii','ignore')\n s = filter(str.isalnum, s)\n return s.lower()", "def 
chinese_half2full():\n def string_op(input_str:str):\n rstring = \"\"\n for uchar in input_str:\n u_code = ord(uchar)\n if u_code == 32:\n u_code = 12288\n elif 33 <= u_code <= 126:\n u_code += 65248\n rstring += chr(u_code)\n return rstring\n return string_op", "def convert_pybites_chars(text):\r\n char_table = defaultdict(str)\r\n for letter in ascii_letters:\r\n char_table[letter] = letter.swapcase() if letter.lower() in 'pybites' else letter\r\n trans_table = str.maketrans(dict(char_table))\r\n return text.translate(trans_table)", "def DNA_to_caps(DNA):\n \n # First uses is_DNA() method to check if input sequence is DNA;\n # this prevents proceeding on to use other methods (and wasting time\n # & resources) when the input sequence is not a DNA sequence.\n if RNA_pol.is_DNA(DNA):\n return DNA.upper()", "def _new_letters(self,n=1):\n i=0\n result=[]\n\n if self._type=='abc':\n while i<26 and n>0:\n e=\"%c\"%(i+97)\n if e not in self.positive_letters():\n n=n-1\n result.append([e,\"%c\"%(i+65)])\n i+=1\n\n elif self._type=='x0':\n while n>0:\n e=\"x%s\"%i\n if e not in self.positive_letters():\n result.append([e,\"X%s\"%i])\n n=n-1\n i+=1\n i=0\n while n>0:\n e=\"a%s\"%i\n if e not in self.positive_letters():\n result.append([e,\"A%s\"%i])\n n=n-1\n i+=1\n\n\n return result", "def color_cap(color_letter, string):\n return C(color_letter)+string+C('N')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send this a glyph name and get a control string with all glyphs separated by slashes.
def controls(glyphname): controlslist = [] for value in controldict.values(): for v in value: for i in v.split('/'): if len(i) > 0: if i not in controlslist: controlslist.append(i) cs = '' if glyphname in controlslist: for key in controldict.keys(): for v in controldict[key]: if glyphname in v.split('/'): con = controldict[key] striptriple = [] hold1 = '' hold2 = '' for i in ''.join(con).split('/'): if len(i) != 0: if i == hold1 and i == hold2: pass else: striptriple.append(i) hold1 = hold2 hold2 = i constr = '/' + '/'.join(striptriple) # this is a bit of a hack since FL seems to have trouble # when it encounters the same string more than once. # so, let's stick the glyph at the end to differentiate it. # for example: HHOHOOH and HHOHOOO cs = constr + '/' + glyphname else: suffix = '' bS = breakSuffix(glyphname) if bS is not None: suffix = bS[1] glyphname = bS[0] if suffix[:2] == 'sc': controls = controldict['SC'] elif glyphname in uppercase: controls = controldict['UC'] elif glyphname in lowercase: controls = controldict['LC'] elif glyphname in digits: controls = controldict['DIGITS'] else: controls = controldict['UC'] if len(suffix) != 0: glyphname = '.'.join([glyphname, suffix]) cs = controls[0] + '/' + glyphname + controls[1] + '/' + glyphname + controls[2] return cs
[ "def glyphs(self, text):\r\n # fix: hackish\r\n text = re.sub(r'\"\\Z', '\\\" ', text)\r\n\r\n glyph_search = (\r\n # apostrophe's\r\n re.compile(r\"(\\w)\\'(\\w)\"),\r\n # back in '88\r\n re.compile(r'(\\s)\\'(\\d+\\w?)\\b(?!\\')'),\r\n # single closing\r\n re.compile(r'(\\S)\\'(?=\\s|' + self.pnct + '|<|$)'),\r\n # single opening\r\n re.compile(r'\\'/'),\r\n # double closing\r\n re.compile(r'(\\S)\\\"(?=\\s|' + self.pnct + '|<|$)'),\r\n # double opening\r\n re.compile(r'\"'),\r\n # 3+ uppercase acronym\r\n re.compile(r'\\b([A-Z][A-Z0-9]{2,})\\b(?:[(]([^)]*)[)])'),\r\n # 3+ uppercase\r\n re.compile(r'\\b([A-Z][A-Z\\'\\-]+[A-Z])(?=[\\s.,\\)>])'),\r\n # ellipsis\r\n re.compile(r'\\b(\\s{0,1})?\\.{3}'),\r\n # em dash\r\n re.compile(r'(\\s?)--(\\s?)'),\r\n # en dash\r\n re.compile(r'\\s-(?:\\s|$)'),\r\n # dimension sign\r\n re.compile(r'(\\d+)( ?)x( ?)(?=\\d+)'),\r\n # trademark\r\n re.compile(r'\\b ?[([]TM[])]', re.I),\r\n # registered\r\n re.compile(r'\\b ?[([]R[])]', re.I),\r\n # copyright\r\n re.compile(r'\\b ?[([]C[])]', re.I),\r\n )\r\n\r\n glyph_replace = [x % dict(self.glyph_defaults) for x in (\r\n r'\\1%(txt_apostrophe)s\\2', # apostrophe's\r\n r'\\1%(txt_apostrophe)s\\2', # back in '88\r\n r'\\1%(txt_quote_single_close)s', # single closing\r\n r'%(txt_quote_single_open)s', # single opening\r\n r'\\1%(txt_quote_double_close)s', # double closing\r\n r'%(txt_quote_double_open)s', # double opening\r\n r'<acronym title=\"\\2\">\\1</acronym>', # 3+ uppercase acronym\r\n r'<span class=\"caps\">\\1</span>', # 3+ uppercase\r\n r'\\1%(txt_ellipsis)s', # ellipsis\r\n r'\\1%(txt_emdash)s\\2', # em dash\r\n r' %(txt_endash)s ', # en dash\r\n r'\\1\\2%(txt_dimension)s\\3', # dimension sign\r\n r'%(txt_trademark)s', # trademark\r\n r'%(txt_registered)s', # registered\r\n r'%(txt_copyright)s', # copyright\r\n )]\r\n\r\n result = []\r\n for line in re.compile(r'(<.*?>)', re.U).split(text):\r\n if not re.search(r'<.*>', line):\r\n for s, r in zip(glyph_search, glyph_replace):\r\n line = s.sub(r, line)\r\n result.append(line)\r\n return ''.join(result)", "def fromFonttoolsGlyph(klass,font,glyphname):\n glyphset = font.getGlyphSet()\n from beziers.utils.pens import BezierPathCreatingPen\n pen = BezierPathCreatingPen(glyphset)\n glyph = font.getGlyphSet()[glyphname]\n glyph.draw(pen)\n return pen.paths", "def SoGlyph_getGlyph(*args) -> \"SoGlyph const *\":\n return _coin.SoGlyph_getGlyph(*args)", "def render_command(self):\n return ' '.join(self.command)", "def _get_name(self) -> \"std::string\" :\n return _core.ControlDefinition__get_name(self)", "def current_char(self) -> str:", "def glyphname(self) -> Optional[str]:\n\n return getGlyphnameForUnicode(self.unicode)", "def _get_name(self) -> \"std::string\" :\n return _core.TextCommandPalette__get_name(self)", "def get_name(self):\n return self._character", "def getGlyph(self, char):\n return FontGlyph(char, self, self.cairoContext)", "def symbol(self): \n \n return self._content[self._current_command].split(' ')[0][1:]", "def symbol(self):\n if self.command_type() == 'A_COMMAND':\n return self.next_command.split('@')[1]\n if self.command_type() == 'L_COMMAND':\n return self.next_command.split('(')[1][:-1]", "def __repr__(self):\n self.strg = replace_sym(self.strg, '___', self.frame+3)\n return replace_sym(self.strg, self.player.symbol, self.frame)", "def control_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"control_name\")", "def asciiRender(self) -> str:\n return self.identifier", "def DrawControlLabel(*args, **kwargs):\n return 
_aui.AuiToolBarArt_DrawControlLabel(*args, **kwargs)", "def getChar(self):\n if self.UART.is_open:\n if self.studentNumBox.hasFocus():\n c = self.UART.read(1)\n if c:\n self.studentNumBox.setText(self.studentNumBox.text() + c.decode('ascii'))\n elif self.passwordBox.hasFocus():\n c = self.UART.read(1)\n if c:\n self.passwordBox.setText(self.passwordBox.text() + c.decode('ascii'))", "def getPathname(*args) -> \"SbString\":\n return _coin.SoInput_getPathname(*args)", "def getAllText (self): # swingTextWidget.\n\n w = self\n ### s = Tk.Text.get(w,\"1.0\",\"end-1c\") # New in 4.4.1: use end-1c.\n s = '' ###\n\n if s is None:\n return u\"\"\n else:\n return g.toUnicode(s,g.app.tkEncoding)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Roughly sort a list of control strings.
def sortControlList(list): controls = [] for v in controldict.values(): for w in v: for x in w.split('/'): if len(x) is not None: if x not in controls: controls.append(x) temp_digits = digits + digits_oldstyle + fractions temp_currency = currency + currency_oldstyle ss_uppercase = [] ss_lowercase = [] ss_smallcaps = [] ss_digits = [] ss_currency = [] ss_other = [] for i in list: glyphs = i.split('/') c = glyphs[2] for glyph in glyphs: if len(glyph) is not None: if glyph not in controls: c = glyph if c in uppercase: ss_uppercase.append(i) elif c in lowercase: ss_lowercase.append(i) elif c in smallcaps: ss_smallcaps.append(i) elif c in temp_digits: ss_digits.append(i) elif c in temp_currency: ss_currency.append(i) else: ss_other.append(i) ss_uppercase.sort() ss_lowercase.sort() ss_smallcaps.sort() ss_digits.sort() ss_currency.sort() ss_other.sort() return ss_uppercase + ss_lowercase + ss_smallcaps + ss_digits + ss_currency + ss_other
[ "def sortCaseInsensitive():\n pass", "def reorder_strucs_in_canonical_order_and_omit_punctuation(struc_list):\r\n\r\n labeled_struc_list = [struc for struc in struc_list if not struc.is_space_or_punctuation_only()]\r\n labeled_struc_list.sort(key = lambda struc: (canonical_struc_order.index(struc.label) if struc.label in canonical_struc_order else 100, get_key(struc), struc.quick_print_struc()))\r\n return labeled_struc_list", "def radix_sort_str(strlist):\n offset = ord('a') - 1 # We want a placeholder space before 'a', chr(96)\n max_length = 0\n for word in strlist:\n max_length = max(max_length, len(word))\n\n # Add placeholders so all words are max length\n for i, word in enumerate(strlist[:]):\n strlist[i] = word + chr(96) * (max_length - len(word))\n\n buckets = [[] for j in xrange(ord('z') - offset)]\n for i in xrange(1, max_length + 1):\n for word in strlist:\n buckets[ord(word[-i].lower()) - offset].append(word)\n strlist[:] = []\n for bucket in buckets:\n strlist.extend(bucket)\n bucket[:] = []\n\n strlist[:] = [word.strip(chr(96)) for word in strlist]", "def sort(list_in, sort_key=lambda s: s.lower()):\n return sorted(list_in, key=sort_key)", "def sort_word_list(self)->None:\n self.word_list.sort(key=lambda item: (len(item), item))", "def SortNames(self, nameList):\n nameList.sort()", "def sortListAlphabetical(arr: List[str]) -> List[str]:\n return sorted(arr, key=str.lower)", "def sort(self):\n self.userInput.childElements.sort(key=Inputs.Option.value)", "def get_texts_sorted(elements: List):\n return sorted([e.get_text() for e in elements])", "def problem4_1(wordlist):\n print(wordlist)\n wordlist.sort(key = str.lower)\n print(wordlist)", "def sort(self,line):\r\n\t\tcommands = line.split(' ')\r\n\t\tcommands.sort(cmp)\r\n\t\t\r\n\t\tline = \"\"\r\n\t\tfor command in commands:\r\n\t\t\tline += command + \" \"\r\n\t\t\r\n\t\treturn line[:-1]", "def sort_words_case_insensitively(words):\r\n pass", "def _sort(list_in, sort_key=None):\n if sort_key is None:\n sort_key = lambda s: s.lower()\n return sorted(list_in, key=sort_key)", "def unstable_len_sort ( word_list ) :\n\n t = []\n for word in word_list :\n t.append( ( len(word), random.random(), word ) )\n\n t.sort()\n\n r = []\n for tpl in t :\n r.append( tpl[2] )\n\n return r", "def my_sort(m_list):\n pattern = r'([\\d\\.]+|[a-zA-Z]+)'\n print('sorted by Firewall Throughput ...')\n # Sort by number\n my_list = sorted(m_list,\n key=lambda l: float(re.findall(pattern, l[2])[0])\n )\n # Sort by unit\n my_list = sorted(my_list,\n key=lambda l: re.findall(pattern, l[2])[1],\n reverse=True\n )\n return my_list", "def sort_subject_list() -> None:\n with open(\"resources/subject_list.txt\", \"r+\") as outfile:\n lines = outfile.readlines()\n lines.sort()", "def shell_sort(data):\n if len(data) <= 1:\n return data\n\n gap = len(data) // 2\n while gap > 0:\n for i in range(gap, len(data)):\n last = data[i]\n j = i\n while j >= gap and data[j - gap] > last:\n data[j] = data[j - gap]\n j -= gap\n data[j] = last\n gap = gap // 2\n\n return data", "def gnome_sort(input_list):\n i=1\n while True:\n if i < len(input_list)-1:\n if input_list[i] >= input_list[i - 1]:\n i += 1\n if input_list[i] < input_list[i-1]:\n input_list[i],input_list[i-1]=input_list[i-1],input_list[i]\n i-=1\n if i==0:\n i+=1\n if i==len(input_list)-1:\n break\n return input_list", "def mergesort(file_list: List[str]) -> List[str]:\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return no existing user token and item name
def NO_EXISTING_TOKEN(): return { "token":"token_invalid", "name":"myobject1" }
[ "def NO_EXISTING_ITEM():\r\n ###TODO must be query db or get request\r\n return {\r\n \"item_id\":\"100\", \r\n }", "def get_single_user():", "def user(self) -> Optional[str]:\n\n if header := self.data.get(\"User\"):\n return header.name\n return None", "def get_user_name(token):\n\n if token is None:\n return None\n\n token = token.split(\" \")[1]\n\n if token == TEST_BEARER_TOKEN:\n return \"Test\"\n\n try:\n decoded_token = jwt.decode(token, properties.get_app_secret_key(), algorithm=\"HS256\")\n except InvalidTokenError:\n return None\n\n return decoded_token[\"sub\"]", "def get_identifier(self, request):\n api_key = self.extract_apikey(request)\n username = self.get_username_from_api_key(api_key)\n return username or 'nouser'", "def fetch_username_from_token() -> Any:\n import json\n user = User.query.filter(User.id == g.user.id).first()\n if user.github_token is None:\n return None\n url = 'https://api.github.com/user'\n session = requests.Session()\n session.auth = (user.email, user.github_token)\n try:\n response = session.get(url)\n data = response.json()\n return data['login']\n except Exception as e:\n g.log.error('Failed to fetch the user token')\n return None", "def get_reference_token(client, name):\n\n for member in client.api_call('users.list')['members']:\n if member['name'] == name:\n return '<@{0[id]}>'.format(member)\n\n raise KeyError('Could not find user with name: %s', name)", "def token_user_display_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"token_user_display_name\")", "def _get_name(payload):\n\n if not payload.data or \\\n \"RequestBody\" not in payload.data or \\\n \"User\" not in payload.data[\"RequestBody\"] or \\\n not payload.data[\"RequestBody\"][\"User\"]:\n return None\n\n return payload.data[\"RequestBody\"][\"User\"]", "def item_name(self) -> Optional[str]:\n return self.data.get(self._DATA_KEY_ITEM_NAME)", "def itemInfo(item_id):\n\n item = session.query(Item).filter_by(id=item_id).one()\n creator = getUserInfo(item.user_id)\n if 'username' not in login_session or \\\n creator.id != login_session['user_id']:\n return render_template('publiciteminfo.html', item=item)\n else:\n return render_template('iteminfo.html', item=item)", "def test_user_get_tokens(self):\n pass", "def resolve_token(self):\n\n token = self.request_string(\"token\", default=None)\n if not token:\n return (None, None)\n\n unverified_user = user_models.UnverifiedUser.get_for_token(token)\n if not unverified_user:\n return (None, None)\n\n # Success - token does indeed point to an unverified user.\n return (token, unverified_user)", "def test_get_user_inexistent_user(self):\n user = self.cm.get_user(\"NonExistent\")\n self.assertEqual(user, None)", "def no_token(self):\n self.response['status'] = 'error'\n self.data['message'] = \"No Request Token was found or the Request Token sent was invalid. 
You probably have not logged in\"", "def test_create_token_not_existed_user(self):\n payload = {\n \"email\": \"test@gmail.com\",\n \"password\": \"Test1234\"\n }\n\n # we are trying to generate token for not created user\n response = self.client.post(TOKEN_URL, payload)\n self.assertNotIn(\"token\", response.data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def item_stolen(self):\n return self.name_item", "async def user_identifier(request: Request) -> str:\n api_key = request.headers.get(\"X-API-Key\") or request.query_params.get(\"api_key\")\n user = request.headers.get(\"x-oasst-user\")\n if not user:\n payload = await request.json()\n auth_method = payload.get(\"user\").get(\"auth_method\")\n user_id = payload.get(\"user\").get(\"id\")\n user = f\"{auth_method}:{user_id}\"\n return f\"{api_key}:{user}\"", "def get_item(uid):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return no existing item
def NO_EXISTING_ITEM(): ###TODO must be query db or get request return { "item_id":"100", }
[ "def test_getNonexistant(self):\n failure = self.failureResultOf(self.storage.get([\"BOGUS\"]))\n failure.trap(exceptions.NoSuchStoreException)", "def _get_item_no_load(self):\r\n for index, iterator in enumerate(self._loaded_files):\r\n try:\r\n item = next(iterator)\r\n return item\r\n except StopIteration:\r\n pass\r\n raise StopIteration", "def has_item(self, usage_key):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def get_item(self, item_name):\n if len(self.items) > 0: # if there is at least one item in that location\n for element in self.items:\n if element.get_name() == item_name:\n return element\n return False\n else:\n return False", "def item_not_found_error():\n\n backend_function = settings.OPR_MODULESTORE\n backend = import_module(backend_function)\n\n return backend.get_item_not_found_error()", "def _is_missing(self, item):\n dst = '{}/{}'.format(self._data_list[item], item.split()[0])\n if os.path.exists(dst):\n # it is bare repo who knows\n return 'maybe'\n return True", "def pesquisar_item(self, id):\n\n item = None\n\n for pesquisa in DbLoja.query(DbLoja.id == id, order_by=DbAluno.id):\n item = pesquisa\n\n if item == '' and item == None:\n return False\n else:\n return item", "def test_get_item(self):\n self.test_application.get('/v0.0/item', status=200)\n return None", "def test_exists_noexist_return_jobid(self):\n exists, jid = Job.exists(\n \"16S\", \"Beta Diversity\",\n {\"--otu_table_fp\": 1, \"--mapping_fp\": 27}, Analysis(1),\n return_existing=True)\n self.assertFalse(exists)\n self.assertEqual(jid, None)", "def not_found(self):\n\n _id = self.request.args.get('_id')\n oid = ObjectId(_id)\n db = self.request.args.get('db')\n url = self.request.args.get('url')\n\n if self.valid_apikey():\n self.db = self.dbclient[db]\n cursor = self.db['data'].find({'_id': oid})\n data = []\n for i in cursor:\n data.append(i)\n data[0]['url'] = url\n self.db['gslookup_not_found'].insert(data)\n self.db['data'].delete_one({'_id': oid})\n response = self.app.response_class(\n response=self.json.dumps(\n {'msg': 'register {} moved from data to gslookup_not_data'.format(_id)}),\n status=200,\n mimetype='application/json'\n )\n return response\n else:\n return self.apikey_error()", "def is_singleton(item):\n return isinstance(item, Item) and not item.album_id", "def test_get_unseen(self):\n pass", "def get_item(self, key):\n\t\tif not key in self.items: return None\n\t\treturn self.items[ key ]", "def get_one(self, uid):\n entity = self.table.get_item(Key={\"uid\": uid}).get(\"Item\")\n if not entity:\n return None\n return entity", "def not_exist(self):\n return self.filter(not_, Configuration.exists)", "def label_exists(self):\n entity_number = wb_SQL_query(self.label, \"item\")\n if entity_number:\n return entity_number[0]\n return None", "def test_non_item(self):\n mainpage = self.get_mainpage()\n datasite = self.get_repo()\n\n item = next(datasite.preload_entities([mainpage]))\n self.assertIsInstance(item, pywikibot.ItemPage)\n self.assertTrue(hasattr(item, '_content'))\n self.assertEqual(item.id, 'Q5296')", "def asserted_one(items):\n one = False\n for item in items:\n if one:\n raise ValueError('More than one: [%s]' % item.items())\n one = True\n if one:\n return item\n else:\n raise NoItemError('No items')", "def exists(self, key, item): # noqa\n return self.execute_command(BF_EXISTS, key, item)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Devuelve el NER del modelo si existe o un NER en blanco en caso que no exista.
def __get_model_ner(self): if not self.is_loaded(): return None return ModelLoader.get_model_ner(self.__reference)
[ "def _check_model_path(self):\n root = os.path.expanduser('~')\n model_path = os.path.join(root, 'cltk_data', self.language, 'model',\n self.language + '_models_cltk', 'taggers',\n 'pos', 'model.la')\n assert os.path.isfile(model_path), \"Lapos model not present. Import '<language>_models_cltk'.\"", "def _is_model_present(self) -> bool:\n if file_exists(self.model_path):\n return True\n return False", "def test_raises_on_doesnotexist(self):\n from django_cereal.pickle import model_encode, model_decode\n from django_cereal.tests.testapp.models import ModelWithBasicField\n\n dne = ModelWithBasicField(id=1)\n encoded = model_encode(dne)\n self.assertRaises(ModelWithBasicField.DoesNotExist, model_decode, encoded)", "def check_model_exists(config):\n return os.path.exists(get_model_name_config(config))", "def load_model():\n # TODO: INSERT CODE\n # return model", "def _checkModelConfig(self):\n if (self.modelConfig.__eq__('')):\n print('Debe cargar primero el archivo de configuración')\n self.statusBar().showMessage('Debe cargar primero el archivo de configuración')\n return False\n else:\n return True #true porque no esta vacio", "def test_get_model_by_name_must_return_error_after_looking_for_a_model_that_is_not_in_the_database(self):\n # Picking a model\n model = sample_models[0]\n # Perform GET request to /modelo with 'nome' as url parameter\n response = self.client.get(url_for('aimodels.get_model', nome=model['nome']))\n # Ensure matching conditions to response received\n self.assertRaises(NoResultFound)\n self.assertEqual(404, response.status_code)\n self.assertEqual({'error': 'No such model found within the database'}, response.json)", "def _nk_or_pk_field(self, serialize, data, model_field):\n if ((serialize and self.root.use_natural_keys) or\n not serialize\n and hasattr(model_field.rel.to._default_manager, 'get_by_natural_key')\n and hasattr(data[model_field.name], '__iter__')):\n return NaturalKeyRelatedField()\n return PrimaryKeyRelatedField()", "def validate_relation(relt_code):\n try:\n Relation.objects.get(code=relt_code)\n except Relation.DoesNotExist:\n return False\n\n return True", "def _nativeModel( self ):\r\n\t\tfullname = self._nativePointer.name\r\n\t\tif ( '.' 
in fullname ):\r\n\t\t\treturn mxs.getNodeByName( fullname.split('.')[0] )\r\n\t\treturn None", "def test_get_model_by_name(self):\n pass", "def _nativeModel(self):\n\t\treturn None", "def find_node(self, name_or_representation):\n for node in self.all_nodes:\n if name_or_representation.lower() in {node.name.lower(), \\\n node.representation.lower()}:\n return node\n #if name_or_representation -ends\n #for node -ends\n return None", "def has_successor(self, ngram):\n\n if ngram in self.model.keys():\n return True\n return False\n pass", "def get_minimal_model():\n if 'minimal' not in _model_cache:\n _model_cache['minimal'] = spacy.load(\n 'en_core_web_sm', \n disable=['parser', 'tagger', 'ner']\n )\n return _model_cache['minimal']", "def test_nonexistent_odid(self):\n self.assertIsNone(get_object_detection_by_id(odid=999))", "def toplevel_relationloader():\n from lkbutils import rdflib_load_relations", "def _get_default_model_id(self, cr, uid, context=None):\n if context is None:\n context = {}\n\n default_model = context.get('default_model', False)\n if default_model:\n tag_model_obj = self.pool.get('res.tag.model')\n model_ids = tag_model_obj.search(cr, uid, [('model', '=', default_model)], limit=1, context=context)\n if model_ids:\n return model_ids[0]\n\n return False", "def _nativeModel( self ):\r\n\t\tname = self.name()\r\n\t\tsplit = name.split( '.' )\r\n\t\tif len( split ) > 1:\r\n\t\t\tmodelName = split[0]\r\n\t\t\tfrom cross3d import Scene\r\n\t\t\tscene = Scene()\r\n\t\t\treturn scene._findNativeObject( modelName )\r\n\t\treturn None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(num, num, num) > (num) This function receives initial velocity, launch angle, and altitude change in the landing position and returns the horizontal range of a projectile. v is in [m/s] theta is in [degrees] del_h is in [m] >>> range = horizontal_distance(100, 20, 40) >>> range 515.5652309241808
def horizontal_distance(v, theta, del_h): gravitational_acceleration = 9.81 theta_converted = theta * (math.pi/180) #converts theta from given degrees to radians #split range equation into four parts so it's cleaner when pieced together first_term = v*math.cos(theta_converted) second_term= (v*math.sin(theta_converted))/gravitational_acceleration third_term = (pow(v, 2)*pow(math.sin(theta_converted), 2))/ pow(gravitational_acceleration, 2) fourth_term = (2*del_h)/gravitational_acceleration range = first_term*(second_term + math.sqrt(third_term - fourth_term)) return range
[ "def HorizontalLineLimits(self, horizontalTrial):\n x,y = self.point\n x2,y2 = self.point2\n if self.direction is UP or self.direction is DOWN:\n top = min(y, y2)\n bottom = max(y, y2)\n if horizontalTrial.ptOrigin[1] >= top and horizontalTrial.ptOrigin[1] <= bottom and \\\n horizontalTrial.leftBound <= x and horizontalTrial.rightBound >= x:\n if horizontalTrial.ptOrigin[0] < x:\n horizontalTrial.rightBound = x - ConnectorSeparation\n horizontalTrial.rightTop = top - ConnectorSeparation\n horizontalTrial.rightBottom = bottom + ConnectorSeparation\n else:\n horizontalTrial.leftBound = x + ConnectorSeparation\n horizontalTrial.leftTop = top - ConnectorSeparation\n horizontalTrial.leftBottom = bottom + ConnectorSeparation\n elif horizontalTrial.ptOrigin[1] == y:\n left = min(x, x2)\n right = max(x, x2)\n if horizontalTrial.rightBound >= left and horizontalTrial.leftBound <= right:\n if horizontalTrial.ptOrigin[0] < left:\n horizontalTrial.rightBound = left - ConnectorSeparation\n horizontalTrial.rightTop = y - ConnectorSeparation\n horizontalTrial.rightBottom = y + ConnectorSeparation", "def horizon_range(planet_radius, observer_height):\n circle = Math.geometry.Circular(planet_radius)\n #cos_theta = float(planet_radius)/(planet_radius + observer_height)\n #theta = math.acos(cos_theta)\n #return planet_radius*math.sin(theta)\n return circle.tangent_distance(observer_height)", "def calculate_horizontal_wind(self):\n self.wind_alpha = 2.5 / self.hill.wind_variability\n self.wind_horizontal = (\n random.expovariate(self.wind_alpha) +\n self.hill.base_wind_horizontal)\n direction = -1 if random.random() < 0.5 else 1\n self.wind_horizontal *= direction\n self.wind_horizontal *= (self.skijumper.balance - 5)\n self.wind_horizontal = round(self.wind_horizontal, 2)\n self.angle = math.radians(self.wind_horizontal)\n self.jump_distance = self.jump_distance * math.cos(self.angle)\n pass", "def _horizontal_coordinates(target, observer, datetime_):\n observer.date = ephem.date(datetime_)\n target.compute(observer)\n return target.az, target.alt", "def horizontalDistance(point1, point2):\n return np.sqrt(np.square(point2[0] - point1[0]) + np.square(point2[1] - point1[1]))", "def extrapolate_start_position(self, start_position):\r\n\r\n print \"Extrapolating lane\"\r\n\r\n width = self.img.shape[1]\r\n if start_position > width / 2:\r\n return start_position - self.avg_lane_width\r\n else:\r\n return start_position + self.avg_lane_width", "def findAzimuthRange(min_azimuth, max_azimuth, beamwidth):\n if beamwidth == 360:\n azimuths = [0]\n else:\n if max_azimuth < min_azimuth:\n max_azimuth += 360\n azimuths = np.arange(min_azimuth, max_azimuth+beamwidth/2., beamwidth/2.0)\n if azimuths[-1] > max_azimuth: azimuths[-1] = max_azimuth\n if azimuths[-1] % 360. == azimuths[0]: azimuths = azimuths[:-1]\n azimuths[azimuths>=360] -= 360\n return azimuths", "def get_horizontal_translation(rect_1, rect_2):\n dist_1 = rect_2.left - rect_1.right\n dist_2 = rect_2.right - rect_1.left\n return dist_1 if abs(dist_1) <= abs(dist_2) else dist_2", "def altitude_range(rpc, x, y, w, h, margin_top, margin_bottom):\n # TODO: iterate the procedure used here to get a finer estimation of the\n # TODO: bounding box on the ellipsoid and thus of the altitude range. 
For flat\n # TODO: regions it will not improve much, but for mountainous regions there is a\n # TODO: lot to improve.\n\n # find bounding box on the ellipsoid (in geodesic coordinates)\n lon_m, lon_M, lat_m, lat_M = geodesic_bounding_box(rpc, x, y, w, h)\n\n # if bounding box is out of srtm domain, return coarse altitude estimation\n if (lat_m < -60 or lat_M > 60):\n print \"Out of SRTM domain, returning coarse range from rpc\"\n return altitude_range_coarse(rpc)\n\n # sample the bounding box with regular step of 3 arcseconds (srtm\n # resolution)\n ellipsoid_points = sample_bounding_box(lon_m, lon_M, lat_m, lat_M)\n\n # compute srtm height on all these points\n # these altitudes are computed with respect to the WGS84 ellipsoid\n import os\n srtm = common.run_binary_on_list_of_points(ellipsoid_points, 'srtm4',\n option=None, binary_workdir=os.path.dirname(__file__))\n srtm = np.ravel(srtm)\n\n # srtm data may contain 'nan' values (meaning no data is available there).\n # These points are most likely water (sea) and thus their height with\n # respect to geoid is 0. Thus we replace the nans with 0.\n srtm[np.isnan(srtm)] = 0\n\n # extract extrema (and add a +-100m security margin)\n h_m = np.round(srtm.min()) + margin_bottom\n h_M = np.round(srtm.max()) + margin_top\n\n return h_m, h_M", "def get_range(self):\n if self.battery_size == 24:\n range = 200\n elif self.battery_size == 34:\n range = 330\n\n print(f\"this car goes about {range} miles\")", "def _infer_from_speed_range(min_speed, max_speed):\n return (min_speed + max_speed) / 2", "def point_find(self, projectile):\n\n while True:\n value_type = input(\"Find horizontal displacement (x), height (y),\"\n \" or quit (q)? \")\n if value_type == \"q\":\n return\n \n elif value_type == \"y\":\n #loop for correct input\n while True:\n try:\n x = float(input(\"x: \"))\n\n break\n\n except ValueError: print(\"Please use a number.\")\n\n print(\"y =\", round(-self.a * x * (x - self.b), 5))\n dy_dx = -2 * self.a * x + self.a * self.b #differentiate\n angle = atan(dy_dx) #arctangent for the angle\n velocity = projectile.hori_vel / cos(angle) #h = a/cos(x)\n print(\"Velocity:\", round(velocity, 5),\n \"\\nAngle:\", round(angle, 5))\n\n elif value_type == \"x\":\n #loop for correct input\n while True:\n try:\n y = float(input(\"y: \"))\n\n if y > projectile.max_height: raise ValueError\n\n break\n\n except ValueError: print(\"Please use a number\"\n \" < maximum height.\")\n\n #x can take two different values (parabolic function)\n x_one = (self.b ** 2 / 4 - y/self.a) ** 0.5 + self.b/2\n x_two = -(self.b ** 2 / 4 - y/self.a) ** 0.5 + self.b/2\n \n print(\"x =\", round(x_one, 5))\n dy_dx = -2 * self.a * x_one + self.a * self.b\n angle = atan(dy_dx)\n velocity = projectile.hori_vel / cos(angle)\n print(\"Velocity:\", round(velocity, 5),\n \"\\nAngle:\", round(angle, 5))\n\n print(\"OR x =\", round(x_two, 5))\n dy_dx = -2 * self.a * x_two + self.a * self.b\n angle = atan(dy_dx)\n velocity = projectile.hori_vel / cos(angle)\n print(\"Velocity:\", round(velocity, 5),\n \"\\nAngle:\", round(angle, 5))\n\n else: print(\"Please use x, y, or q.\")", "def reflect_horizontal(location, _, board_height):\n r, c = location\n bottom_row_of_board = board_height - 1\n return (bottom_row_of_board - r, c)", "def heuristic(self,x1,y1):\n absXDiff = abs(self.x - x1)\n absYDiff = abs(self.y - y1)\n h = ceil((absXDiff + absYDiff) / 3)\n return h", "def horizontal_velocity_test(u, v,\n max_u_velocity=150, max_v_velocity=150):\n\n flags = []\n for u_vel, v_vel in izip(u, 
v):\n if abs(u_vel) > max_u_velocity or abs(v_vel) > max_v_velocity:\n flags.append(ADCP_FLAGS['bad'])\n else:\n flags.append(ADCP_FLAGS['good'])\n\n return flags", "def compute_hyperparameter_ranges(self): \n exponent = np.floor(\n np.log10(np.abs(1 / self.trainX.shape[0]))).astype(int)\n self.gamma = np.logspace(exponent - 1, exponent + 4, self.param_space)\n self.c = np.logspace(exponent, 1, self.param_space)\n self.alpha = np.logspace(exponent, 1, self.param_space)\n self.l1_ratio = np.logspace(exponent, 0, self.param_space)", "def compute_hyperparameter_ranges(self):\n exponent = np.floor(\n np.log10(np.abs(1 / self.trainX.shape[0]))).astype(int)\n self.gamma = np.logspace(exponent - 1, exponent + 4, self.param_space)\n self.c = np.logspace(exponent, 1, self.param_space)\n self.alpha = np.logspace(exponent, 1, self.param_space)\n self.l1_ratio = np.logspace(exponent, 0, self.param_space)", "def start_target_to_space(start, target, length, width):\n origin = (min(start[0], target[0][0] + length / 2) - length,\n min(start[1], target[0][1] + width / 2) - width)\n bounds = (max(start[0], target[0][0] + length / 2) - origin[0] + width,\n max(start[1], target[0][1] + width / 2) - origin[1] + width)\n return origin, bounds", "def calc_range_slope(self, up: int, down: int) -> int:\n max_der, min_der = -1e9, 1e9\n\n while up < down:\n up += 1\n val = self.hor_hist[up] - self.hor_hist[up - 1]\n max_der = max(max_der, val)\n min_der = min(min_der, val)\n\n return max_der - min_der" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate weighted percentiles. Multiple percentiles may be passed as a list
def calculate_weighted_percentiles(data, wt, percentiles): assert np.greater_equal(percentiles, 0.0).all(), "Percentiles less than zero" assert np.less_equal(percentiles, 1.0).all(), "Percentiles greater than one" data = np.asarray(data) assert len(data.shape) == 1 if wt is None: wt = np.ones(data.shape, np.float) else: wt = np.asarray(wt, np.float) assert wt.shape == data.shape assert np.greater_equal(wt, 0.0).all(), "Not all weights are " \ "non-negative." assert len(wt.shape) == 1 n = data.shape[0] assert n > 0 i = np.argsort(data) sd = np.take(data, i, axis=0) sw = np.take(wt, i, axis=0) aw = np.add.accumulate(sw) if not aw[-1] > 0: raise ValueError('Nonpositive weight sum') w = (aw - 0.5 * sw) / aw[-1] spots = np.searchsorted(w, percentiles) o = [] for (s, p) in zip(spots, percentiles): if s == 0: o.append(sd[0]) elif s == n: o.append(sd[n - 1]) else: f1 = (w[s] - p) / (w[s] - w[s - 1]) f2 = (p - w[s - 1]) / (w[s] - w[s - 1]) assert f1 >= 0 and f2 >= 0 and f1 <= 1 and f2 <= 1 assert abs(f1 + f2 - 1.0) < 1e-6 o.append(sd[s - 1] * f1 + sd[s] * f2) return o
[ "def percentile(data, percentiles, weights=None):\n # check if actually weighted percentiles is needed\n if weights is None:\n return np.percentile(data, list(percentiles))\n if np.equal(weights, 1.).all():\n return np.percentile(data, list(percentiles))\n\n # make sure percentiles are fractions between 0 and 1\n if not np.greater_equal(percentiles, 0.0).all():\n raise ValueError(\"Percentiles less than 0\")\n if not np.less_equal(percentiles, 100.0).all():\n raise ValueError(\"Percentiles greater than 100\")\n\n #Make sure data is in correct shape\n shape = np.shape(data)\n n = len(data)\n if (len(shape) != 1):\n raise ValueError(\"wrong data shape, expecting 1d\")\n\n if len(weights) != n:\n raise ValueError(\"weights must be the same shape as data\")\n if not np.greater_equal(weights, 0.0).all():\n raise ValueError(\"Not all weights are non-negative.\")\n\n _data = np.asarray(data, dtype=float)\n\n if hasattr(percentiles, '__iter__'):\n _p = np.asarray(percentiles, dtype=float) * 0.01\n else:\n _p = np.asarray([percentiles * 0.01], dtype=float)\n\n _wt = np.asarray(weights, dtype=float)\n\n len_p = len(_p)\n sd = np.empty(n, dtype=float)\n sw = np.empty(n, dtype=float)\n aw = np.empty(n, dtype=float)\n o = np.empty(len_p, dtype=float)\n\n i = np.argsort(_data)\n np.take(_data, i, axis=0, out=sd)\n np.take(_wt, i, axis=0, out=sw)\n np.add.accumulate(sw, out=aw)\n\n if not aw[-1] > 0:\n raise ValueError(\"Nonpositive weight sum\")\n\n w = (aw - 0.5 * sw) / aw[-1]\n\n spots = np.searchsorted(w, _p)\n for (pk, s, p) in zip(range(len_p), spots, _p):\n if s == 0:\n o[pk] = sd[0]\n elif s == n:\n o[pk] = sd[n - 1]\n else:\n f1 = (w[s] - p) / (w[s] - w[s - 1])\n f2 = (p - w[s - 1]) / (w[s] - w[s - 1])\n assert (f1 >= 0) and (f2 >= 0) and (f1 <= 1 ) and (f2 <= 1)\n assert abs(f1 + f2 - 1.0) < 1e-6\n o[pk] = sd[s - 1] * f1 + sd[s] * f2\n return o", "def wp(data, wt, percentiles):\n assert np.greater_equal(percentiles, 0.0).all(), \"Percentiles less than zero\"\n assert np.less_equal(percentiles, 1.0).all(), \"Percentiles greater than one\"\n data = np.asarray(data)\n assert len(data.shape) == 1\n if wt is None:\n wt = np.ones(data.shape, np.float)\n else:\n wt = np.asarray(wt, np.float)\n assert wt.shape == data.shape\n assert np.greater_equal(wt, 0.0).all(), \"Not all weights are non-negative.\"\n\n assert len(wt.shape) == 1\n n = data.shape[0]\n assert n > 0\n i = np.argsort(data)\n sd = np.take(data, i, axis=0)\n sw = np.take(wt, i, axis=0)\n aw = np.add.accumulate(sw)\n if not aw[-1] > 0:\n print(\"Nonpositive weight sum\")\n w = (aw-0.5*sw)/aw[-1]\n spots = np.searchsorted(w, percentiles)\n o = []\n for (s, p) in zip(spots, percentiles):\n if s == 0:\n o.append(sd[0])\n elif s == n:\n o.append(sd[n-1])\n else:\n f1 = (w[s] - p)/(w[s] - w[s-1])\n f2 = (p - w[s-1])/(w[s] - w[s-1])\n assert f1 >= 0 and f2 >= 0 and f1 <= 1 and f2 <= 1\n assert abs(f1+f2-1.0) < 1e-6\n o.append(sd[s-1]*f1 + sd[s]*f2)\n return o", "def weighted_quantile(values, quantiles, sample_weight=None, \n values_sorted=False):\n values = np.array(values)\n quantiles = np.array(quantiles)\n if sample_weight is None:\n sample_weight = np.ones(len(values))\n sample_weight = np.array(sample_weight)\n assert np.all(quantiles >= 0) and np.all(quantiles <= 1), 'quantiles should be in [0, 1]'\n\n if not values_sorted:\n sorter = np.argsort(values)\n values = values[sorter]\n sample_weight = sample_weight[sorter]\n\n weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight\n weighted_quantiles /= np.sum(sample_weight)\n 
return np.interp(quantiles, weighted_quantiles, values)", "def weighted_quantile(data, weights, quantile):\n ind_sorted = np.argsort(data)\n sorted_data = data[ind_sorted]\n sorted_weights = weights[ind_sorted]\n # Compute the auxiliary arrays\n Sn = np.cumsum(sorted_weights)\n Pn = (Sn-0.5*sorted_weights)/np.sum(sorted_weights)\n # Get the value of the weighted median\n interpolated_quant = np.interp(quantile, Pn, sorted_data)\n\n return interpolated_quant", "def calculate_percentile(data_list, percentile):\n return numpy.percentile(data_list, percentile)", "def calculate_percentiles(self):\n self.percentile_low = np.percentile(self.data, 25)\n self.percentile_high = np.percentile(self.data, 75)", "def weighted_percentile(vector):\n return stats.rankdata(vector, method='average') / len(vector)", "def get_percentiles(self):\n self.percentiles = np.linspace(0,100,self.n_bins+1)[1:-1].tolist()\n return self", "def percentile(self, expression, percentage=50., binby=[], limits=None, shape=default_shape, percentile_shape=1024*16, percentile_limits=\"minmax\", selection=False, async=False):\n\t\tif not isinstance(binby, (tuple, list)):\n\t\t\tbinby = [binby]\n\t\telse:\n\t\t\tbinby = binby\n\t\t@delayed\n\t\tdef calculate(expression, shape, limits):\n\t\t\t#print(binby + [expression], shape, limits)\n\t\t\ttask = TaskStatistic(self, [expression] + binby, shape, limits, op=OP_ADD1, selection=selection)\n\t\t\tself.executor.schedule(task)\n\t\t\treturn task\n\t\t@delayed\n\t\tdef finish(percentile_limits, *counts_list):\n\t\t\tmedians = []\n\t\t\tfor i, counts in enumerate(counts_list):\n\t\t\t\tcounts = counts[0]\n\t\t\t\t#print(\"percentile_limits\", percentile_limits)\n\t\t\t\t#print(\"counts=\", counts)\n\t\t\t\t#print(\"counts shape=\", counts.shape)\n\t\t\t\t# F is the 'cumulative distribution'\n\t\t\t\tF = np.cumsum(counts, axis=0)\n\t\t\t\t# we'll fill empty values with nan later on..\n\t\t\t\tok = F[-1,...] > 0\n\t\t\t\tF /= np.max(F, axis=(0))\n\t\t\t\t#print(F[-1])\n\t\t\t\t# find indices around 0.5 for each bin\n\t\t\t\ti2 = np.apply_along_axis(lambda x: x.searchsorted(percentage/100., side='left'), axis = 0, arr = F)\n\t\t\t\ti1 = i2 - 1\n\t\t\t\ti1 = np.clip(i1, 0, percentile_shapes[i]-1)\n\t\t\t\ti2 = np.clip(i2, 0, percentile_shapes[i]-1)\n\n\t\t\t\t# interpolate between i1 and i2\n\t\t\t\t#print(\"cum\", F)\n\t\t\t\t#print(\"i1\", i1)\n\t\t\t\t#print(\"i2\", i2)\n\t\t\t\tpmin, pmax = percentile_limits[i]\n\n\t\t\t\t# np.choose seems buggy, use the equivalent code instead\n\t\t\t\t#a = i1\n\t\t\t\t#c = F\n\t\t\t\tF1 = np.array([F[i1[I]][I] for I in np.ndindex(i1.shape)])\n\t\t\t\tF1 = F1.reshape(F.shape[1:])\n\n\t\t\t\t#a = i2\n\t\t\t\tF2 = np.array([F[i2[I]][I] for I in np.ndindex(i2.shape)])\n\t\t\t\tF2 = F2.reshape(F.shape[1:])\n\n\t\t\t\t#print(\"F1,2\", F1, F2)\n\n\t\t\t\toffset = (percentage/100.-F1)/(F2-F1)\n\t\t\t\tmedian = pmin + (i1+offset) / float(percentile_shapes[i]-1.) * (pmax-pmin)\n\t\t\t\t#print(\"offset\", offset)\n\t\t\t\t#print(pmin + (i1+offset) / float(percentile_shapes[i]-1.) * (pmax-pmin))\n\t\t\t\t#print(pmin + (i1) / float(percentile_shapes[i]-1.) 
* (pmax-pmin))\n\t\t\t\t#print(median)\n\n\t\t\t\t# empty values should be set to nan\n\t\t\t\tmedian[~ok] = np.nan\n\t\t\t\tmedians.append(median)\n\t\t\tvalue = np.array(vaex.utils.unlistify(waslist, medians))\n\t\t\treturn value\n\t\twaslist, [expressions, ] = vaex.utils.listify(expression)\n\t\tshape = _expand_shape(shape, len(binby))\n\t\tpercentile_shapes = _expand_shape(percentile_shape, len(expressions))\n\t\tif percentile_limits:\n\t\t\tpercentile_limits = _expand_limits(percentile_limits, len(expressions))\n\t\tlimits = self.limits(binby, limits, selection=selection, async=True)\n\t\tpercentile_limits = self.limits(expressions, percentile_limits, selection=selection, async=True)\n\t\t@delayed\n\t\tdef calculation(limits, percentile_limits):\n\t\t\ttasks = [calculate(expression, (percentile_shape, ) + tuple(shape), list(percentile_limits) + list(limits))\n\t\t\t\t\t for percentile_shape, percentile_limit, expression\n\t\t\t\t\t in zip(percentile_shapes, percentile_limits, expressions)]\n\t\t\treturn finish(percentile_limits, delayed_args(*tasks))\n\t\t\t#return tasks\n\t\tresult = calculation(limits, percentile_limits)\n\t\treturn self._async(async, result)", "def percentile(data_list, score, kind='weak'):\n\tn = len(data_list)\n\n\tif kind == 'strict':\n\t\treturn len([i for i in data_list if i < score]) / float(n) * 100\n\telif kind == 'weak':\n\t\treturn len([i for i in data_list if i <= score]) / float(n) * 100\n\telif kind == 'mean':\n\t\treturn (len([i for i in data_list if i < score]) + len([i for i in data_list if i <= score])) * 50 / float(n)\n\telse:\n\t\traise ValueError(\"The kind kwarg must be 'strict', 'weak' or 'mean'. You can also opt to leave it out and rely on the default method.\")", "def second_quartile(my_list):\n return percentileR7(my_list, 50)", "def percentile(values=None, pct=None):\n\n watermark_index = int(round((float(pct) / 100) * len(values) + .5))\n watermark = sorted(values)[watermark_index - 1]\n\n return [element for element in values if element <= watermark]", "def percentiles(self, *percentiles):\n return [percentile for percentile in\n self.histogram.percentiles(*percentiles)]", "def test_number_of_points_half_of_number_of_percentiles(self):\n\n expected = np.array(\n [\n [\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.1, 1.0, 1.0, 1.0],\n [1.0, 1.0, 0.1, 0.1, 0.1, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.1, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n ],\n [\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.2, 1.0, 1.0, 1.0],\n [1.0, 1.0, 0.2, 0.2, 0.2, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.2, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n ],\n [\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.3, 1.0, 1.0, 1.0],\n [1.0, 1.0, 0.3, 0.3, 0.3, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.3, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n ],\n [\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.4, 1.0, 1.0, 1.0],\n [1.0, 1.0, 0.4, 0.4, 0.4, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.4, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n ],\n [\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0],\n [1.0, 1.0, 0.5, 0.5, 0.5, 1.0, 1.0],\n [1.0, 
1.0, 1.0, 0.5, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n ],\n [\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.6, 1.0, 1.0, 1.0],\n [1.0, 1.0, 0.6, 0.6, 0.6, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.6, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n ],\n [\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.7, 1.0, 1.0, 1.0],\n [1.0, 1.0, 0.7, 0.7, 0.7, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.7, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n ],\n [\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.8, 1.0, 1.0, 1.0],\n [1.0, 1.0, 0.8, 0.8, 0.8, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.8, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n ],\n [\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.9, 1.0, 1.0, 1.0],\n [1.0, 1.0, 0.9, 0.9, 0.9, 1.0, 1.0],\n [1.0, 1.0, 1.0, 0.9, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n ],\n [\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n ],\n ]\n )\n\n data = np.ones((7, 7), dtype=np.float32)\n data[3, 3] = 0\n cube = set_up_variable_cube(data, spatial_grid=\"equalarea\",)\n\n percentiles = np.array([2.5, 5, 7.5, 10, 12.5, 15, 17.5, 20, 22.5, 25])\n radius = 2000.0\n result = GeneratePercentilesFromANeighbourhood(\n radius, percentiles=percentiles\n ).process(cube)\n self.assertArrayAlmostEqual(result.data, expected)", "def third_quartile(my_list):\n return percentileR7(my_list, 75)", "def get_percentiles(results, sim_number=\"\"):\n p5, p25, p50, p75, p95 = [], [], [], [], []\n fields = [\n word\n for word in results.dtype.names\n if word.startswith(\"simulation\" + str(sim_number))\n ]\n for i in range(len(fields)):\n p5.append(np.percentile(list(results[fields[i]]), 5))\n p25.append(np.percentile(list(results[fields[i]]), 25))\n p50.append(np.percentile(list(results[fields[i]]), 50))\n p75.append(np.percentile(list(results[fields[i]]), 75))\n p95.append(np.percentile(list(results[fields[i]]), 95))\n return p5, p25, p50, p75, p95", "def quantiles(data_x,data_y,bins=None):\n\n data_x = np.ravel(data_x)\n data_y = np.ravel(data_y)\n\n if (bins is None):\n bins = [np.amin(data_x),np.amax(data_x),20]\n\n perc_all = {}\n perc_all['count'], bin_edges, binnumber = scipy.stats.binned_statistic(data_x,data_y,statistic='count', bins=bins)\n perc_all['median'], bin_edges, binnumber = scipy.stats.binned_statistic(data_x,data_y,statistic='median', bins=bins)\n perc_all['10th'], bin_edges, binnumber = scipy.stats.binned_statistic(data_x,data_y,statistic=lambda y: np.percentile(y, 10), bins=bins)\n perc_all['90th'], bin_edges, binnumber = scipy.stats.binned_statistic(data_x,data_y,statistic=lambda y: np.percentile(y, 90), bins=bins)\n perc_all['25th'], bin_edges, binnumber = scipy.stats.binned_statistic(data_x,data_y,statistic=lambda y: np.percentile(y, 25), bins=bins)\n perc_all['75th'], bin_edges, binnumber = scipy.stats.binned_statistic(data_x,data_y,statistic=lambda y: np.percentile(y, 75), bins=bins)\n bin_centers = (bin_edges[1:] + 
bin_edges[:-1])/2.\n\n return bin_centers, perc_all", "def first_quartile(my_list):\n return percentileR7(my_list, 25)", "def quantiles(self, param, *args, **kwargs):\n q = kwargs.pop(\"q\", [0.025, 0.975])\n return np.quantile(\n self.dist(param, *args),\n q,\n axis=0,\n )", "def StatsQuartiles(population):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator that creates new Click command and adds it to command list.
def command(self, *args, **kwargs) -> Callable: def inner(func): command = click.command(*args, **kwargs)(func) self.commands.append(command) return command return inner
[ "def command(*args, **kwargs):\r\n\r\n def decorator(function):\r\n return Command(function, **kwargs)\r\n\r\n if args:\r\n return decorator(*args)\r\n return decorator", "def command(self, func=None, **kwargs):\n def decorator(func):\n self._register_command(func, **kwargs)\n return func\n if func is None:\n return decorator\n return decorator(func)", "def command(self, *args, **kwargs):\n kwargs['cls'] = CustomCommand\n return super().command(*args, **kwargs)", "def add_command( self, **kw ) :\n return self._add_widget( 'button', None, **kw )", "def command(*args, **kwargs):\n\n if args in COMMANDS:\n raise ValueError('%s is already defined!' % args)\n\n def decorator(handler=None):\n COMMANDS[args] = (kwargs, handler)\n\n return handler\n\n return decorator", "def at_cmdset_creation(self):\r\n self.add(Command())", "def add_command(cls, cmd):\n cls._commands.append(cmd)", "def __call__(self, command_type, name='_'):\n return self.dataset.add_command(command_type, name)", "def command(self, *args, **kwargs):\n def decorator(f):\n old_kwargs = kwargs.copy()\n self.__strip_invalidKeys(kwargs)\n\n from .pretty import prettyCommand\n\n tmpCommand = None\n origHelpTxt = None\n aliases = []\n\n try:\n if isinstance(args[0], list):\n _args = [args[0][0]] + list(args[1:])\n for alias in args[0][1:]:\n if tmpCommand is None:\n cmd: PrettyCommand = prettyCommand(alias, None, **kwargs)(f)\n origHelpTxt = cmd.help\n cmd.alias = True\n cmd.aliases = []\n cmd.help = \"(Alias for '{c}') {h}\".format(c = _args[0], h = cmd.help)\n cmd.short_help = \"Alias for '{}'\".format(_args[0])\n cmd.true_hidden = cmd.hidden\n cmd.hidden = True\n self.__assign_invalidKeys(old_kwargs, cmd)\n super(MultiCommandShell, self).add_command(cmd)\n tmpCommand = cmd\n\n else:\n cmd = deepcopy(tmpCommand)\n cmd.alias = True\n cmd.aliases = []\n cmd.name = alias\n cmd.help = \"(Alias for '{c}') {h}\".format(c = _args[0], h = origHelpTxt)\n cmd.short_help = \"Alias for '{}'\".format(_args[0])\n cmd.hidden = True\n self.__assign_invalidKeys(old_kwargs, cmd)\n super(MultiCommandShell, self).add_command(cmd)\n\n aliases.append(alias)\n else:\n _args = args\n\n\n if tmpCommand is None:\n cmd: PrettyCommand = prettyCommand(*_args, **kwargs)(f)\n cmd.alias = False\n cmd.aliases = aliases\n self.__assign_invalidKeys(old_kwargs, cmd)\n super(MultiCommandShell, self).add_command(cmd)\n return cmd\n\n else:\n cmd = deepcopy(tmpCommand)\n cmd.alias = False\n cmd.aliases = aliases\n cmd.name = _args[0]\n cmd.help = origHelpTxt\n cmd.short_help = ''\n cmd.hidden = cmd.true_hidden\n self.__assign_invalidKeys(old_kwargs, cmd)\n super(MultiCommandShell, self).add_command(cmd)\n return cmd\n\n except:\n cmd: PrettyCommand = prettyCommand(*args, **kwargs)(f)\n cmd.alias = False\n cmd.aliases = aliases\n self.__assign_invalidKeys(old_kwargs, cmd)\n super(MultiCommandShell, self).add_command(cmd)\n return cmd\n\n return decorator", "def command(func):\n @wraps(func)\n def wrapped():\n return func()\n\n if func.__name__ not in OPTIONS:\n raise KeyError('Cannot register {}, not mentioned in docstring/docopt.'.format(func.__name__))\n if OPTIONS[func.__name__]:\n command.chosen = func\n\n return wrapped", "def addCommand(self, command):\n self.commands.append(command)", "def cmd(name: str) -> Callable:\n return g.new_cmd_decorator(name, ['c', 'findCommands',])", "def add_slash_command(\n self,\n cmd,\n name: str = None,\n description: str = None,\n guild_ids: typing.List[int] = None,\n options: list = None,\n connector: dict = None,\n 
has_subcommands: bool = False,\n ):\n name = name or cmd.__name__\n name = name.lower()\n guild_ids = guild_ids if guild_ids else []\n if name in self.commands:\n tgt = self.commands[name]\n if not tgt.has_subcommands:\n raise error.DuplicateCommand(name)\n has_subcommands = tgt.has_subcommands\n for x in tgt.allowed_guild_ids:\n if x not in guild_ids:\n guild_ids.append(x)\n\n description = description or getdoc(cmd)\n\n if options is None:\n options = manage_commands.generate_options(cmd, description, connector)\n\n _cmd = {\n \"func\": cmd,\n \"description\": description,\n \"guild_ids\": guild_ids,\n \"api_options\": options,\n \"connector\": connector or {},\n \"has_subcommands\": has_subcommands,\n }\n obj = model.CommandObject(name, _cmd)\n self.commands[name] = obj\n self.logger.debug(f\"Added command `{name}`\")\n return obj", "def add_command(self, command_type, name=None):\n return self.dataset.add_command(command_type, name)", "def addCommandCallback(*args, **kwargs):\n \n pass", "def from_callable(func):\n return Command(\n name=Command.extract_name(func),\n usage=Command.extract_usage(func),\n brief=Command.extract_brief(func),\n description=Command.extract_description(func),\n )", "def add_authorization():\n def decorator(command):\n \"\"\"Empty wrapper around decoration to be consistent with Click style.\"\"\"\n @click.option('-h', '--host', default=None)\n @click.option('-a', '--auth-token', default=None)\n @wraps(command)\n def wrapper(host, auth_token, *args, **kwargs):\n \"\"\"Wrap command with authorized Uploader creation.\"\"\"\n try:\n auth = TokenAuth(jwt_token=auth_token)\n except KeyError:\n warn_missing_auth()\n\n if host is None:\n try:\n host = os.environ['MGS_HOST']\n except KeyError:\n print('No host. Exiting', file=stderr)\n exit(1)\n\n knex = Knex(token_auth=auth, host=host)\n uploader = Uploader(knex=knex)\n\n return command(uploader, *args, **kwargs)\n return wrapper\n return decorator", "def sub_command(self, **attrs):\n def inner(func: Callable):\n return self.add_child(SlashSubCommand(func, **attrs))\n\n return inner", "def new_shell(self, cls=None, **kwargs):\n from .pretty import prettyGroup\n\n def decorator(f):\n cmd = prettyGroup(cls=MultiCommandShell if not cls else cls, isShell=True, **kwargs)(f)\n cmd.alias = False\n self.add_command(cmd)\n return cmd\n\n return decorator" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers all commands to the given group.
def register_to(self, group: click.Group) -> None: for command in self.commands: group.add_command(command)
[ "def register_all_groups():\n register_group_default()", "def add_subcommands(command_group, plugin_manager):", "def add_target_command_groups(self, target: \"SoCTarget\", command_set: \"CommandSet\"):\n pass", "def add_all_commands(parser):\n for command in pymod.command.all_commands():\n parser.add_command(command)", "def load_cmds(path: PathLike, package: str, group: click.Group):\n path = pathlib.Path(path)\n\n for item in path.parent.glob(\"cmd_*.py\"):\n module = importlib.import_module(f\".{item.stem}\", package=package)\n module_cmd = getattr(module, \"cmd\", None)\n if callable(module_cmd):\n # noinspection PyTypeChecker\n group.add_command(module_cmd)", "def install_cmdgroup(self,\n grpname,\n prefix=None,\n permission=None,\n helptext=None,\n ):\n return _CommandGroup(\n grpname=grpname,\n cmdlist=self.__cmds,\n cmdglist=self.__cmdgs,\n prefix=prefix,\n permission=permission,\n helptext=helptext,\n globalprefix=self.__globalprefix,\n )", "def sub_command_group(self, **attrs):\n def inner(func: Callable):\n return self.add_child(SlashCommandGroup(func, **attrs))\n\n return inner", "def associate_all(self, group):\n self._associate_all(group)", "def addGroupCommandInput(self, *args) -> \"adsk::core::Ptr< adsk::core::GroupCommandInput >\" :\n return _core.CommandInputs_addGroupCommandInput(self, *args)", "def assign_to_all_link_group(self, group=0x01):\n self._plm.send_standard(self._address,\n COMMAND_ASSIGN_TO_ALL_LINK_GROUP_0X01_NONE,\n group)", "def add_commands(self, classes):\n #We instanciate all modules and then add them to the self.modules list\n for classe in classes:\n logger.debug(\"Registering %s\" % classe)\n objet = classe(self)\n self.modules.append(objet)", "def _register_groups(self):\n self._groups[GRP_COOL_ON] = OnOff(COOLING, self._address, GRP_COOL_ON)\n self._groups[GRP_HEAT_ON] = OnOff(HEATING, self._address, GRP_HEAT_ON)\n self._groups[GRP_HUMID_HI_ON] = OnOff(\n DEHUMIDIFYING, self._address, GRP_HUMID_HI_ON\n )\n self._groups[GRP_HUMID_LO_ON] = OnOff(\n HUMIDIFYING, self._address, GRP_HUMID_LO_ON\n )\n\n self._groups[GRP_TEMP] = Temperature(TEMPERATURE, self._address, GRP_TEMP, 0)\n self._groups[GRP_HUMID] = Humidity(\n HUMIDITY, self._address, group=GRP_HUMID, default=0\n )\n self._groups[GRP_SYS_MODE] = SystemMode(\n SYSTEM_MODE, self._address, group=GRP_SYS_MODE, default=0\n )\n self._groups[GRP_FAN_MODE] = FanMode(\n FAN_MODE, self._address, group=GRP_FAN_MODE, default=4\n )\n self._groups[GRP_COOL_SP] = SetPoint(\n COOL_SET_POINT,\n self._address,\n group=GRP_COOL_SP,\n default=65,\n )\n self._groups[GRP_HEAT_SP] = SetPoint(\n HEAT_SET_POINT,\n self._address,\n group=GRP_HEAT_SP,\n default=95,\n )\n self._groups[GRP_HUMID_HI_SP] = Humidity(\n HUMIDITY_HIGH, self._address, group=GRP_HUMID_HI_SP, default=0\n )\n self._groups[GRP_HUMID_LO_SP] = Humidity(\n HUMIDITY_LOW, self._address, group=GRP_HUMID_LO_SP, default=0\n )", "async def dispatch_command(self, message, group, name, args, **kwargs):\n await group.on_command(self.client, message, name, args, **kwargs)", "def package_commands() -> None:\n pass", "def register(self, name, Command, force=False):\n if not force and name in self.registry:\n raise AlreadyRegistered('Command %r is already registered' % name)\n command = Command(self.prog_name, self.stdout)\n command.manager = self\n self.registry[name] = command\n command.post_register(self)", "def update_groups(self, uid, groups, character=None):\n pass", "def register_commands_in_dir(self, dir):\n # Get all the files below the directory\n for 
root, dirs, files in os.walk(f'{dir}'):\n for filename in files:\n # Filter out \"*.pyc\" files\n if filename[-1] != 'c':\n # The name of the file, minus \".py\"\n name = filename[:-3]\n # Gets everything exported by this module\n members = inspect.getmembers(importlib.import_module(name))\n for member in members:\n # Get the member that is a class and has the same name as the file\n if inspect.isclass(member[1]) and member[1].__name__.lower() == name.lower():\n # ...And register it as a command\n self.register_command(member[1])", "def create(self, group):\n self.request.mongo_connection.shinken.hostgroups.insert(\n group.as_dict()\n )", "def Register(self, group_name, flag):\n self.flag_groups[group_name].append(flag)\n self.flags[flag.name] = flag" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use this data source to access information about an existing Enrollment Account Billing Scope. Example Usage ```python import pulumi import pulumi_azure as azure example = azure.billing.get_enrollment_account_scope(billing_account_name="existing", enrollment_account_name="existing") pulumi.export("id", example.id) ```
def get_enrollment_account_scope(billing_account_name: Optional[str] = None, enrollment_account_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEnrollmentAccountScopeResult: __args__ = dict() __args__['billingAccountName'] = billing_account_name __args__['enrollmentAccountName'] = enrollment_account_name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure:billing/getEnrollmentAccountScope:getEnrollmentAccountScope', __args__, opts=opts, typ=GetEnrollmentAccountScopeResult).value return AwaitableGetEnrollmentAccountScopeResult( billing_account_name=pulumi.get(__ret__, 'billing_account_name'), enrollment_account_name=pulumi.get(__ret__, 'enrollment_account_name'), id=pulumi.get(__ret__, 'id'))
[ "def get_enrollment_account_scope_output(billing_account_name: Optional[pulumi.Input[str]] = None,\n enrollment_account_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetEnrollmentAccountScopeResult]:\n ...", "def test_enrollment_by_id(self):\r\n account_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.enrollment_by_id(id, account_id)", "def billing_scope(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_scope\")", "async def get_lending_account(self, **params):\r\n return await self.client_helper(\"get_lending_account\", **params)", "def get_organization(oid):\n try:\n return Organization.objects.get(id=oid)\n except DoesNotExist:\n raise APIError(ORG_NOT_FOUND, status_code=404)", "def read_scope(scope_name=getuser()):\n if scope_name.isalnum():\n with open(Path.joinpath(\n Path.home(),\n '.sbox',\n f'{scope_name}.keyring',\n )) as f:\n add_scope(f.read(), scope_name)", "def get_account(self, acc_id):\n acc_list = self.get_account_list(acc_id)\n if not acc_list:\n acc_list = self.get_account_file(acc_id)\n if not acc_list:\n return None\n else:\n return acc_list\n return acc_list", "def exporter_access_request(db):\n return ExporterAccessRequest.objects.get(reference=\"ear/1\")", "def get_account(self):\n return self.fetch_data(\"account\")", "def get(account_id: str):\n return Account.query.get(account_id)", "def receivables_account(self) -> Account:\n row = AccountEntry.objects.filter(source_invoice=self).order_by('id').first()\n return row.account if row else None", "def test_non_existing_enrollments(self):\n\n client = graphene.test.Client(schema)\n\n params = {\n 'uuid': 'e8284285566fdc1f41c8a22bb84a295fc3c4cbb3',\n 'organization': 'Example',\n 'fromDate': '2050-01-01T00:00:00+0000',\n 'toDate': '2060-01-01T00:00:00+0000'\n }\n executed = client.execute(self.SH_WITHDRAW,\n context_value=self.context_value,\n variables=params)\n\n msg = executed['errors'][0]['message']\n self.assertEqual(msg, ENROLLMENT_DOES_NOT_EXIST_ERROR)", "def get_ip_scope(auth, url,scopeId=None):\n if auth is None or url is None: # checks to see if the imc credentials are already available\n set_imc_creds()\n if scopeId is None:\n get_ip_scope_url = \"/imcrs/res/access/assignedIpScope\"\n f_url = url + get_ip_scope_url\n r = requests.get(f_url, auth=auth, headers=HEADERS) # creates the URL using the payload variable as the contents\n try:\n if r.status_code == 200:\n ipscopelist = (json.loads(r.text))\n return ipscopelist\n\n\n except requests.exceptions.RequestException as e:\n return \"Error:\\n\" + str(e) + \" get_ip_scope: An Error has occured\"", "def get_acls(scope: str, profile: str) -> Dict[str, str]:\n\n # Get the acls for the scope\n acl_query = 'databricks secrets list-acls'\n acl_query += f' --profile {profile}'\n acl_query += f' --scope {scope}'\n\n # Run and enforce success\n sp = subprocess.run(acl_query, capture_output=True)\n sp.check_returncode()\n\n # Extract the existing scopes\n acl_lines = [l.strip('\\r') for l in sp.stdout.decode().split('\\n')[1:]]\n acl_lines = [l for l in acl_lines if l.replace('-', '').strip()]\n acl_lines = [[elem for elem in l.split(' ') if elem] for l in acl_lines]\n\n # Turn acls int a dictionary\n existing_acls = {acl[0]: acl[1] for acl in acl_lines}\n\n return existing_acls", "def list_account_details(self, resp):\n data = resp['data']\n base_currency_adjustment = BaseCurrencyAdjustment()\n 
base_currency_adjustment.set_adjustment_date(data['adjustment_date'])\n base_currency_adjustment.set_adjustment_date_formatted(data[\\\n 'adjustment_date_formatted'])\n base_currency_adjustment.set_exchange_rate(data['exchange_rate'])\n base_currency_adjustment.set_exchange_rate_formatted(data[\\\n 'exchange_rate_formatted'])\n base_currency_adjustment.set_currency_id(data['currency_id'])\n for value in data['accounts']:\n accounts = Account()\n accounts.set_account_id(value['account_id'])\n accounts.set_account_name(value['account_name'])\n accounts.set_gl_specific_type(value['gl_specific_type'])\n accounts.set_fcy_balance(value['fcy_balance'])\n accounts.set_fcy_balance_formatted(value['fcy_balance_formatted'])\n accounts.set_bcy_balance(value['bcy_balance'])\n accounts.set_bcy_balance_formatted(value['bcy_balance_formatted'])\n accounts.set_adjusted_balance(value['adjusted_balance'])\n accounts.set_adjusted_balance_formatted(value[\\\n 'adjusted_balance_formatted'])\n accounts.set_gain_or_loss(value['gain_or_loss'])\n accounts.set_gain_or_loss_formatted(value['gain_or_loss_formatted'])\n base_currency_adjustment.set_accounts(accounts)\n base_currency_adjustment.set_notes(data['notes'])\n base_currency_adjustment.set_currency_code(data['currency_code'])\n return base_currency_adjustment", "def test_get_no_record(self):\n with self.assertRaises(ResourceNotFound):\n Scope.get(generate_uuid())", "def getAccount(self):\n result = self.getAccounts(1)\n if len(result) < 1:\n return None\n else:\n return result[0]", "def test_non_existing_organization(self):\n\n client = graphene.test.Client(schema)\n\n params = {\n 'uuid': 'e8284285566fdc1f41c8a22bb84a295fc3c4cbb3',\n 'organization': 'Bitergia',\n 'fromDate': '1998-01-01T00:00:00+0000',\n 'toDate': '2009-01-01T00:00:00+0000'\n }\n executed = client.execute(self.SH_ENROLL,\n context_value=self.context_value,\n variables=params)\n\n msg = executed['errors'][0]['message']\n self.assertEqual(msg, ORGANIZATION_BITERGIA_DOES_NOT_EXIST_ERROR)", "async def fetch_scopes(self):\n\n data = await self.http.fetch_scopes()\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use this data source to access information about an existing Enrollment Account Billing Scope. Example Usage ```python import pulumi import pulumi_azure as azure example = azure.billing.get_enrollment_account_scope(billing_account_name="existing", enrollment_account_name="existing") pulumi.export("id", example.id) ```
def get_enrollment_account_scope_output(billing_account_name: Optional[pulumi.Input[str]] = None, enrollment_account_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetEnrollmentAccountScopeResult]: ...
[ "def get_enrollment_account_scope(billing_account_name: Optional[str] = None,\n enrollment_account_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEnrollmentAccountScopeResult:\n __args__ = dict()\n __args__['billingAccountName'] = billing_account_name\n __args__['enrollmentAccountName'] = enrollment_account_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure:billing/getEnrollmentAccountScope:getEnrollmentAccountScope', __args__, opts=opts, typ=GetEnrollmentAccountScopeResult).value\n\n return AwaitableGetEnrollmentAccountScopeResult(\n billing_account_name=pulumi.get(__ret__, 'billing_account_name'),\n enrollment_account_name=pulumi.get(__ret__, 'enrollment_account_name'),\n id=pulumi.get(__ret__, 'id'))", "def test_enrollment_by_id(self):\r\n account_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.enrollment_by_id(id, account_id)", "def billing_scope(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_scope\")", "async def get_lending_account(self, **params):\r\n return await self.client_helper(\"get_lending_account\", **params)", "def get_organization(oid):\n try:\n return Organization.objects.get(id=oid)\n except DoesNotExist:\n raise APIError(ORG_NOT_FOUND, status_code=404)", "def read_scope(scope_name=getuser()):\n if scope_name.isalnum():\n with open(Path.joinpath(\n Path.home(),\n '.sbox',\n f'{scope_name}.keyring',\n )) as f:\n add_scope(f.read(), scope_name)", "def get_account(self, acc_id):\n acc_list = self.get_account_list(acc_id)\n if not acc_list:\n acc_list = self.get_account_file(acc_id)\n if not acc_list:\n return None\n else:\n return acc_list\n return acc_list", "def exporter_access_request(db):\n return ExporterAccessRequest.objects.get(reference=\"ear/1\")", "def get_account(self):\n return self.fetch_data(\"account\")", "def get(account_id: str):\n return Account.query.get(account_id)", "def receivables_account(self) -> Account:\n row = AccountEntry.objects.filter(source_invoice=self).order_by('id').first()\n return row.account if row else None", "def test_non_existing_enrollments(self):\n\n client = graphene.test.Client(schema)\n\n params = {\n 'uuid': 'e8284285566fdc1f41c8a22bb84a295fc3c4cbb3',\n 'organization': 'Example',\n 'fromDate': '2050-01-01T00:00:00+0000',\n 'toDate': '2060-01-01T00:00:00+0000'\n }\n executed = client.execute(self.SH_WITHDRAW,\n context_value=self.context_value,\n variables=params)\n\n msg = executed['errors'][0]['message']\n self.assertEqual(msg, ENROLLMENT_DOES_NOT_EXIST_ERROR)", "def get_ip_scope(auth, url,scopeId=None):\n if auth is None or url is None: # checks to see if the imc credentials are already available\n set_imc_creds()\n if scopeId is None:\n get_ip_scope_url = \"/imcrs/res/access/assignedIpScope\"\n f_url = url + get_ip_scope_url\n r = requests.get(f_url, auth=auth, headers=HEADERS) # creates the URL using the payload variable as the contents\n try:\n if r.status_code == 200:\n ipscopelist = (json.loads(r.text))\n return ipscopelist\n\n\n except requests.exceptions.RequestException as e:\n return \"Error:\\n\" + str(e) + \" get_ip_scope: An Error has occured\"", "def get_acls(scope: str, profile: str) -> Dict[str, str]:\n\n # Get the acls for the scope\n acl_query = 'databricks secrets list-acls'\n acl_query += f' --profile {profile}'\n acl_query += f' --scope {scope}'\n\n # Run and enforce success\n sp = subprocess.run(acl_query, capture_output=True)\n 
sp.check_returncode()\n\n # Extract the existing scopes\n acl_lines = [l.strip('\\r') for l in sp.stdout.decode().split('\\n')[1:]]\n acl_lines = [l for l in acl_lines if l.replace('-', '').strip()]\n acl_lines = [[elem for elem in l.split(' ') if elem] for l in acl_lines]\n\n # Turn acls int a dictionary\n existing_acls = {acl[0]: acl[1] for acl in acl_lines}\n\n return existing_acls", "def list_account_details(self, resp):\n data = resp['data']\n base_currency_adjustment = BaseCurrencyAdjustment()\n base_currency_adjustment.set_adjustment_date(data['adjustment_date'])\n base_currency_adjustment.set_adjustment_date_formatted(data[\\\n 'adjustment_date_formatted'])\n base_currency_adjustment.set_exchange_rate(data['exchange_rate'])\n base_currency_adjustment.set_exchange_rate_formatted(data[\\\n 'exchange_rate_formatted'])\n base_currency_adjustment.set_currency_id(data['currency_id'])\n for value in data['accounts']:\n accounts = Account()\n accounts.set_account_id(value['account_id'])\n accounts.set_account_name(value['account_name'])\n accounts.set_gl_specific_type(value['gl_specific_type'])\n accounts.set_fcy_balance(value['fcy_balance'])\n accounts.set_fcy_balance_formatted(value['fcy_balance_formatted'])\n accounts.set_bcy_balance(value['bcy_balance'])\n accounts.set_bcy_balance_formatted(value['bcy_balance_formatted'])\n accounts.set_adjusted_balance(value['adjusted_balance'])\n accounts.set_adjusted_balance_formatted(value[\\\n 'adjusted_balance_formatted'])\n accounts.set_gain_or_loss(value['gain_or_loss'])\n accounts.set_gain_or_loss_formatted(value['gain_or_loss_formatted'])\n base_currency_adjustment.set_accounts(accounts)\n base_currency_adjustment.set_notes(data['notes'])\n base_currency_adjustment.set_currency_code(data['currency_code'])\n return base_currency_adjustment", "def test_get_no_record(self):\n with self.assertRaises(ResourceNotFound):\n Scope.get(generate_uuid())", "def getAccount(self):\n result = self.getAccounts(1)\n if len(result) < 1:\n return None\n else:\n return result[0]", "def test_non_existing_organization(self):\n\n client = graphene.test.Client(schema)\n\n params = {\n 'uuid': 'e8284285566fdc1f41c8a22bb84a295fc3c4cbb3',\n 'organization': 'Bitergia',\n 'fromDate': '1998-01-01T00:00:00+0000',\n 'toDate': '2009-01-01T00:00:00+0000'\n }\n executed = client.execute(self.SH_ENROLL,\n context_value=self.context_value,\n variables=params)\n\n msg = executed['errors'][0]['message']\n self.assertEqual(msg, ORGANIZATION_BITERGIA_DOES_NOT_EXIST_ERROR)", "async def fetch_scopes(self):\n\n data = await self.http.fetch_scopes()\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print Database User Lock Information
def userlock(self): sql = """SELECT username, account_status FROM dba_users WHERE (account_status LIKE '%EXPIRED%' OR account_status LIKE '%LOCKED%\') AND username NOT in('DBSNMP','DMSYS','ORACLE_OCM', 'OLAPSYS', 'WMSYS', 'XDB', 'SCOTT', 'OUTLN', 'PERFSTAT', 'MGMT_VIEW', 'SYS', 'SYSTEM','TSMSYS', 'DIP', 'SYSMAN','ANONYMOUS', 'MONI', 'BACKUP', 'ORASYSSI','ORDDATA','SI_INFORMTN_SCHEMA','ORDPLUGINS','XS$NULL', 'TIVOLI','EXFSYS','APPQOSSYS', 'CTXSYS','MDSYS','ORDSYS','APEX_030200','OWBSYS','SQLTXADMIN','SPA','SPATIAL_CSW_ADMIN_USR', 'OWBSYS_AUDIT','APEX_PUBLIC_USER','MDDATA', 'FLOWS_FILES','SPATIAL_WFS_ADMIN_USR') """ return QueryResult(sql, self.cur)
[ "def lock_info(self):\n infos = []\n\n locks = Lock.query.valid_locks(self.object_type, self.object_id)\n for lock in locks:\n infos.append({'creator': lock.creator,\n 'time': lock.time,\n 'token': lock.token,\n 'type': self.desearialize_lock_type(lock.lock_type)})\n\n return infos", "def db_lock_action(self): # pragma: no cover\n pass", "def print_users(db):\n cursor = db.cursor()\n cursor.execute('SELECT * FROM users')\n data = cursor.fetchall()\n for row in data:\n print(row[0])\n print(row[1])\n print(row[2])\n print(row[3])\n print(row[4])\n print(row[5])\n print(row[6])\n print(row[7])\n print(row[8])\n print(row[9])", "def user_lock(self, user):\n self.locked_on = now()\n self.locked_by = user\n self.modified_on = now()\n self.save(update_fields=(\"locked_on\", \"locked_by\", \"modified_on\"))", "def confirm_db_info():\n print(\"\\nlog: ###### DB Logs ######\")\n connect()\n confirm_brain_db_info()\n confirm_plugin_db_info()", "def show_db_contents(db_file):\n blocklist = db_read(db_file)\n if not blocklist:\n print(\"DB file is empty.\")\n else:\n blocklist = sorted(blocklist.items(),\n key=lambda tup: time.mktime(time.strptime(tup[1],\n \"%Y %b %d %H:%M:%S\")),\n reverse=True)\n\n for ip_addr, date in blocklist:\n print(\"{:15} blocked on {}\".format(ip_addr, date))", "def lock_all_tables(self):\n return \"\"\"--lock-all-tables\"\"\"", "def user_unlock(self):\n self.locked_on = None\n self.locked_by = None\n self.modified_on = now()\n self.save(update_fields=(\"locked_on\", \"locked_by\", \"modified_on\"))", "def info_database(self):\n for x in self.list_databases:\n print(\"%50s: %s\" %( x['definition'], x['entry_id']))", "def test_lock_account_user(self):\n pass", "def queries_active_locks():\n query_queries_active_locks(current_app.extensions['sqlalchemy'].db)", "def select_user_and_print_report(self):\n self.print_all_transaction(self.prompt_user_selection())", "def locked(self, version):\n if version.state == constants.DRAFT and version_is_locked(version):\n return render_to_string('djangocms_version_locking/admin/locked_icon.html')\n return \"\"", "def describe_user(self):\n description = f\"First name : {self.first_name.title()}\\n\"\n description += f\"Last name : {self.last_name.title()}\\n\"\n description += f\"Age : {self.age}\\n\"\n description += f\"Heigth : {self.heigth}\\n\"\n description += f\"Weight : {self.weight}\\n\"\n description += f\"Login attempts : {self.login_attempts}\\n\"\n print(description)", "def lock_time(self, user, next = False):\n base = float(self.user_lock_time_progression)\n lock_count = self.lock_count(user)\n if not lock_count > 0:\n return 0\n else:\n if next is False:\n exponent = lock_count - 1\n else:\n exponent = lock_count\n t_lock = self.user_lock_time * 1000000 * base ** exponent\n # limit maximum lock time\n t_lock_max = self.user_lock_max_time * 1000000\n if t_lock > t_lock_max:\n t_lock = t_lock_max\n self.log.debug('AcctMgr:lock_time(%s): ' % user + str(t_lock))\n return t_lock", "def show_privileges(self):\n for privilege in self.privileges:\n print(privilege)", "def acc_status():\n print(\"\\nAccount Information\")\n for info in acc_info:\n print(info, \":\", acc_info[info])", "def get_all_locked(s):\n locked = s.query(Panels,Users).join(Users).filter(Panels.locked != None).values(Panels.name,Users.username,Panels.id.label(\"id\"))\n return locked", "def flush_privileges(self):\n return \"\"\"--flush-privileges\"\"\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a Minid for a file.
def register(filename, title, locations, replaces, test, json): mc = commands.get_client() kwargs = parse_none_values([ ('replaces', replaces, None), ('locations', locations.split(',') if locations else None, []), ]) minid = mc.register_file(filename, title=title, test=test, **kwargs) print_minids(minid.data, output_json=json)
[ "def setup_id_file(self):\n\n with open(self.id_path, \"w+\") as f_id:\n\n f_id.write(str(UniqueID.create_id()))", "def register_raw_file(self, nifti_file):\n\n # insert the NIfTI file\n self.fetch_and_insert_nifti_file(nifti_file)", "def add_file(self, filename, UUID):\n self.files[UUID] = Data(filename=filename)", "def register(self, f, mode='w'):\n if hasattr(f, 'write'):\n self._register_stream(f)\n return True\n else:\n f = open(f, mode)\n self._register_stream(f)\n return True", "def upload_minidump(fileobj, event_id):\n with transaction.atomic():\n file = File.objects.create(\n name=event_id,\n type='event.minidump',\n headers={'Content-Type': 'application/x-minidump'},\n )\n\n file.putfile(fileobj)\n\n return MinidumpFile.objects.create(\n file=file,\n event_id=event_id,\n )", "def setFileID(self, fileId: unicode) -> None:\n ...", "def batch_register(filename, test, update_if_exists):\n batch_register = commands.get_client().batch_register(filename, test,\n update_if_exists=update_if_exists)\n click.echo(json.dumps(batch_register, indent=2))", "def add_file_to_instance(self, *, agent_name: str, instance_name: str, file_id: str, file_path: str) -> None:", "def add_monitor(self, fname):\n self.fam.AddMonitor(os.path.join(os.path.dirname(self.name), fname),\n self)\n self.extras.append(fname)", "def register_file(url, filename, component_type=None, known_hash=None):\n fname = _generate_filename(filename, component_type)\n _FILE_REGISTRY[fname] = known_hash\n _FILE_URLS[fname] = url\n return fname", "def test_register_intent_intent_file(self):\n self._test_intent_file(SimpleSkill6())", "def set_id(ctx, id): # pylint: disable=redefined-builtin\n workspace = Workspace(ctx.resolver, directory=ctx.directory, mets_basename=ctx.mets_basename, automatic_backup=ctx.automatic_backup)\n workspace.mets.unique_identifier = id\n workspace.save_mets()", "def registerInMemoryFileData(virtual_filename,data):\n if ( isinstance(data,str) and data.startswith('ondisk://')):\n data = 'ondisk://'+str(_pathlib.Path(data[9:]).resolve())\n _rawfct['ncrystal_register_in_mem_file_data'](virtual_filename,data)", "def from_file(self, register_filepath):\n\n with open(register_filepath,'r') as reg_file:\n for line in reg_file:\n self.store_register(json.loads(line.strip()))", "def file_id():\n return uuid4()", "def add_jasmin_file(self, jfile):\n self._jasmin_files.append(jfile)", "def WriteId(self, fileName):\n print 'writing an .id file', fileName\n idhandle = TextFile.TextFile(fileName, 'w')\n idhandle.write('! derived from the file:\\n')\n idhandle.write('! 
' + self.fileName + '\\n')\n atomCounter = 1\n for EACH in self.atomlist:\n if EACH.xeasyatomnumber == None:\n outAtomNumber = str(atomCounter)\n else:\n outAtomNumber = EACH.xeasyatomnumber\n idhandle.write('do ( store2 = ' + outAtomNumber +\\\n ' ) ( resid ' + EACH.residuenumber +\\\n ' and name ' + EACH.atomname[0] + ' )\\n')\n atomCounter = atomCounter + 1", "def _register(self, f_path, regex=False):\r\n if regex != False:\r\n files = glob.glob(f_path)\r\n results = []\r\n for f in files:\r\n term, ret = self._register(f)\r\n results.append(ret)\r\n return False, Counter(results)\r\n\r\n if not os.path.isfile(f_path):\r\n self.logger.error(\"%s does not exist or is not a file.\" % f_path)\r\n return False, False\r\n\r\n # Register to the indexing server\r\n name = os.path.basename(f_path)\r\n replicate_to = self.idx_server_proxy.register(self.id, name)\r\n\r\n if replicate_to == False:\r\n self.logger.debug(\"Main node and replica are done for metadata.\")\r\n return False, False\r\n\r\n # Register locally\r\n local_files = self.parent.local_files\r\n local_files[name] = os.path.abspath(f_path)\r\n self.parent.local_files = local_files\r\n\r\n # Replicate files\r\n if replicate_to:\r\n self.logger.debug(\"Replicate to %s\", replicate_to)\r\n for peer_id in replicate_to:\r\n peer_sock = self._get_peer_sock(peer_id)\r\n if peer_sock:\r\n peer_exch = MessageExchanger(peer_sock)\r\n peer_action = dict(type='replicate', name=name)\r\n peer_exch.pkl_send(peer_action)\r\n peer_exch.file_send(f_path)\r\n\r\n return False, True", "def register(cls, file_type, load_callable):\n cls._file_types[file_type] = load_callable" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a batch of Minids from an RFM or file stream. Batch Register can either be passed a file to a Remote File Manifest JSON file, or streamed where each entry in the stream is an RFM formatted dict.
def batch_register(filename, test, update_if_exists): batch_register = commands.get_client().batch_register(filename, test, update_if_exists=update_if_exists) click.echo(json.dumps(batch_register, indent=2))
[ "def register(filename, title, locations, replaces, test, json):\n mc = commands.get_client()\n kwargs = parse_none_values([\n ('replaces', replaces, None),\n ('locations', locations.split(',') if locations else None, []),\n ])\n minid = mc.register_file(filename, title=title, test=test, **kwargs)\n print_minids(minid.data, output_json=json)", "def registerUsageRecords(mapping, logdir, ctxFactory, batch_size=DEFAULT_BATCH_SIZE):\n urmap = createFileEPMapping(mapping)\n if not urmap: # no registration to perform\n log.msg(\"No registrations to perform\")\n return defer.succeed(None)\n\n log.msg(\"Registrations to perform: %i files\" % len(urmap))\n log.msg(\"Retrieving registration hrefs (service endpoints)\")\n d = createEPRegistrationMapping(mapping.keys(), ctxFactory)\n\n d.addCallback(_performURRegistration, urmap, logdir, ctxFactory, batch_size)\n archive = lambda _, logdir, urmap : archiveUsageRecords(logdir, urmap)\n d.addCallback(archive, logdir, urmap)\n return d", "def run(self, args):\n filenameList = sum([glob(filename) for filename in args.files], [])\n root = args.butler.repository._mapper.root\n context = self.register.openRegistry(root, create=args.create, dryrun=args.dryrun)\n with context as registry:\n for infile in filenameList:\n if self.isBadFile(infile, args.badFile):\n self.log.info(\"Skipping declared bad file %s\" % infile)\n continue\n fileInfo, hduInfoList = self.parse.getInfo(infile)\n if self.isBadId(fileInfo, args.badId.idList):\n self.log.info(\"Skipping declared bad file %s: %s\" % (infile, fileInfo))\n continue\n if self.register.check(registry, fileInfo):\n self.log.warn(\"%s: already ingested: %s\" % (infile, fileInfo))\n outfile = self.parse.getDestination(args.butler, fileInfo, infile)\n ingested = self.ingest(infile, outfile, mode=args.mode, dryrun=args.dryrun)\n if not ingested:\n continue\n for info in hduInfoList:\n self.register.addRow(registry, info, dryrun=args.dryrun, create=args.create)\n self.register.addVisits(registry, dryrun=args.dryrun)", "def make_registration_list_importer(input_file, config, statsd, conn, metadata_conn,\n run_id, metrics_root, metrics_run_root, **kwargs):\n common_params = _common_config_params(config)\n return RegistrationListImporter(conn, metadata_conn, run_id, metrics_root, metrics_run_root, config.db_config,\n input_file, logging.getLogger('dirbs.import'), statsd, **common_params, **kwargs)", "def from_file(self, register_filepath):\n\n with open(register_filepath,'r') as reg_file:\n for line in reg_file:\n self.store_register(json.loads(line.strip()))", "def _register(self, f_path, regex=False):\r\n if regex != False:\r\n files = glob.glob(f_path)\r\n results = []\r\n for f in files:\r\n term, ret = self._register(f)\r\n results.append(ret)\r\n return False, Counter(results)\r\n\r\n if not os.path.isfile(f_path):\r\n self.logger.error(\"%s does not exist or is not a file.\" % f_path)\r\n return False, False\r\n\r\n # Register to the indexing server\r\n name = os.path.basename(f_path)\r\n replicate_to = self.idx_server_proxy.register(self.id, name)\r\n\r\n if replicate_to == False:\r\n self.logger.debug(\"Main node and replica are done for metadata.\")\r\n return False, False\r\n\r\n # Register locally\r\n local_files = self.parent.local_files\r\n local_files[name] = os.path.abspath(f_path)\r\n self.parent.local_files = local_files\r\n\r\n # Replicate files\r\n if replicate_to:\r\n self.logger.debug(\"Replicate to %s\", replicate_to)\r\n for peer_id in replicate_to:\r\n peer_sock = 
self._get_peer_sock(peer_id)\r\n if peer_sock:\r\n peer_exch = MessageExchanger(peer_sock)\r\n peer_action = dict(type='replicate', name=name)\r\n peer_exch.pkl_send(peer_action)\r\n peer_exch.file_send(f_path)\r\n\r\n return False, True", "def batch_register_users(l):\n user = acquire_user(\n {\"usertype\": \"teacher\", \"hit_batch_reg_limit\": {\"$in\": [False, None]}}\n )\n if not user:\n l.interrupt()\n login(l, username=user[\"username\"], password=user[\"password\"])\n simulate_loading_classroom_page(l)\n res = l.client.get(GROUPS_ENDPOINT)\n groups = res.json()\n if len(groups) < 1:\n l.interrupt()\n group = random.choice(groups)\n with l.client.post(\n GROUPS_ENDPOINT + \"/\" + group[\"gid\"] + \"/batch_registration\",\n files={\n \"csv\": (\n \"batch_registration_template.csv\",\n open(BATCH_REGISTRATION_CSV_FILENAME, \"rb\"),\n \"text/csv\",\n )\n },\n headers={\"X-CSRF-Token\": l.client.cookies[\"token\"],},\n name=GROUPS_ENDPOINT + \"/[gid]/batch_registration\",\n catch_response=True,\n ) as res:\n if res.status_code == 200:\n res.success()\n elif (\n res.status_code == 403\n and \"You have exceeded the maximum\" in res.json()[\"message\"]\n ):\n res.success()\n db.users.find_one_and_update(\n {\"username\": user[\"username\"]},\n {\"$set\": {\"hit_batch_reg_limit\": True}},\n )\n else:\n res.failure(\"Failed to batch register users: \" + str(res.json()))\n logout(l)\n release_user(user[\"username\"])\n l.interrupt()", "def set_registers(self, cpu_id, names, values):\n if not isinstance(cpu_id, baseinteger):\n raise TypeError(\"cpu_id can only be an instance of type baseinteger\")\n if not isinstance(names, list):\n raise TypeError(\"names can only be an instance of type list\")\n for a in names[:10]:\n if not isinstance(a, basestring):\n raise TypeError(\\\n \"array can only contain objects of type basestring\")\n if not isinstance(values, list):\n raise TypeError(\"values can only be an instance of type list\")\n for a in values[:10]:\n if not isinstance(a, basestring):\n raise TypeError(\\\n \"array can only contain objects of type basestring\")\n self._call(\"setRegisters\",\n in_p=[cpu_id, names, values])", "def startBatch(self, reader=None):", "def process_jats_stream(\n fname: str,\n stream: bytes,\n temp_dir: str=BASE_TEMP_DIR\n):\n temp_input_dir = os.path.join(temp_dir, 'input')\n temp_input_file = os.path.join(temp_input_dir, fname)\n\n os.makedirs(temp_dir, exist_ok=True)\n os.makedirs(temp_input_dir, exist_ok=True)\n\n with open(temp_input_file, 'wb') as outf:\n outf.write(stream)\n\n output_file = process_jats_file(temp_input_file)\n\n if os.path.exists(output_file):\n with open(output_file, 'r') as f:\n contents = json.load(f)\n return contents\n else:\n return []", "def registerSerializationTypes(self, *serialTypes):", "def process_batch(self, batch: List[Dict[str, Any]]) -> List[Response]:\n pass", "def registerSerializationTypes(self, *serialTypes):\n self.serialTypes = serialTypes", "def npu_generate_register_command_stream(npu_op_list: List[NpuOperation], accelerator: NpuAccelerator) -> List[int]:\n from . 
import register_command_stream_generator\n\n return register_command_stream_generator.generate_register_command_stream(npu_op_list, accelerator)", "def __init__(self, *files):\n self._files = []\n self._opened_here = []\n for f in files:\n self.register(f)", "def upload_new_requests(self, request_ids):\n\n insert_query = \"\"\"\n BEGIN\n IF NOT EXISTS (\n SELECT * \n FROM {table}\n WHERE AlienRegistrationNumber = ?\n )\n\n BEGIN\n INSERT INTO {table}\n (AlienRegistrationNumber, Status)\n VALUES (?, '{status}')\n END\n END\n \"\"\".format(table=db_tablename, status=Statuses.new)\n\n cursor = self.connection.cursor()\n for id in request_ids:\n cursor.execute(insert_query, id, id)\n self.connection.commit()", "def register_raw_file(self, nifti_file):\n\n # insert the NIfTI file\n self.fetch_and_insert_nifti_file(nifti_file)", "def register_all(self, resources):\n for resource in resources:\n self.register_resource(resource)", "def register_many_numbers_multiple_times(ejabberd_gateway, registration_url):\n # Creating temporary table on local postgres for testing purpose.\n query = \" CREATE TABLE registered_users_new (users_id text NOT NULL,authorization_code text, local_time timestamp without time zone, \" \\\n \"server_time timestamp without time zone, username text, status text);\"\n LocalQueryHandler.execute(query)\n\n registration_count = 10\n phone_numbers = ['919718626363', '918447860079', '919868177790', '919899224493', '918802035676', '919953936440',\n '919717261060', '919560488236', '919509925532', '919873503029']\n\n for count in range(0, registration_count):\n print '*******************************************' + str(count) + '****************************************'\n\n for number in phone_numbers:\n query = \"INSERT INTO registered_users_new (users_id, local_time) VALUES (%s, %s);\"\n variables = (number + '@@' + str(count), datetime.now(), )\n LocalQueryHandler.execute(query, variables)\n\n # Hitting ejabberd server on register url.\n response = requests.get(ejabberd_gateway + registration_url, params={'phone_number': number})\n response = str.strip(str.split(str(response.text), ',')[0].split(':')[1])\n\n query = \" SELECT * FROM registered_users WHERE username = %s;\"\n variables = (str.strip(number) + '@mm.io', )\n rds_results = MasterQueryHandler.get_results(query, variables)\n\n # Copy the result in local postgres table if registration was successful\n # else create a new entry for missed out entry.\n if rds_results:\n query = \" UPDATE registered_users_new SET (authorization_code, server_time, username, status)=(%s, %s, %s, %s) \" \\\n \"WHERE users_id=%s;\"\n variables = (rds_results[0]['authorization_code'], rds_results[0]['created_at'],\n rds_results[0]['username'], response[1:-1], number + '@@' + str(count))\n else:\n query = \" UPDATE registered_users_new SET (server_time, username, status)=(%s, %s, %s) \" \\\n \"WHERE users_id=%s;\"\n variables = (datetime.now(), str.strip(number) + '@mm.io', response[1:-1])\n\n LocalQueryHandler.execute(query, variables)\n\n time.sleep(10) # wait for 10 seconds\n\n query = \" DROP TABLE registered_users_new;\"\n LocalQueryHandler.execute(query)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send message to recipient using SMS, on failure call on_error.
def send_sms(self, recipient, message, on_error): # Shorten the message because SMS is precious if len(message) > 320: sms_message_to_send = message[:317] + "..." else: sms_message_to_send = message send = self.sms.send_sms(sender=recipient, jid=recipient, message=sms_message_to_send) send.addErrback(on_error)
[ "def send_msg(to_number, message):\r\n smsagent = SMSAgent();\r\n smsagent.send_msg(to_number, message)", "def _google_voice_sms(phone_number, msg):\r\n try:\r\n _voice.send_sms(phone_number, msg)\r\n except googlevoice.ValidationError:\r\n # I seem to be getting these but the text messages still go\r\n # through (eventually).\r\n pass", "def send_sms(self, recipient, text):\r\n\r\n old_mode = None\r\n with self._modem_lock:\r\n try:\r\n try:\r\n # cast the text to a string, to check that\r\n # it doesn't contain non-ascii characters\r\n try:\r\n text = str(text)\r\n\r\n # uh-oh. unicode ahoy\r\n except UnicodeEncodeError:\r\n\r\n # fetch and store the current mode (so we can\r\n # restore it later), and override it with UCS2\r\n csmp = self.query(\"AT+CSMP?\", \"+CSMP:\")\r\n if csmp is not None:\r\n old_mode = csmp.split(\",\")\r\n mode = old_mode[:]\r\n mode[3] = \"8\"\r\n\r\n # enable hex mode, and set the encoding\r\n # to UCS2 for the full character set\r\n self.command('AT+CSCS=\"HEX\"')\r\n self.command(\"AT+CSMP=%s\" % \",\".join(mode))\r\n text = text.encode(\"utf-16\").encode(\"hex\")\r\n\r\n # initiate the sms, and give the device a second\r\n # to raise an error. unfortunately, we can't just\r\n # wait for the \"> \" prompt, because some modems\r\n # will echo it FOLLOWED BY a CMS error\r\n result = self.command(\r\n 'AT+CMGS=\\\"%s\\\"' % (recipient),\r\n read_timeout=1)\r\n\r\n # if no error is raised within the timeout period,\r\n # and the text-mode prompt WAS received, send the\r\n # sms text, wait until it is accepted or rejected\r\n # (text-mode messages are terminated with ascii char 26\r\n # \"SUBSTITUTE\" (ctrl+z)), and return True (message sent)\r\n except errors.GsmReadTimeoutError, err:\r\n if err.pending_data[0] == \">\":\r\n self.command(text, write_term=chr(26))\r\n return True\r\n\r\n # a timeout was raised, but no prompt nor\r\n # error was received. i have no idea what\r\n # is going on, so allow the error to propagate\r\n else:\r\n raise\r\n\r\n # for all other errors...\r\n # (likely CMS or CME from device)\r\n except Exception, err:\r\n\r\n # whatever went wrong, break out of the\r\n # message prompt. if this is missed, all\r\n # subsequent writes will go into the message!\r\n self._write(chr(27))\r\n\r\n # rule of thumb: pyGSM is meant to be embedded,\r\n # so DO NOT EVER allow exceptions to propagate\r\n # (obviously, this sucks. 
there should be an\r\n # option, at least, but i'm being cautious)\r\n return None\r\n\r\n finally:\r\n\r\n # if the mode was overridden above, (if this\r\n # message contained unicode), switch it back\r\n if old_mode is not None:\r\n self.command(\"AT+CSMP=%s\" % \",\".join(old_mode))\r\n self.command('AT+CSCS=\"GSM\"')", "def send_sms(self, number, message):\n with self.session():\n res = self._send('sms sendtxt %s' % number)\n if 'Start sms input' in res:\n self._write('%s\\n.\\n' % message)", "def send_SMS(to, body):\n\ttry:\n\t\tclient.messages.create(to=to, from_=NUMBER, body=body)\n\texcept TwilioRestException as e:\n\t\tif e.code == 21211:\n\t\t\traise APIexception(code=7)\n\t\telse:\n\t\t\traise e", "def send_sms(self, num, text):\n message = self.client.messages.create(\n body = text, # optional\n to = num,\n from_ = TWILIO_NUMBER\n )\n #print message.sid", "def sendSMS(message):\n sns_client = boto3.client('sns', 'us-west-2')\n mobileNumber = getContactDetails()\n response = sns_client.publish(PhoneNumber=mobileNumber, Message=message)", "def sms(phone_number, msg, provider=None):\r\n if provider is None:\r\n assert _voice is not None, \\\r\n 'You must login to Google Voice using google_voice_login before ' \\\r\n 'sending an sms without the provider parameter.'\r\n if provider is not None:\r\n assert _smtp is not None, \\\r\n 'You must login to an SMTP server using gmail_login or by ' \\\r\n 'passing an smtplib.SMTP instance via the smtp parameter' \\\r\n 'before sending an sms with the provider parameter.'\r\n\r\n if provider is None:\r\n _google_voice_sms(phone_number, msg)\r\n else:\r\n to = '%s@%s' % (phone_number, providers.get(provider, provider))\r\n _send_email('', to, 'To: %s\\r\\n\\r\\n%s' % (to, msg))", "def send_sms(self, recipient, text, max_messages = 255): \n pdus = gsmpdu.get_outbound_pdus(text, recipient)\n if len(pdus) > max_messages:\n raise ValueError(\n 'Max_message is %d and text requires %d messages' %\n (max_messages, len(pdus))\n )\n\n for pdu in pdus:\n self._send_pdu(pdu)\n return True", "def send_sms_with_callback_token(user, mobile_token, **kwargs):\n base_string = kwargs.get('mobile_message', api_settings.PASSWORDLESS_MOBILE_MESSAGE)\n\n try:\n\n if api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER:\n # We need a sending number to send properly\n if api_settings.PASSWORDLESS_TEST_SUPPRESSION is True:\n # we assume success to prevent spamming SMS during testing.\n return True\n\n from twilio.rest import Client\n twilio_client = Client(os.environ['TWILIO_ACCOUNT_SID'], os.environ['TWILIO_AUTH_TOKEN'])\n twilio_client.messages.create(\n body=base_string % mobile_token.key,\n to=getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME),\n from_=api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER\n )\n return True\n else:\n logger.debug(\"Failed to send token sms. Missing PASSWORDLESS_MOBILE_NOREPLY_NUMBER.\")\n return False\n except ImportError:\n logger.debug(\"Couldn't import Twilio client. Is twilio installed?\")\n return False\n except KeyError:\n logger.debug(\"Couldn't send SMS.\"\n \"Did you set your Twilio account tokens and specify a PASSWORDLESS_MOBILE_NOREPLY_NUMBER?\")\n except Exception as e:\n logger.debug(\"Failed to send token SMS to user: {}. \"\n \"Possibly no mobile number on user object or the twilio package isn't set up yet. 
\"\n \"Number entered was {}\".format(user.id, getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME)))\n logger.debug(e)\n return False", "def send_sms(\n self,\n number: hug.types.text='+79994413746',\n content: hug.types.text=\"Your Order is ready\",\n ):\n state = notifications_rpc.send_sms(number, content)\n return state", "def execute(self):\n try:\n self.api.send_direct_message(screen_name=self.screen_name,\n text=self.text)\n except TweepError:\n raise InvalidUserException\n self.send_message_status = True", "def send_text_message(txt):\n twilio.messages.create(to=to_number, from_=from_number, body=txt)", "def handler(request, context):\n\n sns = boto3.client('sns')\n phone = request.get('phone')\n country_code = request.get('country_code')\n sender_id = request.get('sender_id')\n message = request.get('message')\n\n if not phone:\n return {\"message\": \"Missing phone number.\"}\n\n if not message:\n return {\"message\": \"Missing message body.\"}\n\n try:\n phone_number = parse_phone_number(phone, country_code)\n except (InvalidNumberException, ParsingException) as e:\n return {\"message\": str(e)}\n\n print('Sending sms message: \"{}\" to: {}'.format(message, phone_number))\n\n message_attributes = {}\n if sender_id:\n message_attributes = {\n 'AWS.SNS.SMS.SenderID': {\n 'DataType': 'String',\n 'StringValue': sender_id,\n }\n }\n\n sns.publish(\n PhoneNumber=phone_number,\n Message=message,\n MessageAttributes=message_attributes,\n )\n\n return {\"success\": True}", "def do_send_sms_spryng(num, text):\n cleaned_num = cleaned_number(num)\n if not is_valid_phone_number(cleaned_num):\n return False\n\n if not text:\n return False\n\n if len(text) > app.config[\"MAX_SMS_LENGTH\"]:\n return False\n\n url = app.config[\"SPRYNG_API_URL\"]\n token = app.config[\"SPRYNG_API_BEARER_TOKEN\"]\n spryng_headers = {\n \"Accept\": \"application/json\",\n \"Authorization\": f\"Bearer {token}\",\n }\n\n spryng_request = {\n \"body\": text,\n \"encoding\": \"auto\",\n \"originator\": app.config[\"SPRYNG_MSG_ORIGINATOR\"],\n \"recipients\": [num],\n \"route\": app.config[\"SPRYNG_API_ROUTE\"],\n # \"scheduled_at\": \"now\" #optional, default: \"now\"\n }\n print(\"spryng request\", url, spryng_request, spryng_headers)\n response = requests.post(url, headers=spryng_headers, json=spryng_request)\n rj = response.json()\n print(\"spryng response\", response, \"json-content:\", rj)\n\n if not response.ok:\n return False\n\n #success = rj[\"status\"] in [\"scheduled\", \"pending\", \"delivered\"]\n # could also be \"failed\"\n success = True\n #assume success, FIXME!\n\n return success", "def at_msg_send(self, text=None, to_obj=None, **kwargs):\r\n pass", "def send_single_sms(harambee, message):\n try:\n send_sms(harambee.candidate_id, message)\n Sms.objects.create(harambee=harambee, message=message, sent=True, time_sent=timezone.now())\n except (ValueError, httplib2.ServerNotFoundError):\n Sms.objects.create(harambee=harambee, message=message)", "def _createMessage(number):\n client = TwilioRestClient(TWILIO_ACCOUNT, TWILIO_TOKEN)\n\n sms = client.sms.messages.create(\n to=number,\n from_=FROM_NUMBER,\n body=MESSAGE)\n\n if sms.status == 'failed':\n return 'Message failed to send, please try again.'", "def send_smses():\n smses = Sms.objects.filter(sent=False)\n fail = 0\n\n for sms in smses:\n if fail < 3:\n try:\n message = unicode(sms.message, \"utf-8\")\n send_sms(sms.harambee.candidate_id, message)\n except (ValueError, httplib2.ServerNotFoundError):\n fail += 1\n continue\n\n sms.sent = 
True\n sms.time_sent = timezone.now()\n try:\n sms.save()\n except IntegrityError:\n fail += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send msg to user, if no ack received try SMS.
def wait_for_ack(self, user, msg): def sms_failed(failure): self.messagehandler.send_chat( user, "My SMS attempt failed, sorry.") def no_ack(recipient): self.messagehandler.send_chat( recipient, "I didn't get a response so I'll try SMS.") self.send_sms(recipient, msg, on_error=sms_failed) def ack(message): message.reply('Receipt acknowledged.') self.messagehandler.send_chat( user, msg + '\n\nPlease acknowledge by replying to this.', response_cb=ack, no_response_cb=no_ack, timeout=30)
[ "def send(self, msg):\n\n # send length\n send_only_msg(len(pickle.dumps(msg)))\n\n # wait for confirmation\n if (recv_only_msg() == 'ack'):\n\n # send msg\n send_only_msg(msg)\n\n # if didn't recv ack for some reason\n else:\n raise ConnectionRefusedError", "def on_send_btn_pressed(self):\n\n msg = self.ui.send_text.text()\n\n if msg:\n self.client_instance.send_msg(to_user=self.contact_username, content=msg)\n time.sleep(0.1)\n self.update_chat()\n self.ui.send_text.clear()", "def send_msg(to_number, message):\r\n smsagent = SMSAgent();\r\n smsagent.send_msg(to_number, message)", "def notify_by_sms(msgs):\n enabled = is_sms_notification_enabled()\n if not enabled:\n return\n pass", "def send_message(msg, uid) :\n #get chatpartner\n try:\n u = Users.objects.get(uid=uid)\n except Users.DoesNotExist :\n return -1 ##Could not send because user not found. maybe diconnected?\n \n partner=str(u.partner)\n #put it in his queue\n try:\n p = Users.objects.get(uid=partner)\n except Users.DoesNotExist :\n return -1 ##Could not send because user not found. maybe diconnected?\n\n #put msg in the msg queue\n p.mq0=msg\n\n return partner", "def handle_message(self, mess):\n if mess.x and mess.x.defaultUri:\n # Check if message is delayed.\n if mess.x.defaultUri in ['jabber:x:delay', 'urn:xmpp:delay']:\n self.logprint(\"Skipping delayed message.\")\n return\n\n # Ignore status message about anonymous room.\n if mess.x.defaultUri == 'http://jabber.org/protocol/muc#user':\n if mess.x.status and mess.x.status.getAttribute('code') == '100':\n self.logprint(\"Anonymous room message, skipping.\")\n return\n\n fromstr = mess.getAttribute('from')\n fromjid = jid.JID(fromstr)\n\n # Check if user is in ignore list\n if fromjid in self.ignorelist:\n return\n\n # Groupchat messages have different from jid\n if mess['type'] in ['groupchat', 'chat']:\n (fromstr, sep, nick) = fromstr.rpartition('/')\n else:\n nick = fromjid.user + '@' + fromjid.host\n\n # Skip if message is sent by shoutbridge\n #print \"Nick is\", nick\n fromuser = self.roster.get(nick)\n if fromuser and fromuser.name == self.login or nick == self.current_nick:\n self.logprint(\"Got message from myself, skipping...\")\n return\n\n # Get message body.\n body = getElStr(mess.body)\n\n # Send message.\n user = self.get_from_roster(nick, fromstr)\n if body and mess['type'] in ['message', 'groupchat', None]: \n self.logprint(\"Relaying message to shoutbox:\", user.id, user.jid, user.name, \"\\n\", body)\n self.update_last_time()\n self.shoutbox.sendShout(user, body)\n elif body and mess['type'] == 'chat':\n self.logprint(\"Received priavate message:\", user.id, user.jid, user.name, \"\\n\", body)\n self.update_last_time()\n shout = Shout(0, user.id, nick, body, time.time())\n self.trigger_plugin_event('XmppDirectMessage', shout)\n else:\n self.logprint(\"Unknown message:\", mess.toXml())\n\n # Trigger handleXmppMessage event\n mess['nick'] = nick\n shout = Shout(0, 0, nick, body, time.time())\n self.trigger_plugin_event('XmppMessage', mess.toXml())\n self.trigger_plugin_event('Message', shout)", "def send_sms(self, number, message):\n with self.session():\n res = self._send('sms sendtxt %s' % number)\n if 'Start sms input' in res:\n self._write('%s\\n.\\n' % message)", "def send_sms(\n self,\n number: hug.types.text='+79994413746',\n content: hug.types.text=\"Your Order is ready\",\n ):\n state = notifications_rpc.send_sms(number, content)\n return state", "def send_sms(self, recipient, text):\r\n\r\n old_mode = None\r\n with self._modem_lock:\r\n 
try:\r\n try:\r\n # cast the text to a string, to check that\r\n # it doesn't contain non-ascii characters\r\n try:\r\n text = str(text)\r\n\r\n # uh-oh. unicode ahoy\r\n except UnicodeEncodeError:\r\n\r\n # fetch and store the current mode (so we can\r\n # restore it later), and override it with UCS2\r\n csmp = self.query(\"AT+CSMP?\", \"+CSMP:\")\r\n if csmp is not None:\r\n old_mode = csmp.split(\",\")\r\n mode = old_mode[:]\r\n mode[3] = \"8\"\r\n\r\n # enable hex mode, and set the encoding\r\n # to UCS2 for the full character set\r\n self.command('AT+CSCS=\"HEX\"')\r\n self.command(\"AT+CSMP=%s\" % \",\".join(mode))\r\n text = text.encode(\"utf-16\").encode(\"hex\")\r\n\r\n # initiate the sms, and give the device a second\r\n # to raise an error. unfortunately, we can't just\r\n # wait for the \"> \" prompt, because some modems\r\n # will echo it FOLLOWED BY a CMS error\r\n result = self.command(\r\n 'AT+CMGS=\\\"%s\\\"' % (recipient),\r\n read_timeout=1)\r\n\r\n # if no error is raised within the timeout period,\r\n # and the text-mode prompt WAS received, send the\r\n # sms text, wait until it is accepted or rejected\r\n # (text-mode messages are terminated with ascii char 26\r\n # \"SUBSTITUTE\" (ctrl+z)), and return True (message sent)\r\n except errors.GsmReadTimeoutError, err:\r\n if err.pending_data[0] == \">\":\r\n self.command(text, write_term=chr(26))\r\n return True\r\n\r\n # a timeout was raised, but no prompt nor\r\n # error was received. i have no idea what\r\n # is going on, so allow the error to propagate\r\n else:\r\n raise\r\n\r\n # for all other errors...\r\n # (likely CMS or CME from device)\r\n except Exception, err:\r\n\r\n # whatever went wrong, break out of the\r\n # message prompt. if this is missed, all\r\n # subsequent writes will go into the message!\r\n self._write(chr(27))\r\n\r\n # rule of thumb: pyGSM is meant to be embedded,\r\n # so DO NOT EVER allow exceptions to propagate\r\n # (obviously, this sucks. 
there should be an\r\n # option, at least, but i'm being cautious)\r\n return None\r\n\r\n finally:\r\n\r\n # if the mode was overridden above, (if this\r\n # message contained unicode), switch it back\r\n if old_mode is not None:\r\n self.command(\"AT+CSMP=%s\" % \",\".join(old_mode))\r\n self.command('AT+CSCS=\"GSM\"')", "def send_sms(self, recipient, message, on_error):\n\n # Shorten the message because SMS is precious\n if len(message) > 320:\n sms_message_to_send = message[:317] + \"...\"\n else:\n sms_message_to_send = message\n send = self.sms.send_sms(sender=recipient,\n jid=recipient,\n message=sms_message_to_send)\n send.addErrback(on_error)", "def send_message(self, msg):\n if not self.simulate:\n self.tracker.sendMessage(msg)", "def execute(self):\n try:\n self.api.send_direct_message(screen_name=self.screen_name,\n text=self.text)\n except TweepError:\n raise InvalidUserException\n self.send_message_status = True", "def post_send_message(self, msg):\r\n pass", "def at_msg_send(self, text=None, to_obj=None, **kwargs):\r\n pass", "def send_smses():\n smses = Sms.objects.filter(sent=False)\n fail = 0\n\n for sms in smses:\n if fail < 3:\n try:\n message = unicode(sms.message, \"utf-8\")\n send_sms(sms.harambee.candidate_id, message)\n except (ValueError, httplib2.ServerNotFoundError):\n fail += 1\n continue\n\n sms.sent = True\n sms.time_sent = timezone.now()\n try:\n sms.save()\n except IntegrityError:\n fail += 1", "def send_user_msg_to_scheduler(self, user_msg):\n self.user_msg_queue.put(user_msg, block=True)", "def sendChatMessage(self, msg):\n\n\t\tself.__serverHandler.sendChatMessage(msg)", "def send_immediate_sms_task(harambee, message):\n try:\n send_immediate_sms(harambee.candidate_id, message)\n Sms.objects.create(harambee=harambee, message=message, sent=True, time_sent=timezone.now())\n except (ValueError, httplib2.ServerNotFoundError):\n Sms.objects.create(harambee=harambee, message=message)", "def _start_msg(message):\n # add chat to the list of chats for broadcast\n self._cids.add(message.chat.id)\n # send answer\n self._bot.send_message(message.chat.id, \"Hello there!\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instantiate the Users, Airline, Review, and RequestFactory instances needed for testing.
def setUp(self): self.factory = RequestFactory() self.user = User.objects.create(username='Abdullah', email='abd@gmail.com', password="Abdullah's passwd") self.airline = Airline.objects.create(title='United Airlines', verified=True) self.review = Review.objects.create(airline=self.airline, headline='Great Service!', rating=5, comments='Highly Recommended!', price=697.87)
[ "def setUp(self):\n self.factory = APIRequestFactory()\n self.user = User.objects.create(\n username='testuser',\n email='loser@loser.com',\n password='password'\n )\n self.user_pref = {\n 'user':self.user,\n 'age':'y, a, s',\n 'gender':'m, f',\n 'size': 'xl, m'\n }\n self.preference = models.UserPref.objects.create(**self.user_pref)", "def setUp(self):\n self.factory = RequestFactory()\n self.client = Client()\n self.user = User.objects.create_user(\"username\", \"user@example.com\", \"123456\")\n self.project_owner = self.user.projectowner\n self.app = ApplicationModel.objects.create(\n name='app',\n client_type=ApplicationModel.CLIENT_CONFIDENTIAL,\n authorization_grant_type=ApplicationModel.GRANT_CLIENT_CREDENTIALS,\n user=self.user\n )\n self.token = AccessToken.objects.create(user=self.user,\n token='tokstr',\n application=self.app,\n expires=now() + timedelta(days=365),\n scope=\"read write\")", "def test_all_betterself_factories(self):\n factories_to_test = [\n ActivityFactory,\n ActivityLogFactory,\n DailyProductivityLogFactory,\n IngredientFactory,\n IngredientCompositionFactory,\n MeasurementFactory,\n SleepLogFactory,\n SupplementFactory,\n SupplementLogFactory,\n SupplementStackFactory,\n SupplementStackCompositionFactory,\n WellBeingLogFactory,\n FoodFactory,\n FoodLogFactory,\n ]\n\n for factory in factories_to_test:\n created_instance = factory()\n self.assertIsNotNone(created_instance)", "def setUp(self):\n self.dog1 = models.Dog.objects.create(**dog1)\n self.dog2 = models.Dog.objects.create(**dog2)\n self.dog3 = models.Dog.objects.create(**dog3)\n self.dog4 = models.Dog.objects.create(**dog4)\n\n # set up token authorized user\n self.user = models.User.objects.create(**user1)\n self.userpref = models.UserPref.objects.get_or_create(\n user=self.user)\n self.client = APIClient()\n self.token, created = Token.objects.get_or_create(user=self.user)\n self.client.credentials(\n HTTP_AUTHORIZATION='Token ' + self.token.key\n )\n\n # set up anonymous user\n self.user_unauthorized = AnonymousUser()\n self.client_unauthorized = APIClient()\n\n # initial likes and dislikes for authorized user\n self.dog1.update_userdog_status(status=LIKED, user=self.user)\n self.dog2.update_userdog_status(status=DISLIKED, user=self.user)\n self.dog3.update_userdog_status(status=UNDECIDED, user=self.user)", "def setUp(self):\n self.user = User.objects.create_user(\n 'test_case_user',\n 'test_case_email@example.com',\n 'test_case_password'\n )\n login_url = '/sign-in/'\n login_response = self.client.post(\n login_url,\n data={\n 'username': self.user.username,\n 'password': 'test_case_password'\n },\n format='json'\n )\n self.auth_token = login_response.data['access']\n self.url = '/api/v1/posts/'\n self.request = Request(FACTORY.get(self.url))\n self.post = Post.objects.create(\n title='Test Case Post Title',\n content='Test Case Post Content'\n )", "def setUp(self):\n User.objects.create_user(\n username=SELF_USERNAME, \n password=ORIGINAL_PASSWORD)\n User.objects.create_user(\n username=NORMAL_USER_USERNAME, \n password=ORIGINAL_PASSWORD)\n User.objects.create_superuser(\n username=ADMIN_USERNAME, \n password=ORIGINAL_PASSWORD)\n self.client = APIClient()", "def setUp(self):\n # Setup user\n self.user = User.objects.create_user(\n 'testuser', 'testemail@x.com', 'testpass')\n self.user.save()\n # Setup the obstacles\n for path in TESTDATA_MOVOBST_PATHS:\n # Stationary obstacle\n (stat_lat, stat_lon, _) = path[0]\n stat_gps = GpsPosition()\n stat_gps.latitude = stat_lat\n stat_gps.longitude = 
stat_lon\n stat_gps.save()\n stat_obst = StationaryObstacle()\n stat_obst.gps_position = stat_gps\n stat_obst.cylinder_radius = 100\n stat_obst.cylinder_height = 200\n stat_obst.save()\n # Moving obstacle\n mov_obst = MovingObstacle()\n mov_obst.speed_avg = 40\n mov_obst.sphere_radius = 100\n mov_obst.save()\n for pt_id in range(len(path)):\n # Obstacle waypoints\n (wpt_lat, wpt_lon, wpt_alt) = path[pt_id]\n gpos = GpsPosition()\n gpos.latitude = wpt_lat\n gpos.longitude = wpt_lon\n gpos.save()\n apos = AerialPosition()\n apos.altitude_msl = wpt_alt\n apos.gps_position = gpos\n apos.save()\n wpt = Waypoint()\n wpt.name = 'test waypoint'\n wpt.order = pt_id\n wpt.position = apos\n wpt.save()\n mov_obst.waypoints.add(wpt)\n mov_obst.save()\n # Setup test objs\n self.client = Client()\n self.loginUrl = reverse('auvsi_suas:login')\n self.obstUrl = reverse('auvsi_suas:obstacles')\n logging.disable(logging.CRITICAL)", "def setUp(self):\n\n self.customers = dict()\n self.customers[\"james_bowen\"] = Customer.objects.create(\n name='James Bowen')\n self.customers[\"amanda-arias\"] = Customer.objects.create(\n name='Amanda Arias')\n self.customers[\"beau-jeppesen\"] = Customer.objects.create(\n name='Beau Jeppesen')", "def setUp(self):\n self.app_test = app.test_client()\n self.db = TaskRepository(app)\n self.response = self.app_test.get('/')", "def setUp(self):\n self.u = User.objects.create_user(\"Mittens\", \"mittensthekitten@gmail.com\", \"meow\")", "def setUp(self):\n self.subject = \"Is Django the best?\"\n self.constant_time = timezone.now()\n self.vote_taken = self.constant_time\n self.ayes = 10\n self.nays = 15\n self.vote = Votes.objects.create(\n subject=self.subject,\n vote_taken=self.vote_taken,\n ayes=self.ayes,\n nays=self.nays\n )\n self.vote_serializer = VoteSerializer(self.vote)", "def setUpClass(cls):\n disconnect()\n connect('mongoenginetest', host='mongomock://localhost')\n user_1 = {'_id': '6062dabdbbb2c3f109a049e9', 'username': 'James', 'password': '123'}\n user_2 = {'_id': '605e4c879cd28ab1cca591b8', 'username': 'Bob', 'password': 'cba'}\n # pass = abc\n login_user = {'_id': '605f583d605104259229a39c',\n 'password': '$2b$12$NQMhiNagI637rUuwJ2RCCeXik0eVeErtQ6npGYUg7/O3NFeq4EUkO', 'username': 'Joe'}\n\n User(username=user_1['username'], _id=user_1['_id'], password=user_1['password']).save()\n User(username=user_2['username'], _id=user_2['_id'], password=user_2['password']).save()\n User(username=login_user['username'], _id=login_user['_id'], password=login_user['password']).save()\n print(\"setupClass\")", "def setUp(self):\n self.queries = [QueryFactory.create() for q in range(5)]", "def setUp(self):\n User = get_user_model()\n user = User.objects.create_user(\n email='testUser@email.com',\n password='testpassword123',\n )\n self.customer = CustomerProfile.objects.create(\n user=user,\n first_name='Mohamed',\n last_name='Ayman',\n mobile_number='0111111111',\n address='11th St.',\n country='Egypt',\n date_of_birth=datetime.date(2000, 1, 1),\n )\n self.req = self.create_request()\n self.admin = CustomerAdmin(CustomerProfile, admin_site=admin.site)", "def setUp(self) -> None:\n self.pid = create_project(name=\"test_project\")\n self.fid = create_filter(pid=self.pid)", "def setUp(self):\n self.app_test = app.test_client()\n self.db = TaskRepository(app)\n self.response = self.app_test.get('/insert')", "def setUp(self):\n # Create a test user as an attribute of ProjectTestCase, for future use\n # (we're not testing user or profile methods here)\n self.user1 = 
User.objects.create_user(\n 'user_test1',\n 'test1@test.com',\n 'groupthink')\n\n self.user2 = User.objects.create_user(\n 'user_test2',\n 'test2@test.com',\n 'groupthink2')\n\n # Create a dummy project (with no M2M relationships) that will be associated with user1\n project1 = Project.objects.create(\n title=\"Test Project 1\",\n creator=self.user1,\n scrum_master=self.user2,\n ta=self.user2,\n tagline=\"Test Tagline 1\",\n content=\"Test Content 1\",\n avail_mem=True,\n sponsor=False,\n slug=\"test1-slug\",\n resource=\"Test Resource 1\")\n\n # Create a membership object between user1 and project1\n Membership.objects.create(user=self.user1, project=project1, invite_reason='')", "def setUp(self):\n\n db.session.rollback()\n User.query.delete()\n Quiz.query.delete()\n Question.query.delete()\n\n add_questions('test/test_quiz_questions.csv', 'test family')\n\n u=User(**USER_DATA)\n db.session.add(u)\n db.session.commit()\n\n self.client = app.test_client()", "def __init__(self, requestType, urlParams, payload={}, files={}):\n self.baseUrl = os.getenv(\"TEST_RAIL_BASE_URL\")\n self.username = os.getenv(\"TEST_RAIL_USERNAME\")\n self.password = os.getenv(\"TEST_RAIL_API_KEY\") # or password\n self.requestType = requestType\n self.urlParams = urlParams\n self.headers = {'Content-type': 'application/json'}\n self.payload = payload\n self.response = False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An airline is still visible after the user who posted it is deleted from the database.
def test_see_airline_after_user_deletion(self): pass
[ "def delete_by_user(self):\n\n self.availability_flag = False\n self.save()", "def deactivatable(self):\n\n\t\traise foundations.exceptions.ProgrammingError(\n\t\t\"{0} | '{1}' attribute is not deletable!\".format(self.__class__.__name__, \"deactivatable\"))", "def revoke_dataset_access(self):\n self.is_active = False\n self.save()", "def isDeleted():", "def delete_ride():", "def at_object_delete(self):\r\n return True", "def unlink(self, cr, uid, ids, context=None):\n for rec in self.browse(cr, uid, ids, context=context):\n if rec.state != 'draft':\n raise osv.except_osv(_('Warning!'),_('You cannot delete an employee additional allowance which is in %s state.')%(rec.state))\n return super(hr_additional_allowance, self).unlink(cr, uid, ids, context)", "def delete_lega(self, event):\n self.Disable()\n ViewDeleteLega(parent=self, title='Delete Lega')", "def is_deletion(self):\n return self.act == \"-\"", "def consider_deactivation(self):\n pass", "def desactiver(self):\n self.est_activee = False", "def remove_from_timeline(self, instance, user):\r\n ctype = ContentType.objects.get_for_model(instance)\r\n try:\r\n timeline = self.get(content_type=ctype, object_id=instance.pk, user=user)\r\n timeline.delete()\r\n return True\r\n except self.model.DoesNotExist:\r\n raise ObjectDoesNotExist('Failure trying to delete {instance}'.format(instance=instance.title))", "def attiva(request, key):\n att=get_object_or_404(key_tab,pk=key)\n utente = User.objects.get(pk=att.utente.id)\n utente.is_active=True\n utente.save()\n att.delete()\n info={'titolo':'Account attivato con successo','corpo':'Da adesso in poi puoi navigare liberamente'}\n return render(request,'GestioneUtenti/avviso.html',info)", "def deactiveHeadTrack(self):\n\t\t# get objects\n\t\tplayer = scene.objects['Link']\n\t\tplayer.rig['armConstraint'] = False", "def cancel_Line(self):\n if self.line.itemA is not None and self.line.itemB is not None:\n if self.line.disconnect_item_filters() is SUCCESS: pass \n# else: self.line = None\n self.line.disconnect_connector()\n self.removeItem(self.line)\n self.scene_update_sig.emit()\n self.line = None \n return", "def deactivatable(self):\n\n\t\treturn self.__deactivatable", "def deleted(self, user, **payload):\n pass", "def destructionDuVaisseau(self):\n canva.delete(self.vaisseaux)\n self.vivant=False\n return 'perdu'", "def test_activity_deleted(self):\n liturgy = LiturgyFactory.create(\n date=date(2015, 2, 21), service__time=time(9, 30)\n )\n l_id = liturgy.id\n liturgy.delete()\n self.assertEqual(Activity.objects.filter(liturgy_id=l_id).count(), 0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Can check if key is not in client
def test_not_in(self): client = etcd.Client() client.get = mock.Mock(side_effect=etcd.EtcdKeyNotFound()) result = '/testkey' not in client self.assertEquals(True, result)
[ "def validate_client_key(self, client_key, request):\r\n log.debug('Validate client key for %r', client_key)\r\n if not request.client:\r\n request.client = self._clientgetter(client_key=client_key)\r\n if request.client:\r\n return True\r\n return False", "def assertResponseDoesNotContainKey(self, response, key):\n response_json = json.loads(response.content)\n self.assertNotIn(key, response_json)", "def negotiate(self, my_key, his_key):\n pass", "def validate_client_key(self, client_key, request):\n try:\n self.lti_content_source = LtiLmsPlatform.objects.get(consumer_key=client_key)\n except LtiLmsPlatform.DoesNotExist:\n log.exception('Consumer with the key {} is not found.'.format(client_key))\n return False\n return True", "def have_agent_key(self):\n\t\treturn self.agent_key != ''", "def tienes_chats_client():\n for key in users_Info:\n if users_Info[str(key)] != 'Null':\n return True\n\n return False\n\n ##### CLIENTE 0 #####", "def _check_key(self, key):\n\n locked_ckt = circuit.Circuit.specify_inputs(key, self.nodes, self.output_names)\n miter = circuit.Circuit.miter(locked_ckt, self.oracle_ckt)\n\n s = z3.Solver()\n s.add(miter.outputs()[\"diff\"] == True)\n\n return s.check() == z3.unsat", "def check_key(self, key):\n\t\treturn len(key) == KEY_LEN", "def dummy_client(self):\n log.debug('[Warning] Client key validation failed, getting dummy client then')\n return None", "def api_checkkey():\n config = GitReceiveConfig.load(g.cintf.db)\n key = parse_public_key(request.args['key'])\n if not key[:2] in [k[:2] for k in config.auth_keys]:\n return 'unauthorized'\n return 'ok'", "def test_blacklisted_key(self):\n\n key = \"1QCC5-W30DP-FGFRG-K1JEF-QUDLP\"\n KeyValidator.add_key_to_blacklist(key)\n key_status = KeyValidator.check_key(key)\n self.assertEqual(KeyStatus.BLACKLISTED, key_status)", "def test_missing_keys(self):\n self.assertEqual(None, tsig_keys.check({}))", "def has_key(key):\n from bempp.api.utils import pool\n\n return key in pool._DATA", "def assertResponseKeyNotEquals(self, response, key, value):\n response_json = json.loads(response.content)\n self.assertIn(key, response_json)\n self.assertNotEquals(response_json[key], value)", "def __validate_key(self, key_message):\n result = (PeerIdentifier.POLLING_REQUEST_STRING == key_message)\n logging.debug(f\"{self.__class__.__name__} | __validate_key | {key_message} | {result}\")\n return result", "def isKey(self,arg):\n if arg in list(self.keyInfo.keys()):\n return True\n else:\n return False", "def item_in_game_socket(item, game):\r\n\r\n if item.game.gameID is not game.gameID:\r\n return False\r\n return True", "def exclude(consumer, keys):\n keys = aslist(keys)\n while True:\n info = (yield)\n if all(i not in info for i in keys):\n consumer.send(info)\n else:\n continue", "def __contains__(self, key):\n return key in self._keys" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads a .wav file, returning the sound data. If stereo, converts to mono by averaging the two channels
def load_wav(fname): rate, data = wavfile.read(fname) if len(data.shape) > 1 and data.shape[1] > 1: data = data[:, 0] + data[:, 1] # stereo -> mono length = data.shape[0] / rate print(f"Loaded sound file {fname}.") return rate, data, length
[ "def mono_only(wavfile):\n open_wav = wave.open(wavfile, 'r')\n wav_channels = open_wav.getnchannels()\n file_mono = open_wav\n if wav_channels != 1:\n open_wav = wave.open(wavfile)\n wav_frames = open_wav.getnframes()\n wav_read = open_wav.readframes(wav_frames)\n wavpairs = zip(wav_read[::2], wav_read[1::2])\n wav_right = wavpairs[1::2]\n wav_left = wavpairs[0::2]\n #average_wav = (wav_left + wav_right) / 2\n file_mono = wav_left\n return file_mono", "def read_wav(filename):\n s,fs = load(filename) # scipy reads int\n s = np.array(s)/float(max(abs(s)))\n s = add_wgn(s) # Add jitter for numerical stability\n return fs,s", "def read_wav(wavfile):\n assert os.path.isfile(wavfile), \\\n 'ERROR: wivefile file %s does not exist' % wavfile\n\n x, fs, enc = audiolab.wavread(wavfile)\n if len(x.shape) >= 2:\n x = x[:, 0] # Make mono\n\n assert fs == 44100, \\\n \"ERROR: File %s is not sampled at 44100 Hz\" % wavfile\n\n return x, fs", "def read(filename):\n if not quiet: print 'loading', filename\n\n file = wave.open(filename, \"r\")\n file_frames = file.readframes(file.getnframes())\n\n snd = Sound()\n\n # check for mono files\n if file.getnchannels() == 1:\n file_frames = audioop.tostereo(file_frames, file.getsampwidth(), 0.5, 0.5)\n snd.params = file.getparams()\n snd.params = (2, snd.params[1], snd.params[2], snd.params[3], snd.params[4], snd.params[5])\n else:\n snd.params = file.getparams()\n\n snd.data = file_frames\n\n return snd", "def read_wav(file):\n f=wave.open(file,\"r\")\n raw_data=f.readframes(f.getnframes())\n array=np.fromstring(raw_data,np.short)\n array.shape=-1,2\n array=array.T.astype(float)[0]\n samplerate=f.getframerate()\n f.close()\n return feature_normalize(array),samplerate", "def Load16bitMonoSampleWAV(*args):\n return _wiimote.wiimote_Load16bitMonoSampleWAV(*args)", "def sndreadmono(path, channel=None):\n samples, sr = _sndfileio.sndread(path)\n if channel is None:\n channel = config.CONFIG['monochannel']\n monosamples = _sndfileio.asmono(samples, channel)\n return monosamples, sr", "def wiimote_Load16bitMonoSampleWAV(*args):\n return _wiimote.wiimote_Load16bitMonoSampleWAV(*args)", "def wav_data(mono_wav):\n the_data = fft.data_from_file(mono_wav)\n return the_data", "def waveread(audio_name, separateChannels = True):\n # open wave file read binary\n if (audio_name.split(\".\")[-1] == \"wav\") | (audio_name.split(\".\")[-1] == \"WAV\"):\n wr = wave.open(audio_name, 'rb')\n else:\n print('wrong file format! 
only WAVE files are supported')\n return\n\n sampling_rate = wr.getframerate()\n chunk = wr.getnframes() # length of auidiofile\n bin_array = wr.readframes(chunk) # binary wave information\n channel_nr = wr.getnchannels()\n quantization = wr.getsampwidth()\n\n if channel_nr == 1 and quantization == 1: # 8 bit mono\n # binary to array with numbers\n data = np.array(struct.unpack('BB' * chunk, bin_array))\n # has values from 0 to 255, which have to be changed to [-1:1]\n wave_array = data-np.mean(data)\n wave_array = wave_array / np.max(abs(wave_array))\n\n left_channel = None\n right_channel = None\n mono_channel = wave_array\n if separateChannels:\n wave_array = de_interlace_channel(wave_array)\n\n return wave_array, sampling_rate, left_channel, right_channel, mono_channel\n\n elif channel_nr == 1 and quantization == 2: # 16 bit mono\n # binary to array with numbers\n data = np.array(struct.unpack('h' * int((len(bin_array) / 2)), bin_array))\n wave_array = data / np.max(abs(data))\n\n left_channel = None\n right_channel = None\n mono_channel = wave_array\n\n if separateChannels:\n wave_array = de_interlace_channel(wave_array)\n\n return wave_array, sampling_rate, left_channel, right_channel, mono_channel\n\n elif channel_nr == 2 and quantization == 1: # 8 bit stereo\n # binary to array with numbers\n data = np.array(struct.unpack('BB' * chunk, bin_array))\n # has values from 0 to 255, which have to be changed to [-1:1]\n wave_array = data - np.mean(data)\n\n # Define channels and avoid clipping\n left_channel = wave_array[::2] / np.max(abs(wave_array))\n right_channel = wave_array[1::2] / np.max(abs(wave_array))\n mono_channel = left_channel + right_channel\n mono_channel = mono_channel / np.max(abs(mono_channel))\n wave_array = wave_array / np.max(abs(wave_array))\n if separateChannels:\n wave_array = de_interlace_channel(wave_array)\n\n return wave_array, sampling_rate, left_channel, right_channel, mono_channel\n\n elif channel_nr == 2 and quantization == 2: # 16 bit stereo\n # stero handling\n data = np.array(struct.unpack('hh' * chunk, bin_array))\n\n left_channel = data[::2] / np.max(abs(data))\n right_channel = data[1::2] / np.max(abs(data))\n mono_channel = left_channel + right_channel\n mono_channel = mono_channel / np.max(abs(mono_channel))\n wave_array = data / np.max(abs(data))\n if separateChannels:\n wave_array = de_interlace_channel(wave_array)\n\n return wave_array, sampling_rate, left_channel, right_channel, mono_channel\n\n else:\n print(\"not supported channel number or quantization\")\n\n return", "def read_wav_file(path):\n \n # Parse the input file's extension\n extension = os.path.splitext(path)[1]\n \n # Load the WAV file and set the output parameters\n try:\n if extension.lower() == '.wav':\n [fs, x] = wavfile.read(path)\n num_samples = len(x)\n try: \n num_channels = x.shape[1]\n except:\n num_channels = 1\n data = [] \n for channel in range(num_channels):\n if num_channels == 1:\n data.append(x.astype(np.float32)/float(2**15))\n else:\n data.append(x[0:,channel].astype(np.float32)/float(2**15))\n else:\n raise IOError(\"unknown file type\")\n return (-1,-1,-1)\n except: \n IOError(\"file not found\")\n return (-1,-1,-1)\n \n # Return the output data (tuple)\n return (data, fs, num_channels, num_samples)", "def read_wave(path):\n with contextlib.closing(wave.open(path, \"rb\")) as wf:\n num_channels = wf.getnchannels()\n assert num_channels == 1\n sample_width = wf.getsampwidth()\n assert sample_width == 2\n sample_rate = wf.getframerate()\n assert sample_rate in 
(8000, 16000, 32000, 48000)\n pcm_data = wf.readframes(wf.getnframes())\n return pcm_data, sample_rate", "def load_wav_16k_mono(filename):\n file_contents = tf.io.read_file(filename)\n wav, sample_rate = tf.audio.decode_wav(\n file_contents,\n desired_channels=1\n )\n wav = tf.squeeze(wav, axis=-1)\n sample_rate = tf.cast(sample_rate, dtype=tf.int64)\n wav = tfio.audio.resample(wav, rate_in=sample_rate, rate_out=16000)\n return wav", "def ReadWaveFile(filename):\n f = wave.open(filename, 'rb')\n waveInfo = dict()\n waveInfo[\"nchannels\"] = f.getnchannels()\n waveInfo[\"framerate\"] = f.getframerate()\n waveInfo[\"nframes\"] = f.getnframes()\n waveInfo[\"samplewidth\"] = f.getsampwidth()\n str_data = f.readframes(waveInfo[\"nframes\"])\n\n # np.short is 16-bit length\n wave_data = np.fromstring(str_data, dtype=np.short) \n wave_data = wave_data.astype(np.float16)\n wave_data /= 32768.0\n wave_data.shape = -1, waveInfo[\"nchannels\"]\n return waveInfo, wave_data", "def load_wav_to_array(full_path):\n sampling_rate, data = read(full_path)\n return data.astype(np.float32), sampling_rate", "def import_signal(file):\n sample_rate, signal = wav.read(file)\n return sample_rate, signal", "def _read_audio(self, path:str):\n try:\n extension = path.split('.')[-1]\n sound = AudioSegment.from_file(path)\n self.audio = np.array(sound.get_array_of_samples())\n self.original_rate = sound.frame_rate\n if len(self.audio.shape) != 1:\n self.audio = self.audio[:,0]\n \n self.audio_duration = len(self.audio) / self.original_rate\n\n except Exception as e:\n print('please insert a valid audio file')\n print(e)\n raise ValueError('please insert a valid audio file')", "def load_wav_file(file_path: str) -> Sample:\n # Read the .wav file\n rate, data = wavfile.read(file_path)\n\n # cut the number of data points to the chosen power of 2\n data = np.array(data[:N])\n\n if rate != CD_QUALITY_RATE:\n raise ValueError(\n f'Invalid file rate, found {rate} Hz but '\n f'expected {CD_QUALITY_RATE} Hz')\n\n # Extract file meta data\n file_name = Path(file_path).name\n raw_phoneme = file_name.split('_')[0]\n try:\n phoneme = Phoneme(raw_phoneme.lower())\n except ValueError:\n raise ValueError(f'Invalid phoneme \"{raw_phoneme.lower()}\"')\n\n # Instantiate the associated data object\n return Sample(phoneme, file_name, data)", "def analyze_wav(wavfile, amp_threshold, plot=False):\r\n rate = 0\r\n audio_data = 0\r\n\r\n if wavfile.is_file():\r\n try:\r\n rate, audio_data = scipy.io.wavfile.read(wavfile.resolve())\r\n\r\n length_s = audio_data.shape[0] / rate\r\n channels = len(audio_data.shape)\r\n\r\n if channels == 2:\r\n mode = 'Stereo'\r\n channel_l = audio_data[:, 0]\r\n channel_r = audio_data[:, 1]\r\n amp_avg = np.average((abs(channel_l) + abs(channel_r)) / 2)\r\n length_s_above_threshold = audio_data[abs(audio_data) > amp_threshold].shape[0] / rate / 2\r\n elif channels == 1:\r\n mode = 'Mono'\r\n amp_avg = np.average(abs(audio_data))\r\n length_s_above_threshold = audio_data[abs(audio_data) > amp_threshold].shape[0] / rate\r\n\r\n if plot:\r\n plot_wav(rate, audio_data)\r\n\r\n return {'mode': mode, 'amp_avg': amp_avg, 'length_s': length_s,\r\n 'length_s_above_threshold': length_s_above_threshold}\r\n except Exception as e:\r\n print(f'There seems to be an issue with file {wavfile}', e)\r\n return False\r\n else:\r\n return False", "def data_load_librosa(filename, duration=None, offset=0.0, sr=22050, mono=True, **kwargs):\n assert type(filename) is str and filename is not None and filename != '', 'filename argument {0} 
/ {1} is invalid'.format(filename, type(filename))\n # assert type(duration) in [float, int], 'duration argument {0} / {1} is invalid'.format(duration, type(duration))\n \n # if args is not None:\n # # args = Namespace()\n # # filename = file\n # # args.duration = 10.0\n # filename = data_get_filename(args)\n # duration = args.duration\n myprint(f'data_load_librosa: loading {filename}') \n y, sr = librosa.load(\n filename, sr=sr, mono=mono,\n offset=offset, duration=duration)\n myprint('data_load_librosa: loaded audio %s samples / %f seconds at rate %d' % (y.shape, y.shape[0]/sr, sr))\n return y, sr" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An input function for training
def train_input_fn(): dataset = data.get_training_dataset() return dataset
[ "def eval_input_fn():\n dataset = tf.data.TFRecordDataset(\"test\")\n dataset = dataset.map(_parse_function)\n dataset = dataset.batch(32)\n return dataset.make_one_shot_iterator().get_next()", "def _create_input_fn(self, label_dimension, batch_size):\n data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)\n data = data.reshape(batch_size, label_dimension)\n # learn y = x\n train_input_fn = numpy_io.numpy_input_fn(\n x={'x': data},\n y=data,\n batch_size=batch_size,\n num_epochs=None,\n shuffle=True)\n eval_input_fn = numpy_io.numpy_input_fn(\n x={'x': data}, y=data, batch_size=batch_size, shuffle=False)\n predict_input_fn = numpy_io.numpy_input_fn(\n x={'x': data}, batch_size=batch_size, shuffle=False)\n\n return train_input_fn, eval_input_fn, predict_input_fn", "def input_fn():\n serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,\n shape=[default_batch_size],\n name='input_example_tensor')\n inputs = {'examples': serialized_tf_example}\n features = parsing_ops.parse_example(serialized_tf_example, feature_spec)\n labels = None # these are not known in serving!\n return InputFnOps(features, labels, inputs)", "def create_input_fn(dataset_builder):\n return _InputFn(dataset_builder)", "def create_train_function(self):\n action_prob = self.model.output\n\n action_one_hot_placeholder = back.placeholder(shape=(None, self.num_actions),\n name=\"action_one_hot\")\n\n discounted_reward_placeholder = back.placeholder(shape=(None, ),\n name='discount_reward')\n\n log_prob = back.sum(action_one_hot_placeholder * back.log(action_prob), axis=1)\n\n loss = back.mean(- log_prob * discounted_reward_placeholder)\n\n adam = keras.optimizers.Adam(lr=self.alpha)\n\n updates = adam.get_updates(params=self.model.trainable_weights,\n loss=loss)\n\n self.train_fcn = back.function(inputs=[self.model.input, action_one_hot_placeholder, discounted_reward_placeholder],\n outputs=[],\n updates=updates)", "def test_numpy_input_fn(self):\n label_dimension = 2\n batch_size = 10\n train_input_fn, eval_input_fn, predict_input_fn = self._create_input_fn(\n label_dimension, batch_size)\n\n self._test_complete_flow(\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n predict_input_fn=predict_input_fn,\n input_dimension=label_dimension,\n label_dimension=label_dimension,\n batch_size=batch_size)", "def compute_function_input(self, node_id):\n valuations = self.state[self.masks[node_id]]\n function_input = state_to_index(valuations, self.base)\n return function_input", "def create_function_evaluation(self, function_input, node_id):\n random_value = 0 if np.random.rand() < self.bias else 1 \n self.functions[node_id][function_input] = random_value", "def train(self, features):", "def serving_input_fn():\n # TODO: Add automatic check fo rthe size of the trained images for use here?\n inputs = {'inputs': tf.placeholder(tf.float32, [None, 784])}\n return tf.estimator.export.ServingInputReceiver(inputs, inputs)", "def eval(self, input):\n\n ## Add bias to input\n input = np.array(input) if type(input) != np.array else input\n input = np.concatenate((input, [-1]))\n input = input.reshape((1, input.size))\\\n\n ## Regression Activations\n if self.activation_type == \"linear\":\n return self.forward(input)[0,0]\n else:\n return 1 if self.forward(input)[0,0] > 0.5 else 0", "def input_fn():\n raw_placeholder_spec = RAW_DATA_METADATA.schema.as_batched_placeholders()\n # remove label key that is not going to be available at seving\n raw_placeholder_spec.pop(LABEL_KEY)\n\n # we are defining 
the feature_column (raw_featutes) and the tensor\n # (receiver_tensors) for the raw data\n raw_input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(\n raw_placeholder_spec)\n raw_features, receiver_tensors, _ = raw_input_fn()\n\n # we are tranforming the raw_features with the graph written by\n # preprocess.py to transform_fn_io.TRANSFORM_FN_DIR and that was used to\n # write the tf records. This helps avoiding training/serving skew\n\n _, transformed_features = (\n saved_transform_io.partially_apply_saved_transform(\n os.path.join(tft_working_dir, transform_fn_io.TRANSFORM_FN_DIR),\n raw_features))\n\n return tf.estimator.export.ServingInputReceiver(transformed_features,\n receiver_tensors)", "def make_train_function(model, **kwargs):\n model._assert_compile_was_called() # pylint:disable=protected-access\n model._configure_steps_per_execution(tf.int64.max) # pylint:disable=protected-access\n opt = ScipyOptimizer(model, **kwargs)\n return opt.train_function", "def input_fn():\n features_placeholders = {}\n for name, t in features.items():\n shape_list = t.get_shape().as_list()\n shape_list[0] = default_batch_size\n shape = tensor_shape.TensorShape(shape_list)\n\n features_placeholders[name] = array_ops.placeholder(\n dtype=t.dtype, shape=shape, name=t.op.name)\n labels = None # these are not known in serving!\n return InputFnOps(features_placeholders, labels, features_placeholders)", "def create_input_fn(split, batch_size):\n\n def input_fn():\n \"\"\"input_fn for tf.estimator.Estimator.\"\"\"\n\n indir = FLAGS.input_dir\n tfrecord = 'train_data*.tfrecord' if split == 'train' else 'validation_data.tfrecord'\n\n def parser(serialized_example):\n\n features_ = {'img': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.string)}\n\n if split != 'train':\n features_['cl_live'] = tf.FixedLenFeature([], tf.string)\n features_['cl_mem'] = tf.FixedLenFeature([], tf.string)\n\n fs = tf.parse_single_example(\n serialized_example,\n features=features_\n )\n\n fs['img'] = tf.reshape(tf.cast(tf.decode_raw(fs['img'], tf.uint8),\n tf.float32) / 255.0, [__vh, __vw, 3])\n fs['label'] = tf.reshape(tf.decode_raw(fs['label'], tf.uint8), [__vh, __vw])\n fs['label'] = tf.cast(tf.one_hot(fs['label'], N_CLASSES), tf.float32)\n if split != 'train':\n fs['cl_live'] = tf.reshape(tf.cast(tf.decode_raw(fs['cl_live'], tf.uint8),\n tf.float32) / 255.0, [__vh, __vw, 3])\n fs['cl_mem'] = tf.reshape(tf.cast(tf.decode_raw(fs['cl_mem'], tf.uint8),\n tf.float32) / 255.0, [__vh, __vw, 3])\n fs['cl_live'] = tf.reshape(tf.image.resize_images(fs['cl_live'],\n (vh, vw)), [vh, vw, 3])\n fs['cl_mem'] = tf.reshape(tf.image.resize_images(fs['cl_mem'],\n (vh, vw)), [vh, vw, 3])\n\n return fs\n\n if split == 'train':\n files = tf.data.Dataset.list_files(indir + tfrecord, shuffle=True,\n seed=np.int64(time()))\n else:\n files = [indir + tfrecord]\n\n dataset = tf.data.TFRecordDataset(files)\n dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(400, seed=np.int64(time())))\n # dataset.shuffle(\n # 400, seed=np.int64(time()), reshuffle_each_iteration=True).repeat()\n # dataset.map(parser, num_parallel_calls=n_cpus() // 2).batch(batch_size if split == 'train' else batch_size // 3)\n # dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(400, seed=np.int64(time())))\n dataset = dataset.apply(tf.data.experimental.map_and_batch(parser,\n batch_size if split == 'train' else batch_size // 3,\n num_parallel_calls=n_cpus() // 2))\n\n dataset = dataset.prefetch(buffer_size=2)\n\n return 
dataset\n\n return input_fn", "def train_one_epoch(self, *args, **kwargs):\r\n raise NotImplementedError", "def mock_model_fn(adv_input, is_train, reuse):\n del is_train, reuse\n return adv_input", "def task2train_and_evaluate_func(task):\n if task == SENTIMENT:\n return train_and_evaluate_sentiment\n if task == POS:\n return train_and_evaluate_pos\n if task == POS_BILSTM:\n return train_and_evaluate_pos_bilstm\n if task == PARSING:\n return train_and_evaluate_parsing\n if task == SLOT_FILLING :\n return train_and_evaluate_slot_filling_MTL\n raise ValueError('Train_and_evaluate is not implemented for %s.' % task)", "def make_input(self, *args, **kwargs):\r\n self.add(input.Input(*args, **kwargs))", "def call_input_function(self, function, params):\n if hasattr(self, '_special_input_function_' + function):\n attr = getattr(self, '_special_input_function_' + function)\n else:\n try:\n attr = getattr(self.vtkInstance, function)\n except AttributeError:\n # Compensates for overload by exploiting the fact that\n # no VTK method has underscores.\n f = function.find('_')\n if f != -1:\n function = function[:f]\n attr = getattr(self.vtkInstance, function)\n\n from init import get_method_signature, prune_signatures\n\n doc = ''\n try:\n # doc = self.provide_output_port_documentation(function)\n doc = self.get_doc(function)\n except:\n doc = ''\n\n setterSig = []\n if doc != '': setterSig = get_method_signature(None, doc, function)\n\n if len(setterSig) > 1:\n prune_signatures(self, function, setterSig)\n\n pp = []\n for j in xrange(len(setterSig)):\n setter = list(setterSig[j][1]) if setterSig[j][1] != None else None\n aux = []\n if setter != None and len(setter) == len(params) and pp == []:\n for i in xrange(len(setter)):\n if setter[i].find('[') != -1:\n del aux[:]\n aux.append(params[i])\n elif setter[i].find(']') != -1:\n aux.append(params[i])\n pp.append(aux)\n else:\n if len(aux) > 0: \n aux.append(params[i])\n else:\n pp.append(params[i]) \n if pp != []:\n params = pp \n attr(*params)\n else: \n attr(*params)\n # print \"Called \",attr,function,params" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The value that was passed to the parameter
def parameter_value(self): return self._param_value
[ "def p_value(self):\n return self.field_control_params[0]", "def get_parameter(self) -> str:\n return self.parameter", "def i_value(self):\n return self.field_control_params[1]", "def get_value(self):\n return self._variable_value", "def get_param(self, name):\n return(self._find_param(name).value)", "def _pyforaComputedValueArg(self):\n return self.computedValue", "def value(self):\r\n return self.atom.value", "def get_value(self, ba, i):\n return ba.in_args[i]", "def getValue(self):\n if self.validator:\n self.validator(self.value)\n return self.value", "def get_value(self):\n return self.node.value()", "def extract(self):\n return self.params.get(self.name, null)", "def get_target_value(self):\n return self.target_value", "def encodedRequestValue(self):\n return self.requestValue", "def get_value_decoded(self): # real signature unknown; restored from __doc__\n pass", "def decode(self):\n return self.arg", "def get_parameter_identifier():", "def get_current_value(self):\r\n return self.curr_val", "def value1(self):\n return self.container['value1']", "def stringVal(self):\r\n return self.token" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unregisters the specified callback function
def unregister_callback(callback_func): if callback_func in _callbacks: _callbacks.remove(callback_func)
[ "def unregister(self, callback):\n for n, reg in enumerate(self.__registry):\n if reg[\"callback\"] == callback:\n del self.__registry[n]\n self.driftwood.log.info(\"Tick\", \"unregistered\",\n callback.__qualname__)", "def remove_callback(self):\n\n\t\tself.callback = None\n\n\t\treturn", "def unregister_event(self, event: CARLAEvent, callback):\n assert isinstance(event, CARLAEvent)\n assert callable(callback)\n\n if event in self.events:\n callbacks = self.events[event]\n callbacks.remove(callback)\n self.events[event] = callbacks\n else:\n print(f'Event {event} not yet registered!')", "def removeCallback(self, f: 'SoCallbackListCB *', userdata: 'void *'=None) -> \"void\":\n return _coin.SoCallbackList_removeCallback(self, f, userdata)", "def unregister_callback(self, callback_class):\n cb = callback_class(self)\n self._checkcbcls(cb)\n current_cb = getattr(\n self, self._getcbattrname(cb._cb_type_string), None)\n if current_cb:\n if (self._needs_delete_callback(current_cb) and\n self._get_num_delete() < 2):\n # We are about to remove the last callback that requires\n # a delete callback.\n _procedural.delpydel(self._e)\n current_cb._cb_set_function(self._e, None)\n self._callbacks.remove(current_cb)\n delattr(self, self._getcbattrname(current_cb._cb_type_string))\n return current_cb", "def unbind(self, event, callback):\r\n self._emitter.unsubscribe(event, callback)", "def remove(self, callback):\n self._listeners.remove(callback)", "def Unregister(\n self,\n func: Callable[..., Any],\n extra_args: Sequence[object] = _EXTRA_ARGS_CONSTANT,\n ) -> None:\n key = self._GetKey(func, extra_args)\n self._UnregisterByKey(key)", "def deregister_observer(self, func):\n self._observers.remove(func)", "def unsubscribe(self, event, callback):\n if event in self._subscribers:\n self._subscribers[event] = [\n x for x in self._subscribers[event] if x != callback]", "def unsubscribe(self, handle, callback=None):\r\n pass", "def removeEventCallback(self, *args) -> \"void\":\n return _coin.SoEventCallback_removeEventCallback(self, *args)", "def deregister(self, func: Address):\n\n addr = self.r2api.get_address(func)\n if addr in self.sims:\n self.sims.pop(addr)\n elif addr in self.hooks:\n self.hooks.pop(addr)", "def del_callbacks(self, hostname, callback=None):\n try:\n if callback:\n cbs = self._cbs[hostname]\n del cbs[callback]\n if not cbs:\n del self._cbs[hostname]\n else:\n del self._cbs[hostname]\n except KeyError:\n pass", "def removeCallback(self, *args):\n return _coin.SoCallbackList_removeCallback(self, *args)", "def removeEventCallback(self, *args):\n return _coin.SoEventCallback_removeEventCallback(self, *args)", "def removeUdpDiscoveryCallback(self, callback):\n\n\t\tfor cb in self.__udpDiscoveryCallbacks:\n\t\t\tif cb is callback:\n\t\t\t\tself.__udpDiscoveryCallbacks.remove(callback)\n\t\tlogging.debug(\"UDP Discovery callback removed\")", "def unregister_callback(self, message_class_type, message_name=None):\n if message_name is None:\n if hasattr(message_class_type, 'MESSAGE_NAME'):\n message_name = message_class_type.MESSAGE_NAME\n else:\n message_name = message_class_type.__name__\n if issubclass(message_class_type, BroadcastMessage):\n binding_params = [message_name]\n self.unregister_binding(binding_params)\n self.unregister_message_name_callback(message_name)", "def remove(self, callback_name, number):\n handler = type(self).script\n if handler:\n handler.del_callback(self.obj, callback_name, number)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends the specified failure information to all registered callback handlers.
def failure_dispatch(failureinfo): global _callbacks for cb in _callbacks: assert isinstance(failureinfo, ValidationFailureInfo) try: cb(failureinfo) except Exception as ex: # If a particular callback throws an exception, we do not want # that to prevent subsequent callbacks from happening, so we # catch and squash this error and write it to stderr import sys sys.stderr.write("ERROR: Dispatch function threw an exception.") sys.stderr.write(str(ex)) sys.stderr.flush()
[ "def _callback_on_error(self):\n try:\n yield\n except:\n self._error_count += 1\n self._do_callback()\n raise", "def notify_failure(self, notify_failure):\n\n self._notify_failure = notify_failure", "def on_failure(self, failure: VerificationFailed) -> None:\n pass", "def on_failure(error):\n if verbose:\n print \"Failed to send email to %s.\"%to\n print \"-\"*70\n print error.getErrorMessage()\n print \"-\"*70\n reactor.stop()\n os.kill(os.getpid(),9) # suicide", "def celery_failure_handler():\n connect_failure_handler()\n try:\n yield\n finally:\n task_failure.disconnect(failure_handler)", "def record_failure(self):\n self.state.record_failure()\n logger.debug(\"Failure recorded\")", "def addConnectionFailedCallback(*args, **kwargs):\n \n pass", "def general_failure(conn, err_msg):\n err_send = err_msg.encode('utf-8')\n send_to_client(conn, b'\\x00', err_send)", "def ebLoopFailed(failure):\n dbg.debug(failure.getBriefTraceback())\n reactor.stop()", "def task_failed(self, task, errors):\n pass", "def test_callbacks(self):\n mock.mock(logger, 'logFailure')\n disp = mock.MockObject()\n handler = rmk_handler.JobHandler(disp, self.job, None)\n task = handler.newTask('spam', 'ham', 'eggs').freeze()\n\n toSend = [\n types.JobStatus(100, 'one'),\n types.JobStatus(150, 'two'),\n types.JobStatus(200, 'three'),\n ]\n expected = toSend[:]\n def watch_func(task, somearg):\n try:\n assert somearg == 'pants'\n assert task.status == expected.pop(0)\n except:\n self._postponeError()\n handler.watchTask(task, watch_func, somearg='pants')\n\n # First callback raises an exception\n d1 = handler.waitForTask(task)\n def blow_up(result):\n raise RuntimeError(\"oops.\")\n d1.addBoth(blow_up)\n\n # Second one should still get the original result\n d2 = handler.waitForTask(task)\n success = []\n def works_ok(result):\n assert result.status == toSend[-1]\n success.append(1)\n d2.addCallback(works_ok)\n d2.addErrback(self._postponeError)\n\n for status in toSend:\n task2 = task.thaw()\n task2.status = status\n handler.taskUpdated(task2)\n\n # Everything above should have been called synchronously, but just to\n # make sure, we've touched \"success\" once the last callback fires.\n assert success\n self._raisePostponed()", "def test_asyncFail(self):\n o = LocalRemoteTest()\n d = o.callRemote(\"fail\")\n def eb(f):\n self.assertTrue(isinstance(f, failure.Failure))\n f.trap(RuntimeError)\n d.addCallbacks(lambda res: self.fail(\"supposed to fail\"), eb)\n return d", "def on_error(self, status_code, data):\n print status_code\n if status_code == 420:\n print \"Being rate-limited: too many calls in too short a time.\"\n time.sleep(60*(2**(self.num_420_errors)))\n self.num420errors += 1\n print (\"Num 420 errors: %s\" % self.num_420_errors)\n if self.num_420_errors > 4:\n self.disconnect()\n elif status_code == 304:\n print \"No new data returned.\"\n elif status_code == 400:\n print \"Invalid Request: check authentication.\"\n elif status_code == 401:\n print \"Unauthorized Credentials.\"\n elif status_code == 403:\n print \"Denied: Update Limit Reached.\"\n elif status_code == 404:\n print \"Invalid URI used.\"\n elif status_code == 406:\n print \"Invalid format in Search request.\"\n elif status_code == 410:\n print \"REST API changed: use v1.1 instead of v.1.\"\n elif status_code == 429:\n print \"Too Many Requests.\"\n elif status_code == 500:\n print \"Twitter Internal Server Error - contact Twitter.\"\n elif status_code == 502:\n print \"Bad Gateway: Twitter down or being upgraded.\"\n elif status_code == 503:\n 
print \"Twitter servers overloaded with requests - try again later.\"\n elif status_code == 504:\n print \"Twitter servers up, but error in stack. Gateway Timeout.\"\n else:\n print \"Non-standard error: investigate further.\"", "async def failure_handler(self, instances):\n log.debug(\n \"The following '%s' instances are failing: %s\",\n self.config['service'], instances\n )\n\n # Select eligible rescuers\n ntw = self.raft.network\n rescuers = [ipv4 for ipv4, uid in ntw.items() if uid not in instances]\n\n # Iterate over all failing instances\n for ifrom in instances:\n\n # Fetch the list of workflows for a given failing instance.\n index = self.memory.key(ifrom, 'workflows', 'instances')\n for wflow in await self.memory.store.smembers(index):\n wflow = wflow.decode('utf-8')\n\n # Get the report shared by the failing instance\n try:\n report = await self.read_report(wflow, ifrom)\n except KeyError:\n log.error(\"Workflow %s memory has been wiped out\", wflow)\n break\n\n shuffle(rescuers)\n report = json.dumps(report, default=serialize_object)\n\n # Send a failover request to a valid, not failing, instance.\n for ito in rescuers:\n request = {\n 'url': 'http://{}:{}/v1/workflow/instances'.format(\n ito, self.api._port\n ),\n 'headers': {'Content-Type': 'application/json'},\n 'data': report\n }\n async with aiohttp.ClientSession() as session:\n async with session.put(**request) as resp:\n if resp.status == 200:\n # `ito` rescuer has taken over the workflow\n break\n else:\n log.error(\"Workflow %s hasn't be rescued properly\", wflow)\n continue\n asyncio.ensure_future(self.clear_report(wflow, ifrom=ifrom))", "def _failure_handler(self):\n self._log.debug('DHCP request failed')\n self._write_ret(ovpn.CC_RET_FAILED)", "def _on_permanent_failure_batch(self):\n logger.info(('Moving permamently %d failed tasks to the '\n 'dead-letter-queue %s.') % (\n len(self._permanent_failures), self._batch_queue.dlq_name))", "def fail (self):\n \n import leoGlobals as g\n \n g.app.unitTestDict[\"fail\"] = g.callerName(2)", "def fail(self):\n if self._check_notified():\n return\n self.result_synchronizer.notify((\"fail\",None))", "def _notify_handlers(self):\n\n # Notify all handlers \n for handler_callback in self._registered_handlers:\n try:\n handler_callback(self._target_position)\n except Exception as e:\n # A receiver failed, catch and move on\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether software version or sample set has changed.
def _changed(self, samples): with self._open('r') as f: if f.attrs['version'] != __version__: return True if not _np.array_equal(f['thetas'], samples): return True return False
[ "def test_no_tracker_file_version_old(self):\n if os.path.exists(self.timestamp_file_path):\n os.remove(self.timestamp_file_path)\n self.assertFalse(os.path.exists(self.timestamp_file_path))\n self.version_mod_time = 0\n expected = not self._IsPackageOrCloudSDKInstall()\n self.assertEqual(\n expected,\n self.command_runner.MaybeCheckForAndOfferSoftwareUpdate('ls', 0))", "def outofdate(self):\n if self.device_version and self.bundle_version:\n try:\n return VersionInfo.parse(self.device_version) < VersionInfo.parse(\n self.bundle_version\n )\n except ValueError as ex:\n logger.warning(\"Module '%s' has incorrect semver value.\", self.name)\n logger.warning(ex)\n return True # Assume out of date to try to update.", "def version_check(self):\n sdk_version = settings.SDK_VERSION\n firmware_date_str = self.firmware_build_date()\n if self.type() != 'electric':\n rospy.logwarn(\"%s %s (%s): Version Check not needed\",\n self.name, self.type(), firmware_date_str)\n return True\n if not firmware_date_str:\n rospy.logerr(\"%s %s: Failed to retrieve version string during\"\n \" Version Check.\", self.name, self.type())\n return False\n firmware_time = self._version_str_to_time(\n firmware_date_str,\n \"current firmware\")\n warn_time = self._version_str_to_time(\n settings.VERSIONS_SDK2GRIPPER[sdk_version]['warn'],\n \"baxter_interface settings.py firmware 'warn'\")\n fail_time = self._version_str_to_time(\n settings.VERSIONS_SDK2GRIPPER[sdk_version]['fail'],\n \"baxter_interface settings.py firmware 'fail'\")\n if firmware_time > warn_time:\n return True\n elif firmware_time <= warn_time and firmware_time > fail_time:\n rospy.logwarn(\"%s %s: Gripper Firmware version built on date (%s) \"\n \"is not up-to-date for SDK Version (%s). Please use \"\n \"the Robot's Field-Service-Menu to Upgrade your \"\n \"Gripper Firmware.\",\n self.name, self.type(),\n firmware_date_str, sdk_version)\n return True\n elif firmware_time <= fail_time and firmware_time > 0.0:\n rospy.logerr(\"%s %s: Gripper Firmware version built on date (%s) \"\n \"is *incompatible* with SDK Version (%s). Please use \"\n \"the Robot's Field-Service-Menu to Upgrade your \"\n \"Gripper Firmware.\",\n self.name, self.type(),\n firmware_date_str, sdk_version)\n return False\n else:\n legacy_str = '1.1.242'\n if self.firmware_version()[0:len(legacy_str)] == legacy_str:\n # Legacy Gripper version 1.1.242 cannot be updated\n # This must have a Legacy Gripper build date of 0.0, \n # so it passes\n return True\n else:\n rospy.logerr(\"%s %s: Gripper Firmware version built on \" \n \"date (%s) does not fall within any known Gripper \"\n \"Firmware Version dates for SDK (%s). Use the \"\n \"Robot's Field-Service-Menu to Upgrade your Gripper \" \n \"Firmware.\",\n self.name, self.type(),\n firmware_date_str, sdk_version)\n return False", "def version_check(self):\n return # TODO implement a different check\n\n if self.threedicore_result_version != self.threedicore_version:\n logger.warning(\n \"[!] threedicore version differ! 
\\n\"\n \"Version result file has been created with: %s\\n\"\n \"Version gridadmin file has been created with: %s\",\n self.threedicore_result_version,\n self.threedicore_version,\n )", "def hasModifiedFiles(self): #This is the prepared files?\n for qpackage in self.getQPackages():\n if qpackage.hasModifiedFiles():\n return True\n return False", "def check_addon_upgrade():\n # Upgrades that require user interaction or to be performed outside of the service\n addon_previous_ver = CmpVersion(G.LOCAL_DB.get_value('addon_previous_version'))\n addon_current_ver = CmpVersion(G.VERSION)\n if not addon_previous_ver or addon_current_ver > addon_previous_ver:\n _perform_addon_changes(addon_previous_ver, addon_current_ver)\n G.LOCAL_DB.set_value('addon_previous_version', str(addon_current_ver))\n return not addon_previous_ver", "def changed(self):\n for filename in _iter_module_files():\n try:\n mtime = os.stat(filename).st_mtime\n except OSError:\n continue\n\n old_time = self.mtimes.get(filename)\n if old_time is None:\n self.mtimes[filename] = mtime\n continue\n elif mtime > old_time:\n getLogger().debug(\"Found code change for: {}\".format(\n filename,\n ))\n return True\n return False", "def is_changed(self, files):\n status = self.run_command(STATUS_COMMAND, files, False, False)\n return self.test_changed(status.output())", "def verify(self):\n self.installed_version = Version(VERSION)\n\n return check_version(self.installed_version, self.operator, self.version)", "def test_1020(self):\n assert d1_common.system_metadata.are_equivalent_pyxb(self.sm_pyxb, self.sm_pyxb)", "def validate_version(self):\n valid_vers = self.rdb.list_available('product_version')\n if self.opts.oo_version:\n if not self.opts.oo_version in valid_vers:\n self.logger.error('You have specified an invalid version: '\n '%s is not one of: %s' %\n (self.opts.oo_version, ', '.join(valid_vers)))\n self.problem = True\n return False\n return True", "def check_version(self):\n global check, error_details\n for row_index, row in self.primer_df.iterrows():\n if (row['Version'] is not None) and (not isinstance(row['Version'], float)) and (\n not isinstance(row['Version'], int)):\n check += 1\n error = \"Version number not a valid entry, see row %s in file\" % (row_index + 4)\n error_details.append(error)", "def test_versions(self):\n versions = self._project.versions()\n self.assertTrue(\"0.1\" in versions)", "def test_no_tracker_file_version_recent(self):\n if os.path.exists(self.timestamp_file_path):\n os.remove(self.timestamp_file_path)\n self.assertFalse(os.path.exists(self.timestamp_file_path))\n self.version_mod_time = time.time()\n self.assertEqual(\n False, self.command_runner.MaybeCheckForAndOfferSoftwareUpdate('ls', 0))", "def do_inputs_need_upgrade(inputs_dir):\n # Not every code revision requires an update, so just hard-code the last\n # revision that required an update.\n inputs_version = get_input_version(inputs_dir)\n return StrictVersion(inputs_version) < StrictVersion(last_required_update)", "def is_sdk_version_outdated():\n latest_version = get_latest_sdk_version()\n if LooseVersion(__version__) < latest_version:\n return latest_version, True\n else:\n return latest_version, False", "def check_capture_kit(self):\n appt = self.sample.udf['Sequencing Analysis']\n captb = self.sample.udf['Capture Library version']\n if appt[0:3] == 'EXX' and captb == 'NA':\n self.log.write(\" FAIL: 'Capture Library version' is set to NA and has to be changed!\")\n self.all_right = False", "def needsUpdate(program):\r\n _registry = 
registry.getRegistry()\r\n dependencyTable = registry.getDependencyTable(availablePrograms.getAvailablePrograms())\r\n programsToCheck = dependencyTable[program][\"depends-on\"]\r\n programsToCheck.append(program)\r\n for programToCheck in programsToCheck:\r\n myPermissions = permissions.getPermissions(programToCheck)\r\n if not permissions.getLastUpdateTime(myPermissions) == _registry[programToCheck][\"last-update-time\"] and not permissions.getLastUpdateTime(myPermissions) == None:\r\n return True\r\n return False", "def _is_any_file_changed(self, mtimes):\n\n for filename in self._iter_module_files():\n try:\n mtime = os.stat(filename).st_mtime\n except IOError:\n continue\n old_time = mtimes.get(filename, None)\n if old_time is None:\n mtimes[filename] = mtime\n elif mtime > old_time:\n logger.info('模块已更新')\n return 1\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Archive an existing cache file.
def _archive(self): # to archive the existing cache file archive_dir = _os.path.join(self._cache_dir, 'archive') try: if not _os.path.isdir(archive_dir): _os.mkdir(archive_dir) except OSError: yield ('Archiving failed... cache file %s will be ' 'overwritten.' % self._filename) yield else: yield 'Targeting subdirectory: %s.' % archive_dir try: from datetime import datetime except ImportError: yield ('Archiving failed... cache file %s will be ' 'overwritten.' % self._filename) yield else: name_archived = self._filename[:-3] + '__archive__' name_archived += 'xpsi_version_%s__' % __version__ obj = datetime.now() name_archived += 'datetime__%i.%i.%i__%i.%i.%i' % (obj.day, obj.month, obj.year, obj.hour, obj.minute, obj.second) try: _os.rename(self._filename, _os.path.join(archive_dir, name_archived + '.h5')) except OSError: yield ('Archiving failed... cache file %s will be ' 'overwritten.' % self._filename) else: yield ('Exisiting cache file archived in ' 'subdirectory %s.' % archive_dir) yield None
[ "def archive_file(filepath, archivepath):\n with zipfile.ZipFile(archivepath, 'w', zipfile.ZIP_DEFLATED) as archive:\n archive.write(filepath)", "def archive(self, file):\n with zipfile.ZipFile(file, \"w\") as archive:\n # Write archive members in deterministic order and with deterministic timestamp.\n for filename in sorted(self.files):\n archive.writestr(zipfile.ZipInfo(filename), self.files[filename])", "def archive(self):\n archive_key_parts = ['archive', self.version, self.filename]\n self.pipeline.copy_key(self.key_path, _make_key_path(archive_key_parts))", "def archive_file(*, bucket, filename, contents):\n key = 'archive/{}'.format(filename)\n try:\n boto3.resource('s3').Object(bucket, key).put(Body=contents)\n except botocore.exceptions.BotoCoreError as ex:\n logger.exception(f\"S3-SFTP: Error archiving '{ filename }' as '{ key }'.\")\n else:\n logger.info(f\"S3-SFTP: Archived '{ filename }' as '{ key }'.\")", "def cache_file(self, content, filename):\n out_path = os.path.join(self.dirname, filename)\n\n if os.path.exists(out_path):\n return\n\n with open(out_path, self.mode) as outfile:\n outfile.write(content)", "def archive(self, archive_config: ArchiveConfig) -> None:\n # Ensure this object is not yet in the database\n # If it is, it will already have been archived (File.archived is required)\n if self.id:\n raise ValueError(\"Cannot archive a file twice\")\n\n # Create Archived object with hash to get ID\n # This should be created regardless of whether the archive succeeds\n archived = Archived.create(hash=self.calculate_hash(), size=self.size)\n\n try:\n # Store the file\n archive_config.storage.store(\n local_path=self.path,\n archive_id=str(archived.id),\n password=archive_config.password,\n )\n\n except Exception as e:\n # Null the Archived hash rather than delete it, to prevent it being reused\n archived.hash = \"\"\n archived.save()\n raise ValueError(f\"Unable to archive {self.path}: {e}\")\n\n else:\n # Link archived object to this file\n self.archived = archived\n self.save()", "def archive(self, archive_prefix:str):\r\n self._logger.info(\"archive start\")\r\n\r\n archive_path = os.path.join(archive_prefix, self._timestamp.strftime('%Y/%m-%B/%Y%m%d_%H%M%S'))\r\n \r\n common.utils.s3_archive_raw_files(list(self._processed_files), archive_path)\r\n\r\n self._logger.info(\"archive done\")", "def pigz_writer(archive_file: Path, source_dir: Path):\n temp_archive = archive_file + \".tmp.tar\"\n with tarfile.open(temp_archive, \"w\") as archive:\n archive.add(source_dir, arcname='')\n subprocess.check_call([PIGZ_PATH, temp_archive, \"-9\"])\n os.rename(temp_archive + \".gz\", archive_file)", "def write(self):\r\n try:\r\n with open(self.cachefile, 'wb') as open_cache:\r\n pickle.dump(self.cache, open_cache)\r\n logging.debug('Cache file entries written: filename:cnt: %s:%s', \r\n self.cachefile, len(self.cachefile))\r\n except OSError:\r\n logging.error('Cache file could not be written: %s', self.cachefile)\r\n else:\r\n logging.info('Caching disabled. 
Touching file: %s', self.cachefile)\r\n touch(self.cachefile)", "def archive(self, dest=defaultArchivePath):\n\t\tfrom shutil import copytree # copytree(src, dest)\n\t\tif dest:\n\t\t\tsrc = self._path.getParentFile()\n\t\t\tif not src.isDirectory():\n\t\t\t\tprint \"ERROR: Source directory does not exist.\"\n\t\t\t\treturn\n\t\t\t# src = \"root\"+\"/project\"+\"/sample\"+\"/apa\"+\"/timestamp\"\n\t\t\tapa = src.getParentFile()\n\t\t\tsample = apa.getParentFile()\n\t\t\tproj = sample.getParentFile()\n\t\t\tdestF=jio.File(jio.File(jio.File(jio.File(dest, proj.getName()),sample.getName()),apa.getName()),src.getName())\n\t\t\tif destF.isDirectory():\n\t\t\t\tprint \"ERROR: Destination directory already exists - %s\" % (destF.getAbsolutePath())\n\t\t\t\treturn\n\t\t\tdestF.getParentFile().mkdirs()\n\t\t\tcopytree(src.getAbsolutePath(), destF.getAbsolutePath())\n\t\t\tprint \"%s archived to %s\" % (src, destF)\n\t\t\treturn", "def tarfile_writer(archive_file: Path, source_dir: Path):\n with tarfile.open(archive_file, \"w:gz\") as archive:\n archive.add(source_dir, arcname='')", "def archive(file_or_dir, backup_file, log_file):\n # NB: could use shutil.make_archive() but prefer making a system call\n command = ['tar', '-uf', backup_file, file_or_dir]\n call(command, log_file)", "def download_cache(self, args):\n cwd = os.getcwd()\n os.chdir(self.cache)\n\n wheelhouse_archive = os.path.basename(args.url)\n try:\n if args.bucket is not None:\n self.download_cache_from_amazon(wheelhouse_archive, args.bucket, args.key, args.secret)\n else:\n logging.info('Downloading ' + args.url)\n subprocess.call(['rsync', '-r', '-l', args.url, '.'])\n except:\n os.chdir(cwd)\n raise\n\n wheelhouse_archive_lowercase = wheelhouse_archive.lower()\n if wheelhouse_archive_lowercase.endswith('.tar.gz'):\n logging.info('Unzipping')\n subprocess.call(['tar', '-xzvf', wheelhouse_archive])\n elif wheelhouse_archive_lowercase.endswith('.tar.bz'):\n logging.info('Unzipping')\n subprocess.call(['tar', '-xjvf', wheelhouse_archive])\n elif wheelhouse_archive_lowercase.endswith('.zip'):\n logging.info('Unzipping')\n subprocess.call(['unzip', wheelhouse_archive])\n\n if os.path.isfile(wheelhouse_archive):\n os.remove(wheelhouse_archive)\n os.chdir(cwd)\n logging.info('Done')", "def archive():\n os.chdir(BASE_DIR)\n p = run(\n 'git archive --format=tar.gz -9 --output ' +\n BASE_DIR + '/archive/kehia.tar.gz HEAD'\n )\n _fail_loudly(p)\n click.echo('Created source archive in /archive')", "def archive(self, **kwargs):\n\t\tself.__transact.is_archive = not self.__transact.is_archive\n\t\treturn self.__save()", "def put(self, key, content):\n try:\n file_path = self.__get_file_path(key)\n if not os.path.exists(os.path.dirname(file_path)):\n os.makedirs(os.path.dirname(file_path))\n\n file_obj = open(file_path, \"wb\")\n file_obj.write(content)\n file_obj.close()\n except IOError:\n print(\"CACHE: not able to cache the content\")\n pass", "def create_archive(archive, folder):\n with tarfile.open(archive, \"w:gz\") as tar:\n tar.add(folder, arcname=os.path.basename(folder))", "def write_file(self, name, contents):\n # TODO: find a way to make ZipFile accept a file object.\n zi = zipfile.ZipInfo(name)\n zi.date_time = time.localtime(time.time())[:6]\n zi.compress_type = zipfile.ZIP_DEFLATED\n zi.external_attr = 0777 << 16L\n self.zip_file.writestr(zi, contents)", "def compress_storage(self):\n curr_time = dt.datetime.utcnow().strftime(self.time_format)\n zip_archive = '{}.zip'.format(curr_time)\n\n with 
zipfile.ZipFile(os.path.join(self.storage_path, zip_archive), 'w') as zf:\n for file in glob.iglob(os.path.join(self.storage_path, '*.jpg')):\n zf.write(filename=file)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for hostname, mapped from YANG variable /access_points/access_point/config/hostname (leafref)
def _set_hostname(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="hostname", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='leafref', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """hostname must be of a type compatible with leafref""", 'defined-type': "leafref", 'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="hostname", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='leafref', is_config=True)""", }) self.__hostname = t if hasattr(self, '_set'): self._set()
[ "def _set_hostname(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"hostname\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"hostname must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"hostname\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__hostname = t\n if hasattr(self, '_set'):\n self._set()", "def _set_hostname(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"hostname\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"hostname must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"hostname\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__hostname = t\n if hasattr(self, '_set'):\n self._set()", "def set_hostname(self, name):\n self.update(hostname=name)", "def hostname(self) -> str:\n return typing.cast(\n str,\n self._properties.get(\"hostname\"),\n )", "def configure_hostname():\n check_sudo()\n check_os()\n print_green('INFO: Configure hostname...')\n chn = sudo('cat /etc/hostname').strip()\n nhn = prompt('Set hostname', default=chn, validate=r'[\\w\\.\\-]+')\n ip = prompt('Set host ip', default=socket.gethostbyname(env.host),\n validate=r'^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$')\n sudo('echo \"{}\" > /etc/hostname'.format(nhn))\n comment('/etc/hosts', r'127.0.0.1', use_sudo=True)\n comment('/etc/hosts', r'127.0.1.1', use_sudo=True, backup='')\n append('/etc/hosts', '\\n127.0.0.1\\tlocalhost', use_sudo=True)\n append('/etc/hosts', '127.0.1.1\\t{}'.format(nhn.split('.')[0]), use_sudo=True)\n append('/etc/hosts', '{}\\t{}'.format(ip, nhn), use_sudo=True)\n sudo('hostname -F /etc/hostname')\n print_green('INFO: Configure hostname... 
OK')", "def set_hostname(self, hostname):\n if hostname is None:\n hostname = 'localhost'\n\n self.hostname = hostname\n if not self.check_hostname_domains_consistency():\n raise ValueError('Hostname is not consistent with domains, please, rather use set_domains()')\n\n self.web_props['httpsserver.hostname'] = hostname\n self.web_props['httpsserver.dn'] = 'CN=%s,O=Enigma Bridge Ltd,C=GB' % hostname\n\n leftmost_domain = util.get_leftmost_domain(hostname)\n self.install_props['ca.name'] = 'SystemCA-%s' % leftmost_domain\n self.install_props['ca.dn'] = 'CN=SystemCA-%s,O=Enigma Bridge Ltd,C=GB' % leftmost_domain\n\n # Update another hostname related properties\n if self.do_vpn:\n self.web_props['vpn.email.from'] = 'private-space@%s' % hostname\n self.mail_props['mail.from'] = 'private-space@%s' % hostname\n\n return self.web_props", "def hostname(self):\n return \"host%d\" % (self.host_id)", "def set_hostname(self, hostname):\n self.hostname = hostname\n if os_is_pi() or os_is_linux():\n card = SDCard()\n else:\n raise NotImplementedError\n\n mountpoint = card.root_volume\n # write the new hostname to /etc/hostname\n if not self.dryrun:\n self.system(\n f'echo {hostname} | sudo cp /dev/stdin {mountpoint}/etc/hostname')\n else:\n print()\n print(\"Write to /etc/hostname\")\n print(hostname)\n\n # change last line of /etc/hosts to have the new hostname\n # 127.0.1.1 raspberrypi # default\n # 127.0.1.1 red47 # new\n if not self.dryrun:\n # with open(f'{mountpoint}/etc/hosts', 'r') as f: # read /etc/hosts\n f = sudo_readfile(f'{mountpoint}/etc/hosts')\n # lines = [l for l in f.readlines()][:-1] # ignore the last line\n lines = f[:-1]\n newlastline = '\\n127.0.1.1 ' + hostname + '\\n'\n\n if not self.dryrun:\n new_hostsfile_contents = ''.join(lines) + newlastline\n sudo_writefile(f'{mountpoint}/etc/hosts', new_hostsfile_contents)\n else:\n print()\n print(\"Write to /etc/hosts\")\n print('127.0.1.1 ' + hostname + '\\n')\n\n # Adds the ip and hostname to /etc/hosts if it isn't already there.", "def get_node_hostname(self, key):\n return self._get(key, \"hostname\")", "def hostname(self):\n return self._domain_info.get('hostname')", "def get_host_name(self):\n return self.__get_value(\"agentLevelParams/hostname\")", "def _get_hostname(self) -> str:\n hostname = identity.get_hostname()\n\n if not hostname:\n try:\n hostname = identity.set_hostname()\n except identity.Error as e:\n raise Error(e) from e\n\n return hostname", "def hostname(self):\n return self.__urlsplit.hostname", "def with_hostname(self, hostname):\n return self.__replace(hostname=hostname)", "def _get_hostname(self) -> str:\n hostname = identity.get_hostname()\n\n if not hostname:\n try:\n hostname = identity.set_hostname()\n except identity.Error as e:\n raise IdentityError('hostname') from e\n\n return hostname", "def fix_missing_hostname():\n ssh_client = store.current_appliance.ssh_client\n logger.info(\"Checking appliance's /etc/hosts for its own hostname\")\n if ssh_client.run_command('grep $(hostname) /etc/hosts').rc != 0:\n logger.info(\"Adding it's hostname to its /etc/hosts\")\n # Append hostname to the first line (127.0.0.1)\n ret = ssh_client.run_command('sed -i \"1 s/$/ $(hostname)/\" /etc/hosts')\n if ret.rc == 0:\n logger.info(\"Hostname added\")\n else:\n logger.error(\"Failed to add hostname\")", "def add_hostname(self, private_ip = False, help = \"Hostname of the target EC2 instance\"):\n self._hostname = True\n self._private_ip = private_ip\n if self._bosslet:\n raise Exception(\"Cannot add_hostname and 
add_bosslet\")\n\n if self._private_ip:\n self.add_argument(\"--private-ip\", \"-p\",\n action='store_true',\n default=False,\n help = \"If the hostname is an AWS IP address instead of an EC2 instance name\")\n self.add_argument(\"--bosslet\",\n metavar = \"BOSSLET\",\n choices = list_bosslets(),\n default=None,\n help=\"Bosslet in which the machine is running\")\n self.add_argument(\"hostname\", help = help)", "def host(self, value):\n if self._host:\n raise RuntimeError(\"HostManager already set!\")\n self._host = value", "def send_hostname():\n err = ''\n try:\n cmd = ['hostname']\n result, err = charms.sshproxy._run(cmd)\n except:\n log('command failed: {}'.format(err))\n else:\n string_interface = endpoint_from_flag('string.joined')\n string_interface.publish_string(result)\n clear_flag('osm-vca.joined')", "def host_url(self, host_url):\n if host_url is None:\n raise ValueError(\"Invalid value for `host_url`, must not be `None`\")\n\n self._host_url = host_url" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for transmit_power, mapped from YANG variable /access_points/access_point/radios/radio/config/transmit_power (int8)
def _set_transmit_power(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(9), is_leaf=True, yang_name="transmit-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """transmit_power must be of a type compatible with int8""",
      'defined-type': "int8",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(9), is_leaf=True, yang_name="transmit-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)""",
    })

  self.__transmit_power = t
  if hasattr(self, '_set'):
    self._set()
[ "def set_transmit_power(self, power):\n (status, null) = self.__device.set_transmit_power(int(power,0))\n if(status != 0x01):\n print self.__device.decode_error_status(status)", "def set_tx_power(self, tx_power):\r\n valid_tx_power_values = [-40, -20, -16, -12, -8, -4, 0, 3, 4]\r\n if tx_power not in valid_tx_power_values:\r\n raise ValueError(\"Invalid transmit power value {}. Must be one of: {}\".format(tx_power, valid_tx_power_values))\r\n self.ble_driver.ble_gap_tx_power_set(tx_power)", "def change_txpower():\n\n txpower_response = webcli_command('radioSettings.txPower')\n current_txpower = txpower_response['radioSettings']['txPower']\n print(\"Current Transmit Power: \" + current_txpower)\n\n # rudimentary check to ensure we're actually making a change\n new_txpower = '10dbm'\n if current_txpower == '10dbm':\n new_txpower = '12dbm'\n\n change_txpower_response = webcli_command('radioSettings.txpower=' + new_txpower)\n changed_txpower = change_txpower_response['radioSettings']['txPower']\n print(\"Changed Transmit Power: \" + changed_txpower)", "def get_transmit_power(self):\n (status, power) = self.__device.get_transmit_power()\n self.__device.decode_error_status(status, cmd='get_transmit_power', print_on_error=True)\n return \"%d dBm\" % (power)", "def set_wifi_power(self, standard, wifi_power):\n POWER_VALUES_2G = [\"-1\", \"2\", \"5\", \"8\", \"11\", \"14\", \"17\", \"20\", \"max\"]\n POWER_VALUES_5G = [\"-1\", \"2\", \"5\", \"8\", \"11\", \"14\", \"17\", \"max\"]\n \n # Control of the value to set\n if standard not in self.WIFI_STANDARD_5G \\\n and str(wifi_power) not in POWER_VALUES_2G :\n raise Exception(-5, \\\n \"Unsupported wifi power value for 5GHz '%s'\" % str(wifi_power)) \n elif standard in self.WIFI_STANDARD_5G \\\n and str(wifi_power) not in POWER_VALUES_5G :\n raise Exception(-5, \\\n \"Unsupported wifi power value for 2.4GHz '%s'\" \\\n % str(wifi_power))\n\n # Set the power value\n cmd = 'power local ' + str(wifi_power)\n for radio in ('0','1'):\n self._send_cmd(\"interface dot11radio \" + str(radio))\n self._send_cmd(cmd)\n self._send_cmd(\"exit\") # exit interface", "def set_power(self, power):\n pass", "def set_power(self, power):\r\n self._power = power", "def setRadioChannelAndTx(self, channel, tx_power):\n if channel == None:\n channel = -1\n if tx_power == None:\n tx_power = -1\n\n return self.sendCommand(\"RADIO SET %s %s\\r\\n\" % (channel, tx_power))", "def wifi_power(self, power: object = None) -> object:\n if power is None:\n self._logger.info(\"Retrieving current WiFi radio power... 
[NOT IMPLEMENTED]\")\n raise NotImplementedError(\"Command 'wifi_power' is not implemented yet\")\n if (isinstance(power, str) and power.lower() == 'off') or not power:\n return self.wifi_off()\n raise NotImplementedError(\"Command 'wifi_power(<value>)' is not implemented yet\")", "def SetEncoderSpeed(self, power):\n pwm = int(PWM_MAX * power)\n if pwm > PWM_MAX:\n pwm = PWM_MAX\n\n try:\n self.RawWrite(COMMAND_SET_ENC_SPEED, [pwm])\n except KeyboardInterrupt:\n raise\n except:\n self.Print('Failed sending motor encoder move speed limit!')", "def tx_power(self):\n out = self.__fcobj._execute_transceiver_cmd()\n if self.__swobj.is_connection_type_ssh():\n shintd = ShowInterfaceTransceiverDetail(out)\n tp = shintd.tx_power\n if tp is not None:\n return tp.strip()\n return None\n try:\n table_calibaration = out[\"TABLE_calibration\"][\"ROW_calibration\"]\n if type(table_calibaration) is list:\n table_calibaration = table_calibaration[0]\n table_calibaration_detail = table_calibaration[\"TABLE_detail\"][\"ROW_detail\"]\n if type(table_calibaration_detail) is list:\n table_calibaration_detail = table_calibaration_detail[0]\n txpow = get_key(interfacekeys.TX_POWER, self._SW_VER)\n tp = table_calibaration_detail.get(txpow, None)\n if tp is not None:\n return tp.strip()\n return None\n except KeyError:\n return None", "def getTXPower(wifi):\n try:\n txpower = wifi.wireless_info.getTXPower()\n except IOError, (errno, strerror):\n return None\n else:\n if txpower.fixed:\n fixed = \"=\"\n else:\n fixed = \":\"\n return \"Tx-Power%c%s \" % (fixed, wifi.getTXPower())", "def set_channel_power(self, channel, power):\n assert isinstance(channel, int), \"Channel must be an int\"\n assert isinstance(power, float), \"Power must be a float\"\n\n if power < -2.0 or power > 13.0:\n print(\"Warning: you might be using power outside supported range\")\n\n # select channel\n self.inst.write(\"CH {}\".format(channel))\n # turn on/off selected channel\n self.inst.write(\"LEVEL {}\".format(power))", "def include_tx_power(self, show_power=None):\n if show_power is None:\n return self.broadcaster.include_tx_power\n else:\n self.broadcaster.include_tx_power = show_power", "def set_loraPower(pwr):\n\t\tcommand = \"set_config=pwr_level:%s\" % pwr\n\t\treturn uart_tx(command)", "def getTerminalPower(self):\n return float(self.instr.query(\"MEAS:POW?\"))", "def getTerminalPower(self):\n return float(self.query(\"MEAS:POW?\"))", "def get_power(self, t: Time):\n t = t.as_decimal_hour\n return self.P15_ip.solve(t) # unit: kW", "def get_power(self):\r\n return self._power" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
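For readers skimming the dump: the transmit-power document above constrains values purely through its RestrictedClassType wrapper (range -128..127, default 9). A minimal plain-Python analogue of that range check, written only as an illustration of the restriction_dict shown above and not as pyangbind's actual implementation:

def check_int8(value, default=9):
    # Mirrors the restriction_dict {'range': ['-128..127']} and the default (9) shown above.
    if value is None:
        return default
    value = int(value)
    if not -128 <= value <= 127:
        # The generated setter surfaces this as its "must be of a type compatible with int8" ValueError.
        raise ValueError("transmit-power must be of a type compatible with int8")
    return value

print(check_int8(None))   # 9, the declared default
print(check_int8(20))     # 20, inside the int8 range
# check_int8(200)         # would raise ValueError: outside -128..127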
Setter method for transmit_eirp, mapped from YANG variable /access_points/access_point/radios/radio/config/transmit_eirp (uint8)
def _set_transmit_eirp(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="transmit-eirp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """transmit_eirp must be of a type compatible with uint8""",
      'defined-type': "uint8",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="transmit-eirp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)""",
    })

  self.__transmit_eirp = t
  if hasattr(self, '_set'):
    self._set()
[ "def _set_rssi(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"rssi must be of a type compatible with int8\"\"\",\n 'defined-type': \"int8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=False)\"\"\",\n })\n\n self.__rssi = t\n if hasattr(self, '_set'):\n self._set()", "def _set_steering_rssi(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"steering-rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"steering_rssi must be of a type compatible with int8\"\"\",\n 'defined-type': \"int8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"steering-rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=False)\"\"\",\n })\n\n self.__steering_rssi = t\n if hasattr(self, '_set'):\n self._set()", "def _set_steering_rssi(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"steering-rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"steering_rssi must be of a type compatible with int8\"\"\",\n 'defined-type': \"int8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"steering-rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=True)\"\"\",\n })\n\n self.__steering_rssi = t\n if hasattr(self, '_set'):\n self._set()", "def _set_transmit_power(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), 
default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(9), is_leaf=True, yang_name=\"transmit-power\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"transmit_power must be of a type compatible with int8\"\"\",\n 'defined-type': \"int8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(9), is_leaf=True, yang_name=\"transmit-power\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)\"\"\",\n })\n\n self.__transmit_power = t\n if hasattr(self, '_set'):\n self._set()", "def _set_neighbor_rssi(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"neighbor-rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"neighbor_rssi must be of a type compatible with int8\"\"\",\n 'defined-type': \"int8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"neighbor-rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=False)\"\"\",\n })\n\n self.__neighbor_rssi = t\n if hasattr(self, '_set'):\n self._set()", "def send_enq(self):\n self.serial.write(ENQ)", "def _set_transceiver(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='/oc-platform:components/oc-platform:component[oc-platform:name=current()/../oc-port:hardware-port]/oc-platform:subcomponents/oc-platform:subcomponent/oc-platform:name', caller=self._path() + ['transceiver'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"transceiver\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"transceiver must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='/oc-platform:components/oc-platform:component[oc-platform:name=current()/../oc-port:hardware-port]/oc-platform:subcomponents/oc-platform:subcomponent/oc-platform:name', caller=self._path() + ['transceiver'], path_helper=self._path_helper, require_instance=True), is_leaf=True, 
yang_name=\"transceiver\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__transceiver = t\n if hasattr(self, '_set'):\n self._set()", "def signal_rssi(self):\n return max(min(self.signal_quality / 2 - 100, -50), -100)", "def SetRiseTime(self, time):\n self._PWriteInt('red', 'device/trise', time)", "def set_ee_signal_value(self, ee_signal_type, value, endpoint_id=None, timeout=5.0):\n (ept_id, endpoint_info) = self.get_endpoint_info(endpoint_id)\n if ee_signal_type in endpoint_info:\n self.set_signal_value(endpoint_info[ee_signal_type], value)", "async def async_set_ir_mode(self, ir_mode):\n await self.upv_object.set_camera_ir(self._device_id, ir_mode)", "def make_RxSelReg(rx_wait: int, uart: UARTSel=UARTSel_ModulatedAnalog) -> bytes:\n if rx_wait > 0b11111:\n raise ValueError('rx_wait maximum is 0b11111, it is {}'.format(rx_wait))\n result = rx_wait\n result |= (uart << 6)\n return bytes([result])", "def IR_wake_up_trought_Xbee(self):\n self.check_serial()\n\n frame = bytearray(100) \n for i in range(100):\n frame[i] = 0xAA\n \n try :\n self.serial.write(str(frame))\n self.serial.flushInput()\n self.serial.flushOutput()\n \n except OSError as e: # bug fix python before 2.7\n raise IOError(e)", "def _set_scanning_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"scanning-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"scanning_interval must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"scanning-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__scanning_interval = t\n if hasattr(self, '_set'):\n self._set()", "def send_ack(self):\n self.serial.write(ACK)", "def RXEN_A(self, value):\n if value not in [0, 1]:\n raise ValueError(\"Value must be [0,1]\")\n self._writeReg('CHIPCFG', 'RXEN_A', value)", "def _radio433_transmit_ppm(pauses, pulse_length):\n with _connect_to_arduino() as ser:\n assert ser.readline().startswith('?')\n ser.write(\"R{0}\\n\".format(len(pauses)))\n for i in xrange(len(pauses)*2-1):\n ser.write(\"{0}\\n\".format(pauses[i/2] if i%2 else pulse_length))\n assert ser.readline().startswith('!')", "def RetransmitTimeout(self):\n if self.force_auto_sync:\n self.get('RetransmitTimeout')\n return self._RetransmitTimeout", "def send_eot(self):\n self.serial.write(EOT)", "def transmit_mode(self, mode: Optional[TransmitMode] = None):\n if mode is None:\n return self._remote_mode\n else:\n self._remote_mode = mode\n data = bytearray(bytes([mode]))\n data.append(0x00)\n return self.__do_call(FunctionBytes.TRANSMIT, data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for channel, mapped from YANG variable /access_points/access_point/radios/radio/config/channel (uint8)
def _set_channel(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name="channel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """channel must be of a type compatible with uint8""",
      'defined-type': "uint8",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name="channel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)""",
    })

  self.__channel = t
  if hasattr(self, '_set'):
    self._set()
[ "def _set_channel(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"channel must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__channel = t\n if hasattr(self, '_set'):\n self._set()", "def setRadioChannel(self,channel):\n \n data=self.EZSPtrans([0x9A, channel&xFF]);\n return ord(data[5]);", "def set_radio_channel(self, radio, channel):\n if (radio == 'working') or (radio == '0'):\n (status, null) = self.__device.set_radio_channel(0, int(channel,0))\n if(status != 0x01):\n print self.__device.decode_error_status(status)\n elif (radio == 'monitor') or (radio == '1'):\n (status, null) = self.__device.set_radio_channel(1, int(channel,0))\n if(status != 0x01):\n print self.__device.decode_error_status(status)\n else:\n print(self.help('set_radio_channel'))", "def _set_neighbor_channel(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"neighbor-channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"neighbor_channel must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"neighbor-channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__neighbor_channel = t\n if hasattr(self, '_set'):\n self._set()", "def set_channel(self, chan, val):\n try:\n self.dmx_frame[chan] = val\n except OverflowError:\n raise ValueError(\"Channel value {} out of range. 
\"\n \"DMX uses 8bit unsigned values (0-255).\"\n .format(val))", "def update_channel(self, channel):", "def selectchannel(self, c, channel):\n dev = self.selectedDevice(c)\n dev.onlyChannel = channel\n if channel > 0:\n dev.selectChannel(channel)\n return channel", "def _set_channel_support(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=TypedListType(allowed_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)), is_leaf=False, yang_name=\"channel-support\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"channel_support must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)), is_leaf=False, yang_name=\"channel-support\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__channel_support = t\n if hasattr(self, '_set'):\n self._set()", "def _set_num_channels(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_channels must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__num_channels = t\n if hasattr(self, '_set'):\n self._set()", "def _set_num_channels(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_channels must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, 
namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__num_channels = t\n if hasattr(self, '_set'):\n self._set()", "def set_channel(self):\n\t\tself.channel = int(input(\"Enter the Channel No. = \"))\n\t\twhile self.channel > 7 :\n\t\t\tself.channel = int(input(\"Enter the Channel No. = \"))\n\t\t\n\t\treturn self.channel", "def get_radio_channel(self):\n (status, channel) = self.__device.get_radio_channel()\n self.__device.decode_error_status(status, cmd='get_radio_channel', print_on_error=True)\n return \"Ch.%d - %dMHz\" % (channel, dec.channel_to_freq.get(channel, \"Unknown channel\"))", "def configure(self, channel, mode):\n if channel >= len(input_lines):\n raise ValueError, \"Unrecognized channel\"\n\n if digihw.get_channel_type(channel) != Analog:\n raise ValueError, \"Not an analog input channel\"\n\n if mode == CurrentLoop or mode == TenV:\n digihw.configure_channel(channel, mode)\n self.XBeeCommandSet(input_lines[channel], 2)\n else:\n raise ValueError, \"Unrecognized mode\"\n\n self.channels[channel] = mode", "def setChannel(self, chan: str, chanData: np.ndarray) -> None:\n self.data[chan] = chanData", "async def setChannel(self, ctx, channel: discord.TextChannel = None):\n if channel:\n await self.config.guild(ctx.message.guild).get_attr(KEY_POST_CHANNEL).set(channel.id)\n self.logger.info(\n \"%s#%s (%s) set the post channel to %s\",\n ctx.message.author.name,\n ctx.message.author.discriminator,\n ctx.message.author.id,\n channel.name,\n )\n await ctx.send(\n \":white_check_mark: **GSC - Channel**: **{}** has been set \"\n \"as the update channel!\".format(channel.name)\n )\n else:\n await self.config.guild(ctx.message.guild).get_attr(KEY_POST_CHANNEL).set(None)\n await ctx.send(\":white_check_mark: **GSC - Channel**: GSC updates are now disabled.\")", "async def setchannel(self, ctx, channel=None):\n # get channel obj, depending on if it was mentioned or just the name was specified\n if len(ctx.message.channel_mentions) > 0:\n channel_obj = ctx.message.channel_mentions[0]\n elif channel is not None:\n channel_obj = discord.utils.get(\n ctx.guild.channels, name=channel.replace(\"#\", \"\"))\n if channel_obj is None:\n await ctx.send(f\"No channel named {channel}\")\n return\n else:\n await ctx.send(\"Missing channel parameter\")\n return\n\n bot_member = ctx.guild.get_member(self.bot.user.id)\n permissions = channel_obj.permissions_for(bot_member)\n if not permissions.send_messages or not permissions.embed_links:\n await ctx.send(\"Command failed, please make sure that the bot has both permissions for sending messages and using embeds in the specified channel!\")\n return\n\n async with self.bot.pool.acquire() as db:\n # add channel id for the guild to the database\n await db.execute(\"UPDATE Guilds SET RedditNotifChannel=$1 WHERE ID=$2\",\n channel_obj.id, ctx.guild.id)\n\n await ctx.send(\"Successfully set Reddit notifications to \" + channel_obj.mention)", "def setRadioChannelAndTx(self, channel, tx_power):\n if channel == None:\n channel = -1\n if tx_power == None:\n tx_power = -1\n\n return self.sendCommand(\"RADIO SET %s %s\\r\\n\" % (channel, tx_power))", "def set_channel_wavelength(self, channel, wavelength):\n assert isinstance(channel, int), \"Channel must be an int\"\n assert isinstance(wavelength, float), \"Wavelength must be a float\"\n\n default_wavelengths = (\n 1544.53,\n 1545.32,\n 1546.92,\n 1547.72,\n 1555.72,\n 1558.98,\n 1561.42,\n 1562.23,\n )\n\n if 
(wavelength - default_wavelengths[channel - 1]) ** 2 > 9:\n print(\n \"Warning: you might be using a wavelength outside supported range, \"\n \"default is {} and you're using {}\".format(\n default_wavelengths[channel - 1], wavelength\n )\n )\n\n # select channel\n self.inst.write(\"CH {}\".format(channel))\n # turn on/off selected channel\n self.inst.write(\"WAVE {}\".format(wavelength))", "def change_channel_route(remote_name, channel):\n remote = get_remote(remote_name)\n parse_int_from_url(channel)\n\n if len(channel) > 4:\n raise InvalidAPIUsageException(\n \"The channel parameter cannot be more than 4 digits. '{0}' was passed.\".format(channel))\n\n keys = []\n for number in channel:\n keys.append(\"KEY_\" + number)\n keys.append(\"KEY_OK\")\n\n irsend_rv = press_keys(remote, keys)\n logging.info(\"Changing channel to %s %s.\", channel, \"SUCCEEDED\" if not irsend_rv else \"FAILED\")\n return jsonify({'remote': remote_name, 'channel': channel, \"irsend_rv\": irsend_rv})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
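The channel leaf above is doubly restricted: the outer RestrictedClassType enforces the uint8 bound (0..255) and the inner restriction narrows it to 1..165. A hedged sketch of that composed validation in plain Python, illustrating the two restriction_dicts shown above rather than pyangbind's own classes:

def check_channel(value):
    value = int(value)
    # Outer uint8 restriction from the generated type above.
    if not 0 <= value <= 255:
        raise ValueError("channel must be of a type compatible with uint8")
    # Inner '1..165' restriction layered on top of uint8.
    if not 1 <= value <= 165:
        raise ValueError("channel must be of a type compatible with uint8")
    return value

print(check_channel(36))    # accepted
# check_channel(0)          # would raise: fails the 1..165 restriction
# check_channel(300)        # would raise: fails the uint8 bound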
Setter method for channel_width, mapped from YANG variable /access_points/access_point/radios/radio/config/channel_width (uint8)
def _set_channel_width(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(20), is_leaf=True, yang_name="channel-width", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """channel_width must be of a type compatible with uint8""",
      'defined-type': "uint8",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(20), is_leaf=True, yang_name="channel-width", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)""",
    })

  self.__channel_width = t
  if hasattr(self, '_set'):
    self._set()
[ "def _set_num_channels(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_channels must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__num_channels = t\n if hasattr(self, '_set'):\n self._set()", "def _set_num_channels(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_channels must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__num_channels = t\n if hasattr(self, '_set'):\n self._set()", "def set_chan_width(self, chan, width):\n self._set_chan_width(chan, width)", "def width(self, val):\n if (val is None) or (val == -1):\n if not self.log.full(): self.log.put_nowait((logging.WARNING, \"CV2:Width not changed:{}\".format(val)))\n return\n if self.cam_open:\n with self.cam_lock: \n isok = self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, val)\n if isok:\n if not self.log.full(): self.log.put_nowait((logging.INFO, \"CV2:Width:{}\".format(val)))\n else:\n if not self.log.full(): self.log.put_nowait((logging.ERROR, \"CV2:Failed to set width to {}!\".format(val)))", "def _set_channel(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"channel must be of a type compatible with uint8\"\"\",\n 'defined-type': 
\"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__channel = t\n if hasattr(self, '_set'):\n self._set()", "def _set_channel(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"channel must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__channel = t\n if hasattr(self, '_set'):\n self._set()", "def width(self):\n return capi.get_band_xsize(self.ptr)", "def maximum_channel_length(self) -> int:\n return self['channellen']", "def set_channel_wavelength(self, channel, wavelength):\n assert isinstance(channel, int), \"Channel must be an int\"\n assert isinstance(wavelength, float), \"Wavelength must be a float\"\n\n default_wavelengths = (\n 1544.53,\n 1545.32,\n 1546.92,\n 1547.72,\n 1555.72,\n 1558.98,\n 1561.42,\n 1562.23,\n )\n\n if (wavelength - default_wavelengths[channel - 1]) ** 2 > 9:\n print(\n \"Warning: you might be using a wavelength outside supported range, \"\n \"default is {} and you're using {}\".format(\n default_wavelengths[channel - 1], wavelength\n )\n )\n\n # select channel\n self.inst.write(\"CH {}\".format(channel))\n # turn on/off selected channel\n self.inst.write(\"WAVE {}\".format(wavelength))", "def setWidth(self, w):\n if not isinstance(w, (int,float)):\n raise TypeError('width must be numeric value')\n if w <= 0:\n raise ValueError('width must be positive')\n self._width = w\n self._canvasChanged()", "def width(self):\n if self.cam_open:\n return self.cam.get(cv2.CAP_PROP_FRAME_WIDTH)\n else: return float(\"NaN\")", "def get_num_inchannels(self):\n return self.in_channels", "def n_channels(self):\n return self.colours.shape[1]", "def set_width(self, width):\n self.width = width\n self.changed = True", "def setWidthOfBand(self, width) -> None:\n ...", "def frame_width(self, frame_width):\n # type: (int) -> None\n\n if frame_width is not None:\n if not isinstance(frame_width, int):\n raise TypeError(\"Invalid type for `frame_width`, type has to be `int`\")\n\n self._frame_width = frame_width", "def _set_width(self):\n\n width = self.settings[\"height\"] * (1.5 + 
np.sqrt(5)) / 2\n\n\n if \"width\" in self.settings and self.settings[\"width\"] > 0:\n # use the width in JSON file instead.\n width = self.settings[\"width\"]\n\n return width", "def num_channels(self):\n return self.train.images.shape[3]", "def frame_width(self):\n # type: () -> int\n return self._frame_width", "def GetNumberOfChannels(self):\n\t\treturn len(self.__analog_routes)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for dca, mapped from YANG variable /access_points/access_point/radios/radio/config/dca (boolean)
def _set_dca(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="dca", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """dca must be of a type compatible with boolean""",
      'defined-type': "boolean",
      'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="dca", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=True)""",
    })

  self.__dca = t
  if hasattr(self, '_set'):
    self._set()
[ "def is_ca(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_ca\")", "def set_dcc_selection(self, value):\n assert type(value) is bool\n read = bytearray(self._device.readRaw(PI3HDMI336_TOTAL_BYTES))\n read[PI3HDMI336_OFFSET_BYTE1] = \\\n (read[PI3HDMI336_OFFSET_BYTE1] & (~PI3HDMI336_BYTE1_DCC_CHANNEL)) | value\n self._device.writeRaw(read)", "def set_cd(self, b):\n _ldns.ldns_pkt_set_cd(self, b)\n #parameters: ldns_pkt *,bool,\n #retvals: ", "def get_dca(msa_file,\r\n max_len):\r\n msa_prpc = np.load(msa_file)\r\n msa_prpc = msa_prpc[:max_len, :max_len, :]\r\n pad_len = max(0, max_len - msa_prpc.shape[0])\r\n # todo : log ?\r\n\r\n if msa_prpc.shape[0] < max_len:\r\n msa_prpc = np.pad(msa_prpc, [[0, pad_len], [0, pad_len], [0, 0]])\r\n\r\n return msa_prpc", "def is_dcp(self):\n return self.args[0].is_convex()", "def test_solver_sdca(self):\n solver = SDCA(l_l2sq=1e-5, max_iter=100, verbose=False, tol=0)\n self.check_solver(solver, fit_intercept=False, model=\"logreg\",\n decimal=1)\n\n # Now a specific test with a real prox for SDCA\n np.random.seed(12)\n n_samples = Test.n_samples\n n_features = Test.n_features\n\n for fit_intercept in [True, False]:\n y, X, coeffs0, interc0 = TestSolver.generate_logistic_data(\n n_features, n_samples)\n\n model = ModelLogReg(fit_intercept=fit_intercept).fit(X, y)\n ratio = 0.5\n l_enet = 1e-2\n\n # SDCA \"elastic-net\" formulation is different from elastic-net\n # implementation\n l_l2_sdca = ratio * l_enet\n l_l1_sdca = (1 - ratio) * l_enet\n sdca = SDCA(l_l2sq=l_l2_sdca, max_iter=100, verbose=False, tol=0,\n seed=Test.sto_seed).set_model(model)\n prox_l1 = ProxL1(l_l1_sdca)\n sdca.set_prox(prox_l1)\n coeffs_sdca = sdca.solve()\n\n # Compare with SVRG\n svrg = SVRG(max_iter=100, verbose=False, tol=0,\n seed=Test.sto_seed).set_model(model)\n prox_enet = ProxElasticNet(l_enet, ratio)\n svrg.set_prox(prox_enet)\n coeffs_svrg = svrg.solve(step=0.1)\n\n np.testing.assert_allclose(coeffs_sdca, coeffs_svrg)", "def dns_active(self, dns_active: bool):\n self._indicator_data['flag1'] = self.util.to_bool(dns_active)", "def test_sdca_sparse_and_dense_consistency(self):\n\n def create_solver():\n return SDCA(max_iter=1, verbose=False, l_l2sq=1e-3,\n seed=TestSolver.sto_seed)\n\n self._test_solver_sparse_and_dense_consistency(create_solver)", "def datacenter_configured(name):\n proxy_type = __salt__[\"vsphere.get_proxy_type\"]()\n if proxy_type == \"esxdatacenter\":\n dc_name = __salt__[\"esxdatacenter.get_details\"]()[\"datacenter\"]\n else:\n dc_name = name\n log.info(\"Running datacenter_configured for datacenter '%s'\", dc_name)\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"Default\"}\n comments = []\n si = None\n try:\n si = __salt__[\"vsphere.get_service_instance_via_proxy\"]()\n dcs = __salt__[\"vsphere.list_datacenters_via_proxy\"](\n datacenter_names=[dc_name], service_instance=si\n )\n if not dcs:\n if __opts__[\"test\"]:\n comments.append(f\"State will create datacenter '{dc_name}'.\")\n else:\n log.debug(\"Creating datacenter '%s'\", dc_name)\n __salt__[\"vsphere.create_datacenter\"](dc_name, si)\n comments.append(f\"Created datacenter '{dc_name}'.\")\n log.info(comments[-1])\n ret[\"changes\"].update({\"new\": {\"name\": dc_name}})\n else:\n comments.append(\n f\"Datacenter '{dc_name}' already exists. 
Nothing to be done.\"\n )\n log.info(comments[-1])\n __salt__[\"vsphere.disconnect\"](si)\n ret[\"comment\"] = \"\\n\".join(comments)\n ret[\"result\"] = None if __opts__[\"test\"] and ret[\"changes\"] else True\n return ret\n except salt.exceptions.CommandExecutionError as exc:\n log.error(\"Error: %s\", exc)\n if si:\n __salt__[\"vsphere.disconnect\"](si)\n ret.update(\n {\"result\": False if not __opts__[\"test\"] else None, \"comment\": str(exc)}\n )\n return ret", "def _set_csa(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"csa\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"csa must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"csa\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__csa = t\n if hasattr(self, '_set'):\n self._set()", "def _set_csa(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"csa\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"csa must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"csa\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__csa = t\n if hasattr(self, '_set'):\n self._set()", "def enable_di(self, value):\n self._set_property('enable_di', value)", "def updateDome(self, azimuth):\n\n if self.pointerDome is None:\n return False\n if not isinstance(azimuth, (int, float)):\n self.pointerDome.set_visible(False)\n return False\n\n visible = self.app.mainW.deviceStat['dome'] is not None\n\n self.pointerDome.set_xy((azimuth - 15, 0))\n self.pointerDome.set_visible(visible)\n\n return True", "def HasDPA(self):\n return self.__has('DPA')", "def ds_factory_enabled(self) -> ConfigNodePropertyBoolean:\n return self._ds_factory_enabled", "def is_dazed(sim_info: SimInfo) -> bool:\n return CommonMoodUtils.has_mood(sim_info, CommonMoodId.DAZED)", "def dns_active(self) -> bool:\n return self._indicator_data.get('flag1') # type: ignore", "def is_dcp(self):\n return all(\n expr.is_dcp() for expr in self.constraints + [self.objective])", "def has_antenna(self, antenna_name):\n if antenna_name in self.dipoleNames:\n index = self.index_adjusts( self.dipoleNames.index(antenna_name) )\n if self.antenna_to_file[index] is None:\n return False\n else:\n return True\n else:\n return False", "def ddeta(\n 
self,\n varname,\n hcoord=None,\n scoord=None,\n hboundary=\"extend\",\n hfill_value=None,\n sboundary=\"extend\",\n sfill_value=None,\n attrs=None,\n ):\n\n assert isinstance(\n varname, str\n ), \"varname should be a string of the name of a variable stored in the Dataset\"\n assert varname in self.ds, 'variable called \"varname\" must be in Dataset'\n var = xroms.ddeta(\n self.ds[varname],\n self.grid,\n hcoord=hcoord,\n scoord=scoord,\n hboundary=hboundary,\n hfill_value=hfill_value,\n sboundary=sboundary,\n sfill_value=sfill_value,\n attrs=attrs,\n )\n\n self._ds[var.name] = var\n return self._ds[var.name]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for dtp, mapped from YANG variable /access_points/access_point/radios/radio/config/dtp (boolean)
def _set_dtp(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="dtp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """dtp must be of a type compatible with boolean""",
      'defined-type': "boolean",
      'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="dtp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=True)""",
    })

  self.__dtp = t
  if hasattr(self, '_set'):
    self._set()
[ "def _set_trust_dscp(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"trust-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"trust_dscp must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"trust-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__trust_dscp = t\n if hasattr(self, '_set'):\n self._set()", "def _set_trust_dscp(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"trust-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"trust_dscp must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"trust-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__trust_dscp = t\n if hasattr(self, '_set'):\n self._set()", "def tddMode(self, on = True):\n if on is True:\n on = 'ON'\n\n elif on is False:\n on = 'OFF'\n\n elif on is None:\n on = 'MATE'\n\n command = 'TDD MODE {}'.format(on)\n d = self.sendCommand(command)\n d = d.addCallback(self.checkFailure)\n # planned eventual failure case (not capable)\n d = d.addCallback(self.checkFailure, failure = '0')\n d = d.addCallback(self.resultAsInt)\n return d", "def set_dtr(self, value, *args, **kwargs):\n with self.change_connection():\n self.connection.dtr = bool(value)", "async def get_ptp_enabled(self):\n return unpack('I', await self._execute_command('#GetPtpEnabled').content)[0] > 0", "def get_ntp_enabled(self):\n return None", "def set_ntp_enabled(self, enabled):\n return None", "def HasDRT(self):\n return self.__has('DRT')", "def set_stp_config(\n self, stp_id, new_stp_id, force, preferred_time_server,\n backup_time_server, arbiter, current_time_server):\n body = {\n 'stp-id': stp_id,\n 'force': force,\n 'preferred-time-server': preferred_time_server.json(),\n 'current-time-server': current_time_server,\n }\n if new_stp_id:\n body['new-stp-id'] = new_stp_id\n if backup_time_server:\n body['backup-time-server'] = backup_time_server.json()\n if arbiter:\n body['arbiter'] = arbiter.json()\n self.manager.session.post(\n self.uri + '/operations/set-stp-config', body=body)", "def patm_enabled(self):\n ret = self._get_attr(\"PATMEnabled\")\n return ret", "def is_datetime(self) -> \"bool\":\n return self._value.getType() == Value.DTVAL", "def 
DDISPY(self,order,x0,y0,t):\n return poly.DPOLY[self.DISPY_POLYNAME[order]](self.DISPY_DATA[order],x0,y0,t)", "async def get_ntp_enabled(self):\n\n return unpack('I', await self._execute_command('#GetNtpEnabled').content)[0] > 0", "def update_ntp_config(self, context):\n return self.call(context, self.make_msg('update_ntp_config'))", "def setDTR(self, value:bool)->None:\n self.serial.setDTR(value)", "def _update_is_passive(self):\n passive_setting = self._view.settings().get('wrap_as_you_type_passive')\n if passive_setting in (None, False, True):\n self.is_passive = bool(passive_setting)\n else:\n self.is_passive = False\n raise UserFacingError('The value must be a boolean')", "def test_bool_direct(self):\n for source in (\"direct\", \"default\"):\n self.assertEqual(self.setting.detect_type(True, source), \"bool\")", "def setdtr(self, dtr):\n try:\n self.ser.setDTR(dtr)\n self.log(\"DTR set to \"+`dtr`)\n return True\n except SilentException:\n return False", "def PtpOptions(self):\n\t\tfrom ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ptpoptions_b3rvy29su3rhy2svchrwt3b0aw9ucw import PtpOptions\n\t\treturn PtpOptions(self)", "def DDISPL(self,order,x0,y0,t):\n return poly.DPOLY[self.DISPL_POLYNAME[order]](self.DISPL_DATA[order],x0,y0,t)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
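For orientation: the dtp document above (and the scanning row later in this section) follows pyangbind's boolean-leaf pattern, where the generated setter wraps the incoming value via YANGBool, carries a YANG default of "true", and rejects anything that is not boolean-compatible. Below is a minimal stand-alone sketch of that behaviour written without pyangbind; the helper name and the accepted string spellings are assumptions for illustration, while the leaf name, default and error wording come from the code above.

def validate_boolean(leaf_name, value, default=True):
    # Rough mirror of what the generated _set_dtp setter does through YANGBool;
    # the accepted string spellings here are an assumption, not pyangbind behaviour.
    if value is None:
        return default
    if isinstance(value, bool):
        return value
    if isinstance(value, str) and value.lower() in ("true", "false"):
        return value.lower() == "true"
    raise ValueError({
        'error-string': "%s must be of a type compatible with boolean" % leaf_name,
        'defined-type': "boolean",
    })

dtp = validate_boolean("dtp", "true")   # True, matching default=YANGBool("true")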
Setter method for dtp_min, mapped from YANG variable /access_points/access_point/radios/radio/config/dtp_min (int8)
def _set_dtp_min(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(3), is_leaf=True, yang_name="dtp-min", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """dtp_min must be of a type compatible with int8""",
      'defined-type': "int8",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(3), is_leaf=True, yang_name="dtp-min", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)""",
    })

  self.__dtp_min = t
  if hasattr(self, '_set'):
    self._set()
[ "def min_start_time(self, min_start_time):\n if min_start_time is not None and len(min_start_time) > 5:\n raise ValueError(\"Invalid value for `min_start_time`, length must be less than or equal to `5`\") # noqa: E501\n\n self._min_start_time = min_start_time", "def minimum_temperature(self, value: float) -> None:\n self._min_temp = value", "def src_port_min(self, src_port_min):\n\n self._src_port_min = src_port_min", "def _set_min_threshold(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name=\"min-threshold\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint64', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"min_threshold must be of a type compatible with uint64\"\"\",\n 'defined-type': \"uint64\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name=\"min-threshold\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint64', is_config=False)\"\"\",\n })\n\n self.__min_threshold = t\n if hasattr(self, '_set'):\n self._set()", "def min_vpu_low(self, min_vpu_low: float):\n\n self._min_vpu_low = min_vpu_low", "def t_min(self):\n t_min = self.get('t_min')\n if np.isfinite(t_min):\n return Time(t_min, format='mjd')\n else:\n return None", "def set_tmin(self):\n\t\t#pass\n\t\tif len(self.data) == 10:\n\t\t\tld = len(self.data[0])\n\t\t\tcheck = self.data[3:7]\n\t\t\tnew_min = [1000 for i in range(len(self.data[0]))]\n\t\t\tfor tt in check:\n\t\t\t\t#print 'tt: ' + str(tt)\n\t\t\t\tfor j in range(len(tt)):\n\t\t\t\t\tif tt[j] >= self.min_times[j] and tt[j] < new_min[j]:\n\t\t\t\t\t\tnew_min[j] = tt[j]\n\n\t\t\tfor i in range(ld):\n\t\t\t\tif new_min[i] != 1000:\n\t\t\t\t\tself.t_min[i] = new_min[i]", "def default_min(self, default_min):\n\n self._default_min = default_min", "def min_sample_value(self, min_sample_value):\n self._min_sample_value = min_sample_value", "def min_event_delay(self, min_event_delay: ConfigNodePropertyInteger):\n\n self._min_event_delay = min_event_delay", "def min_start_date(self, min_start_date):\n\n self._min_start_date = min_start_date", "def min_study_duration(self, min_study_duration):\n\n self._min_study_duration = min_study_duration", "def _set_min_ttl(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"min-ttl\", rest_name=\"min-ttl\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"min_ttl must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"min-ttl\", 
rest_name=\"min-ttl\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__min_ttl = t\n if hasattr(self, '_set'):\n self._set()", "def min_heat_setpoint(self, min_heat_setpoint):\n\n self._min_heat_setpoint = min_heat_setpoint", "async def set_minimum_pressure(self, value: str = \"0 bar\"):\n pressure = ureg.Quantity(value)\n command = PMIN10 if self._headtype == AzuraPumpHeads.FLOWRATE_TEN_ML else PMIN50\n await self.create_and_send_command(\n command,\n setpoint=round(pressure.m_as(\"bar\")),\n setpoint_range=(0, self.max_allowed_pressure + 1),\n )\n logger.info(f\"Minimum pressure set to {pressure}\")", "def set_min(self, x):\n if x in self.timestamps:\n self._set_min_directly(x)", "def set_curr_min(self):\r\n self.inst.write(\"CURR MIN\")", "def set_min_time(self, time):\n raise NotImplementedError", "def min_hrs_per_week(self, min_hrs_per_week):\n\n self._min_hrs_per_week = min_hrs_per_week", "def test_Tmin(self):\n self.assertAlmostEqual(self.stick.Tmin.value_si, self.Tmin, 6)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for dtp_max, mapped from YANG variable /access_points/access_point/radios/radio/config/dtp_max (int8)
def _set_dtp_max(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(15), is_leaf=True, yang_name="dtp-max", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """dtp_max must be of a type compatible with int8""",
      'defined-type': "int8",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(15), is_leaf=True, yang_name="dtp-max", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)""",
    })

  self.__dtp_max = t
  if hasattr(self, '_set'):
    self._set()
[ "def set_max_rate_deviation(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_max_rate_deviation(self, *args, **kwargs)", "def target_temperature_max(self) -> Optional[float]:\n if self._state is None:\n return None\n limits = self._device_conf.get(\"max\", {})\n return limits.get(str(_operation_mode_to(self.operation_mode)), {}).get(\"max\", 31)", "def dst_port_max(self, dst_port_max):\n\n self._dst_port_max = dst_port_max", "def set_max_data_values(self, max_data_values: int) -> None:\n self._max_data_values = max_data_values", "def set_max_utilization(self, max_utilization):\n if max_utilization is not None:\n self.max_utilization = max_utilization\n else:\n self.max_utilization = _MAX_UTILIZATION", "def setMaxPeriod(self, maxPeriod):\n hal.setCounterMaxPeriod(self.counter, float(maxPeriod))", "def _set_max_threshold(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name=\"max-threshold\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint64', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"max_threshold must be of a type compatible with uint64\"\"\",\n 'defined-type': \"uint64\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name=\"max-threshold\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint64', is_config=False)\"\"\",\n })\n\n self.__max_threshold = t\n if hasattr(self, '_set'):\n self._set()", "def setMaxValue(self, value):\n self.maxValue = value", "def set_max_rate_deviation(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_set_max_rate_deviation(self, *args, **kwargs)", "def setMaxPeriod(self, maxPeriod):\n hal.setEncoderMaxPeriod(self.encoder, maxPeriod)", "def _set_max_ttl(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"max-ttl\", rest_name=\"max-ttl\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"max_ttl must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"max-ttl\", rest_name=\"max-ttl\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__max_ttl = t\n if hasattr(self, '_set'):\n self._set()", "def setMaximumThreshold(self, max_threshold):\n self.max_threshold = max_threshold", "def src_port_max(self, src_port_max):\n\n 
self._src_port_max = src_port_max", "def max_speed(self, value):\n\n pass", "def default_max(self, default_max):\n\n self._default_max = default_max", "def get_maxbandwidth(self):\n return self.options['maxbandwidth']", "def max_sample_value(self, max_sample_value):\n self._max_sample_value = max_sample_value", "def max_zoom(self, max_zoom=18):\n return int(max_zoom)", "def t_max(self):\n t_max = self.get('t_max')\n if np.isfinite(t_max):\n return Time(t_max, format='mjd')\n else:\n return None", "def maximum_value(self):\n ret = self._get_attr(\"maximumValue\")\n return ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
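The dtp_min and dtp_max rows above share one int8 pattern: RestrictedClassType constrains the value to the range -128..127 and the setter raises a ValueError carrying a structured dict otherwise. A small sketch of that range check outside pyangbind follows; the helper is hypothetical, while the range, the defaults 3 and 15, and the error wording are taken from the rows above.

def validate_int8(leaf_name, value):
    # Mirrors RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)
    if not isinstance(value, int) or not -128 <= value <= 127:
        raise ValueError({
            'error-string': "%s must be of a type compatible with int8" % leaf_name,
            'defined-type': "int8",
        })
    return value

dtp_min = validate_int8("dtp_min", 3)     # generated default for dtp-min
dtp_max = validate_int8("dtp_max", 15)    # generated default for dtp-max
try:
    validate_int8("dtp_min", 300)         # outside -128..127, so this raises
except ValueError as err:
    print(err)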
Setter method for antenna_gain, mapped from YANG variable /access_points/access_point/radios/radio/config/antenna_gain (int8)
def _set_antenna_gain(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name="antenna-gain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """antenna_gain must be of a type compatible with int8""",
      'defined-type': "int8",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name="antenna-gain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)""",
    })

  self.__antenna_gain = t
  if hasattr(self, '_set'):
    self._set()
[ "def set_pga_gain(self, pga_num, gain):\n\t\treturn self.config_ads(pga_num, 2, gain)", "def set_analog_gain(self, gain):\n if gain < 0:\n raise ValueError('Gain register must be greater than 0.')\n self.i2c.mem_write(int(gain), self.bus_addr, 1)", "def _set_gain(self, adjustment: int) -> int:\n return _lib.opus_decoder_ctl(self._state, CTL_SET_GAIN, adjustment)", "def set_gain(self, dB: float) -> int:\n\n dB_Q8 = max(-32768, min(32767, round(dB * 256))) # dB * 2^n where n is 8 (Q8)\n return self._set_gain(dB_Q8)", "def set_gain(self, gain):\n if gain is None:\n r = self.subdev.gain_range()\n gain = (r[0] + r[1])/2 # set gain to midpoint\n self.gain = gain\n return self.subdev.set_gain(gain)", "def setGain(self, gain: 'float') -> \"void\":\n return _coin.SoAudioDevice_setGain(self, gain)", "def Set_ALS_Gain(self,gain):\n\t\tif gain in self._Gain_LOOKUP:\n\t\t\tregval = self._read_reg(self._REG_ALS_CONTR)\n\t\t\tregval = (regval & self._Gain_CLEAR) | self._Gain_LOOKUP[gain][0]\n\t\t\tself._write_reg(self._REG_ALS_CONTR,regval)", "def test_set_gain():\n _setup()\n\n as7262.set_gain(1)\n assert as7262._as7262.CONTROL.get_gain_x() == 1\n\n # Should snap to the highest gain value\n as7262.set_gain(999)\n assert as7262._as7262.CONTROL.get_gain_x() == 64\n\n # Should snap to the lowest gain value\n as7262.set_gain(-1)\n assert as7262._as7262.CONTROL.get_gain_x() == 1", "def get_RxGain(rfCfgReg: int) -> RxGain:\n return RxGain((rfCfgReg[0] & 0x70) >> 4)", "def set_gain(self):\n DescStr = 'Setting Gain for AHF_Camera '\n if (self.AHFgainMode & 2):\n DescStr += 'from current illumination'\n else:\n DescStr += \"from ISO \" + str(self.iso)\n if (self.AHFgainMode & 1):\n DescStr += ' with white balancing'\n else:\n DescStr += \" with No white balancing\"\n print (DescStr)\n if (self.AHFgainMode & 1):\n self.awb_mode = 'auto'\n else:\n self.awb_mode = 'off'\n self.awb_gains = (1, 1)\n # if (self.AHFgainMode & 2):\n self.exposure_mode = 'auto'\n # else:\n # self.exposure_mode = 'off'\n super().start_preview(fullscreen=False, window=self.AHFpreview)\n sleep(2.0) # let gains settle, then fix values\n if (self.AHFgainMode & 1):\n savedGain = self.awb_gains\n self.awb_gains = savedGain\n self.awb_mode = \"off\"\n # if (self.AHFgainMode & 2):\n self.exposure_mode = 'off'\n super().stop_preview()\n print (\"Red Gain for white balance =\" + str(float(self.awb_gains[0])))\n print (\"Blue Gain for white balance =\" + str(float(self.awb_gains[1])))\n print (\"Analog Gain = \" + str(float(self.analog_gain)))\n print (\"Digital Gain = \" + str(float(self.digital_gain)))\n return", "def getRelativeGain(self):\n if len(self.gainSettings) > 0 :\n return self.gainSettings\n\n xdim = len(self.antennaGrid)\n ydim = len(self.antennaGrid[0])\n self.gainSettings = [[self.beamStrength / self.beamStrength for y in range(ydim)] for x in range(xdim)]\n\n return self.gainSettings", "def set_sum_input_gain(self, input_channel: Channel, gain: float):\n assert Channel.INPUT_A <= input_channel <= Channel.INPUT_C\n return self._invoke(0x16 + input_channel - Channel.INPUT_A, Channel.SETUP, _15db_range(gain))", "def set_manual_gain_enabled(self, enabled):\n result = librtlsdr.rtlsdr_set_tuner_gain_mode(self.dev_p, int(enabled))\n if result < 0:\n raise IOError('Error code %d when setting gain mode'\\\n % (result))\n\n return", "def process_gain(self):\n return 1", "def gainToInt(self,gain):\n dial_num = int(gain*2)\n self.logger.debug(\"%f converted to %d\", gain, dial_num)\n return dial_num # int((gain*2)+23)", "def 
apply_gain(infile, gain):\n fs1, x = monoWavRead(filename=infile)\n\n x = np.copy(x)\n x = x * (10 ** (gain / 20.0))\n x = np.minimum(np.maximum(-1.0, x), 1.0)\n #Change the output file name to suit your requirements here\n outfile_name = os.path.basename(infile).split(\".\")[0] + (\"_gain%s.wav\" % str(gain))\n outfile = os.path.join(outfile_path, outfile_name)\n write(filename = outfile, rate = fs1, data = x)\n if (FILE_DELETION):\n extractFeaturesAndDelete(outfile)", "def power_on(self, gain):\n\t\t# Turn on channel 0\n\t\tself._bus.write_byte_data(TSL2561_ADDRESS, TSL2561_CHANNEL_0 | TSL2561_CMD, TSL2561_POWER_ON)\n\t\tself._bus.write_byte_data(TSL2561_ADDRESS, TSL2561_CHANNEL_0 | TSL2561_CMD, gain)\n\t\t# Turn on channel 1\n\t\tself._bus.write_byte_data(TSL2561_ADDRESS, TSL2561_CHANNEL_1 | TSL2561_CMD, TSL2561_POWER_ON)\n\t\tself._bus.write_byte_data(TSL2561_ADDRESS, TSL2561_CHANNEL_1 | TSL2561_CMD, gain)", "def mag_gain(self, gain=0x20):\n self._mag_gain = gain\n self.i2c.writeto_mem(self.ADDRESS_MAG, self.REGISTER_MAG_CRB_REG_M, self._mag_gain)\n if self._mag_gain == MAGGAIN_1_3:\n self._lsm303mag_gauss_lsb_xy = 1100.0\n self._lsm303mag_gauss_lsb_z = 980.0\n elif self._mag_gain == MAGGAIN_1_9:\n self._lsm303mag_gauss_lsb_xy = 855.0\n self._lsm303mag_gauss_lsb_z = 760.0\n elif self._mag_gain == MAGGAIN_2_5:\n self._lsm303mag_gauss_lsb_xy = 670.0\n self._lsm303mag_gauss_lsb_z = 600.0\n elif self._mag_gain == MAGGAIN_4_0:\n self._lsm303mag_gauss_lsb_xy = 450.0\n self._lsm303mag_gauss_lsb_z = 400.0\n elif self._mag_gain == MAGGAIN_4_7:\n self._lsm303mag_gauss_lsb_xy = 400.0\n self._lsm303mag_gauss_lsb_z = 355.0\n elif self._mag_gain == MAGGAIN_5_6:\n self._lsm303mag_gauss_lsb_xy = 330.0\n self._lsm303mag_gauss_lsb_z = 295.0\n elif self._mag_gain == MAGGAIN_8_1:\n self._lsm303mag_gauss_lsb_xy = 230.0\n self._lsm303mag_gauss_lsb_z = 205.0", "def test_15_flux_and_bandpass_calibrators_gain():\n\tcasalog.origin(\"test_15_flux_and_bandpass_calibrators_gain\")\n\tcasalog.post(\"starting\")\n\n\tgaincal(vis='G192_flagged_6s.ms', caltable='calG192.G1', field='0,3', \\\n\t gaintable=['calG192.antpos', 'calG192.gaincurve', 'calG192.requantizer', \\\n\t 'calG192.opacity', 'calG192.K0', \\\n\t 'calG192.B0'], \\\n\t gaintype='G', refant='ea05', calmode='ap', solint='30s', minsnr=3)", "def get_ml_gain_increment(self):\n frames = self.integration.frames\n valid_frames = frames.valid & frames.is_unflagged('MODELING_FLAGS')\n return snf.get_ml_gain_increment(\n frame_data=frames.data,\n signal_wc=frames.temp_wc,\n signal_wc2=frames.temp_wc2,\n sample_flags=frames.sample_flag,\n channel_indices=self.mode.channel_group.indices,\n valid_frames=valid_frames)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for scanning, mapped from YANG variable /access_points/access_point/radios/radio/config/scanning (boolean)
def _set_scanning(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="scanning", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """scanning must be of a type compatible with boolean""",
      'defined-type': "boolean",
      'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="scanning", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=True)""",
    })

  self.__scanning = t
  if hasattr(self, '_set'):
    self._set()
[ "def is_scan_enabled(self):\n return self._is_scan_enabled", "def SendStartScanSignal(self):\n self._scanning = True", "def _set_scanning_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"scanning-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"scanning_interval must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"scanning-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__scanning_interval = t\n if hasattr(self, '_set'):\n self._set()", "def _scan(self):\n # Set the scan parameters\n if self._data_range is not None:\n image_range = tuple(self._data_range)\n else:\n image_range = (self._starting_frame, self._starting_frame)\n oscillation = (self._starting_angle, self._oscillation_range)\n\n # Create the scan object\n return self._scan_factory.make_scan(\n image_range,\n 0.0,\n oscillation,\n [0] * (image_range[-1] - image_range[0] + 1),\n deg=True,\n )", "def pitch_scan_status_changed(self, status):\n self.scan_status = status", "def __configure_scanner(\n self, zap_scanner: ascan, scanner_config: collections.OrderedDict\n ):\n\n logging.debug(\"Trying to configure the ActiveScan\")\n self.configure_scripts(config=scanner_config)\n\n if self._is_not_empty_integer(\"maxRuleDurationInMins\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_max_rule_duration_in_mins(\n integer=str(scanner_config[\"maxRuleDurationInMins\"])\n ),\n method_name=\"set_option_max_rule_duration_in_mins\",\n )\n if self._is_not_empty_integer(\"maxScanDurationInMins\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_max_scan_duration_in_mins(\n integer=str(scanner_config[\"maxScanDurationInMins\"])\n ),\n method_name=\"set_option_max_scan_duration_in_mins\",\n )\n if self._is_not_empty_integer(\"threadPerHost\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_thread_per_host(\n integer=str(scanner_config[\"threadPerHost\"])\n ),\n method_name=\"set_option_thread_per_host\",\n )\n if self._is_not_empty_integer(\"delayInMs\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_delay_in_ms(\n integer=str(scanner_config[\"delayInMs\"])\n ),\n method_name=\"set_option_delay_in_ms\",\n )\n\n if self._is_not_empty_bool(\"addQueryParam\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_add_query_param(\n boolean=str(scanner_config[\"addQueryParam\"])\n ),\n method_name=\"set_option_add_query_param\",\n )\n if self._is_not_empty_bool(\"handleAntiCSRFTokens\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_handle_anti_csrf_tokens(\n boolean=str(scanner_config[\"handleAntiCSRFTokens\"])\n ),\n method_name=\"set_option_handle_anti_csrf_tokens\",\n 
)\n if self._is_not_empty_bool(\"injectPluginIdInHeader\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_inject_plugin_id_in_header(\n boolean=str(scanner_config[\"injectPluginIdInHeader\"])\n ),\n method_name=\"set_option_inject_plugin_id_in_header\",\n )\n if self._is_not_empty_bool(\"scanHeadersAllRequests\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_scan_headers_all_requests(\n boolean=str(scanner_config[\"scanHeadersAllRequests\"])\n ),\n method_name=\"set_option_scan_headers_all_requests\",\n )\n if self._is_not_empty_string(\"defaultPolicy\", scanner_config):\n self.check_zap_result(\n result=zap_scanner.set_option_default_policy(\n string=str(scanner_config[\"defaultPolicy\"])\n ),\n method_name=\"set_option_default_policy\",\n )\n else:\n # Ensure a default value even if nothing is defined\n scanner_config[\"defaultPolicy\"] = \"Default Policy\"", "def do_scan(self, param=None):\n if type(param) is dict:\n self.logger.info('Updating scan properties with supplied parameters dictionary.')\n self.properties['scan'].update(param)\n # Start with various checks and warn+return if something is wrong\n if self._busy:\n self.logger.error('Scan should not be started while Operator is busy.')\n return\n if 'scan' not in self.properties:\n self.logger.error(\"The config file or properties dict should contain 'scan' section.\")\n return\n required_keys = ['start', 'stop', 'step', 'ao_channel', 'ai_channel']\n if not all(key in self.properties['scan'] for key in required_keys):\n self.logger.error(\"'scan' should contain: \"+', '.join(required_keys))\n return\n try:\n start = self.properties['scan']['start']\n stop = self.properties['scan']['stop']\n step = self.properties['scan']['step']\n ch_ao = int(self.properties['scan']['ao_channel'])\n ch_ai = int(self.properties['scan']['ai_channel'])\n except:\n self.logger.error(\"Error occured while reading scan config values\")\n return\n if ch_ai not in [1,2] or ch_ao not in [1,2]:\n self.logger.error(\"AI and AO channel need to be 1 or 2\")\n return\n if 'stabilize_time' in self.properties['scan']:\n stabilize = self.properties['scan']['stabilize_time']\n else:\n self.logger.info(\"stabilize_time not found in config, using 0s\")\n stabilize = 0\n num_points = np.int(round( (stop-start)/step+1 )) # use round to catch the occasional rounding error\n if num_points <= 0:\n self.logger.error(\"Start, stop and step result in 0 or fewer points to sweep\")\n return\n\n self.voltages_to_scan = np.linspace(start, stop, num_points)\n\n self.scan_voltages = []\n self.measured_voltages = []\n\n self._busy = True # indicate that operator is busy\n\n for i, voltage in enumerate(self.voltages_to_scan):\n self.logger.debug('applying {} to ch {}'.format(voltage, ch_ao))\n self.analog_out(ch_ao, voltage)\n sleep(stabilize)\n measured = self.analog_in()[ch_ai - 1]\n self.measured_voltages.append(measured)\n self.scan_voltages.append(voltage)\n\n # The remainder of the loop adds functionality to plot data and pause and stop the scan when it's run from a gui:\n self._new_scan_data = True\n # before the end of the loop: halt if pause is True\n while self._pause:\n sleep(0.05)\n if self._stop: break\n # if (soft) stop was requested, break out of loop\n if self._stop:\n break\n\n self._stop = False # reset stop flag to false\n self._busy = False # indicate operator is not busy anymore\n self._pause = False # is this necessary?\n\n return self.scan_voltages, self.measured_voltages", "def start_scan(self):\r\n 
try:\r\n out = self.get_output(\"scan on\")\r\n except BluetoothctlError, e:\r\n print(e)\r\n return None", "def StopScanning(self):\n try:\n if(self.__is_connected and self.__is_scanning):\n self.__is_scanning = False\n self.__s.write(b'\\xA5\\x65')\n time.sleep(0.5)\n self.__s.reset_input_buffer()\n self.__stop_motor()\n return True\n else:\n return False\n except Exception as e:\n return False", "def scan():\n _rpc.request('AudioLibrary.Scan')", "def _fillscan(scan, radar, index=0):\n\n startray = radar.sweep_start_ray_index['data'][index]\n stopray = radar.sweep_end_ray_index['data'][index]\n sweep_times = radar.time['data'][startray:stopray+1]\n\n # Dataset-specific 'where'\n scan.elangle = radar.elevation[\"data\"][startray] * dr\n scan.rstart = float(radar.range[\"meters_to_center_of_first_gate\"])\n scan.rscale = float(radar.range[\"meters_between_gates\"])\n scan.a1gate = int(np.argmin(sweep_times) + startray)\n # These are not settable in RAVE\n #scan.nrays = stopray - startray + 1\n #scan.nbins = radar.ngates\n\n # Dataset-specific 'what'\n dt_start = netCDF4.num2date(sweep_times.min(), radar.time['units'])\n scan.startdate = dt_start.strftime('%Y%m%d')\n scan.starttime = dt_start.strftime('%H%M%S')\n dt_end = netCDF4.num2date(sweep_times.max(), radar.time['units'])\n scan.enddate = dt_end.strftime('%Y%m%d')\n scan.endtime = dt_end.strftime('%H%M%S')\n\n # Dataset-specific 'how'.\n # Such optional attributes have to be named specifically.\n scan.addAttribute(\"how/startazA\",\n radar.azimuth[\"data\"][startray:stopray+1])\n\n # Quantity/parameter-specific 'what'\n # Py-ART delagates any scaling and offset of data to the\n # field 'data' dictionary object, only the 'final' values are available\n # for general purpose use. In additional all bad/missing/undetected\n # data is indicated by possible masking.\n # RAVE has conventions for scaling/offset and missing data vs undetected\n # data. These are not used here.\n for quant in radar.fields.keys():\n param = _polarscanparam.new()\n param.quantity = str(quant)\n param.gain = 1.0 # See above discussion\n param.offset = 0.0\n param.nodata = get_fillvalue()\n param.undetect = get_fillvalue()\n sweep_data = radar.fields[quant]['data'][startray:stopray+1]\n param.setData(np.ma.filled(sweep_data, get_fillvalue()))\n scan.addParameter(param)\n\n # Unambiguous velocity (Nyquist interval)\n if radar.instrument_parameters is not None:\n inst_params = radar.instrument_parameters\n if 'nyquist_velocity' in inst_params:\n scan.addAttribute(\n 'how/NI',\n float(inst_params['nyquist_velocity']['data'][startray]))\n\n # Site-specific navigation with PROJ.4. 
to make the object \"transformable\"\n scan.projection = _projection.new(\n 'longlat',\n 'Site-specific longlat projection',\n '+proj=latlong +ellps=WGS84 +datum=WGS84')\n return", "def scan_interval(self, scan_interval):\n\n self._scan_interval = scan_interval", "def valid_scan_type(scan_type):\n if scan_type in scan_types:\n return True\n else:\n return False", "def scan_page(self, scan_page):\n\n self._scan_page = scan_page", "def scan():\n global droneList\n if(isSim):\n connect()\n else:\n app.logger.info(\"scanning crazyflies\")\n updateDrones(droneList)\n return updateStats()", "def _set_scan_start(self, value):\n ao_ch, _ = self._verify_scan_channels()\n if ao_ch is None: # if _verify_scan_channels() returns nothing that means channel is invalid or not found\n return\n value = self.analog_out(ao_ch, value, verify_only=True)\n self.properties['scan']['start'] = value\n self._set_scan_step()", "def start_scanner(self):\n if not self._scanner:\n self._scanner = threading.Thread(target=self._scan_buttons)\n self._scanner.start()\n return True\n else:\n return False", "def __start_scanner_with_config(\n self, url: str, scanner_config: collections.OrderedDict\n ) -> int:\n scanner_id = -1\n user_id = None\n context_id = None\n target = None\n\n if self._is_not_empty(\"url\", scanner_config):\n target = str(scanner_config[\"url\"])\n else:\n logging.warning(\n \"The active scanner configuration section has no specific 'url' target defined, trying to use scanType target instead with url: '%s'\",\n url,\n )\n target = url\n\n # \"Context\" is an optional config for Scanner\n if self._is_not_empty(\"context\", scanner_config):\n\n scanner_context_config = self.get_config.get_active_context_config\n context_id = int(scanner_context_config[\"id\"])\n\n # \"User\" is an optional config for Scanner in addition to the context\n if self._is_not_empty(\"user\", scanner_config):\n user_name = str(scanner_config[\"user\"])\n # search for the configured user by its user name in the active context\n user_id = ZapConfigurationContextUsers.get_context_user_by_name(\n scanner_context_config, user_name\n )[\"id\"]\n\n # Configure HTTP ActiveScan\n logging.debug(\"Trying to configure ActiveScan with %s\", scanner_config)\n self.__configure_scanner(self.get_zap.ascan, scanner_config)\n\n policy = scanner_config[\"defaultPolicy\"]\n if self._is_not_empty_string(\"policy\", scanner_config):\n policy = scanner_config[\"policy\"]\n\n # ActiveScan with user\n if (\n (context_id is not None)\n and int(context_id) >= 0\n and (user_id is not None)\n and int(user_id) >= 0\n ):\n logging.info(\n \"Starting ActiveScan(url=%s, contextid=%s, userid=%s, scanpolicyname=%s)\",\n target,\n context_id,\n user_id,\n policy,\n )\n scanner_id = self.get_zap.ascan.scan_as_user(\n url=target, contextid=context_id, userid=user_id, scanpolicyname=policy\n )\n else:\n logging.info(\n \"Starting ActiveScan(url=%s, contextid=%s, scanpolicyname=%s)\",\n target,\n context_id,\n policy,\n )\n scanner_id = self.get_zap.ascan.scan(\n url=target, contextid=context_id, scanpolicyname=policy\n )\n\n return scanner_id", "def start_input(self, device_name, config_name=None):\n try:\n #device_id = self._available_devices[device_name]\n # Check if we supplied a new map, if not use the preferred one\n for d in readers.devices():\n if d.name == device_name:\n self._input_device = d\n if not config_name:\n config_name = self.get_saved_device_mapping(device_name)\n self.set_input_map(device_name, config_name)\n self._input_device.open()\n 
self._input_device.input_map = self._input_map\n self._input_device.input_map_name = config_name\n self._selected_mux.add_device(self._input_device, None)\n # Update the UI with the limiting for this device\n self.limiting_updated.call(self._input_device.limit_rp,\n self._input_device.limit_yaw,\n self._input_device.limit_thrust)\n self._read_timer.start()\n return self._input_device.supports_mapping\n except Exception:\n self.device_error.call(\n \"Error while opening/initializing input device\\n\\n%s\" %\n (traceback.format_exc()))\n\n if not self._input_device:\n self.device_error.call(\n \"Could not find device {}\".format(device_name))\n return False", "def tray_scan_started(self):\n if not DBUS_AVAIL:\n return\n self._is_scanning = True\n self.init_network_menu()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for scanning_interval, mapped from YANG variable /access_points/access_point/radios/radio/config/scanning_interval (uint8)
def _set_scanning_interval(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="scanning-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """scanning_interval must be of a type compatible with uint8""",
      'defined-type': "uint8",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="scanning-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)""",
    })

  self.__scanning_interval = t
  if hasattr(self, '_set'):
    self._set()
[ "def scan_interval(self, scan_interval):\n\n self._scan_interval = scan_interval", "def option_scan_interval(self):\n scan_interval = self.config_entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)\n return timedelta(seconds=scan_interval)", "def _set_polling_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(20), is_leaf=True, yang_name=\"polling-interval\", rest_name=\"polling-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface counter polling interval', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"polling_interval must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(20), is_leaf=True, yang_name=\"polling-interval\", rest_name=\"polling-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface counter polling interval', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__polling_interval = t\n if hasattr(self, '_set'):\n self._set()", "def _set_rp_adv_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'10..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(60), is_leaf=True, yang_name=\"rp-adv-interval\", rest_name=\"rp-adv-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set RP candidate advertisement message interval'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"rp_adv_interval must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'10..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(60), is_leaf=True, yang_name=\"rp-adv-interval\", rest_name=\"rp-adv-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set RP candidate 
advertisement message interval'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__rp_adv_interval = t\n if hasattr(self, '_set'):\n self._set()", "def scan_interval_ids(self, scan_interval_ids):\n\n self._scan_interval_ids = scan_interval_ids", "def monitoring_interval(self) -> typing.Optional[aws_cdk.core.Duration]:\n return self._values.get('monitoring_interval')", "def advertisement_interval(self, advertisement_interval: str):\n\n self._advertisement_interval = advertisement_interval", "def scan_interval_ids(self):\n return self._scan_interval_ids", "def set_interval(self, interval):\n self.interval = interval\n self.xml.load_interval(interval)", "def set_interval(self,interval: int):\n self.attr_interval = interval", "def c_in_scan(self, low_counter_num, high_counter_num, samples_per_counter,\r\n rate, options, flags, data):\r\n # type: (int, int, int, float, ScanOption, CInScanFlag, Array[int]) -> float\r\n rate = c_double(rate)\r\n err = lib.ulCInScan(self.__handle, low_counter_num, high_counter_num,\r\n samples_per_counter, byref(rate),\r\n options, flags, data)\r\n if err != 0:\r\n raise ULException(err)\r\n return rate.value", "def advertisement_interval(self) -> str:\n return self._advertisement_interval", "def _scan(self):\n # Set the scan parameters\n if self._data_range is not None:\n image_range = tuple(self._data_range)\n else:\n image_range = (self._starting_frame, self._starting_frame)\n oscillation = (self._starting_angle, self._oscillation_range)\n\n # Create the scan object\n return self._scan_factory.make_scan(\n image_range,\n 0.0,\n oscillation,\n [0] * (image_range[-1] - image_range[0] + 1),\n deg=True,\n )", "def get_speed_up_interval(speed_up_type,\n cfg_base_dir='configs/_base_/post_processing/'):\n\n if speed_up_type == 'deciwatch':\n speed_up_type = 'deciwatch_interval5_q3'\n assert speed_up_type in [\n 'deciwatch_interval5_q1',\n 'deciwatch_interval5_q2',\n 'deciwatch_interval5_q3',\n 'deciwatch_interval5_q4',\n 'deciwatch_interval5_q5',\n 'deciwatch_interval10_q1',\n 'deciwatch_interval10_q2',\n 'deciwatch_interval10_q3',\n 'deciwatch_interval10_q4',\n 'deciwatch_interval10_q5',\n ]\n cfg = os.path.join(cfg_base_dir, speed_up_type + '.py')\n if isinstance(cfg, str):\n cfg = mmcv.Config.fromfile(cfg)\n elif not isinstance(cfg, mmcv.Config):\n raise TypeError('config must be a filename or Config object, '\n f'but got {type(cfg)}')\n\n return cfg['speed_up_cfg']['interval']", "def set_watchdog_interval(self, interval, timeout=RESPONSE_DELAY):\n\n if(interval < 4) or (interval > 180):\n print(\"Wrong argument. 
min:4 max:180\")\n return 2\n\n command.create_set_command(\n command.PROTOCOL_COMMAND_SET_WATCHDOG_INTERVAL, interval, 1\n )\n command.send_command()\n delay_ms(timeout)\n raw = command.receive_command(COMMAND_SIZE_FOR_UINT8)\n\n status = raw[PROTOCOL_HEADER_SIZE]\n return status", "def probe_interval_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"probe_interval_in_seconds\")", "def _set_hello_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int,restriction_dict={'range': ['-32768..32767']}, int_size=16), restriction_dict={'range': [u'10..3600']}), default=RestrictedClassType(base_type=int,restriction_dict={'range': ['-32768..32767']}, int_size=16)(30), is_leaf=True, yang_name=\"hello-interval\", rest_name=\"hello-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set hello message interval'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='int16', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"hello_interval must be of a type compatible with int16\"\"\",\n 'defined-type': \"int16\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int,restriction_dict={'range': ['-32768..32767']}, int_size=16), restriction_dict={'range': [u'10..3600']}), default=RestrictedClassType(base_type=int,restriction_dict={'range': ['-32768..32767']}, int_size=16)(30), is_leaf=True, yang_name=\"hello-interval\", rest_name=\"hello-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set hello message interval'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='int16', is_config=True)\"\"\",\n })\n\n self.__hello_interval = t\n if hasattr(self, '_set'):\n self._set()", "def _determine_beacon_config(self, current_beacon_config, key):\n\n interval = False\n if isinstance(current_beacon_config, dict):\n interval = current_beacon_config.get(key, False)\n\n return interval", "def get_polling_interval():\n if AppContext.__polling_interval is None:\n cr = ConfigurationReader()\n AppContext.__polling_interval = cr.get_int_key_in_section(Constant.CONFIG_SECTION_APP,\n Constant.POLLING_INTERVAL)\n\n return AppContext.__polling_interval" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for scanning_dwell_time, mapped from YANG variable /access_points/access_point/radios/radio/config/scanning_dwell_time (uint16)
def _set_scanning_dwell_time(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="scanning-dwell-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint16', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """scanning_dwell_time must be of a type compatible with uint16""",
      'defined-type': "uint16",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="scanning-dwell-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint16', is_config=True)""",
    })

  self.__scanning_dwell_time = t
  if hasattr(self, '_set'):
    self._set()
[ "def _set_blacklist_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"blacklist-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint16', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"blacklist_time must be of a type compatible with uint16\"\"\",\n 'defined-type': \"uint16\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"blacklist-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint16', is_config=False)\"\"\",\n })\n\n self.__blacklist_time = t\n if hasattr(self, '_set'):\n self._set()", "def _set_blacklist_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"blacklist-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint16', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"blacklist_time must be of a type compatible with uint16\"\"\",\n 'defined-type': \"uint16\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"blacklist-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint16', is_config=True)\"\"\",\n })\n\n self.__blacklist_time = t\n if hasattr(self, '_set'):\n self._set()", "def time_selection(self):\r\n\t\tbus.write_byte_data(TMD2671_DEFAULT_ADDRESS, TMD2671_REG_PTIME | TMD2671_COMMAND_BIT, TMD2671_REG_PTIME_2_72)\r\n\t\t\r\n\t\t\"\"\"Select the WTIME register configuration from the given provided values\"\"\"\r\n\t\tbus.write_byte_data(TMD2671_DEFAULT_ADDRESS, TMD2671_REG_WTIME | TMD2671_COMMAND_BIT, TMD2671_REG_WTIME_2_72)", "def scan_time(self):\n ret = self._get_attr(\"scanTime\")\n return ret", "async def set_dhw_ovrd(call: ServiceCall) -> None:\n gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]\n await gw_dev.gateway.set_hot_water_ovrd(call.data[ATTR_DHW_OVRD])", "def __encode_time(self, time_lsw, time_msw):\n\n msw_word_len = self._config.get(time_msw).word_len\n msw_data = self.raw.get(time_msw)\n lsw_data = self.raw.get(time_lsw)\n double_word = ((msw_data << msw_word_len) | lsw_data)\n return double_word", "def option_scan_interval(self):\n scan_interval = self.config_entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)\n return timedelta(seconds=scan_interval)", "def _set_connection_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), 
is_leaf=True, yang_name=\"connection-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint16', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"connection_time must be of a type compatible with uint16\"\"\",\n 'defined-type': \"uint16\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"connection-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint16', is_config=False)\"\"\",\n })\n\n self.__connection_time = t\n if hasattr(self, '_set'):\n self._set()", "def time_selection(self):\n\t\tbus.write_byte_data(TSL27721_DEFAULT_ADDRESS, TSL27721_REG_ATIME | TSL27721_COMMAND_BIT, TSL27721_REG_ATIME_2_73)\n\t\t\n\t\t\"\"\"Select the PTIME register configuration from the given provided values\"\"\"\n\t\tbus.write_byte_data(TSL27721_DEFAULT_ADDRESS, TSL27721_REG_PTIME | TSL27721_COMMAND_BIT, TSL27721_REG_PTIME_2_73)\n\t\t\n\t\t\"\"\"Select the WTIME register configuration from the given provided values\"\"\"\n\t\tbus.write_byte_data(TSL27721_DEFAULT_ADDRESS, TSL27721_REG_WTIME | TSL27721_COMMAND_BIT, TSL27721_REG_WTIME_2_73)", "def get_updated_time_day(self, time, day, ride_time ):\n if (time + int(ride_time)) < 24: # Same day\n time = time + math.ceil(ride_time) #Since next ride is available at hourly interval, we take math.ceil to calculate the next request time\n else: # next day\n time = (time + math.ceil(ride_time)) % 24 \n num_of_days = (time + math.ceil(ride_time)) // 24\n day = (day + num_of_days ) % 7\n return time, day", "def electrons_released_from_electrons_and_dwell_time(self, electrons, dwell_time=1):\r\n return electrons * (1 - self.fill_fraction_from_time_elapsed(dwell_time))", "def _estimate_scan_duration(dx):\n sdur = []\n for channel in np.unique(dx.channel):\n d = dx[(dx.scan==1) & (dx.channel==channel)]\n sdur.append((d.time.tolist()[-1].to_pydatetime() - d.time.tolist()[-2].to_pydatetime()).total_seconds())\n return int( (np.max(sdur)+10)/60. 
)", "def _set_scanning_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"scanning-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"scanning_interval must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"scanning-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__scanning_interval = t\n if hasattr(self, '_set'):\n self._set()", "def SetFallTime(self, time):\n self._PWriteInt('red', 'device/tfall', time)", "def _set_restart_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(\n v,\n base=RestrictedClassType(\n base_type=RestrictedClassType(\n base_type=int,\n restriction_dict={\"range\": [\"0..65535\"]},\n int_size=16,\n ),\n restriction_dict={\"range\": [\"0..4096\"]},\n ),\n is_leaf=True,\n yang_name=\"restart-time\",\n parent=self,\n path_helper=self._path_helper,\n extmethods=self._extmethods,\n register_paths=True,\n namespace=\"http://openconfig.net/yang/network-instance\",\n defining_module=\"openconfig-network-instance\",\n yang_type=\"uint16\",\n is_config=True,\n )\n except (TypeError, ValueError):\n raise ValueError(\n {\n \"error-string\": \"\"\"restart_time must be of a type compatible with uint16\"\"\",\n \"defined-type\": \"uint16\",\n \"generated-type\": \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['0..4096']}), is_leaf=True, yang_name=\"restart-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=True)\"\"\",\n }\n )\n\n self.__restart_time = t\n if hasattr(self, \"_set\"):\n self._set()", "def measurement_time_max(self):\n meas_time_ms = 1.25\n if self._overscan_temperature != OVERSCAN_DISABLE:\n meas_time_ms += (2.3 * _BME280_OVERSCANS.get(self._overscan_temperature))\n if self.overscan_pressure != OVERSCAN_DISABLE:\n meas_time_ms += (2.3 * _BME280_OVERSCANS.get(self.overscan_pressure) + 0.575)\n if self.overscan_humidity != OVERSCAN_DISABLE:\n meas_time_ms += (2.3 * _BME280_OVERSCANS.get(self.overscan_humidity) + 0.575)\n return meas_time_ms", "def SetRiseTime(self, time):\n self._PWriteInt('red', 'device/trise', time)", "async def async_set_day_time(self):\n curr_time = datetime.now()\n day = 0 if curr_time.weekday() == 6 else curr_time.weekday() + 1\n set_time_command = ExtendedSetCommand(\n self._address, cmd2=0x02, data1=0x02, data2=day\n )\n return await set_time_command.async_send(\n data3=curr_time.hour, data4=curr_time.minute, data5=curr_time.second\n )", "def 
GetPowerDownTime(self):\n\t\tpowerdown_time = RTCC_Struct(0,0,0,0,0,0,0)\n\t\tpowerdown_time.min = self.bcd2dec( self.readRegister(PWRDNMIN)) \n\t\tpowerdown_time.hour = self.bcd2dec( self.readRegister(PWRDNHOUR))\n\t\tpowerdown_time.date = self.bcd2dec( self.readRegister(PWRDNDATE))\n\t\tpowerdown_time.month = self.bcd2dec( self.readRegister(PWRDNMTH))\n\t\treturn powerdown_time", "def night_mode(self, value):\n self._night_mode = value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for scanning_defer_clients, mapped from YANG variable /access_points/access_point/radios/radio/config/scanning_defer_clients (uint8)
def _set_scanning_defer_clients(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="scanning-defer-clients", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """scanning_defer_clients must be of a type compatible with uint8""",
            'defined-type': "uint8",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="scanning-defer-clients", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)""",
        })

    self.__scanning_defer_clients = t
    if hasattr(self, '_set'):
        self._set()
[ "def _set_clients(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_clients_openconfig_wifi_mac__ssids_ssid_clients, is_container='container', yang_name=\"clients\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"clients must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_clients_openconfig_wifi_mac__ssids_ssid_clients, is_container='container', yang_name=\"clients\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__clients = t\n if hasattr(self, '_set'):\n self._set()", "def assign_clients(self):\n for drone in self.drones:\n if self.solution[drone]:\n drone.specify_client(self.solution[drone].pop(0))", "def _set_client(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"mac\",yc_client_openconfig_wifi_mac__ssids_ssid_clients_client, yang_name=\"client\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mac', extensions=None), is_container='list', yang_name=\"client\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"client must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"mac\",yc_client_openconfig_wifi_mac__ssids_ssid_clients_client, yang_name=\"client\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mac', extensions=None), is_container='list', yang_name=\"client\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__client = t\n if hasattr(self, '_set'):\n self._set()", "def _set_num_associated_clients(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-associated-clients\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_associated_clients must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-associated-clients\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, 
register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__num_associated_clients = t\n if hasattr(self, '_set'):\n self._set()", "def _set_client_rf(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_client_rf_openconfig_wifi_mac__ssids_ssid_clients_client_client_rf, is_container='container', yang_name=\"client-rf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"client_rf must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_client_rf_openconfig_wifi_mac__ssids_ssid_clients_client_client_rf, is_container='container', yang_name=\"client-rf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=False)\"\"\",\n })\n\n self.__client_rf = t\n if hasattr(self, '_set'):\n self._set()", "def _setClients(self, clients):\n for client in clients:\n for stepRec in self.steps:\n if client.name == stepRec['clientName']:\n # Initialize client online/offline deferreds\n onlineDeferred = defer.Deferred().addCallback(self.handleClientReconnect)\n offlineDeferred = defer.Deferred().addCallback(self.handleClientDisconnect)\n # Add these deferreds to corresponding client's dictionaries - to be called back when appropriate events occur\n client.addOnlineDeferred(onlineDeferred, reset = True)\n client.addOfflineDeferred(offlineDeferred, reset = True)\n # Set current step record's 'client' data\n stepRec['client'] = client\n break\n \n # All clients have been set: check 'online' status of all clients, and set workflow's status correspondingly\n self._checkOnline()", "def find_clients(self, clients: List[wrappers.Window], **matchers: Any) -> List[wrappers.Window]:\n return [r for r in clients if r.matches(**matchers)]", "def inc_ini(self):\n if len(self.clients) < self.MAX_CONN:\n if self.ini < self.MAX_CONN:\n self.ini += 1\n else:\n free = [ i for i in range(1, len(self.clients)+1) if not i in self.clients.keys() ]\n self.ini = free[0]\n return True\n return False", "def number_of_clients(self):\n ret = self._get_attr(\"numberOfClients\")\n return ret", "def get_connected_clients(self) -> set:\n return self.connected_clients", "def SetDHCPClient(self, client):\n print \"Setting dhcp client to %i\" % (int(client))\n self.dhcp_client = int(client)\n self.wifi.dhcp_client = int(client)\n self.wired.dhcp_client = int(client)\n self.config.set(\"Settings\", \"dhcp_client\", client, write=True)", "def clientBusy(self, client):\n\n if client in self._freeClients:\n self._freeClients.remove(client)\n\n self._busyClients.add(client)\n\n self.log.debug(\"Busied client: {client!r}\", client=client)\n self._logClientStats()", "def preserve_client_ip_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"preserve_client_ip_enabled\")", "def revguard_client_brand(self, revguard_client_brand):\n\n self._revguard_client_brand = revguard_client_brand", "def GetProjectAutocompleteExclusion(self, cnxn, project_id):\n 
ac_exclusion_rows = self.acexclusion_tbl.Select(\n cnxn, cols=['user_id'], project_id=project_id, ac_exclude=True)\n ac_exclusion_ids = [row[0] for row in ac_exclusion_rows]\n no_expand_rows = self.acexclusion_tbl.Select(\n cnxn, cols=['user_id'], project_id=project_id, no_expand=True)\n no_expand_ids = [row[0] for row in no_expand_rows]\n return ac_exclusion_ids, no_expand_ids", "def get_client_list(self, globs):\n return self._expand_globs(globs, self.core.metadata.clients)", "def first_client(clients, flag_mask, me):\n for client in clients:\n if client.intents & flag_mask == flag_mask:\n return client\n \n return me", "def _set_client_connection(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_client_connection_openconfig_wifi_mac__ssids_ssid_clients_client_client_connection, is_container='container', yang_name=\"client-connection\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"client_connection must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_client_connection_openconfig_wifi_mac__ssids_ssid_clients_client_client_connection, is_container='container', yang_name=\"client-connection\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=False)\"\"\",\n })\n\n self.__client_connection = t\n if hasattr(self, '_set'):\n self._set()", "def ParseAllowedClientsDicts(self, nfs_share_resource, allowed_clients_dicts):\n allowed_clients = []\n for allowed_client in allowed_clients_dicts:\n mount_permissions = self.nfs_mount_permissions_str_to_message[\n allowed_client['mount-permissions']]\n network_full_name = util.NFSNetworkFullName(\n nfs_share_resource=nfs_share_resource,\n allowed_client_dict=allowed_client)\n allowed_clients.append(\n self.messages.AllowedClient(\n network=network_full_name,\n allowedClientsCidr=allowed_client['cidr'],\n mountPermissions=mount_permissions,\n allowDev=allowed_client['allow-dev'],\n allowSuid=allowed_client['allow-suid'],\n noRootSquash=not allowed_client['enable-root-squash'],\n )\n )\n return allowed_clients", "def get_clientrssi():\n input = os.popen(\n '/System/Library/PrivateFrameworks/Apple80211.framework/Versions/A/Resources/airport -I')\n return int(''.join([x.split()[1] for x in input if 'agrCtlRSSI' in x]))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for scanning_defer_traffic, mapped from YANG variable /access_points/access_point/radios/radio/config/scanning_defer_traffic (boolean)
def _set_scanning_defer_traffic(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="scanning-defer-traffic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """scanning_defer_traffic must be of a type compatible with boolean""",
            'defined-type': "boolean",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="scanning-defer-traffic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=True)""",
        })

    self.__scanning_defer_traffic = t
    if hasattr(self, '_set'):
        self._set()
[ "def ingress_traffic_allowed(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ingress_traffic_allowed\")", "def bandwidth_corrected(self, value: Optional[Boolean]):\n\n if value is not None:\n attest(\n isinstance(value, bool),\n f'\"bandwidth_corrected\" property: \"{value}\" type is not \"bool\"!',\n )\n\n self._bandwidth_corrected = value", "def get_traffic_meter_enabled(self):\n response = self._get(\n c.SERVICE_DEVICE_CONFIG, c.GET_TRAFFIC_METER_ENABLED\n )\n return h.zero_or_one_dict_to_boolean(response)", "def _set_bpdu_guard(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"bpdu-guard\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"bpdu_guard must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"bpdu-guard\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__bpdu_guard = t\n if hasattr(self, '_set'):\n self._set()", "def set_ra(self, b):\n _ldns.ldns_pkt_set_ra(self, b)\n #parameters: ldns_pkt *,bool,\n #retvals: ", "def dns_active(self, dns_active: bool):\n self._indicator_data['flag1'] = self.util.to_bool(dns_active)", "def set_ad(self, b):\n _ldns.ldns_pkt_set_ad(self, b)\n #parameters: ldns_pkt *,bool,\n #retvals: ", "def set_ads_drate(self, ads_num, data_rate):\n\t\treturn self.config_ads(ads_num, 1, data_rate)", "def incoming_traffic_blocked(self):\n if \"incomingTrafficBlocked\" in self._prop_dict:\n return self._prop_dict[\"incomingTrafficBlocked\"]\n else:\n return None", "def check_lazy_plan(self, distance, grasp_switched, distance_travelled):\n do_lazy_plan = distance > self.lazy_threshold and \\\n distance_travelled < self.distance_travelled_threshold and \\\n self.robot.arm_discretized_plan is not None and \\\n self.robot.arm_wp_target_index != len(self.robot.arm_discretized_plan) and \\\n not grasp_switched\n return do_lazy_plan", "def SetPreferWiredNetwork(self, value):\n self.config.set(\"Settings\", \"prefer_wired\", bool(value), write=True)\n self.prefer_wired = bool(value)", "async def set_is_watering(self, is_watering: bool):\n self._is_watering = is_watering", "def is_direct_transfer(filespair):\n # type: (dict) -> bool\n return 'storage_account_settings' not in filespair['destination']", "def parse_dependent_requests_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"parse_dependent_requests_enabled\")", "def _set_dhcp_required(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"dhcp-required\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"dhcp_required must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 
'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"dhcp-required\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__dhcp_required = t\n if hasattr(self, '_set'):\n self._set()", "def _set_dhcp_required(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"dhcp-required\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"dhcp_required must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"dhcp-required\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__dhcp_required = t\n if hasattr(self, '_set'):\n self._set()", "def allow_drag(self, flag: bool):\n self._allow_drag = flag\n if self._allow_drag and not self._drag_setup:\n self.bind_all('<Motion>', self._drag_handler)\n self.bind_all('<ButtonRelease-1>', self._drag_handler)\n self._drag_setup = True", "def is_scan_enabled(self):\n return self._is_scan_enabled", "def SendStartScanSignal(self):\n self._scanning = True", "def user_traffic_over_limit(username):\n if not CHECK_SHARE_LINK_TRAFFIC:\n return False\n\n from seahub_extra.plan.models import UserPlan\n from seahub_extra.plan.settings import PLAN\n up = UserPlan.objects.get_valid_plan_by_user(username)\n plan = 'Free' if up is None else up.plan_type\n traffic_limit = int(PLAN[plan]['share_link_traffic']) * 1024 * 1024 * 1024\n\n try:\n stat = get_user_traffic_stat(username)\n except Exception as e:\n logger = logging.getLogger(__name__)\n logger.error('Failed to get user traffic stat: %s' % username,\n exc_info=True)\n return True\n\n if stat is None: # No traffic record yet\n return False\n\n month_traffic = stat['file_view'] + stat['file_download'] + stat['dir_download']\n return True if month_traffic >= traffic_limit else False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Getter method for noise_floor, mapped from YANG variable /access_points/access_point/radios/radio/state/counters/noise_floor (int8)
def _get_noise_floor(self):
    return self.__noise_floor
[ "def noise_floor(self):\n return 2", "def find_least_noise(self, low_slice, high_slice, directory='D:/Research/Python Data/Spectral CT/'):\n subfolder = '/Slices/'\n path = self.save_dir + subfolder\n vials = np.load(self.save_dir + '/Vial_Masks.npy')\n noise_vals = np.zeros(high_slice-low_slice+1)\n for i in np.arange(low_slice, high_slice+1):\n img = np.load(path + 'Bin6_Slice' + str(i) + '.npy')\n noise_vals[i-low_slice] = np.nanstd(vials[0]*img)\n\n idx = np.argmin(noise_vals)\n return idx+low_slice, noise_vals[idx]", "def get_image_readnoise(header, survey):\n check_survey_validity(survey)\n if survey == \"PS1\":\n readnoise = header[\"HIERARCH CELL.READNOISE\"]\n elif survey == \"DES\":\n # see https://arxiv.org/pdf/0810.3600.pdf\n readnoise = 7.0 # electrons per pixel\n elif survey == \"SDSS\":\n readnoise = 0.0\n elif survey == \"2MASS\":\n # https://iopscience.iop.org/article/10.1086/498708/pdf\n # 6 combined images\n readnoise = 4.5 * np.sqrt(6) # not used\n elif survey == \"LegacySurvey\":\n readnoise = 1.0 # not used\n elif survey == \"Spitzer\":\n if header[\"INSTRUME\"] == \"IRAC\":\n # Table 2.3 of IRAC Instrument Handbook\n # very rough average\n readnoise_dict = {1: 16.0, 2: 12.0, 3: 10.0, 4: 8.0}\n channel = header[\"CHNLNUM\"]\n readnoise = readnoise_dict[channel]\n elif header[\"INSTRUME\"] == \"MIPS\":\n # Table 2.4 of MIPS Instrument Handbook\n readnoise = 40.0\n elif survey == \"VISTA\":\n # very rough average for all filters in\n # http://casu.ast.cam.ac.uk/surveys-projects/vista/technical/vista-gain\n readnoise = 24.0\n elif survey == \"HST\":\n # tipically 0.0\n readnoise = header[\"PCTERNOI\"]\n elif survey=='SkyMapper':\n # https://rsaa.anu.edu.au/observatories/instruments/skymapper-instrument\n readnoise = 5 # electrons\n elif survey=='SPLUS':\n readnoise = header[\"HIERARCH OAJ QC NCNOISE\"]\n elif survey=='UKIDSS':\n readnoise = header[\"READNOIS\"]\n else:\n readnoise = 0.0\n\n return readnoise", "def get_noise_floor(noise_dir, thresh, species, spec_params, save_dir, verbose=False, save = False):\n\n pups = [i.split('_noise')[0] for i in os.listdir(noise_dir) if i.startswith(species)]\n all_thresholds = []\n all_pups = []\n noise_wavs = []\n\n for pup in pups:\n #get the noise clip\n noise_wav = os.path.join(noise_dir,pup+'_noiseclip.wav')\n\n #make a spectrogram (modified from Goffinet et al. 
Elife 2022 https://autoencoded-vocal-analysis.readthedocs.io/)\n\n EPSILON = 1e-9\n fs, noise_audio = wavfile.read(noise_wav)\n f,t,noise_spec = stft(noise_audio, \n nperseg=spec_params['nperseg'], \n noverlap=spec_params['noverlap'],\n fs=spec_params['fs'])\n\n i1 = np.searchsorted(f, spec_params['min_freq'])\n i2 = np.searchsorted(f, spec_params['max_freq'])\n f, noise_spec = f[i1:i2], noise_spec[i1:i2]\n noise_spec = np.log(np.abs(noise_spec)+EPSILON)\n\n #calculate the threshold\n noise_spec_vals = noise_spec.flatten()\n median_value = np.median(noise_spec_vals)\n std_value = np.nanstd(noise_spec_vals)\n floor = median_value + thresh*std_value\n\n #update\n all_pups.append(pup+'.wav')\n all_thresholds.append(floor)\n noise_wavs.append(noise_wav.split('/')[-1])\n\n if verbose:\n #plot\n fig = plt.figure(figsize=[20,5])\n ax1= fig.add_subplot(1, 2, 1)\n ax1.imshow(noise_spec, origin='lower')\n ax2= fig.add_subplot(1, 2, 2)\n sns.histplot(noise_spec_vals,ax=ax2, binwidth=.01, color='black')\n plt.axvline(x=floor,color='red')\n plt.show()\n\n #save\n if not os.path.exists(os.path.join(save_dir,'00_plots')):\n os.mkdir(os.path.join(save_dir,'00_plots'))\n print('made a directory:', os.path.join(save_dir,'00_plots'))\n hist_name = pup+'_noise_hist.jpeg'\n plt.savefig(os.path.join(save_dir,'00_plots',hist_name), dpi=300)\n\n floor_df = pd.DataFrame()\n floor_df['source_file'] = all_pups\n floor_df['noise_source'] = noise_wavs\n floor_df['noise_floor'] = all_thresholds\n\n if save:\n floor_df.to_csv(os.path.join(save_dir,'all_noise_floors.csv'), index=False)\n print('saved a csv of noise floors to...', save_dir+'all_noise_floors.csv')\n return floor_df", "def noise_floor_test(ensemble):\n\n return ADCP_FLAGS['no_test']", "def getReadNoise(self):\n \n rn = self._rdnoise\n return rn", "def readnoise(self):\n self.get_metadata()\n # --> This gets better with FITS header units\n readnoise = self.meta.get('rdnoise')\n if self.unit == u.adu:\n gain = self.meta.get('gain')\n readnoise /= gain\n return readnoise", "def random_floor_tile(self):\n\n if not Tile.floor in self.tiles.values():\n raise ValueError(\"No floor tile found\")\n\n Point = namedtuple(\"Point\", ['x', 'y'])\n\n # Get list all unoccupied floor tiles positions (floor tiles\n # with no entities on them)\n floor_tiles = []\n for (x, y), tile in self.tiles.items():\n if tile == Tile.floor and self.get_entity_at(x, y) == None:\n floor_tiles.append(Point(x, y))\n\n if len(floor_tiles) == 0:\n raise ValueError(\"No unoccupied floor tiles\")\n\n # Take random unoccupied floor tile\n return random.choice(floor_tiles)", "def ifloor(x):\n\n return np.floor(x).astype(int)", "def get_white_noise_image(img_data, noise_ratio=1):\n noise_img = np.random.uniform(-10., 10., img_data.shape).astype(np.float32)\n img_data = noise_ratio * noise_img + (1. 
- noise_ratio) * img_data\n return img_data", "def getnoise(size):\n noisesize=10\n noise = np.random.uniform(-1, 1, size=(size, noisesize))\n return noise", "def make_sonar_noise(self, reading):\n if random.randint(0, 10) > 7:\n new_reading = random.randint(0, 39)\n else:\n new_reading = reading\n\n return new_reading", "def noised_unif(img,min_,max_):\n noise = np.random.uniform(min_,max_,img.shape)\n img_noise = np.clip(img.astype(float)+noise,0,255).astype('uint8')\n return img_noise,noise", "def generate_noise(samples):\n return np.random.normal(0, 1, (samples, LATENT_DIM))", "def _noisy_W(self, noise_lvl=0.):\n W_nz = self.W + self.srng.normal(size=self.W.shape, avg=0., std=noise_lvl)\n return W_nz", "def noised_snp(img,pad):\n noise = np.random.randint(0,255,img.shape)\n img_noise = img.copy()\n img_noise[noise < pad] = 0\n img_noise[noise > 255-pad] = 255\n noise[noise < pad] = 0\n noise[noise > 255-pad] = 255\n noise[(noise != 0) & (noise != 255)] = 127\n return img_noise,noise", "def GenerateWhiteNoise(cls, template):\n generator = pydub.generators.WhiteNoise(\n sample_rate=template.frame_rate,\n bit_depth=template.sample_width * 8)\n return generator.to_audio_segment(duration=len(template), volume=0.0)", "def noise_from_yaml(survey, band, pdfs_dir=pdfs_dir,\n yaml_dir=characteristics_dir):\n # Generates nobs simulated noise profiles.\n try:\n pdf = np.loadtxt(\"%s/2d%s_%s.txt\" % (pdfs_dir, band, survey))\n rand_idx = np.random.randint(len(pdf))\n seeing = pdf[rand_idx, 0]\n sky_brightness = pdf[rand_idx, 1]\n yaml_file = '%s/%s_%s.yaml' % (yaml_dir, band, survey)\n with open(yaml_file, 'r') as config_file:\n survey_noise = yaml.safe_load(config_file)\n survey_noise['seeing'] = seeing\n survey_noise['sky_brightness'] = sky_brightness\n except FileNotFoundError:\n print('%s band in survey %s is not supported.' % (band, survey))\n print('Please make sure the appropriate config file exists.')\n raise\n except OSError:\n print('%s band in survey %s is not supported.' % (band, survey))\n print('Please make sure the appropriate 2d pdf exists.')\n raise\n\n return survey_noise", "def generate_starting_floor(self):\n\n if self.id == 0:\n self.source_flr = 4\n else:\n self.source_flr = 4", "def get_noise_params(level):\n\n chr_i_min = 5\n chr_i_max = 100\n chr_size_min = 7\n chr_size_max = 21\n ill_i_min = 5\n ill_i_max = 100\n ill_size_min = 1\n ill_size_max = 2\n\n level = (level - 1)/9.\n\n return dict(\n chr_i = int((chr_i_max - chr_i_min) * level + chr_i_min),\n chr_size = int((chr_size_max - chr_size_min) * level + chr_size_min),\n ill_i = int((ill_i_max - ill_i_min) * level + ill_i_min),\n ill_size = int((ill_size_max - ill_size_min) * level + ill_size_min)\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for noise_floor, mapped from YANG variable /access_points/access_point/radios/radio/state/counters/noise_floor (int8)
def _set_noise_floor(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name="noise-floor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """noise_floor must be of a type compatible with int8""",
            'defined-type': "int8",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name="noise-floor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=False)""",
        })

    self.__noise_floor = t
    if hasattr(self, '_set'):
        self._set()
[ "def noise_floor(self):\n return 2", "def find_least_noise(self, low_slice, high_slice, directory='D:/Research/Python Data/Spectral CT/'):\n subfolder = '/Slices/'\n path = self.save_dir + subfolder\n vials = np.load(self.save_dir + '/Vial_Masks.npy')\n noise_vals = np.zeros(high_slice-low_slice+1)\n for i in np.arange(low_slice, high_slice+1):\n img = np.load(path + 'Bin6_Slice' + str(i) + '.npy')\n noise_vals[i-low_slice] = np.nanstd(vials[0]*img)\n\n idx = np.argmin(noise_vals)\n return idx+low_slice, noise_vals[idx]", "def get_noise_floor(noise_dir, thresh, species, spec_params, save_dir, verbose=False, save = False):\n\n pups = [i.split('_noise')[0] for i in os.listdir(noise_dir) if i.startswith(species)]\n all_thresholds = []\n all_pups = []\n noise_wavs = []\n\n for pup in pups:\n #get the noise clip\n noise_wav = os.path.join(noise_dir,pup+'_noiseclip.wav')\n\n #make a spectrogram (modified from Goffinet et al. Elife 2022 https://autoencoded-vocal-analysis.readthedocs.io/)\n\n EPSILON = 1e-9\n fs, noise_audio = wavfile.read(noise_wav)\n f,t,noise_spec = stft(noise_audio, \n nperseg=spec_params['nperseg'], \n noverlap=spec_params['noverlap'],\n fs=spec_params['fs'])\n\n i1 = np.searchsorted(f, spec_params['min_freq'])\n i2 = np.searchsorted(f, spec_params['max_freq'])\n f, noise_spec = f[i1:i2], noise_spec[i1:i2]\n noise_spec = np.log(np.abs(noise_spec)+EPSILON)\n\n #calculate the threshold\n noise_spec_vals = noise_spec.flatten()\n median_value = np.median(noise_spec_vals)\n std_value = np.nanstd(noise_spec_vals)\n floor = median_value + thresh*std_value\n\n #update\n all_pups.append(pup+'.wav')\n all_thresholds.append(floor)\n noise_wavs.append(noise_wav.split('/')[-1])\n\n if verbose:\n #plot\n fig = plt.figure(figsize=[20,5])\n ax1= fig.add_subplot(1, 2, 1)\n ax1.imshow(noise_spec, origin='lower')\n ax2= fig.add_subplot(1, 2, 2)\n sns.histplot(noise_spec_vals,ax=ax2, binwidth=.01, color='black')\n plt.axvline(x=floor,color='red')\n plt.show()\n\n #save\n if not os.path.exists(os.path.join(save_dir,'00_plots')):\n os.mkdir(os.path.join(save_dir,'00_plots'))\n print('made a directory:', os.path.join(save_dir,'00_plots'))\n hist_name = pup+'_noise_hist.jpeg'\n plt.savefig(os.path.join(save_dir,'00_plots',hist_name), dpi=300)\n\n floor_df = pd.DataFrame()\n floor_df['source_file'] = all_pups\n floor_df['noise_source'] = noise_wavs\n floor_df['noise_floor'] = all_thresholds\n\n if save:\n floor_df.to_csv(os.path.join(save_dir,'all_noise_floors.csv'), index=False)\n print('saved a csv of noise floors to...', save_dir+'all_noise_floors.csv')\n return floor_df", "def setNoise(self,value=0):\n self.noise = value\n if self.noise >= self.threshold:\n self.refresh()", "def noise_floor_test(ensemble):\n\n return ADCP_FLAGS['no_test']", "def get_image_readnoise(header, survey):\n check_survey_validity(survey)\n if survey == \"PS1\":\n readnoise = header[\"HIERARCH CELL.READNOISE\"]\n elif survey == \"DES\":\n # see https://arxiv.org/pdf/0810.3600.pdf\n readnoise = 7.0 # electrons per pixel\n elif survey == \"SDSS\":\n readnoise = 0.0\n elif survey == \"2MASS\":\n # https://iopscience.iop.org/article/10.1086/498708/pdf\n # 6 combined images\n readnoise = 4.5 * np.sqrt(6) # not used\n elif survey == \"LegacySurvey\":\n readnoise = 1.0 # not used\n elif survey == \"Spitzer\":\n if header[\"INSTRUME\"] == \"IRAC\":\n # Table 2.3 of IRAC Instrument Handbook\n # very rough average\n readnoise_dict = {1: 16.0, 2: 12.0, 3: 10.0, 4: 8.0}\n channel = header[\"CHNLNUM\"]\n readnoise = 
readnoise_dict[channel]\n elif header[\"INSTRUME\"] == \"MIPS\":\n # Table 2.4 of MIPS Instrument Handbook\n readnoise = 40.0\n elif survey == \"VISTA\":\n # very rough average for all filters in\n # http://casu.ast.cam.ac.uk/surveys-projects/vista/technical/vista-gain\n readnoise = 24.0\n elif survey == \"HST\":\n # tipically 0.0\n readnoise = header[\"PCTERNOI\"]\n elif survey=='SkyMapper':\n # https://rsaa.anu.edu.au/observatories/instruments/skymapper-instrument\n readnoise = 5 # electrons\n elif survey=='SPLUS':\n readnoise = header[\"HIERARCH OAJ QC NCNOISE\"]\n elif survey=='UKIDSS':\n readnoise = header[\"READNOIS\"]\n else:\n readnoise = 0.0\n\n return readnoise", "def make_sonar_noise(self, reading):\n if random.randint(0, 10) > 7:\n new_reading = random.randint(0, 39)\n else:\n new_reading = reading\n\n return new_reading", "def random_floor_tile(self):\n\n if not Tile.floor in self.tiles.values():\n raise ValueError(\"No floor tile found\")\n\n Point = namedtuple(\"Point\", ['x', 'y'])\n\n # Get list all unoccupied floor tiles positions (floor tiles\n # with no entities on them)\n floor_tiles = []\n for (x, y), tile in self.tiles.items():\n if tile == Tile.floor and self.get_entity_at(x, y) == None:\n floor_tiles.append(Point(x, y))\n\n if len(floor_tiles) == 0:\n raise ValueError(\"No unoccupied floor tiles\")\n\n # Take random unoccupied floor tile\n return random.choice(floor_tiles)", "def noised_unif(img,min_,max_):\n noise = np.random.uniform(min_,max_,img.shape)\n img_noise = np.clip(img.astype(float)+noise,0,255).astype('uint8')\n return img_noise,noise", "def generate_noise(samples):\n return np.random.normal(0, 1, (samples, LATENT_DIM))", "def getnoise(size):\n noisesize=10\n noise = np.random.uniform(-1, 1, size=(size, noisesize))\n return noise", "def generate_starting_floor(self):\n\n if self.id == 0:\n self.source_flr = 4\n else:\n self.source_flr = 4", "def get_white_noise_image(img_data, noise_ratio=1):\n noise_img = np.random.uniform(-10., 10., img_data.shape).astype(np.float32)\n img_data = noise_ratio * noise_img + (1. 
- noise_ratio) * img_data\n return img_data", "def readnoise(self):\n self.get_metadata()\n # --> This gets better with FITS header units\n readnoise = self.meta.get('rdnoise')\n if self.unit == u.adu:\n gain = self.meta.get('gain')\n readnoise /= gain\n return readnoise", "def noised_snp(img,pad):\n noise = np.random.randint(0,255,img.shape)\n img_noise = img.copy()\n img_noise[noise < pad] = 0\n img_noise[noise > 255-pad] = 255\n noise[noise < pad] = 0\n noise[noise > 255-pad] = 255\n noise[(noise != 0) & (noise != 255)] = 127\n return img_noise,noise", "def _noisy_W(self, noise_lvl=0.):\n W_nz = self.W + self.srng.normal(size=self.W.shape, avg=0., std=noise_lvl)\n return W_nz", "def getReadNoise(self):\n \n rn = self._rdnoise\n return rn", "def _random_noise(self, arr):\n rnd_snr = random.randint(self.noise_range[0], self.noise_range[1])\n NOISE_FACTOR = 1 / (10 ** (rnd_snr / 10))\n\n return arr + np.random.normal(0, NOISE_FACTOR, len(arr))", "def GenerateWhiteNoise(cls, template):\n generator = pydub.generators.WhiteNoise(\n sample_rate=template.frame_rate,\n bit_depth=template.sample_width * 8)\n return generator.to_audio_segment(duration=len(template), volume=0.0)", "def generate_noise(noise_params: configparser.ConfigParser, signal: np.ndarray,\n data_points: int) -> np.ndarray:\n snr = float(noise_params[SIGNAL_TO_NOISE])\n if snr != 0.0:\n noise = np.random.normal(size=data_points)\n # work out the current SNR\n current_snr = np.mean(signal) / np.std(noise)\n # scale the noise by the snr ratios (smaller noise <=> larger snr)\n noise *= (current_snr / snr)\n else:\n noise = np.zeros(data_points)\n # return the new signal with noise\n return noise" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for enabled, mapped from YANG variable /access_points/access_point/radios/radio/state/enabled (boolean)
def _set_enabled(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """enabled must be of a type compatible with boolean""",
            'defined-type': "boolean",
            'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=False)""",
        })

    self.__enabled = t
    if hasattr(self, '_set'):
        self._set()
[ "def _is_enabled(self, state):\n enabled = True\n\n if isinstance(self._enabled, State):\n enabled = bool(state.get(\n self._enabled.name, self._enabled.default))\n\n else:\n enabled = bool(self._enabled)\n\n return enabled", "def _set_enabled(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, default=YANGBool(\"false\"), is_leaf=True, yang_name=\"enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"enabled must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"false\"), is_leaf=True, yang_name=\"enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__enabled = t\n if hasattr(self, '_set'):\n self._set()", "def _set_enabled(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"enabled must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__enabled = t\n if hasattr(self, '_set'):\n self._set()", "def _set_enabled(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"enabled must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__enabled = t\n if hasattr(self, '_set'):\n self._set()", "def SetEnabled(self, state):\n if state:\n level = COMMAND_VALUE_ON\n else:\n level = COMMAND_VALUE_OFF\n\n try:\n self.RawWrite(COMMAND_SET_ENABLED, [level])\n except KeyboardInterrupt:\n raise\n except:\n self.Print('Failed sending motor drive enabled state!')", "def _set_enabled(self, v, 
load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/probes', defining_module='openconfig-probes', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"enabled must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/probes', defining_module='openconfig-probes', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__enabled = t\n if hasattr(self, '_set'):\n self._set()", "def _set_enabled(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"enabled must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__enabled = t\n if hasattr(self, '_set'):\n self._set()", "def setEnabled(self, value):\n self._enabled = value", "def updateNetworkVlansEnabledState(self, networkId: str, enabled: bool):\n\n kwargs = locals()\n\n metadata = {\n 'tags': ['VLANs'],\n 'operation': 'updateNetworkVlansEnabledState',\n }\n resource = f'/networks/{networkId}/vlansEnabledState'\n\n body_params = ['enabled']\n payload = {k: v for (k, v) in kwargs.items() if k in body_params}\n\n return self._session.put(metadata, resource, payload)", "def GetEnabled(self):\n return self._is_enabled", "def is_enabled(self) -> bool:\n if not self._system.dax_sim_enabled:\n # Check if the system was just booted\n last_asf = self.core_cache.get(self._CACHE_LAST_ASF_KEY)\n if len(last_asf) == 0:\n # Device was just booted, trap RF is off\n return False\n\n # Return the enabled flag stored as a system dataset\n # Can raise a KeyError if the key was not set before, which means the state is ambiguous\n enabled: bool = self.get_dataset_sys(self._ENABLED_KEY) # Helps the type checker\n return enabled", "def set_manual_gain_enabled(self, enabled):\n result = librtlsdr.rtlsdr_set_tuner_gain_mode(self.dev_p, int(enabled))\n if result < 0:\n raise IOError('Error code %d when setting gain mode'\\\n % (result))\n\n return", "def is_enabled(node):\n return not node[\"disable\"].value()", "def is_enabled(self):\n return self.element_info.enabled #and self.top_level_parent().element_info.enabled", "def is_enabled(self):\n return getattr(self._thread_locals, 'enabled', True)", "def is_enabled_for(self, security_state: Target.SecurityState) -> bool:\n assert isinstance(security_state, 
Target.SecurityState)\n\n # Call to superclass to read CSW. We want to bypass our CSW cache since the enable signal can change\n # asynchronously.\n csw = AccessPort.read_reg(self, self._reg_offset + MEM_AP_CSW)\n if security_state is Target.SecurityState.NONSECURE:\n # Nonsecure transfers are always allowed when security transfers are enabled.\n return (csw & (CSW_DEVICEEN | CSW_SDEVICEEN)) != 0\n elif security_state is Target.SecurityState.SECURE:\n return (csw & CSW_SDEVICEEN) != 0\n else:\n assert False, \"unsupported security state\"", "def SetEnabled(self, enabled):\n if self._enabled != enabled:\n self._enabled = enabled\n for action in self._actions:\n action._SetGroupEnabled(enabled)", "def ultrasonic_enable(self, enable):\n self.comm('ultrasonic_enable {0}'.format('true' if enable else 'false'))", "def toggle_state(self):\n if self.__is_enabled:\n self.get_widget().configure(state='disabled')\n else:\n self.get_widget().configure(state='enabled`')\n self.__is_enabled = not self.__is_enabled", "def enabled(self):\n return bool(self._data.get(b'strokeEnabled'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for transmit_power, mapped from YANG variable /access_points/access_point/radios/radio/state/transmit_power (int8)
def _set_transmit_power(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(9), is_leaf=True, yang_name="transmit-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """transmit_power must be of a type compatible with int8""",
            'defined-type': "int8",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(9), is_leaf=True, yang_name="transmit-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=False)""",
        })

    self.__transmit_power = t
    if hasattr(self, '_set'):
        self._set()
[ "def _set_transmit_power(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(9), is_leaf=True, yang_name=\"transmit-power\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"transmit_power must be of a type compatible with int8\"\"\",\n 'defined-type': \"int8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(9), is_leaf=True, yang_name=\"transmit-power\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)\"\"\",\n })\n\n self.__transmit_power = t\n if hasattr(self, '_set'):\n self._set()", "def set_transmit_power(self, power):\n (status, null) = self.__device.set_transmit_power(int(power,0))\n if(status != 0x01):\n print self.__device.decode_error_status(status)", "def set_tx_power(self, tx_power):\r\n valid_tx_power_values = [-40, -20, -16, -12, -8, -4, 0, 3, 4]\r\n if tx_power not in valid_tx_power_values:\r\n raise ValueError(\"Invalid transmit power value {}. Must be one of: {}\".format(tx_power, valid_tx_power_values))\r\n self.ble_driver.ble_gap_tx_power_set(tx_power)", "def get_transmit_power(self):\n (status, power) = self.__device.get_transmit_power()\n self.__device.decode_error_status(status, cmd='get_transmit_power', print_on_error=True)\n return \"%d dBm\" % (power)", "def change_txpower():\n\n txpower_response = webcli_command('radioSettings.txPower')\n current_txpower = txpower_response['radioSettings']['txPower']\n print(\"Current Transmit Power: \" + current_txpower)\n\n # rudimentary check to ensure we're actually making a change\n new_txpower = '10dbm'\n if current_txpower == '10dbm':\n new_txpower = '12dbm'\n\n change_txpower_response = webcli_command('radioSettings.txpower=' + new_txpower)\n changed_txpower = change_txpower_response['radioSettings']['txPower']\n print(\"Changed Transmit Power: \" + changed_txpower)", "def set_power(self, power):\n pass", "def set_power(self, power):\r\n self._power = power", "def set_wifi_power(self, standard, wifi_power):\n POWER_VALUES_2G = [\"-1\", \"2\", \"5\", \"8\", \"11\", \"14\", \"17\", \"20\", \"max\"]\n POWER_VALUES_5G = [\"-1\", \"2\", \"5\", \"8\", \"11\", \"14\", \"17\", \"max\"]\n \n # Control of the value to set\n if standard not in self.WIFI_STANDARD_5G \\\n and str(wifi_power) not in POWER_VALUES_2G :\n raise Exception(-5, \\\n \"Unsupported wifi power value for 5GHz '%s'\" % str(wifi_power)) \n elif standard in self.WIFI_STANDARD_5G \\\n and str(wifi_power) not in POWER_VALUES_5G :\n raise Exception(-5, \\\n \"Unsupported wifi power value for 2.4GHz '%s'\" \\\n % str(wifi_power))\n\n # Set the power value\n cmd = 'power local ' + str(wifi_power)\n for radio in ('0','1'):\n self._send_cmd(\"interface dot11radio \" + str(radio))\n self._send_cmd(cmd)\n 
self._send_cmd(\"exit\") # exit interface", "def wifi_power(self, power: object = None) -> object:\n if power is None:\n self._logger.info(\"Retrieving current WiFi radio power... [NOT IMPLEMENTED]\")\n raise NotImplementedError(\"Command 'wifi_power' is not implemented yet\")\n if (isinstance(power, str) and power.lower() == 'off') or not power:\n return self.wifi_off()\n raise NotImplementedError(\"Command 'wifi_power(<value>)' is not implemented yet\")", "def setRadioChannelAndTx(self, channel, tx_power):\n if channel == None:\n channel = -1\n if tx_power == None:\n tx_power = -1\n\n return self.sendCommand(\"RADIO SET %s %s\\r\\n\" % (channel, tx_power))", "def tx_power(self):\n out = self.__fcobj._execute_transceiver_cmd()\n if self.__swobj.is_connection_type_ssh():\n shintd = ShowInterfaceTransceiverDetail(out)\n tp = shintd.tx_power\n if tp is not None:\n return tp.strip()\n return None\n try:\n table_calibaration = out[\"TABLE_calibration\"][\"ROW_calibration\"]\n if type(table_calibaration) is list:\n table_calibaration = table_calibaration[0]\n table_calibaration_detail = table_calibaration[\"TABLE_detail\"][\"ROW_detail\"]\n if type(table_calibaration_detail) is list:\n table_calibaration_detail = table_calibaration_detail[0]\n txpow = get_key(interfacekeys.TX_POWER, self._SW_VER)\n tp = table_calibaration_detail.get(txpow, None)\n if tp is not None:\n return tp.strip()\n return None\n except KeyError:\n return None", "def getTXPower(wifi):\n try:\n txpower = wifi.wireless_info.getTXPower()\n except IOError, (errno, strerror):\n return None\n else:\n if txpower.fixed:\n fixed = \"=\"\n else:\n fixed = \":\"\n return \"Tx-Power%c%s \" % (fixed, wifi.getTXPower())", "def SetEncoderSpeed(self, power):\n pwm = int(PWM_MAX * power)\n if pwm > PWM_MAX:\n pwm = PWM_MAX\n\n try:\n self.RawWrite(COMMAND_SET_ENC_SPEED, [pwm])\n except KeyboardInterrupt:\n raise\n except:\n self.Print('Failed sending motor encoder move speed limit!')", "def include_tx_power(self, show_power=None):\n if show_power is None:\n return self.broadcaster.include_tx_power\n else:\n self.broadcaster.include_tx_power = show_power", "def set_channel_power(self, channel, power):\n assert isinstance(channel, int), \"Channel must be an int\"\n assert isinstance(power, float), \"Power must be a float\"\n\n if power < -2.0 or power > 13.0:\n print(\"Warning: you might be using power outside supported range\")\n\n # select channel\n self.inst.write(\"CH {}\".format(channel))\n # turn on/off selected channel\n self.inst.write(\"LEVEL {}\".format(power))", "def getTerminalPower(self):\n return float(self.query(\"MEAS:POW?\"))", "def getTerminalPower(self):\n return float(self.instr.query(\"MEAS:POW?\"))", "def set_loraPower(pwr):\n\t\tcommand = \"set_config=pwr_level:%s\" % pwr\n\t\treturn uart_tx(command)", "def get_power(self):\r\n return self._power", "def get_power(self, t: Time):\n t = t.as_decimal_hour\n return self.P15_ip.solve(t) # unit: kW" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for transmit_eirp, mapped from YANG variable /access_points/access_point/radios/radio/state/transmit_eirp (uint8)
def _set_transmit_eirp(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="transmit-eirp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """transmit_eirp must be of a type compatible with uint8""",
            'defined-type': "uint8",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="transmit-eirp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)""",
        })

    self.__transmit_eirp = t
    if hasattr(self, '_set'):
        self._set()
[ "def _set_steering_rssi(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"steering-rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"steering_rssi must be of a type compatible with int8\"\"\",\n 'defined-type': \"int8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"steering-rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=False)\"\"\",\n })\n\n self.__steering_rssi = t\n if hasattr(self, '_set'):\n self._set()", "def _set_steering_rssi(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"steering-rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"steering_rssi must be of a type compatible with int8\"\"\",\n 'defined-type': \"int8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"steering-rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=True)\"\"\",\n })\n\n self.__steering_rssi = t\n if hasattr(self, '_set'):\n self._set()", "def _set_rssi(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"rssi must be of a type compatible with int8\"\"\",\n 'defined-type': \"int8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=False)\"\"\",\n })\n\n self.__rssi = t\n if hasattr(self, '_set'):\n self._set()", "def _set_transmit_power(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), 
default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(9), is_leaf=True, yang_name=\"transmit-power\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"transmit_power must be of a type compatible with int8\"\"\",\n 'defined-type': \"int8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(9), is_leaf=True, yang_name=\"transmit-power\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=True)\"\"\",\n })\n\n self.__transmit_power = t\n if hasattr(self, '_set'):\n self._set()", "def IR_wake_up_trought_Xbee(self):\n self.check_serial()\n\n frame = bytearray(100) \n for i in range(100):\n frame[i] = 0xAA\n \n try :\n self.serial.write(str(frame))\n self.serial.flushInput()\n self.serial.flushOutput()\n \n except OSError as e: # bug fix python before 2.7\n raise IOError(e)", "def _set_neighbor_rssi(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"neighbor-rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"neighbor_rssi must be of a type compatible with int8\"\"\",\n 'defined-type': \"int8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"neighbor-rssi\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='int8', is_config=False)\"\"\",\n })\n\n self.__neighbor_rssi = t\n if hasattr(self, '_set'):\n self._set()", "def SetRiseTime(self, time):\n self._PWriteInt('red', 'device/trise', time)", "def send_enq(self):\n self.serial.write(ENQ)", "async def async_set_ir_mode(self, ir_mode):\n await self.upv_object.set_camera_ir(self._device_id, ir_mode)", "def signal_rssi(self):\n return max(min(self.signal_quality / 2 - 100, -50), -100)", "def make_RxSelReg(rx_wait: int, uart: UARTSel=UARTSel_ModulatedAnalog) -> bytes:\n if rx_wait > 0b11111:\n raise ValueError('rx_wait maximum is 0b11111, it is {}'.format(rx_wait))\n result = rx_wait\n result |= (uart << 6)\n return bytes([result])", "def _set_transceiver(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=ReferenceType(referenced_path='/oc-platform:components/oc-platform:component[oc-platform:name=current()/../oc-port:hardware-port]/oc-platform:subcomponents/oc-platform:subcomponent/oc-platform:name', caller=self._path() + ['transceiver'], path_helper=self._path_helper, 
require_instance=True), is_leaf=True, yang_name=\"transceiver\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"transceiver must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=ReferenceType(referenced_path='/oc-platform:components/oc-platform:component[oc-platform:name=current()/../oc-port:hardware-port]/oc-platform:subcomponents/oc-platform:subcomponent/oc-platform:name', caller=self._path() + ['transceiver'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name=\"transceiver\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__transceiver = t\n if hasattr(self, '_set'):\n self._set()", "def set_ee_signal_value(self, ee_signal_type, value, endpoint_id=None, timeout=5.0):\n (ept_id, endpoint_info) = self.get_endpoint_info(endpoint_id)\n if ee_signal_type in endpoint_info:\n self.set_signal_value(endpoint_info[ee_signal_type], value)", "def _radio433_transmit_ppm(pauses, pulse_length):\n with _connect_to_arduino() as ser:\n assert ser.readline().startswith('?')\n ser.write(\"R{0}\\n\".format(len(pauses)))\n for i in xrange(len(pauses)*2-1):\n ser.write(\"{0}\\n\".format(pauses[i/2] if i%2 else pulse_length))\n assert ser.readline().startswith('!')", "def send_ack(self):\n self.serial.write(ACK)", "def send(self):\r\n global draw_ir_prog\r\n global ir_prog\r\n ir_prog = 0\r\n draw_ir_prog = True\r\n self.inc_ir_prog()\r\n \r\n #Weather\r\n start = 0\r\n end = 0\r\n i = 0\r\n \r\n for w in w_types:\r\n if(weather.startswith(w)):\r\n start = i\r\n if(weather.endswith(w)):\r\n end = i\r\n i += 1\r\n \r\n print(weather + \" is -> \" + str(start) + \" : \" + str(end))\r\n self.inc_ir_prog()\r\n #Time\r\n h = hour\r\n m = minute\r\n if(am_pm == \"PM\"):\r\n if(h != 12):\r\n h += 12\r\n h = h % 24\r\n elif(h == 12):\r\n h = 0\r\n self.inc_ir_prog()\r\n\r\n #Alarm\r\n if(alarm == True):\r\n alh = al_h\r\n alm = al_m\r\n if(al_am_pm == \"PM\"):\r\n if(alh != 12):\r\n alh += 12\r\n alh = alh % 24\r\n elif(alh == 12):\r\n alh = 0\r\n else:\r\n alh = 255\r\n alm = 255\r\n self.inc_ir_prog()\r\n \r\n val = bytearray([start, end, alh, alm, h, m])\r\n self.inc_ir_prog()\r\n \r\n try:\r\n ser = serial.Serial(port, 300, serial.EIGHTBITS, serial.PARITY_NONE, serial.STOPBITS_TWO)\r\n for i in range(5):\r\n ser.write(val)\r\n self.inc_ir_prog()\r\n print(\"sent\")\r\n except:\r\n print(\"error sending, please check you have selected the correct port\")\r\n\r\n draw_ir_prog = False", "def setRadioChannel(self,channel):\n \n data=self.EZSPtrans([0x9A, channel&xFF]);\n return ord(data[5]);", "def set_ir_filter(self, address):\n address = int(address, 0)\n (status, null) = self.__device.set_ir_filter(address)\n self.__device.decode_error_status(status, cmd='set_ir_filter(%d)' % address, print_on_error=True)", "def transmit_mode(self, mode: Optional[TransmitMode] = None):\n if mode is None:\n return self._remote_mode\n else:\n self._remote_mode = mode\n data = bytearray(bytes([mode]))\n data.append(0x00)\n return 
self.__do_call(FunctionBytes.TRANSMIT, data)", "def set_rssi(self, rssi_list):\n return _raw_util.raw_message_sptr_set_rssi(self, rssi_list)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for channel, mapped from YANG variable /access_points/access_point/radios/radio/state/channel (uint8)
def _set_channel(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name="channel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """channel must be of a type compatible with uint8""",
        'defined-type': "uint8",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name="channel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)""",
      })

  self.__channel = t
  if hasattr(self, '_set'):
    self._set()
[ "def _set_channel(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"channel must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__channel = t\n if hasattr(self, '_set'):\n self._set()", "def setRadioChannel(self,channel):\n \n data=self.EZSPtrans([0x9A, channel&xFF]);\n return ord(data[5]);", "def set_radio_channel(self, radio, channel):\n if (radio == 'working') or (radio == '0'):\n (status, null) = self.__device.set_radio_channel(0, int(channel,0))\n if(status != 0x01):\n print self.__device.decode_error_status(status)\n elif (radio == 'monitor') or (radio == '1'):\n (status, null) = self.__device.set_radio_channel(1, int(channel,0))\n if(status != 0x01):\n print self.__device.decode_error_status(status)\n else:\n print(self.help('set_radio_channel'))", "def selectchannel(self, c, channel):\n dev = self.selectedDevice(c)\n dev.onlyChannel = channel\n if channel > 0:\n dev.selectChannel(channel)\n return channel", "def update_channel(self, channel):", "def set_channel(self, chan, val):\n try:\n self.dmx_frame[chan] = val\n except OverflowError:\n raise ValueError(\"Channel value {} out of range. 
\"\n \"DMX uses 8bit unsigned values (0-255).\"\n .format(val))", "def _set_neighbor_channel(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"neighbor-channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"neighbor_channel must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"neighbor-channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__neighbor_channel = t\n if hasattr(self, '_set'):\n self._set()", "def get_radio_channel(self):\n (status, channel) = self.__device.get_radio_channel()\n self.__device.decode_error_status(status, cmd='get_radio_channel', print_on_error=True)\n return \"Ch.%d - %dMHz\" % (channel, dec.channel_to_freq.get(channel, \"Unknown channel\"))", "def set_channel(self):\n\t\tself.channel = int(input(\"Enter the Channel No. = \"))\n\t\twhile self.channel > 7 :\n\t\t\tself.channel = int(input(\"Enter the Channel No. = \"))\n\t\t\n\t\treturn self.channel", "def channel_state(self):\n raise NotImplementedError", "def channel(self) -> 'Channel': # stub\n return self._channel", "def _set_num_channels(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_channels must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__num_channels = t\n if hasattr(self, '_set'):\n self._set()", "def _set_num_channels(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_channels must be of a type compatible with 
uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__num_channels = t\n if hasattr(self, '_set'):\n self._set()", "def set_active_channel(self, channel):\n if not channel in xrange(6):\n raise LaserSwitchLogicError(\"Cannot set selected Laser Switch channel to {0} - must be between 0 and 5 inclusive.\".format(channel))\n while(channel != self.get_selected_channel()):\n self.selected_channel_up()\n self.execute()", "def reset_channel(self, channel):\n self.channels_fired[channel] = False", "def setRadioChannelAndTx(self, channel, tx_power):\n if channel == None:\n channel = -1\n if tx_power == None:\n tx_power = -1\n\n return self.sendCommand(\"RADIO SET %s %s\\r\\n\" % (channel, tx_power))", "def setChannel(self, chan: str, chanData: np.ndarray) -> None:\n self.data[chan] = chanData", "def EnableChannel(self, channel, enable_state=ENABLE_ON):\n channel_num = self._channel_map[channel]\n self._PWriteInt(channel, 'device/ch%d_enable' % channel_num, enable_state)", "def channel(self) -> Channel:\n return self._channel" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for channel_width, mapped from YANG variable /access_points/access_point/radios/radio/state/channel_width (uint8)
def _set_channel_width(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(20), is_leaf=True, yang_name="channel-width", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """channel_width must be of a type compatible with uint8""",
        'defined-type': "uint8",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(20), is_leaf=True, yang_name="channel-width", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)""",
      })

  self.__channel_width = t
  if hasattr(self, '_set'):
    self._set()
[ "def set_chan_width(self, chan, width):\n self._set_chan_width(chan, width)", "def _set_num_channels(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_channels must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__num_channels = t\n if hasattr(self, '_set'):\n self._set()", "def _set_num_channels(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_channels must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/port', defining_module='openconfig-platform-port', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__num_channels = t\n if hasattr(self, '_set'):\n self._set()", "def width(self, val):\n if (val is None) or (val == -1):\n if not self.log.full(): self.log.put_nowait((logging.WARNING, \"CV2:Width not changed:{}\".format(val)))\n return\n if self.cam_open:\n with self.cam_lock: \n isok = self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, val)\n if isok:\n if not self.log.full(): self.log.put_nowait((logging.INFO, \"CV2:Width:{}\".format(val)))\n else:\n if not self.log.full(): self.log.put_nowait((logging.ERROR, \"CV2:Failed to set width to {}!\".format(val)))", "def width(self):\n return capi.get_band_xsize(self.ptr)", "def _set_channel(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"channel must be of a 
type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__channel = t\n if hasattr(self, '_set'):\n self._set()", "def _set_channel(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"channel must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..165']}), is_leaf=True, yang_name=\"channel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__channel = t\n if hasattr(self, '_set'):\n self._set()", "def width(self):\n if self.cam_open:\n return self.cam.get(cv2.CAP_PROP_FRAME_WIDTH)\n else: return float(\"NaN\")", "def maximum_channel_length(self) -> int:\n return self['channellen']", "def setWidth(self, w):\n if not isinstance(w, (int,float)):\n raise TypeError('width must be numeric value')\n if w <= 0:\n raise ValueError('width must be positive')\n self._width = w\n self._canvasChanged()", "def set_channel_wavelength(self, channel, wavelength):\n assert isinstance(channel, int), \"Channel must be an int\"\n assert isinstance(wavelength, float), \"Wavelength must be a float\"\n\n default_wavelengths = (\n 1544.53,\n 1545.32,\n 1546.92,\n 1547.72,\n 1555.72,\n 1558.98,\n 1561.42,\n 1562.23,\n )\n\n if (wavelength - default_wavelengths[channel - 1]) ** 2 > 9:\n print(\n \"Warning: you might be using a wavelength outside supported range, \"\n \"default is {} and you're using {}\".format(\n default_wavelengths[channel - 1], wavelength\n )\n )\n\n # select channel\n self.inst.write(\"CH {}\".format(channel))\n # turn on/off selected channel\n self.inst.write(\"WAVE {}\".format(wavelength))", "def set_width(self, width):\n self.width = width\n self.changed = True", "def num_channels(self):\n return self.train.images.shape[3]", "def get_num_inchannels(self):\n return self.in_channels", "def n_channels(self):\n return self.colours.shape[1]", "def frame_width(self):\n # type: () -> int\n return self._frame_width", "def setWidthOfBand(self, width) -> None:\n ...", "def fc_len(self):\n out = self.out_len_conv(self.in_len, self.conv_block[0])\n out = int(out/2)\n out = self.out_len_conv(out, self.conv_block[4]) \n out = int(out/2)\n out = 
out*self.conv_block[4].out_channels\n return out", "def frame_width(self, frame_width):\n # type: (int) -> None\n\n if frame_width is not None:\n if not isinstance(frame_width, int):\n raise TypeError(\"Invalid type for `frame_width`, type has to be `int`\")\n\n self._frame_width = frame_width", "def num_sense_channels(self):\n return len(self.sense_channels)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for dca, mapped from YANG variable /access_points/access_point/radios/radio/state/dca (boolean)
def _set_dca(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="dca", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """dca must be of a type compatible with boolean""",
        'defined-type': "boolean",
        'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="dca", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=False)""",
      })

  self.__dca = t
  if hasattr(self, '_set'):
    self._set()
[ "def dns_active(self, dns_active: bool):\n self._indicator_data['flag1'] = self.util.to_bool(dns_active)", "def set_cd(self, b):\n _ldns.ldns_pkt_set_cd(self, b)\n #parameters: ldns_pkt *,bool,\n #retvals: ", "def is_ca(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_ca\")", "def set_dcc_selection(self, value):\n assert type(value) is bool\n read = bytearray(self._device.readRaw(PI3HDMI336_TOTAL_BYTES))\n read[PI3HDMI336_OFFSET_BYTE1] = \\\n (read[PI3HDMI336_OFFSET_BYTE1] & (~PI3HDMI336_BYTE1_DCC_CHANNEL)) | value\n self._device.writeRaw(read)", "def test_sdca_sparse_and_dense_consistency(self):\n\n def create_solver():\n return SDCA(max_iter=1, verbose=False, l_l2sq=1e-3,\n seed=TestSolver.sto_seed)\n\n self._test_solver_sparse_and_dense_consistency(create_solver)", "def is_dcp(self):\n return self.args[0].is_convex()", "def test_solver_sdca(self):\n solver = SDCA(l_l2sq=1e-5, max_iter=100, verbose=False, tol=0)\n self.check_solver(solver, fit_intercept=False, model=\"logreg\",\n decimal=1)\n\n # Now a specific test with a real prox for SDCA\n np.random.seed(12)\n n_samples = Test.n_samples\n n_features = Test.n_features\n\n for fit_intercept in [True, False]:\n y, X, coeffs0, interc0 = TestSolver.generate_logistic_data(\n n_features, n_samples)\n\n model = ModelLogReg(fit_intercept=fit_intercept).fit(X, y)\n ratio = 0.5\n l_enet = 1e-2\n\n # SDCA \"elastic-net\" formulation is different from elastic-net\n # implementation\n l_l2_sdca = ratio * l_enet\n l_l1_sdca = (1 - ratio) * l_enet\n sdca = SDCA(l_l2sq=l_l2_sdca, max_iter=100, verbose=False, tol=0,\n seed=Test.sto_seed).set_model(model)\n prox_l1 = ProxL1(l_l1_sdca)\n sdca.set_prox(prox_l1)\n coeffs_sdca = sdca.solve()\n\n # Compare with SVRG\n svrg = SVRG(max_iter=100, verbose=False, tol=0,\n seed=Test.sto_seed).set_model(model)\n prox_enet = ProxElasticNet(l_enet, ratio)\n svrg.set_prox(prox_enet)\n coeffs_svrg = svrg.solve(step=0.1)\n\n np.testing.assert_allclose(coeffs_sdca, coeffs_svrg)", "def dns_active(self) -> bool:\n return self._indicator_data.get('flag1') # type: ignore", "def _set_csa(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"csa\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"csa must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"csa\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__csa = t\n if hasattr(self, '_set'):\n self._set()", "def _set_csa(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"csa\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': 
\"\"\"csa must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"csa\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__csa = t\n if hasattr(self, '_set'):\n self._set()", "def ddeta(\n self,\n varname,\n hcoord=None,\n scoord=None,\n hboundary=\"extend\",\n hfill_value=None,\n sboundary=\"extend\",\n sfill_value=None,\n attrs=None,\n ):\n\n assert isinstance(\n varname, str\n ), \"varname should be a string of the name of a variable stored in the Dataset\"\n assert varname in self.ds, 'variable called \"varname\" must be in Dataset'\n var = xroms.ddeta(\n self.ds[varname],\n self.grid,\n hcoord=hcoord,\n scoord=scoord,\n hboundary=hboundary,\n hfill_value=hfill_value,\n sboundary=sboundary,\n sfill_value=sfill_value,\n attrs=attrs,\n )\n\n self._ds[var.name] = var\n return self._ds[var.name]", "def is_dazed(sim_info: SimInfo) -> bool:\n return CommonMoodUtils.has_mood(sim_info, CommonMoodId.DAZED)", "def set_dtr(self, value, *args, **kwargs):\n with self.change_connection():\n self.connection.dtr = bool(value)", "def ds_factory_enabled(self) -> ConfigNodePropertyBoolean:\n return self._ds_factory_enabled", "def datacenter_configured(name):\n proxy_type = __salt__[\"vsphere.get_proxy_type\"]()\n if proxy_type == \"esxdatacenter\":\n dc_name = __salt__[\"esxdatacenter.get_details\"]()[\"datacenter\"]\n else:\n dc_name = name\n log.info(\"Running datacenter_configured for datacenter '%s'\", dc_name)\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"Default\"}\n comments = []\n si = None\n try:\n si = __salt__[\"vsphere.get_service_instance_via_proxy\"]()\n dcs = __salt__[\"vsphere.list_datacenters_via_proxy\"](\n datacenter_names=[dc_name], service_instance=si\n )\n if not dcs:\n if __opts__[\"test\"]:\n comments.append(f\"State will create datacenter '{dc_name}'.\")\n else:\n log.debug(\"Creating datacenter '%s'\", dc_name)\n __salt__[\"vsphere.create_datacenter\"](dc_name, si)\n comments.append(f\"Created datacenter '{dc_name}'.\")\n log.info(comments[-1])\n ret[\"changes\"].update({\"new\": {\"name\": dc_name}})\n else:\n comments.append(\n f\"Datacenter '{dc_name}' already exists. 
Nothing to be done.\"\n )\n log.info(comments[-1])\n __salt__[\"vsphere.disconnect\"](si)\n ret[\"comment\"] = \"\\n\".join(comments)\n ret[\"result\"] = None if __opts__[\"test\"] and ret[\"changes\"] else True\n return ret\n except salt.exceptions.CommandExecutionError as exc:\n log.error(\"Error: %s\", exc)\n if si:\n __salt__[\"vsphere.disconnect\"](si)\n ret.update(\n {\"result\": False if not __opts__[\"test\"] else None, \"comment\": str(exc)}\n )\n return ret", "def set_dms(self, dms):\n return _radio_astro_swig.detect_set_dms(self, dms)", "def ds_factory_enabled(self, ds_factory_enabled: ConfigNodePropertyBoolean):\n\n self._ds_factory_enabled = ds_factory_enabled", "def get_dca(msa_file,\r\n max_len):\r\n msa_prpc = np.load(msa_file)\r\n msa_prpc = msa_prpc[:max_len, :max_len, :]\r\n pad_len = max(0, max_len - msa_prpc.shape[0])\r\n # todo : log ?\r\n\r\n if msa_prpc.shape[0] < max_len:\r\n msa_prpc = np.pad(msa_prpc, [[0, pad_len], [0, pad_len], [0, 0]])\r\n\r\n return msa_prpc", "def create_dcta(self, **kwargs):\n try:\n dcta_data = self.client.post('dctas/', {'campaign': self.slug, **kwargs})\n except APIException as error:\n raise UpdateError(\n 'Could not create new DCTA on campaign {}: {}'.format(self.name, error.message)\n )\n\n dcta = DCTA.deserialize(self.client, dcta_data)\n\n return dcta", "def set_dac(self, dac_value, dac_id=1, length=None, is_seq=False, ttls=0):\n if length==None:\n length=42*9e-8\n s_content = '<analogout id=\"%d\" dac_value=\"%i\"/><ttlout value=\"0x%06x\"/>' \\\n % (dac_id, dac_value, ttls)\n self.state_list.append(StateSimple(length, s_content))\n if not is_seq:\n s_content = '<analogout id=\"%d\" dac_value=\"0\"/><ttlout value=\"0x%06x\"/>' \\\n % (dac_id, ttls)\n self.state_list.append(StateSimple(42*9e-8, s_content))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for dtp, mapped from YANG variable /access_points/access_point/radios/radio/state/dtp (boolean)
def _set_dtp(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="dtp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """dtp must be of a type compatible with boolean""",
        'defined-type': "boolean",
        'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="dtp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=False)""",
      })

  self.__dtp = t
  if hasattr(self, '_set'):
    self._set()
[ "def _set_trust_dscp(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"trust-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"trust_dscp must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"trust-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__trust_dscp = t\n if hasattr(self, '_set'):\n self._set()", "def _set_trust_dscp(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"trust-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"trust_dscp must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"trust-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__trust_dscp = t\n if hasattr(self, '_set'):\n self._set()", "def is_pds_state(self, *args):\n return _wali.WPDS_is_pds_state(self, *args)", "def tddMode(self, on = True):\n if on is True:\n on = 'ON'\n\n elif on is False:\n on = 'OFF'\n\n elif on is None:\n on = 'MATE'\n\n command = 'TDD MODE {}'.format(on)\n d = self.sendCommand(command)\n d = d.addCallback(self.checkFailure)\n # planned eventual failure case (not capable)\n d = d.addCallback(self.checkFailure, failure = '0')\n d = d.addCallback(self.resultAsInt)\n return d", "def set_dtr(self, value, *args, **kwargs):\n with self.change_connection():\n self.connection.dtr = bool(value)", "def write_gpio_pin_digital_state(self, identifier: int, state: bool) -> None:\n self._written_digital_state[identifier] = state", "def srt_changed(self, state):\n self.skip_rigid_transformation_new = (state == QtCore.Qt.Checked)", "def test_digital_state_setter() -> None:\n driver = MockGPIOPinDriver()\n pin = GPIOPin(\n 0,\n driver,\n initial_mode=GPIOPinMode.DIGITAL_INPUT,\n hardware_modes={\n GPIOPinMode.DIGITAL_OUTPUT,\n GPIOPinMode.DIGITAL_INPUT,\n GPIOPinMode.DIGITAL_INPUT_PULLUP,\n GPIOPinMode.ANALOGUE_INPUT,\n },\n )\n\n pin.mode = GPIOPinMode.DIGITAL_OUTPUT\n pin.digital_state = True\n assert driver._written_digital_state[0]\n pin.digital_state = False\n assert not driver._written_digital_state[0]", "def AltimeterPowerCheckbox(self,state):\n if state == QtCore.Qt.Checked:\n self.SDS_params.altimeter_pow_enabled = 1\n else:\n self.SDS_params.altimeter_pow_enabled = 0\n\n self.SDS_params.parse_params()\n 
self.SDS_params.send(self.SDS_params.altimeter_channel, self.SDS_params.altimeter_pow_message)", "async def get_ptp_enabled(self):\n return unpack('I', await self._execute_command('#GetPtpEnabled').content)[0] > 0", "def bool_string(state):\n if isinstance(state, dict) and const.CONF_STATE in state:\n state = state[const.CONF_STATE]\n return const.STATE_ON if state else const.STATE_OFF", "def get_as_bool(driver_id, bool_value):\n if driver_id == 'pgsql':\n if bool_value is True:\n return 'Y'\n else:\n return 'N'\n else:\n if type(bool_value) == bool and bool_value:\n return 'true'\n else:\n return 'false'", "def _update_is_passive(self):\n passive_setting = self._view.settings().get('wrap_as_you_type_passive')\n if passive_setting in (None, False, True):\n self.is_passive = bool(passive_setting)\n else:\n self.is_passive = False\n raise UserFacingError('The value must be a boolean')", "def setdtr(self, dtr):\n try:\n self.ser.setDTR(dtr)\n self.log(\"DTR set to \"+`dtr`)\n return True\n except SilentException:\n return False", "def test_bool_direct(self):\n for source in (\"direct\", \"default\"):\n self.assertEqual(self.setting.detect_type(True, source), \"bool\")", "def get_ntp_enabled(self):\n return None", "def is_datetime(self) -> \"bool\":\n return self._value.getType() == Value.DTVAL", "def DDISPY(self,order,x0,y0,t):\n return poly.DPOLY[self.DISPY_POLYNAME[order]](self.DISPY_DATA[order],x0,y0,t)", "def HasDRT(self):\n return self.__has('DRT')", "def setDTR(self, value:bool)->None:\n self.serial.setDTR(value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for dtp_min, mapped from YANG variable /access_points/access_point/radios/radio/state/dtp_min (int8)
def _set_dtp_min(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(3), is_leaf=True, yang_name="dtp-min", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """dtp_min must be of a type compatible with int8""",
        'defined-type': "int8",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(3), is_leaf=True, yang_name="dtp-min", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=False)""",
      })

  self.__dtp_min = t
  if hasattr(self, '_set'):
    self._set()
[ "def minimum_temperature(self, value: float) -> None:\n self._min_temp = value", "def state_min(self):\n return self.__state_min", "def min_start_time(self, min_start_time):\n if min_start_time is not None and len(min_start_time) > 5:\n raise ValueError(\"Invalid value for `min_start_time`, length must be less than or equal to `5`\") # noqa: E501\n\n self._min_start_time = min_start_time", "def t_min(self):\n t_min = self.get('t_min')\n if np.isfinite(t_min):\n return Time(t_min, format='mjd')\n else:\n return None", "def min_vpu_low(self, min_vpu_low: float):\n\n self._min_vpu_low = min_vpu_low", "def min_sample_value(self, min_sample_value):\n self._min_sample_value = min_sample_value", "def set_tmin(self):\n\t\t#pass\n\t\tif len(self.data) == 10:\n\t\t\tld = len(self.data[0])\n\t\t\tcheck = self.data[3:7]\n\t\t\tnew_min = [1000 for i in range(len(self.data[0]))]\n\t\t\tfor tt in check:\n\t\t\t\t#print 'tt: ' + str(tt)\n\t\t\t\tfor j in range(len(tt)):\n\t\t\t\t\tif tt[j] >= self.min_times[j] and tt[j] < new_min[j]:\n\t\t\t\t\t\tnew_min[j] = tt[j]\n\n\t\t\tfor i in range(ld):\n\t\t\t\tif new_min[i] != 1000:\n\t\t\t\t\tself.t_min[i] = new_min[i]", "def min_start_date(self, min_start_date):\n\n self._min_start_date = min_start_date", "def minimum(cls, state):\n if isinstance(state, str):\n state = getattr(State, state)\n return state * cls.state_offset", "def src_port_min(self, src_port_min):\n\n self._src_port_min = src_port_min", "def test_Tmin(self):\n self.assertAlmostEqual(self.stick.Tmin.value_si, self.Tmin, 6)", "def min_study_duration(self, min_study_duration):\n\n self._min_study_duration = min_study_duration", "def default_min(self, default_min):\n\n self._default_min = default_min", "def min_heat_setpoint(self, min_heat_setpoint):\n\n self._min_heat_setpoint = min_heat_setpoint", "def _set_min_threshold(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name=\"min-threshold\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint64', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"min_threshold must be of a type compatible with uint64\"\"\",\n 'defined-type': \"uint64\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name=\"min-threshold\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint64', is_config=False)\"\"\",\n })\n\n self.__min_threshold = t\n if hasattr(self, '_set'):\n self._set()", "def _set_min_ttl(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"min-ttl\", rest_name=\"min-ttl\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"min_ttl must be of a type 
compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"min-ttl\", rest_name=\"min-ttl\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__min_ttl = t\n if hasattr(self, '_set'):\n self._set()", "def min_event_delay(self, min_event_delay: ConfigNodePropertyInteger):\n\n self._min_event_delay = min_event_delay", "def setMinValue(self, value):\n self.minValue = value", "def min(self):\n return capi.get_band_minimum(self.ptr, byref(c_int()))", "def minimum_value(self):\n ret = self._get_attr(\"minimumValue\")\n return ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for dtp_max, mapped from YANG variable /access_points/access_point/radios/radio/state/dtp_max (int8)
def _set_dtp_max(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(15), is_leaf=True, yang_name="dtp-max", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """dtp_max must be of a type compatible with int8""",
        'defined-type': "int8",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8)(15), is_leaf=True, yang_name="dtp-max", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=False)""",
      })

  self.__dtp_max = t
  if hasattr(self, '_set'):
    self._set()
[ "def target_temperature_max(self) -> Optional[float]:\n if self._state is None:\n return None\n limits = self._device_conf.get(\"max\", {})\n return limits.get(str(_operation_mode_to(self.operation_mode)), {}).get(\"max\", 31)", "def state_max(self):\n return self.__state_max", "def set_max_utilization(self, max_utilization):\n if max_utilization is not None:\n self.max_utilization = max_utilization\n else:\n self.max_utilization = _MAX_UTILIZATION", "def max_rate(self) -> float:\n type_info = cast(\n EventableStateVariableTypeInfo, self._state_variable_info.type_info\n )\n return type_info.max_rate or 0.0", "def set_max_rate_deviation(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_max_rate_deviation(self, *args, **kwargs)", "def set_max_data_values(self, max_data_values: int) -> None:\n self._max_data_values = max_data_values", "def max_speed(self, value):\n\n pass", "def maximum_value(self):\n ret = self._get_attr(\"maximumValue\")\n return ret", "def setMaxValue(self, value):\n self.maxValue = value", "def t_max(self):\n t_max = self.get('t_max')\n if np.isfinite(t_max):\n return Time(t_max, format='mjd')\n else:\n return None", "def dst_port_max(self, dst_port_max):\n\n self._dst_port_max = dst_port_max", "def setMaxPeriod(self, maxPeriod):\n hal.setCounterMaxPeriod(self.counter, float(maxPeriod))", "def _get_maximumValue(self) -> \"int\" :\n return _core.IntegerSpinnerCommandInput__get_maximumValue(self)", "def _set_max_ttl(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"max-ttl\", rest_name=\"max-ttl\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"max_ttl must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"max-ttl\", rest_name=\"max-ttl\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__max_ttl = t\n if hasattr(self, '_set'):\n self._set()", "def getMaxSpeed(self):\n return getHandle().maxSpeed", "def _set_max_threshold(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name=\"max-threshold\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint64', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"max_threshold must be of a type compatible with uint64\"\"\",\n 'defined-type': \"uint64\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name=\"max-threshold\", parent=self, path_helper=self._path_helper, 
extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint64', is_config=False)\"\"\",\n })\n\n self.__max_threshold = t\n if hasattr(self, '_set'):\n self._set()", "def onUpdateMaximumTimer(self):\n self.emitMaximumChanged(self.maximum.text())", "def MaxLspPerPcUpdate(self):\n return self._get_attribute('maxLspPerPcUpdate')", "def max_sample_value(self, max_sample_value):\n self._max_sample_value = max_sample_value", "def setMaxPeriod(self, maxPeriod):\n hal.setEncoderMaxPeriod(self.encoder, maxPeriod)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for antenna_gain, mapped from YANG variable /access_points/access_point/radios/radio/state/antenna_gain (int8)
def _set_antenna_gain(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name="antenna-gain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """antenna_gain must be of a type compatible with int8""",
        'defined-type': "int8",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name="antenna-gain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='int8', is_config=False)""",
      })

  self.__antenna_gain = t
  if hasattr(self, '_set'):
    self._set()
[ "def set_analog_gain(self, gain):\n if gain < 0:\n raise ValueError('Gain register must be greater than 0.')\n self.i2c.mem_write(int(gain), self.bus_addr, 1)", "def set_pga_gain(self, pga_num, gain):\n\t\treturn self.config_ads(pga_num, 2, gain)", "def _set_gain(self, adjustment: int) -> int:\n return _lib.opus_decoder_ctl(self._state, CTL_SET_GAIN, adjustment)", "def set_gain(self, dB: float) -> int:\n\n dB_Q8 = max(-32768, min(32767, round(dB * 256))) # dB * 2^n where n is 8 (Q8)\n return self._set_gain(dB_Q8)", "def set_gain(self, gain):\n if gain is None:\n r = self.subdev.gain_range()\n gain = (r[0] + r[1])/2 # set gain to midpoint\n self.gain = gain\n return self.subdev.set_gain(gain)", "def setGain(self, gain: 'float') -> \"void\":\n return _coin.SoAudioDevice_setGain(self, gain)", "def Set_ALS_Gain(self,gain):\n\t\tif gain in self._Gain_LOOKUP:\n\t\t\tregval = self._read_reg(self._REG_ALS_CONTR)\n\t\t\tregval = (regval & self._Gain_CLEAR) | self._Gain_LOOKUP[gain][0]\n\t\t\tself._write_reg(self._REG_ALS_CONTR,regval)", "def test_set_gain():\n _setup()\n\n as7262.set_gain(1)\n assert as7262._as7262.CONTROL.get_gain_x() == 1\n\n # Should snap to the highest gain value\n as7262.set_gain(999)\n assert as7262._as7262.CONTROL.get_gain_x() == 64\n\n # Should snap to the lowest gain value\n as7262.set_gain(-1)\n assert as7262._as7262.CONTROL.get_gain_x() == 1", "def get_RxGain(rfCfgReg: int) -> RxGain:\n return RxGain((rfCfgReg[0] & 0x70) >> 4)", "def set_gain(self):\n DescStr = 'Setting Gain for AHF_Camera '\n if (self.AHFgainMode & 2):\n DescStr += 'from current illumination'\n else:\n DescStr += \"from ISO \" + str(self.iso)\n if (self.AHFgainMode & 1):\n DescStr += ' with white balancing'\n else:\n DescStr += \" with No white balancing\"\n print (DescStr)\n if (self.AHFgainMode & 1):\n self.awb_mode = 'auto'\n else:\n self.awb_mode = 'off'\n self.awb_gains = (1, 1)\n # if (self.AHFgainMode & 2):\n self.exposure_mode = 'auto'\n # else:\n # self.exposure_mode = 'off'\n super().start_preview(fullscreen=False, window=self.AHFpreview)\n sleep(2.0) # let gains settle, then fix values\n if (self.AHFgainMode & 1):\n savedGain = self.awb_gains\n self.awb_gains = savedGain\n self.awb_mode = \"off\"\n # if (self.AHFgainMode & 2):\n self.exposure_mode = 'off'\n super().stop_preview()\n print (\"Red Gain for white balance =\" + str(float(self.awb_gains[0])))\n print (\"Blue Gain for white balance =\" + str(float(self.awb_gains[1])))\n print (\"Analog Gain = \" + str(float(self.analog_gain)))\n print (\"Digital Gain = \" + str(float(self.digital_gain)))\n return", "def getRelativeGain(self):\n if len(self.gainSettings) > 0 :\n return self.gainSettings\n\n xdim = len(self.antennaGrid)\n ydim = len(self.antennaGrid[0])\n self.gainSettings = [[self.beamStrength / self.beamStrength for y in range(ydim)] for x in range(xdim)]\n\n return self.gainSettings", "def process_gain(self):\n return 1", "def set_sum_input_gain(self, input_channel: Channel, gain: float):\n assert Channel.INPUT_A <= input_channel <= Channel.INPUT_C\n return self._invoke(0x16 + input_channel - Channel.INPUT_A, Channel.SETUP, _15db_range(gain))", "def gainToInt(self,gain):\n dial_num = int(gain*2)\n self.logger.debug(\"%f converted to %d\", gain, dial_num)\n return dial_num # int((gain*2)+23)", "def power_on(self, gain):\n\t\t# Turn on channel 0\n\t\tself._bus.write_byte_data(TSL2561_ADDRESS, TSL2561_CHANNEL_0 | TSL2561_CMD, TSL2561_POWER_ON)\n\t\tself._bus.write_byte_data(TSL2561_ADDRESS, TSL2561_CHANNEL_0 | TSL2561_CMD, 
gain)\n\t\t# Turn on channel 1\n\t\tself._bus.write_byte_data(TSL2561_ADDRESS, TSL2561_CHANNEL_1 | TSL2561_CMD, TSL2561_POWER_ON)\n\t\tself._bus.write_byte_data(TSL2561_ADDRESS, TSL2561_CHANNEL_1 | TSL2561_CMD, gain)", "def set_manual_gain_enabled(self, enabled):\n result = librtlsdr.rtlsdr_set_tuner_gain_mode(self.dev_p, int(enabled))\n if result < 0:\n raise IOError('Error code %d when setting gain mode'\\\n % (result))\n\n return", "def apply_gain(infile, gain):\n fs1, x = monoWavRead(filename=infile)\n\n x = np.copy(x)\n x = x * (10 ** (gain / 20.0))\n x = np.minimum(np.maximum(-1.0, x), 1.0)\n #Change the output file name to suit your requirements here\n outfile_name = os.path.basename(infile).split(\".\")[0] + (\"_gain%s.wav\" % str(gain))\n outfile = os.path.join(outfile_path, outfile_name)\n write(filename = outfile, rate = fs1, data = x)\n if (FILE_DELETION):\n extractFeaturesAndDelete(outfile)", "def get_ml_gain_increment(self):\n frames = self.integration.frames\n valid_frames = frames.valid & frames.is_unflagged('MODELING_FLAGS')\n return snf.get_ml_gain_increment(\n frame_data=frames.data,\n signal_wc=frames.temp_wc,\n signal_wc2=frames.temp_wc2,\n sample_flags=frames.sample_flag,\n channel_indices=self.mode.channel_group.indices,\n valid_frames=valid_frames)", "def mag_gain(self, gain=0x20):\n self._mag_gain = gain\n self.i2c.writeto_mem(self.ADDRESS_MAG, self.REGISTER_MAG_CRB_REG_M, self._mag_gain)\n if self._mag_gain == MAGGAIN_1_3:\n self._lsm303mag_gauss_lsb_xy = 1100.0\n self._lsm303mag_gauss_lsb_z = 980.0\n elif self._mag_gain == MAGGAIN_1_9:\n self._lsm303mag_gauss_lsb_xy = 855.0\n self._lsm303mag_gauss_lsb_z = 760.0\n elif self._mag_gain == MAGGAIN_2_5:\n self._lsm303mag_gauss_lsb_xy = 670.0\n self._lsm303mag_gauss_lsb_z = 600.0\n elif self._mag_gain == MAGGAIN_4_0:\n self._lsm303mag_gauss_lsb_xy = 450.0\n self._lsm303mag_gauss_lsb_z = 400.0\n elif self._mag_gain == MAGGAIN_4_7:\n self._lsm303mag_gauss_lsb_xy = 400.0\n self._lsm303mag_gauss_lsb_z = 355.0\n elif self._mag_gain == MAGGAIN_5_6:\n self._lsm303mag_gauss_lsb_xy = 330.0\n self._lsm303mag_gauss_lsb_z = 295.0\n elif self._mag_gain == MAGGAIN_8_1:\n self._lsm303mag_gauss_lsb_xy = 230.0\n self._lsm303mag_gauss_lsb_z = 205.0", "def test_15_flux_and_bandpass_calibrators_gain():\n\tcasalog.origin(\"test_15_flux_and_bandpass_calibrators_gain\")\n\tcasalog.post(\"starting\")\n\n\tgaincal(vis='G192_flagged_6s.ms', caltable='calG192.G1', field='0,3', \\\n\t gaintable=['calG192.antpos', 'calG192.gaincurve', 'calG192.requantizer', \\\n\t 'calG192.opacity', 'calG192.K0', \\\n\t 'calG192.B0'], \\\n\t gaintype='G', refant='ea05', calmode='ap', solint='30s', minsnr=3)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for scanning, mapped from YANG variable /access_points/access_point/radios/radio/state/scanning (boolean)
def _set_scanning(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="scanning", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """scanning must be of a type compatible with boolean""",
      'defined-type': "boolean",
      'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="scanning", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=False)""",
    })

  self.__scanning = t
  if hasattr(self, '_set'):
    self._set()
[ "def SendStartScanSignal(self):\n self._scanning = True", "def is_scan_enabled(self):\n return self._is_scan_enabled", "def pitch_scan_status_changed(self, status):\n self.scan_status = status", "def _scan(self):\n # Set the scan parameters\n if self._data_range is not None:\n image_range = tuple(self._data_range)\n else:\n image_range = (self._starting_frame, self._starting_frame)\n oscillation = (self._starting_angle, self._oscillation_range)\n\n # Create the scan object\n return self._scan_factory.make_scan(\n image_range,\n 0.0,\n oscillation,\n [0] * (image_range[-1] - image_range[0] + 1),\n deg=True,\n )", "def _set_scanning_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"scanning-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"scanning_interval must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"scanning-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__scanning_interval = t\n if hasattr(self, '_set'):\n self._set()", "def start_scan(self):\r\n try:\r\n out = self.get_output(\"scan on\")\r\n except BluetoothctlError, e:\r\n print(e)\r\n return None", "def recognition_enabled(self, state):\n if self.have_recognised_words:\n self.recogniser_state = state\n return\n\n if state and not self.recogniser_state:\n self.nottreal.view.wizard_window.toggle_recogniser()\n elif not state and self.recogniser_state:\n self.nottreal.view.wizard_window.toggle_recogniser()\n\n self.recogniser_state = state", "def StopScanning(self):\n try:\n if(self.__is_connected and self.__is_scanning):\n self.__is_scanning = False\n self.__s.write(b'\\xA5\\x65')\n time.sleep(0.5)\n self.__s.reset_input_buffer()\n self.__stop_motor()\n return True\n else:\n return False\n except Exception as e:\n return False", "def start_scanner(self):\n if not self._scanner:\n self._scanner = threading.Thread(target=self._scan_buttons)\n self._scanner.start()\n return True\n else:\n return False", "def test_digital_state_setter() -> None:\n driver = MockGPIOPinDriver()\n pin = GPIOPin(\n 0,\n driver,\n initial_mode=GPIOPinMode.DIGITAL_INPUT,\n hardware_modes={\n GPIOPinMode.DIGITAL_OUTPUT,\n GPIOPinMode.DIGITAL_INPUT,\n GPIOPinMode.DIGITAL_INPUT_PULLUP,\n GPIOPinMode.ANALOGUE_INPUT,\n },\n )\n\n pin.mode = GPIOPinMode.DIGITAL_OUTPUT\n pin.digital_state = True\n assert driver._written_digital_state[0]\n pin.digital_state = False\n assert not driver._written_digital_state[0]", "def _driving_state(self):\n angle_diff = self.angles[1:] - self.angles[:-1]\n self.angle_diff = np.concatenate((np.array([0.0]), angle_diff), axis=0)\n\n self.is_turning = np.absolute(self.angle_diff) > self.turning_threshold", "def SendEndScanSignal(self):\n self._scanning = False", "def tray_scan_started(self):\n if not 
DBUS_AVAIL:\n return\n self._is_scanning = True\n self.init_network_menu()", "def scan():\n _rpc.request('AudioLibrary.Scan')", "def scan_page(self, scan_page):\n\n self._scan_page = scan_page", "def read_gpio_pin_digital_state(self, identifier: int) -> bool:\n return self._digital_state[identifier]", "def SetEnabled(self, state):\n if state:\n level = COMMAND_VALUE_ON\n else:\n level = COMMAND_VALUE_OFF\n\n try:\n self.RawWrite(COMMAND_SET_ENABLED, [level])\n except KeyboardInterrupt:\n raise\n except:\n self.Print('Failed sending motor drive enabled state!')", "def valid_scan_type(scan_type):\n if scan_type in scan_types:\n return True\n else:\n return False", "def write_gpio_pin_digital_state(self, identifier: int, state: bool) -> None:\n self._written_digital_state[identifier] = state", "def _set_scan_start(self, value):\n ao_ch, _ = self._verify_scan_channels()\n if ao_ch is None: # if _verify_scan_channels() returns nothing that means channel is invalid or not found\n return\n value = self.analog_out(ao_ch, value, verify_only=True)\n self.properties['scan']['start'] = value\n self._set_scan_step()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for scanning_interval, mapped from YANG variable /access_points/access_point/radios/radio/state/scanning_interval (uint8)
def _set_scanning_interval(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="scanning-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """scanning_interval must be of a type compatible with uint8""",
      'defined-type': "uint8",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="scanning-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)""",
    })

  self.__scanning_interval = t
  if hasattr(self, '_set'):
    self._set()
[ "def _set_scanning_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"scanning-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"scanning_interval must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"scanning-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__scanning_interval = t\n if hasattr(self, '_set'):\n self._set()", "def scan_interval(self, scan_interval):\n\n self._scan_interval = scan_interval", "def option_scan_interval(self):\n scan_interval = self.config_entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)\n return timedelta(seconds=scan_interval)", "def _set_polling_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(20), is_leaf=True, yang_name=\"polling-interval\", rest_name=\"polling-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface counter polling interval', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"polling_interval must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(20), is_leaf=True, yang_name=\"polling-interval\", rest_name=\"polling-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface counter polling interval', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__polling_interval = t\n if hasattr(self, '_set'):\n self._set()", "def scan_interval_ids(self):\n return self._scan_interval_ids", "def scan_interval_ids(self, scan_interval_ids):\n\n self._scan_interval_ids = scan_interval_ids", "def _set_rp_adv_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = 
YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'10..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(60), is_leaf=True, yang_name=\"rp-adv-interval\", rest_name=\"rp-adv-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set RP candidate advertisement message interval'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"rp_adv_interval must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'10..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(60), is_leaf=True, yang_name=\"rp-adv-interval\", rest_name=\"rp-adv-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set RP candidate advertisement message interval'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__rp_adv_interval = t\n if hasattr(self, '_set'):\n self._set()", "def _scan(self):\n # Set the scan parameters\n if self._data_range is not None:\n image_range = tuple(self._data_range)\n else:\n image_range = (self._starting_frame, self._starting_frame)\n oscillation = (self._starting_angle, self._oscillation_range)\n\n # Create the scan object\n return self._scan_factory.make_scan(\n image_range,\n 0.0,\n oscillation,\n [0] * (image_range[-1] - image_range[0] + 1),\n deg=True,\n )", "def monitoring_interval(self) -> typing.Optional[aws_cdk.core.Duration]:\n return self._values.get('monitoring_interval')", "def set_interval(self,interval: int):\n self.attr_interval = interval", "def advertisement_interval(self, advertisement_interval: str):\n\n self._advertisement_interval = advertisement_interval", "def set_interval(self, interval):\n self.interval = interval\n self.xml.load_interval(interval)", "def c_in_scan(self, low_counter_num, high_counter_num, samples_per_counter,\r\n rate, options, flags, data):\r\n # type: (int, int, int, float, ScanOption, CInScanFlag, Array[int]) -> float\r\n rate = c_double(rate)\r\n err = lib.ulCInScan(self.__handle, low_counter_num, high_counter_num,\r\n samples_per_counter, byref(rate),\r\n options, flags, data)\r\n if err != 0:\r\n raise ULException(err)\r\n return rate.value", "def pitch_scan_status_changed(self, status):\n self.scan_status = status", "def get_scan_status(self):\r\n # type: () -> tuple[ScanStatus, TransferStatus]\r\n scan_status = c_uint()\r\n transfer_status = TransferStatus()\r\n\r\n err = lib.ulCInScanStatus(self.__handle, byref(scan_status),\r\n byref(transfer_status))\r\n if err != 0:\r\n raise ULException(err)\r\n\r\n return ScanStatus(scan_status.value), transfer_status", "def man_scan(self, asynchron=True):\r\n interval = self.Stat.interval\r\n self.scan_start()\r\n\r\n def asynchron_timer(interval=interval):\r\n self.get_scancount()\r\n self.get_awn()\r\n if self.Stat.scancount != 
0:\r\n self.step_next()\r\n timer = Timer(interval, asynchron_timer)\r\n timer.start()\r\n else:\r\n pass\r\n\r\n if asynchron is False:\r\n from time import sleep\r\n sleep(3)\r\n while True:\r\n self.get_scancount()\r\n if self.Stat.scancount == 0:\r\n break\r\n self.step_next()\r\n sleep(interval)\r\n\r\n else:\r\n from threading import Timer\r\n timer = Timer(interval, asynchron_timer)\r\n timer.start()", "def advertisement_interval(self) -> str:\n return self._advertisement_interval", "def probe_interval_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"probe_interval_in_seconds\")", "def _estimate_scan_duration(dx):\n sdur = []\n for channel in np.unique(dx.channel):\n d = dx[(dx.scan==1) & (dx.channel==channel)]\n sdur.append((d.time.tolist()[-1].to_pydatetime() - d.time.tolist()[-2].to_pydatetime()).total_seconds())\n return int( (np.max(sdur)+10)/60. )", "def scan_time(self):\n ret = self._get_attr(\"scanTime\")\n return ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for scanning_dwell_time, mapped from YANG variable /access_points/access_point/radios/radio/state/scanning_dwell_time (uint16)
def _set_scanning_dwell_time(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="scanning-dwell-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint16', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """scanning_dwell_time must be of a type compatible with uint16""",
      'defined-type': "uint16",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="scanning-dwell-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint16', is_config=False)""",
    })

  self.__scanning_dwell_time = t
  if hasattr(self, '_set'):
    self._set()
[ "def time_selection(self):\r\n\t\tbus.write_byte_data(TMD2671_DEFAULT_ADDRESS, TMD2671_REG_PTIME | TMD2671_COMMAND_BIT, TMD2671_REG_PTIME_2_72)\r\n\t\t\r\n\t\t\"\"\"Select the WTIME register configuration from the given provided values\"\"\"\r\n\t\tbus.write_byte_data(TMD2671_DEFAULT_ADDRESS, TMD2671_REG_WTIME | TMD2671_COMMAND_BIT, TMD2671_REG_WTIME_2_72)", "async def set_dhw_ovrd(call: ServiceCall) -> None:\n gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]\n await gw_dev.gateway.set_hot_water_ovrd(call.data[ATTR_DHW_OVRD])", "def scan_time(self):\n ret = self._get_attr(\"scanTime\")\n return ret", "def _set_blacklist_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"blacklist-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint16', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"blacklist_time must be of a type compatible with uint16\"\"\",\n 'defined-type': \"uint16\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"blacklist-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint16', is_config=False)\"\"\",\n })\n\n self.__blacklist_time = t\n if hasattr(self, '_set'):\n self._set()", "def _set_blacklist_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"blacklist-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint16', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"blacklist_time must be of a type compatible with uint16\"\"\",\n 'defined-type': \"uint16\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"blacklist-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint16', is_config=True)\"\"\",\n })\n\n self.__blacklist_time = t\n if hasattr(self, '_set'):\n self._set()", "def __encode_time(self, time_lsw, time_msw):\n\n msw_word_len = self._config.get(time_msw).word_len\n msw_data = self.raw.get(time_msw)\n lsw_data = self.raw.get(time_lsw)\n double_word = ((msw_data << msw_word_len) | lsw_data)\n return double_word", "def electrons_released_from_electrons_and_dwell_time(self, electrons, dwell_time=1):\r\n return electrons * (1 - self.fill_fraction_from_time_elapsed(dwell_time))", "def get_updated_time_day(self, time, day, ride_time ):\n if (time + int(ride_time)) < 24: # Same day\n time = time + math.ceil(ride_time) #Since next ride is available at hourly interval, we take math.ceil to calculate the next 
request time\n else: # next day\n time = (time + math.ceil(ride_time)) % 24 \n num_of_days = (time + math.ceil(ride_time)) // 24\n day = (day + num_of_days ) % 7\n return time, day", "def _estimate_scan_duration(dx):\n sdur = []\n for channel in np.unique(dx.channel):\n d = dx[(dx.scan==1) & (dx.channel==channel)]\n sdur.append((d.time.tolist()[-1].to_pydatetime() - d.time.tolist()[-2].to_pydatetime()).total_seconds())\n return int( (np.max(sdur)+10)/60. )", "def SetFallTime(self, time):\n self._PWriteInt('red', 'device/tfall', time)", "def time_selection(self):\n\t\tbus.write_byte_data(TSL27721_DEFAULT_ADDRESS, TSL27721_REG_ATIME | TSL27721_COMMAND_BIT, TSL27721_REG_ATIME_2_73)\n\t\t\n\t\t\"\"\"Select the PTIME register configuration from the given provided values\"\"\"\n\t\tbus.write_byte_data(TSL27721_DEFAULT_ADDRESS, TSL27721_REG_PTIME | TSL27721_COMMAND_BIT, TSL27721_REG_PTIME_2_73)\n\t\t\n\t\t\"\"\"Select the WTIME register configuration from the given provided values\"\"\"\n\t\tbus.write_byte_data(TSL27721_DEFAULT_ADDRESS, TSL27721_REG_WTIME | TSL27721_COMMAND_BIT, TSL27721_REG_WTIME_2_73)", "def SetRiseTime(self, time):\n self._PWriteInt('red', 'device/trise', time)", "def ah_day_night(event_time, horizon='0'):\n time_str = event_time.strftime(\"%Y/%m/%d %H:%M\")\n light = type_of_light('51.153526', '0.858348', time_str,'0', horizon)\n if light == \"day\":\n return 1\n else:\n return 0", "def _set_fec_dm_state_dw(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"fec-dm-state-dw\", rest_name=\"fec-dm-state-dw\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"fec_dm_state_dw must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"fec-dm-state-dw\", rest_name=\"fec-dm-state-dw\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__fec_dm_state_dw = t\n if hasattr(self, '_set'):\n self._set()", "def option_scan_interval(self):\n scan_interval = self.config_entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)\n return timedelta(seconds=scan_interval)", "def night_mode(self, value):\n self._night_mode = value", "def _set_connection_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"connection-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint16', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"connection_time must be of a type compatible with uint16\"\"\",\n 'defined-type': \"uint16\",\n 
'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"connection-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint16', is_config=False)\"\"\",\n })\n\n self.__connection_time = t\n if hasattr(self, '_set'):\n self._set()", "def time_a_switch(self):\n while not self.ruggeduino_input():\n pass\n start = time()\n while self.ruggeduino_input():\n pass\n return time() - start", "def get_objectScanTime(self):\n return self.data[self.system_idx][\"objectScanTime\"]", "async def async_set_day_time(self):\n curr_time = datetime.now()\n day = 0 if curr_time.weekday() == 6 else curr_time.weekday() + 1\n set_time_command = ExtendedSetCommand(\n self._address, cmd2=0x02, data1=0x02, data2=day\n )\n return await set_time_command.async_send(\n data3=curr_time.hour, data4=curr_time.minute, data5=curr_time.second\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for scanning_defer_clients, mapped from YANG variable /access_points/access_point/radios/radio/state/scanning_defer_clients (uint8)
def _set_scanning_defer_clients(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="scanning-defer-clients", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """scanning_defer_clients must be of a type compatible with uint8""",
      'defined-type': "uint8",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="scanning-defer-clients", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='uint8', is_config=False)""",
    })

  self.__scanning_defer_clients = t
  if hasattr(self, '_set'):
    self._set()
[ "def _set_clients(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_clients_openconfig_wifi_mac__ssids_ssid_clients, is_container='container', yang_name=\"clients\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"clients must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_clients_openconfig_wifi_mac__ssids_ssid_clients, is_container='container', yang_name=\"clients\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__clients = t\n if hasattr(self, '_set'):\n self._set()", "def assign_clients(self):\n for drone in self.drones:\n if self.solution[drone]:\n drone.specify_client(self.solution[drone].pop(0))", "def _set_client_rf(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_client_rf_openconfig_wifi_mac__ssids_ssid_clients_client_client_rf, is_container='container', yang_name=\"client-rf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"client_rf must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_client_rf_openconfig_wifi_mac__ssids_ssid_clients_client_client_rf, is_container='container', yang_name=\"client-rf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=False)\"\"\",\n })\n\n self.__client_rf = t\n if hasattr(self, '_set'):\n self._set()", "def _setClients(self, clients):\n for client in clients:\n for stepRec in self.steps:\n if client.name == stepRec['clientName']:\n # Initialize client online/offline deferreds\n onlineDeferred = defer.Deferred().addCallback(self.handleClientReconnect)\n offlineDeferred = defer.Deferred().addCallback(self.handleClientDisconnect)\n # Add these deferreds to corresponding client's dictionaries - to be called back when appropriate events occur\n client.addOnlineDeferred(onlineDeferred, reset = True)\n client.addOfflineDeferred(offlineDeferred, reset = True)\n # Set current step record's 'client' data\n stepRec['client'] = client\n break\n \n # All clients have been set: check 'online' status of all clients, and set workflow's status correspondingly\n self._checkOnline()", "def _set_client(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"mac\",yc_client_openconfig_wifi_mac__ssids_ssid_clients_client, yang_name=\"client\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mac', extensions=None), is_container='list', yang_name=\"client\", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"client must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"mac\",yc_client_openconfig_wifi_mac__ssids_ssid_clients_client, yang_name=\"client\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mac', extensions=None), is_container='list', yang_name=\"client\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__client = t\n if hasattr(self, '_set'):\n self._set()", "def _set_num_associated_clients(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-associated-clients\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"num_associated_clients must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-associated-clients\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__num_associated_clients = t\n if hasattr(self, '_set'):\n self._set()", "def clientBusy(self, client):\n\n if client in self._freeClients:\n self._freeClients.remove(client)\n\n self._busyClients.add(client)\n\n self.log.debug(\"Busied client: {client!r}\", client=client)\n self._logClientStats()", "def find_clients(self, clients: List[wrappers.Window], **matchers: Any) -> List[wrappers.Window]:\n return [r for r in clients if r.matches(**matchers)]", "def get_connected_clients(self) -> set:\n return self.connected_clients", "def number_of_clients(self):\n ret = self._get_attr(\"numberOfClients\")\n return ret", "def first_client(clients, flag_mask, me):\n for client in clients:\n if client.intents & flag_mask == flag_mask:\n return client\n \n return me", "def revguard_client_brand(self, revguard_client_brand):\n\n self._revguard_client_brand = revguard_client_brand", "def inc_ini(self):\n if len(self.clients) < self.MAX_CONN:\n if self.ini < self.MAX_CONN:\n self.ini += 1\n else:\n free = [ i for i in range(1, len(self.clients)+1) if not i in self.clients.keys() ]\n self.ini = free[0]\n return True\n return False", "def set_fsclient(self, fs_client):\n self._afs = fs_client", "def filter_clients(clients, flag_mask, me):\n iterator = iter(clients)\n for client in iterator:\n if client.intents & flag_mask == flag_mask:\n break\n \n else:\n yield me\n yield me\n return\n \n yield client\n yield client\n \n for client in iterator:\n if 
client.intents & flag_mask == flag_mask:\n yield client", "def get_clientrssi():\n input = os.popen(\n '/System/Library/PrivateFrameworks/Apple80211.framework/Versions/A/Resources/airport -I')\n return int(''.join([x.split()[1] for x in input if 'agrCtlRSSI' in x]))", "async def send_clients_event(self):\n\t\tself.check_and_repair_host()\n\n\t\tawait self.broadcast(self.clients_message())", "def SetDHCPClient(self, client):\n print \"Setting dhcp client to %i\" % (int(client))\n self.dhcp_client = int(client)\n self.wifi.dhcp_client = int(client)\n self.wired.dhcp_client = int(client)\n self.config.set(\"Settings\", \"dhcp_client\", client, write=True)", "def onPlayerConnect(self, client):\n self.debug('Connecting slot: %s, %s, %s' % (client.cid, client.name, client.ip))\n countryId = self.gi.id_by_addr(str(client.ip))\n countryCode = GeoIP.id_to_country_code(countryId)\n country = self.idToCountry(countryId)\n self.debug('Country: %s' % (country))\n if self.isAllowConnect(countryCode):\n if 0 < len(self.cf_allow_message) and (not self.isMessageExcludeFrom(countryCode)):\n message = self.getMessage('cf_allow_message', { 'name':client.name, 'country':country})\n self.console.say(message)\n pass # do nothing\n else:\n if 0 < len(self.cf_deny_message) and (not self.isMessageExcludeFrom(countryCode)):\n message = self.getMessage('cf_deny_message', { 'name':client.name, 'country':country})\n self.console.say(message)\n client.kick(': Your Country was REJECTED by B3 - CountryFilter')\n self.debug('Connecting done.')", "def _set_client_connection(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_client_connection_openconfig_wifi_mac__ssids_ssid_clients_client_client_connection, is_container='container', yang_name=\"client-connection\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"client_connection must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_client_connection_openconfig_wifi_mac__ssids_ssid_clients_client_client_connection, is_container='container', yang_name=\"client-connection\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=False)\"\"\",\n })\n\n self.__client_connection = t\n if hasattr(self, '_set'):\n self._set()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for scanning_defer_traffic, mapped from YANG variable /access_points/access_point/radios/radio/state/scanning_defer_traffic (boolean)
def _set_scanning_defer_traffic(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="scanning-defer-traffic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """scanning_defer_traffic must be of a type compatible with boolean""",
      'defined-type': "boolean",
      'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="scanning-defer-traffic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=False)""",
    })

  self.__scanning_defer_traffic = t
  if hasattr(self, '_set'):
    self._set()
[ "def ingress_traffic_allowed(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ingress_traffic_allowed\")", "def get_traffic_meter_enabled(self):\n response = self._get(\n c.SERVICE_DEVICE_CONFIG, c.GET_TRAFFIC_METER_ENABLED\n )\n return h.zero_or_one_dict_to_boolean(response)", "def bandwidth_corrected(self, value: Optional[Boolean]):\n\n if value is not None:\n attest(\n isinstance(value, bool),\n f'\"bandwidth_corrected\" property: \"{value}\" type is not \"bool\"!',\n )\n\n self._bandwidth_corrected = value", "def dns_active(self, dns_active: bool):\n self._indicator_data['flag1'] = self.util.to_bool(dns_active)", "def _set_bpdu_guard(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"bpdu-guard\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"bpdu_guard must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"bpdu-guard\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__bpdu_guard = t\n if hasattr(self, '_set'):\n self._set()", "def set_ra(self, b):\n _ldns.ldns_pkt_set_ra(self, b)\n #parameters: ldns_pkt *,bool,\n #retvals: ", "async def set_is_watering(self, is_watering: bool):\n self._is_watering = is_watering", "def check_lazy_plan(self, distance, grasp_switched, distance_travelled):\n do_lazy_plan = distance > self.lazy_threshold and \\\n distance_travelled < self.distance_travelled_threshold and \\\n self.robot.arm_discretized_plan is not None and \\\n self.robot.arm_wp_target_index != len(self.robot.arm_discretized_plan) and \\\n not grasp_switched\n return do_lazy_plan", "def set_ad(self, b):\n _ldns.ldns_pkt_set_ad(self, b)\n #parameters: ldns_pkt *,bool,\n #retvals: ", "def dns_active(self) -> bool:\n return self._indicator_data.get('flag1') # type: ignore", "def incoming_traffic_blocked(self):\n if \"incomingTrafficBlocked\" in self._prop_dict:\n return self._prop_dict[\"incomingTrafficBlocked\"]\n else:\n return None", "def is_direct_transfer(filespair):\n # type: (dict) -> bool\n return 'storage_account_settings' not in filespair['destination']", "def allow_drag(self, flag: bool):\n self._allow_drag = flag\n if self._allow_drag and not self._drag_setup:\n self.bind_all('<Motion>', self._drag_handler)\n self.bind_all('<ButtonRelease-1>', self._drag_handler)\n self._drag_setup = True", "def getEntertainmentFastPassAvailable(self):\n bool = self.__data['fastPass']\n if bool == 'true':\n return True\n else:\n return False", "def SendStartScanSignal(self):\n self._scanning = True", "def set_ads_drate(self, ads_num, data_rate):\n\t\treturn self.config_ads(ads_num, 1, data_rate)", "def isDelaySensorPending(self) -> \"SbBool\":\n return _coin.SoSensorManager_isDelaySensorPending(self)", "def parse_dependent_requests_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"parse_dependent_requests_enabled\")", "def user_traffic_over_limit(username):\n if not CHECK_SHARE_LINK_TRAFFIC:\n return 
False\n\n from seahub_extra.plan.models import UserPlan\n from seahub_extra.plan.settings import PLAN\n up = UserPlan.objects.get_valid_plan_by_user(username)\n plan = 'Free' if up is None else up.plan_type\n traffic_limit = int(PLAN[plan]['share_link_traffic']) * 1024 * 1024 * 1024\n\n try:\n stat = get_user_traffic_stat(username)\n except Exception as e:\n logger = logging.getLogger(__name__)\n logger.error('Failed to get user traffic stat: %s' % username,\n exc_info=True)\n return True\n\n if stat is None: # No traffic record yet\n return False\n\n month_traffic = stat['file_view'] + stat['file_download'] + stat['dir_download']\n return True if month_traffic >= traffic_limit else False", "def is_scan_enabled(self):\n return self._is_scan_enabled" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Getter method for software_selectable, mapped from YANG variable /access_points/access_point/radios/radio/state/software_selectable (boolean)
def _get_software_selectable(self):
  return self.__software_selectable
[ "def glIsSoftwareRenderer(self):\n return self.__glIsSoftware", "def software_type(self):\n return self._software_type", "def feature_enabled(self, feature_name):\n feature_list = self.prop('available-features-list', None)\n if feature_list is None:\n raise ValueError(\"Firmware features are not supported on CPC {}\"\n .format(self.name))\n for feature in feature_list:\n if feature['name'] == feature_name:\n break\n else:\n raise ValueError(\"Firmware feature {} is not available on CPC {}\"\n .format(feature_name, self.name))\n return feature['state'] # pylint: disable=undefined-loop-variable", "def _get_isTrackingToImproveSoftwareEnabled(self) -> \"bool\" :\n return _core.ProductUsageData__get_isTrackingToImproveSoftwareEnabled(self)", "def isEnabled(self) -> \"SbBool\":\n return _coin.SoAudioDevice_isEnabled(self)", "def _get_isMultiSelectEnabled(self) -> \"bool\" :\n return _core.ButtonRowCommandInput__get_isMultiSelectEnabled(self)", "def is_enabled_for(self, security_state: Target.SecurityState) -> bool:\n assert isinstance(security_state, Target.SecurityState)\n\n # Call to superclass to read CSW. We want to bypass our CSW cache since the enable signal can change\n # asynchronously.\n csw = AccessPort.read_reg(self, self._reg_offset + MEM_AP_CSW)\n if security_state is Target.SecurityState.NONSECURE:\n # Nonsecure transfers are always allowed when security transfers are enabled.\n return (csw & (CSW_DEVICEEN | CSW_SDEVICEEN)) != 0\n elif security_state is Target.SecurityState.SECURE:\n return (csw & CSW_SDEVICEEN) != 0\n else:\n assert False, \"unsupported security state\"", "def softwareEnvModifier(self):\n return self.__softwareEnvModifier", "def is_enabled(self) -> bool:\n if not self._system.dax_sim_enabled:\n # Check if the system was just booted\n last_asf = self.core_cache.get(self._CACHE_LAST_ASF_KEY)\n if len(last_asf) == 0:\n # Device was just booted, trap RF is off\n return False\n\n # Return the enabled flag stored as a system dataset\n # Can raise a KeyError if the key was not set before, which means the state is ambiguous\n enabled: bool = self.get_dataset_sys(self._ENABLED_KEY) # Helps the type checker\n return enabled", "def _get_isSelected(self) -> \"bool\" :\n return _core.ListItem__get_isSelected(self)", "def _get_isMultiSelectEnabled(self) -> \"bool\" :\n return _core.FileDialog__get_isMultiSelectEnabled(self)", "def get_available_software_updates(self, per_user=True):\n self._run_if_none(self._available_software_updates)\n if per_user is True:\n return self._user_tagged_available_software_updates\n return self._available_software_updates", "def is_selected(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.selected\", \r\n self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, int)", "def isSelected(self, *args) -> \"SbBool\":\n return _coin.SoSelection_isSelected(self, *args)", "def isPortableEnabled(): #$NON-NLS-1$\r\n global PORTABLE_ENABLED\r\n if PORTABLE_ENABLED is None:\r\n PORTABLE_ENABLED = False\r\n\r\n # If the OS Util thinks we're installed as a portable app, then it is\r\n # probably true, unless overridden by the cmd line.\r\n osutil = getOSUtil()\r\n if osutil.isInstalledAsPortable():\r\n PORTABLE_ENABLED = True\r\n \r\n # The command line parameter can always override the value.\r\n cmdLineParams = getCommandLineParameters()\r\n if cmdLineParams is not None and u\"portable\" in cmdLineParams: #$NON-NLS-1$\r\n PORTABLE_ENABLED = cmdLineParams[u\"portable\"] == 
u\"true\" #$NON-NLS-1$ #$NON-NLS-2$\r\n\r\n return PORTABLE_ENABLED", "def pip_enabled(self, state):\n if self.with_pip_check_box.isChecked():\n self.requirements_line.setEnabled(True)\n self.select_file_button.setEnabled(True)\n else:\n self.requirements_line.setEnabled(False)\n self.select_file_button.setEnabled(False)", "def select(self, boolean_expression, mode=\"replace\", name=\"default\", executor=None):\n\t\tif boolean_expression is None and not self.has_selection(name=name):\n\t\t\tpass # we don't want to pollute the history with many None selections\n\t\t\tself.signal_selection_changed.emit(self) # TODO: unittest want to know, does this make sense?\n\t\telse:\n\t\t\tdef create(current):\n\t\t\t\treturn SelectionExpression(self, boolean_expression, current, mode) if boolean_expression else None\n\t\t\treturn self._selection(create, name)", "def get_software_edition(self):\n\n return self.get_attribute_values(CPEComponent.ATT_SW_EDITION)", "def _get_isEnabledCheckBoxChecked(self) -> \"bool\" :\n return _core.GroupCommandInput__get_isEnabledCheckBoxChecked(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for software_selectable, mapped from YANG variable /access_points/access_point/radios/radio/state/software_selectable (boolean)
def _set_software_selectable(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="software-selectable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """software_selectable must be of a type compatible with boolean""",
      'defined-type': "boolean",
      'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="software-selectable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='boolean', is_config=False)""",
    })

  self.__software_selectable = t
  if hasattr(self, '_set'):
    self._set()
[ "def _get_software_selectable(self):\n return self.__software_selectable", "def glIsSoftwareRenderer(self):\n return self.__glIsSoftware", "def software_type(self):\n return self._software_type", "def set_software(self, software):\n self.directives.append(\"-l software={}\".format(software))", "def __setSoftware(self, software):\n assert isinstance(software, uver.Versioned.Software), \\\n \"Invalid software type!\"\n\n self.__software = software", "def _set_isTrackingToImproveSoftwareEnabled(self, *args) -> \"bool\" :\n return _core.ProductUsageData__set_isTrackingToImproveSoftwareEnabled(self, *args)", "def _get_isTrackingToImproveSoftwareEnabled(self) -> \"bool\" :\n return _core.ProductUsageData__get_isTrackingToImproveSoftwareEnabled(self)", "def software_type(self, software_type):\n self._software_type = software_type", "def _get_isMultiSelectEnabled(self) -> \"bool\" :\n return _core.ButtonRowCommandInput__get_isMultiSelectEnabled(self)", "def isEnabled(self) -> \"SbBool\":\n return _coin.SoAudioDevice_isEnabled(self)", "def softwareEnvModifier(self):\n return self.__softwareEnvModifier", "def isSelected(self, *args) -> \"SbBool\":\n return _coin.SoSelection_isSelected(self, *args)", "def pip_enabled(self, state):\n if self.with_pip_check_box.isChecked():\n self.requirements_line.setEnabled(True)\n self.select_file_button.setEnabled(True)\n else:\n self.requirements_line.setEnabled(False)\n self.select_file_button.setEnabled(False)", "def feature_enabled(self, feature_name):\n feature_list = self.prop('available-features-list', None)\n if feature_list is None:\n raise ValueError(\"Firmware features are not supported on CPC {}\"\n .format(self.name))\n for feature in feature_list:\n if feature['name'] == feature_name:\n break\n else:\n raise ValueError(\"Firmware feature {} is not available on CPC {}\"\n .format(feature_name, self.name))\n return feature['state'] # pylint: disable=undefined-loop-variable", "def _get_isMultiSelectEnabled(self) -> \"bool\" :\n return _core.FileDialog__get_isMultiSelectEnabled(self)", "def is_enabled_for(self, security_state: Target.SecurityState) -> bool:\n assert isinstance(security_state, Target.SecurityState)\n\n # Call to superclass to read CSW. 
We want to bypass our CSW cache since the enable signal can change\n # asynchronously.\n csw = AccessPort.read_reg(self, self._reg_offset + MEM_AP_CSW)\n if security_state is Target.SecurityState.NONSECURE:\n # Nonsecure transfers are always allowed when security transfers are enabled.\n return (csw & (CSW_DEVICEEN | CSW_SDEVICEEN)) != 0\n elif security_state is Target.SecurityState.SECURE:\n return (csw & CSW_SDEVICEEN) != 0\n else:\n assert False, \"unsupported security state\"", "def enable_selection(self):\n\t\tENABLE_CONFIGURATION = (TSL27721_REG_ENABLE_WEN | TSL27721_REG_ENABLE_PEN | TSL27721_REG_ENABLE_AEN | TSL27721_REG_ENABLE_PON)\n\t\tbus.write_byte_data(TSL27721_DEFAULT_ADDRESS, TSL27721_REG_ENABLE | TSL27721_COMMAND_BIT, ENABLE_CONFIGURATION)", "def _set_isMultiSelectEnabled(self, *args) -> \"bool\" :\n return _core.FileDialog__set_isMultiSelectEnabled(self, *args)", "def get_available_software_updates(self, per_user=True):\n self._run_if_none(self._available_software_updates)\n if per_user is True:\n return self._user_tagged_available_software_updates\n return self._available_software_updates", "def isPortableEnabled(): #$NON-NLS-1$\r\n global PORTABLE_ENABLED\r\n if PORTABLE_ENABLED is None:\r\n PORTABLE_ENABLED = False\r\n\r\n # If the OS Util thinks we're installed as a portable app, then it is\r\n # probably true, unless overridden by the cmd line.\r\n osutil = getOSUtil()\r\n if osutil.isInstalledAsPortable():\r\n PORTABLE_ENABLED = True\r\n \r\n # The command line parameter can always override the value.\r\n cmdLineParams = getCommandLineParameters()\r\n if cmdLineParams is not None and u\"portable\" in cmdLineParams: #$NON-NLS-1$\r\n PORTABLE_ENABLED = cmdLineParams[u\"portable\"] == u\"true\" #$NON-NLS-1$ #$NON-NLS-2$\r\n\r\n return PORTABLE_ENABLED" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Getter method for channel_change_reason, mapped from YANG variable /access_points/access_point/radios/radio/state/channel_change_reason (identityref)
def _get_channel_change_reason(self):
  return self.__channel_change_reason
[ "def _set_channel_change_reason(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'DFS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:DFS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:DFS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'NOISE': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:NOISE': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:NOISE': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'ERRORS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:ERRORS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:ERRORS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'BETTER_CHANNEL': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:BETTER_CHANNEL': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:BETTER_CHANNEL': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}},), is_leaf=True, yang_name=\"channel-change-reason\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='identityref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"channel_change_reason must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-access-points:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'DFS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:DFS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:DFS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'NOISE': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:NOISE': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:NOISE': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'ERRORS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:ERRORS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:ERRORS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'BETTER_CHANNEL': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:BETTER_CHANNEL': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:BETTER_CHANNEL': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}},), 
is_leaf=True, yang_name=\"channel-change-reason\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='identityref', is_config=False)\"\"\",\n })\n\n self.__channel_change_reason = t\n if hasattr(self, '_set'):\n self._set()", "def get_change(self, change_id: ChangeID) -> Change:\n resp = self._request('GET', '/v1/changes/{}'.format(change_id))\n return Change.from_dict(resp['result'])", "async def edit(\n self,\n reason: Optional[str] = None,\n **kwargs\n ):\n headers = {}\n\n if reason is not None:\n headers[\"X-Audit-Log-Reason\"] = str(reason)\n\n data = await self._http.patch(\n f\"channels/{self.id}\",\n kwargs,\n headers=headers\n )\n data.update(construct_client_dict(\n self._client,\n {\"type\": ChannelType(data.pop(\"type\"))}\n ))\n channel_cls = _channel_type_map.get(data[\"type\"], Channel)\n return channel_cls.from_dict(data)", "def detected_change(self):\n return self.broker.detected_change(**{\"ChangeTraceID\": self.ChangeTraceID})", "def change_id(self):\n try:\n cids = self.tags[\"Change-Id\"]\n except KeyError:\n return None\n\n assert len(cids) == 1\n return cids[0]", "def getChannel(self):\n for evt in self.__events:\n if isinstance(evt, ChannelEvent):\n return evt.channel\n return None", "def get_change(self, cnum=None, cdate=None, db=None):\n if cdate is None:\n row = self._find_change(cnum)\n if not row:\n return\n cdate = from_utimestamp(row[0])\n ts = to_utimestamp(cdate)\n fields = {}\n change = {'date': cdate, 'fields': fields}\n for field, author, old, new in self.env.db_query(\"\"\"\n SELECT field, author, oldvalue, newvalue\n FROM ticket_change WHERE ticket=%s AND time=%s\n \"\"\", (self.id, ts)):\n fields[field] = {'author': author, 'old': old, 'new': new}\n if field == 'comment':\n change['author'] = author\n elif not field.startswith('_'):\n change.setdefault('author', author)\n if fields:\n return change", "def on_channel_closed(self, channel, reply_code, reply_text):\n self.log.debug('CHANNEL%i closed', channel.channel_number)", "def get_message_channel(self, message):\n if isinstance(message.get(\"channel\"), str): return message[\"channel\"]\n return None", "def _hangup_reason(self, channel, event):\n hangup_cause = int(event['Cause'])\n\n # See https://wiki.asterisk.org/wiki/display/AST/Hangup+Cause+Mappings\n if hangup_cause == AST_CAUSE_NORMAL_CLEARING:\n # If channel is not up, the call never really connected.\n # This happens when call confirmation is unsuccessful.\n if channel.is_up:\n return 'completed'\n else:\n return 'no-answer'\n elif hangup_cause == AST_CAUSE_USER_BUSY:\n return 'busy'\n elif hangup_cause in (AST_CAUSE_NO_USER_RESPONSE, AST_CAUSE_NO_ANSWER):\n return 'no-answer'\n elif hangup_cause == AST_CAUSE_ANSWERED_ELSEWHERE:\n return 'answered-elsewhere'\n elif hangup_cause == AST_CAUSE_CALL_REJECTED:\n return 'rejected'\n elif hangup_cause == AST_CAUSE_UNKNOWN:\n # Sometimes Asterisk doesn't set a proper hangup cause.\n # If our a_chan is already up, this probably means the\n # call was successful. 
If not, that means A hanged up,\n # which we assign the \"cancelled\" status.\n if channel.is_up:\n return 'completed'\n else:\n return 'cancelled'\n else:\n return 'failed'", "def _GetRecommendationChangeType(recommendation_change_type):\n if not recommendation_change_type:\n return None\n\n messages = recommender_service.RecommenderMessages()\n\n if recommendation_change_type.lower() == 'leave_unchanged':\n return messages.GoogleCloudRecommenderV1alpha2MarkInsightDismissedRequest.RecommendationChangeTypeValueValuesEnum(\n 'LEAVE_RECOMMENDATIONS_UNCHANGED')\n elif recommendation_change_type.lower() == 'dismiss':\n return messages.GoogleCloudRecommenderV1alpha2MarkInsightDismissedRequest.RecommendationChangeTypeValueValuesEnum(\n 'DISMISS_RECOMMENDATIONS')\n else:\n return None", "def delete_change(self, cnum=None, cdate=None, when=None):\n if cdate is None:\n row = self._find_change(cnum)\n if not row:\n return\n cdate = from_utimestamp(row[0])\n ts = to_utimestamp(cdate)\n if when is None:\n when = datetime.now(utc)\n when_ts = to_utimestamp(when)\n\n with self.env.db_transaction as db:\n # Find modified fields and their previous value\n fields = [(field, old, new)\n for field, old, new in db(\"\"\"\n SELECT field, oldvalue, newvalue FROM ticket_change\n WHERE ticket=%s AND time=%s\n \"\"\", (self.id, ts))\n if field != 'comment' and not field.startswith('_')]\n for field, oldvalue, newvalue in fields:\n # Find the next change\n for next_ts, in db(\"\"\"SELECT time FROM ticket_change\n WHERE ticket=%s AND time>%s AND field=%s\n LIMIT 1\n \"\"\", (self.id, ts, field)):\n # Modify the old value of the next change if it is equal\n # to the new value of the deleted change\n db(\"\"\"UPDATE ticket_change SET oldvalue=%s\n WHERE ticket=%s AND time=%s AND field=%s\n AND oldvalue=%s\n \"\"\", (oldvalue, self.id, next_ts, field, newvalue))\n break\n else:\n # No next change, edit ticket field\n if field in self.std_fields:\n db(\"UPDATE ticket SET %s=%%s WHERE id=%%s\"\n % field, (oldvalue, self.id))\n else:\n db(\"\"\"UPDATE ticket_custom SET value=%s\n WHERE ticket=%s AND name=%s\n \"\"\", (oldvalue, self.id, field))\n\n # Delete the change\n db(\"DELETE FROM ticket_change WHERE ticket=%s AND time=%s\",\n (self.id, ts))\n\n # Update last changed time\n db(\"UPDATE ticket SET changetime=%s WHERE id=%s\",\n (when_ts, self.id))\n\n self._fetch_ticket(self.id)", "async def on_channel_update(before: discord.Channel, after: discord.Channel):\n if after.is_private:\n return\n\n changelog_channel = get_changelog_channel(after.server)\n if not changelog_channel:\n return\n\n # We only want to update when a name change is performed\n if before.name == after.name:\n return\n\n # Differ between voice channels and text channels\n if after.type == discord.ChannelType.text:\n await log_change(\n changelog_channel, \"Channel **#{0.name}** changed name to {1.mention}, **{1.name}**.\".format(before, after))\n else:\n await log_change(\n changelog_channel, \"Voice channel **{0.name}** changed name to **{1.name}**.\".format(before, after))", "def modify_comment(self, cdate, author, comment, when=None):\n ts = to_utimestamp(cdate)\n if when is None:\n when = datetime.now(utc)\n when_ts = to_utimestamp(when)\n\n with self.env.db_transaction as db:\n # Find the current value of the comment\n old_comment = False\n for old_comment, in db(\"\"\"\n SELECT newvalue FROM ticket_change\n WHERE ticket=%s AND time=%s AND field='comment'\n \"\"\", (self.id, ts)):\n break\n if comment == (old_comment or ''):\n return\n\n # 
Comment history is stored in fields named \"_comment%d\"\n # Find the next edit number\n fields = db(\"\"\"SELECT field FROM ticket_change\n WHERE ticket=%%s AND time=%%s AND field %s\n \"\"\" % db.like(),\n (self.id, ts, db.like_escape('_comment') + '%'))\n rev = max(int(field[8:]) for field, in fields) + 1 if fields else 0\n db(\"\"\"INSERT INTO ticket_change\n (ticket,time,author,field,oldvalue,newvalue)\n VALUES (%s,%s,%s,%s,%s,%s)\n \"\"\", (self.id, ts, author, '_comment%d' % rev,\n old_comment or '', str(when_ts)))\n if old_comment is False:\n # There was no comment field, add one, find the\n # original author in one of the other changed fields\n old_author = None\n for old_author, in db(\"\"\"\n SELECT author FROM ticket_change\n WHERE ticket=%%s AND time=%%s AND NOT field %s LIMIT 1\n \"\"\" % db.like(),\n (self.id, ts, db.like_escape('_') + '%')):\n db(\"\"\"INSERT INTO ticket_change\n (ticket,time,author,field,oldvalue,newvalue)\n VALUES (%s,%s,%s,'comment','',%s)\n \"\"\", (self.id, ts, old_author, comment))\n else:\n db(\"\"\"UPDATE ticket_change SET newvalue=%s\n WHERE ticket=%s AND time=%s AND field='comment'\n \"\"\", (comment, self.id, ts))\n\n # Update last changed time\n db(\"UPDATE ticket SET changetime=%s WHERE id=%s\",\n (when_ts, self.id))\n\n self.values['changetime'] = when", "def getRefChan(self, spwid=int(0)):\n schema = {'spwid': {'type': 'cInt'}}\n doc = {'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getRefChan_result = self._swigobj.getRefChan(_pc.document['spwid'])\n return _getRefChan_result", "def abort_change(self, change_id: ChangeID) -> Change:\n body = {'action': 'abort'}\n resp = self._request('POST', '/v1/changes/{}'.format(change_id), body=body)\n return Change.from_dict(resp['result'])", "def channelClosed(self, channel):\n if channel in self.channelsToRemoteChannel: # actually open\n channel.localClosed = channel.remoteClosed = True\n del self.localToRemoteChannel[channel.id]\n del self.channels[channel.id]\n del self.channelsToRemoteChannel[channel]\n for d in self.deferreds.pop(channel.id, []):\n d.errback(error.ConchError(\"Channel closed.\"))\n log.callWithLogger(channel, channel.closed)", "def channelLeft(self, channel):", "def get_channel_id_from_message(message_id):\n data = get_data()\n for channel in data[\"channels\"]:\n for message in channel[\"messages\"]:\n if message[\"message_id\"] == message_id:\n return channel[\"channel_id\"]\n\n return None", "def status_change_comment(self, status_change_comment):\n\n self._status_change_comment = status_change_comment" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for channel_change_reason, mapped from YANG variable /access_points/access_point/radios/radio/state/channel_change_reason (identityref)
def _set_channel_change_reason(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'DFS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:DFS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:DFS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'NOISE': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:NOISE': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:NOISE': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'ERRORS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:ERRORS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:ERRORS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'BETTER_CHANNEL': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:BETTER_CHANNEL': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:BETTER_CHANNEL': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}},), is_leaf=True, yang_name="channel-change-reason", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='identityref', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """channel_change_reason must be of a type compatible with identityref""", 'defined-type': "openconfig-access-points:identityref", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'DFS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:DFS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:DFS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'NOISE': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:NOISE': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:NOISE': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'ERRORS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:ERRORS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:ERRORS': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'BETTER_CHANNEL': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi-types:BETTER_CHANNEL': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}, 'oc-wifi:BETTER_CHANNEL': {'@module': 'openconfig-wifi-types', '@namespace': 'http://openconfig.net/yang/wifi/types'}},), is_leaf=True, 
yang_name="channel-change-reason", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='identityref', is_config=False)""", }) self.__channel_change_reason = t if hasattr(self, '_set'): self._set()
[ "async def edit(\n self,\n reason: Optional[str] = None,\n **kwargs\n ):\n headers = {}\n\n if reason is not None:\n headers[\"X-Audit-Log-Reason\"] = str(reason)\n\n data = await self._http.patch(\n f\"channels/{self.id}\",\n kwargs,\n headers=headers\n )\n data.update(construct_client_dict(\n self._client,\n {\"type\": ChannelType(data.pop(\"type\"))}\n ))\n channel_cls = _channel_type_map.get(data[\"type\"], Channel)\n return channel_cls.from_dict(data)", "def status_change_comment(self, status_change_comment):\n\n self._status_change_comment = status_change_comment", "def setDisconnectReason(self, disconnectCode):\n requesterId = self.air.getAvatarIdFromSender()\n self.notify.info(\"Client %s leaving for reason %s (%s).\" % (\n requesterId, disconnectCode,\n OTPGlobals.DisconnectReasons.get(disconnectCode,\n 'invalid reason')))\n\n if disconnectCode in OTPGlobals.DisconnectReasons:\n if hasattr(self.air, 'setAvatarDisconnectReason'):\n self.air.setAvatarDisconnectReason(requesterId, disconnectCode)\n else:\n self.air.writeServerEvent(\n 'suspicious', requesterId, 'invalid disconnect reason: %s' % disconnectCode)", "def on_channel_closed(self, channel, reply_code, reply_text):\n self.log.debug('CHANNEL%i closed', channel.channel_number)", "def change_mode(self, change_mode):\n allowed_values = [\"immediate\", \"delayed\"]\n if change_mode not in allowed_values:\n raise ValueError(\n \"Invalid value for `change_mode` ({0}), must be one of {1}\"\n .format(change_mode, allowed_values)\n )\n\n self._change_mode = change_mode", "def _hangup_reason(self, channel, event):\n hangup_cause = int(event['Cause'])\n\n # See https://wiki.asterisk.org/wiki/display/AST/Hangup+Cause+Mappings\n if hangup_cause == AST_CAUSE_NORMAL_CLEARING:\n # If channel is not up, the call never really connected.\n # This happens when call confirmation is unsuccessful.\n if channel.is_up:\n return 'completed'\n else:\n return 'no-answer'\n elif hangup_cause == AST_CAUSE_USER_BUSY:\n return 'busy'\n elif hangup_cause in (AST_CAUSE_NO_USER_RESPONSE, AST_CAUSE_NO_ANSWER):\n return 'no-answer'\n elif hangup_cause == AST_CAUSE_ANSWERED_ELSEWHERE:\n return 'answered-elsewhere'\n elif hangup_cause == AST_CAUSE_CALL_REJECTED:\n return 'rejected'\n elif hangup_cause == AST_CAUSE_UNKNOWN:\n # Sometimes Asterisk doesn't set a proper hangup cause.\n # If our a_chan is already up, this probably means the\n # call was successful. 
If not, that means A hanged up,\n # which we assign the \"cancelled\" status.\n if channel.is_up:\n return 'completed'\n else:\n return 'cancelled'\n else:\n return 'failed'", "def channelClosed(self, channel):\n if channel in self.channelsToRemoteChannel: # actually open\n channel.localClosed = channel.remoteClosed = True\n del self.localToRemoteChannel[channel.id]\n del self.channels[channel.id]\n del self.channelsToRemoteChannel[channel]\n for d in self.deferreds.pop(channel.id, []):\n d.errback(error.ConchError(\"Channel closed.\"))\n log.callWithLogger(channel, channel.closed)", "def delete_change(self, cnum=None, cdate=None, when=None):\n if cdate is None:\n row = self._find_change(cnum)\n if not row:\n return\n cdate = from_utimestamp(row[0])\n ts = to_utimestamp(cdate)\n if when is None:\n when = datetime.now(utc)\n when_ts = to_utimestamp(when)\n\n with self.env.db_transaction as db:\n # Find modified fields and their previous value\n fields = [(field, old, new)\n for field, old, new in db(\"\"\"\n SELECT field, oldvalue, newvalue FROM ticket_change\n WHERE ticket=%s AND time=%s\n \"\"\", (self.id, ts))\n if field != 'comment' and not field.startswith('_')]\n for field, oldvalue, newvalue in fields:\n # Find the next change\n for next_ts, in db(\"\"\"SELECT time FROM ticket_change\n WHERE ticket=%s AND time>%s AND field=%s\n LIMIT 1\n \"\"\", (self.id, ts, field)):\n # Modify the old value of the next change if it is equal\n # to the new value of the deleted change\n db(\"\"\"UPDATE ticket_change SET oldvalue=%s\n WHERE ticket=%s AND time=%s AND field=%s\n AND oldvalue=%s\n \"\"\", (oldvalue, self.id, next_ts, field, newvalue))\n break\n else:\n # No next change, edit ticket field\n if field in self.std_fields:\n db(\"UPDATE ticket SET %s=%%s WHERE id=%%s\"\n % field, (oldvalue, self.id))\n else:\n db(\"\"\"UPDATE ticket_custom SET value=%s\n WHERE ticket=%s AND name=%s\n \"\"\", (oldvalue, self.id, field))\n\n # Delete the change\n db(\"DELETE FROM ticket_change WHERE ticket=%s AND time=%s\",\n (self.id, ts))\n\n # Update last changed time\n db(\"UPDATE ticket SET changetime=%s WHERE id=%s\",\n (when_ts, self.id))\n\n self._fetch_ticket(self.id)", "def modify_comment(self, cdate, author, comment, when=None):\n ts = to_utimestamp(cdate)\n if when is None:\n when = datetime.now(utc)\n when_ts = to_utimestamp(when)\n\n with self.env.db_transaction as db:\n # Find the current value of the comment\n old_comment = False\n for old_comment, in db(\"\"\"\n SELECT newvalue FROM ticket_change\n WHERE ticket=%s AND time=%s AND field='comment'\n \"\"\", (self.id, ts)):\n break\n if comment == (old_comment or ''):\n return\n\n # Comment history is stored in fields named \"_comment%d\"\n # Find the next edit number\n fields = db(\"\"\"SELECT field FROM ticket_change\n WHERE ticket=%%s AND time=%%s AND field %s\n \"\"\" % db.like(),\n (self.id, ts, db.like_escape('_comment') + '%'))\n rev = max(int(field[8:]) for field, in fields) + 1 if fields else 0\n db(\"\"\"INSERT INTO ticket_change\n (ticket,time,author,field,oldvalue,newvalue)\n VALUES (%s,%s,%s,%s,%s,%s)\n \"\"\", (self.id, ts, author, '_comment%d' % rev,\n old_comment or '', str(when_ts)))\n if old_comment is False:\n # There was no comment field, add one, find the\n # original author in one of the other changed fields\n old_author = None\n for old_author, in db(\"\"\"\n SELECT author FROM ticket_change\n WHERE ticket=%%s AND time=%%s AND NOT field %s LIMIT 1\n \"\"\" % db.like(),\n (self.id, ts, db.like_escape('_') + '%')):\n 
db(\"\"\"INSERT INTO ticket_change\n (ticket,time,author,field,oldvalue,newvalue)\n VALUES (%s,%s,%s,'comment','',%s)\n \"\"\", (self.id, ts, old_author, comment))\n else:\n db(\"\"\"UPDATE ticket_change SET newvalue=%s\n WHERE ticket=%s AND time=%s AND field='comment'\n \"\"\", (comment, self.id, ts))\n\n # Update last changed time\n db(\"UPDATE ticket SET changetime=%s WHERE id=%s\",\n (when_ts, self.id))\n\n self.values['changetime'] = when", "def get_change(self, change_id: ChangeID) -> Change:\n resp = self._request('GET', '/v1/changes/{}'.format(change_id))\n return Change.from_dict(resp['result'])", "def detected_change(self):\n return self.broker.detected_change(**{\"ChangeTraceID\": self.ChangeTraceID})", "def abort_change(self, change_id: ChangeID) -> Change:\n body = {'action': 'abort'}\n resp = self._request('POST', '/v1/changes/{}'.format(change_id), body=body)\n return Change.from_dict(resp['result'])", "async def on_channel_update(before: discord.Channel, after: discord.Channel):\n if after.is_private:\n return\n\n changelog_channel = get_changelog_channel(after.server)\n if not changelog_channel:\n return\n\n # We only want to update when a name change is performed\n if before.name == after.name:\n return\n\n # Differ between voice channels and text channels\n if after.type == discord.ChannelType.text:\n await log_change(\n changelog_channel, \"Channel **#{0.name}** changed name to {1.mention}, **{1.name}**.\".format(before, after))\n else:\n await log_change(\n changelog_channel, \"Voice channel **{0.name}** changed name to **{1.name}**.\".format(before, after))", "def change_id(self):\n try:\n cids = self.tags[\"Change-Id\"]\n except KeyError:\n return None\n\n assert len(cids) == 1\n return cids[0]", "def eventsCancel(self, eid, cancelMessage, callback):\n j = Json().put(u\"eid\", eid).put(u\"cancel_message\", cancelMessage)\n self.callMethodRetBoolean(u\"events.cancel\", j.getJavaScriptObject(), callback)", "def getChannel(self):\n for evt in self.__events:\n if isinstance(evt, ChannelEvent):\n return evt.channel\n return None", "def change_amount(self, change_amount):\n if self._configuration.client_side_validation and change_amount is None:\n raise ValueError(\"Invalid value for `change_amount`, must not be `None`\") # noqa: E501\n if (self._configuration.client_side_validation and\n change_amount is not None and change_amount < -1): # noqa: E501\n raise ValueError(\"Invalid value for `change_amount`, must be a value greater than or equal to `-1`\") # noqa: E501\n\n self._change_amount = change_amount", "def channelLeft(self, channel):", "def match_end_reason_id(self, state_id, match):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter method for counters, mapped from YANG variable /access_points/access_point/radios/radio/state/counters (container)
def _set_counters(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=yc_counters_openconfig_access_points__access_points_access_point_radios_radio_state_counters, is_container='container', yang_name="counters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """counters must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=yc_counters_openconfig_access_points__access_points_access_point_radios_radio_state_counters, is_container='container', yang_name="counters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/access-points', defining_module='openconfig-access-points', yang_type='container', is_config=False)""", }) self.__counters = t if hasattr(self, '_set'): self._set()
[ "def _set_counters(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_counters_openconfig_wifi_mac__ssids_ssid_state_counters, is_container='container', yang_name=\"counters\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"counters must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_counters_openconfig_wifi_mac__ssids_ssid_state_counters, is_container='container', yang_name=\"counters\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=False)\"\"\",\n })\n\n self.__counters = t\n if hasattr(self, '_set'):\n self._set()", "def _set_counters(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_counters_openconfig_spanning_tree__stp_rstp_interfaces_interface_state_counters, is_container='container', yang_name=\"counters\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"counters must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_counters_openconfig_spanning_tree__stp_rstp_interfaces_interface_state_counters, is_container='container', yang_name=\"counters\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=False)\"\"\",\n })\n\n self.__counters = t\n if hasattr(self, '_set'):\n self._set()", "def _set_counters(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_counters_openconfig_spanning_tree__stp_mstp_mst_instances_mst_instance_interfaces_interface_state_counters, is_container='container', yang_name=\"counters\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"counters must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_counters_openconfig_spanning_tree__stp_mstp_mst_instances_mst_instance_interfaces_interface_state_counters, is_container='container', yang_name=\"counters\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=False)\"\"\",\n })\n\n self.__counters = t\n if hasattr(self, '_set'):\n self._set()", "def 
_set_counters(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_counters_openconfig_wifi_mac__ssids_ssid_clients_client_state_counters, is_container='container', yang_name=\"counters\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"counters must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_counters_openconfig_wifi_mac__ssids_ssid_clients_client_state_counters, is_container='container', yang_name=\"counters\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='container', is_config=False)\"\"\",\n })\n\n self.__counters = t\n if hasattr(self, '_set'):\n self._set()", "def counters ( self ) :\n return self._counters", "def _set_counter_value(self, frame_id, name, value):\n if name not in self.__counters:\n self.__counters[name] = [None] * self.__frame_count\n self.__counters[name][frame_id] = value", "def retrieve_interfaces_interface_state_counters_counters_by_id(name): # noqa: E501\n return 'do some magic!'", "def retrieve_interfaces_interface_subinterfaces_subinterface_state_counters_counters_by_id(name, index): # noqa: E501\n return 'do some magic!'", "def _set_counter(self, value):\n self._counterVar = value", "def _cx_counters_psutil(self):\n for iface, counters in psutil.net_io_counters(pernic=True).iteritems():\n metrics = {\n 'bytes_rcvd': counters.bytes_recv,\n 'bytes_sent': counters.bytes_sent,\n 'packets_in.count': counters.packets_recv,\n 'packets_in.error': counters.errin,\n 'packets_out.count': counters.packets_sent,\n 'packets_out.error': counters.errout,\n }\n self._submit_devicemetrics(iface, metrics)", "def getIbvCountersWrapper(args):\n return getIbvCounters(*args)", "def counters(cli_opts, json): # noqa: B902\n\n return_code = fib.FibCountersCmd(cli_opts).run(json)\n sys.exit(return_code)", "def reset_counters(self) -> dict[str, float]:\n res = self.get_stats()\n self._start_time = time.time()\n self._count = 0\n return res", "def _build_counters(counters):\n return [\n {'name': name, 'delta': value}\n for name, value in counters.items()\n ]", "def cuid_counter():\n return cuid.counter", "def parse_counters(self, counters=None):\n if self.stderr == sys.stderr:\n raise AssertionError('You must call sandbox() first;'\n ' parse_counters() is for testing only.')\n\n stderr_results = parse_mr_job_stderr(self.stderr.getvalue(), counters)\n return stderr_results['counters']", "def counter(self):\n counter = Counter({Status.PASSED: 0, Status.FAILED: 0, \"total\": 0})\n\n for child in self:\n if child.category == ReportCategories.ERROR:\n counter.update({Status.ERROR: 1, \"total\": 1})\n elif child.category == ReportCategories.TASK_RERUN:\n pass\n else:\n counter.update(child.counter)\n\n return counter", "def get_network_counters(args={}):\n res = {}\n\n if args.get('net_io_counters', False):\n res['net_io_counters'] = to_dict(psutil.net_io_counters(pernic=pernic))\n\n return res", "def update_counter(counter, frame, cap):\n counter.config(text = \"Frame {} / {}\".format(int(frame.frame.frame_num), 
int(cap.total_frames)))", "def get_interface_counter_value(dut, ports, properties, cli_type=\"\"):\n cli_type = st.get_ui_type(dut, cli_type=cli_type)\n if not isinstance(ports, list):\n ports = [ports]\n if not isinstance(properties, list):\n properties = [properties]\n counters_dict = dict()\n output = show_interface_counters_all(dut, cli_type=cli_type)\n for each_port in ports:\n entries = filter_and_select(output, properties, {'iface': each_port})[0]\n counters_dict[each_port] = entries\n return convert_to_bits(counters_dict)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }