Dataset columns:
- query: string (lengths 9 to 9.05k)
- document: string (lengths 10 to 222k)
- negatives: sequence (lengths 19 to 20)
- metadata: dict
When the user posts the find_org_to_create_account form, redirect to that page
def find_org_to_create_account(request):
    if request.method != 'POST' or not request.POST.get('organization_slug'):
        return HttpResponseRedirect(reverse('home'))
    else:
        org_slug = request.POST.get('organization_slug')
        return HttpResponseRedirect(reverse('create_org_account', args=[org_slug]))
[ "def form_valid(self, form):\n # Redirect the user to the next step in the Member-creation process\n return HttpResponseRedirect(\n self.get_success_url(self.organization.slug, self.member.username)\n )", "def create_organization():\n\n form = SQLFORM(db.organization)\n\n if form.accepts(request.vars, session): \n response.flash='record inserted'\n\n organization_id = dict(form.vars)['id']\n organization = db(db.organization.id==organization_id).select()\n\n session.organization = organization.as_list()[0]\n session.organization_id = organization.as_list()[0]['id']\n\n redirect(URL(r=request, f='index'))\n\n elif form.errors: response.flash='form errors'\n return dict(form=form)", "def form_valid(self, form):\n self.object = form.save()\n login(self.request, self.object)\n send_mail(subject=\"New Signup\", message=\"Your account has been successfully created\",\n from_email=None, recipient_list=[self.object.email])\n return redirect('leads:list')", "def create_account(request):\n\n curr_hunt = Hunt.objects.get(is_current_hunt=True)\n teams = curr_hunt.real_teams.all().exclude(team_name=\"Admin\").order_by('pk')\n if request.method == 'POST':\n uf = UserForm(request.POST, prefix='user')\n pf = PersonForm(request.POST, prefix='person')\n if uf.is_valid() and pf.is_valid():\n user = uf.save()\n user.set_password(user.password)\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n user.save()\n person = pf.save(commit=False)\n person.is_shib_acct = False\n person.user = user\n person.save()\n login(request, user)\n logger.info(\"User created: %s\" % (str(person)))\n return index(request)\n else:\n return render(request, \"create_account.html\", {'uf': uf, 'pf': pf, 'teams': teams})\n else:\n uf = UserForm(prefix='user')\n pf = PersonForm(prefix='person')\n return render(request, \"create_account.html\", {'uf': uf, 'pf': pf, 'teams': teams})", "def oauth_start_flow():\n # Have to do authentication!\n rest.default_user_authentication()\n\n account_type = flask.request.args.get('type')\n if account_type is None:\n flask.abort(400)\n\n cls = ACCOUNT_TYPES.get(account_type, None)\n if cls is None:\n flask.about(400)\n\n key = str(uuid.uuid4())\n instance = cls(id=key)\n instance.put()\n\n return flask.redirect(instance.AUTH_URL %\n {'client_id': instance.CLIENT_ID,\n 'state': key})", "def office_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n office_form = OfficeForm()\n return render_to_response('office_form.html', {'form': office_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n office_form = OfficeForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if office_form.is_valid():\n of = office_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('office_form.html', \n {'form': office_form, 'form_errors': office_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def org_organisation_create_onaccept(form):\n\n db = current.db\n s3db = current.s3db\n ftable = s3db.pr_forum\n\n # Lookup the Reserves Forum\n forum = db(ftable.name == \"Reserves\").select(ftable.pe_id,\n limitby = (0, 
1)\n ).first()\n try:\n reserves_pe_id = forum.pe_id\n except AttributeError:\n current.log.error(\"Unable to link Org Forum to Reserves Forum: Forum not Found\")\n return\n\n form_vars_get = form.vars.get\n organisation_id = form_vars_get(\"id\")\n\n # Lookup the Organisation\n otable = s3db.org_organisation\n org = db(otable.id == organisation_id).select(otable.pe_id,\n limitby = (0, 1)\n ).first()\n org_pe_id = org.pe_id\n\n # Create Forum\n record = {\"organisation_id\": organisation_id,\n \"name\": \"%s Reserves\" % form_vars_get(\"name\"),\n }\n forum_id = ftable.insert(**record)\n record[\"id\"] = forum_id\n s3db.update_super(ftable, record)\n forum_pe_id = record[\"pe_id\"]\n\n # Add the Hierarchy links\n s3db.pr_add_affiliation(org_pe_id, forum_pe_id, role=\"Realm Hierarchy\")\n s3db.pr_add_affiliation(reserves_pe_id, forum_pe_id, role=\"Realm Hierarchy\")", "def test_cms_wizards_organization_submit_form(self):\n # A parent page to list organizations should pre-exist\n create_page(\n \"Organizations\", \"richie/fullwidth.html\", \"en\", reverse_id=\"organizations\"\n )\n\n # We can submit a form with just the title set\n form = OrganizationWizardForm(data={\"title\": \"My title\"})\n self.assertTrue(form.is_valid())\n page = form.save()\n organization = page.organization\n\n # The page and its related extension have been created as draft\n self.assertEqual(Page.objects.count(), 2)\n self.assertEqual(Page.objects.drafts().count(), 2)\n self.assertEqual(page.get_title(), \"My title\")\n # The slug should have been automatically set\n self.assertEqual(page.get_slug(), \"my-title\")\n # The code is left blank in this case\n self.assertIsNone(organization.code)", "def sign_up(request):\n #just in case is already registered \n if Developer.all().filter(\"user =\", request.user).get():\n return HttpResponseRedirect(\"/\")\n \n return SignUpWizard([SignUpStep1Form, SignUpStep2Form, SignUpStep3Form])(request)", "def award_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n award_form = AwardForm()\n return render_to_response('award_form.html', {'form': award_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n award_form = AwardForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if award_form.is_valid():\n af = award_form.save(commit=False)\n af.company = company\n af.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('award_form.html', \n {'form': award_form, 'form_errors': award_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def create_account(request, role):\n context = {}\n if request.method == \"POST\":\n if(role.lower() == \"academic\"):\n form = AcademicRegisterForm(request.POST)\n elif(role.lower() == \"average\"):\n form = AvgRegisterForm(request.POST)\n\n if(form.is_valid()):\n createNewUser(form)\n username = form.cleaned_data.get('username')\n messages.success(request, f\"Account has been created for {username}!\")\n return redirect('login')\n else:\n if(role.lower() == \"academic\"):\n form = AcademicRegisterForm()\n elif(role.lower() == \"average\"):\n form = AvgRegisterForm()\n else:\n context['error'] = \"URL does 
not exist. Please return to home and try again\"\n return render(request, 'classroom_main/create_account.html', context)\n\n context[\"type\"] = role\n context['title'] = \"Sign up to the Online Coding Classroom\"\n context['form'] = form\n\n return render(request, 'classroom_main/create_account.html', context)", "def funding_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n funding_form = FundingForm()\n return render_to_response('funding_form.html', {'form': funding_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n funding_form = FundingForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if funding_form.is_valid():\n of = funding_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('funding_form.html', \n {'form': funding_form, 'form_errors': funding_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def goto_make_new_user():\n\n return render_template('users/new.html')", "def post(self):\n cont = self.request_string('continue', default=\"/\")\n self.redirect(users.create_login_url(cont))", "def management_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n management_form = ManagementForm()\n return render_to_response('management_form.html', {'form': management_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n management_form = ManagementForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if management_form.is_valid():\n mf = management_form.save(commit=False)\n mf.company = company\n mf.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('management_form.html', \n {'form': management_form, 'form_errors': management_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def home_page():\n return redirect ('/register')", "def test_redirect_to_invite(self):\n # Issue POST request\n values = {\n 'project': self.project.sodar_uuid,\n 'role': self.role_guest.pk,\n 'text': 'test@example.com',\n }\n\n with self.login(self.user):\n response = self.client.post(\n reverse('projectroles:autocomplete_user_redirect'), values\n )\n\n self.assertEqual(response.status_code, 200)\n\n # Assert correct redirect url\n with self.login(self.user):\n data = json.loads(response.content)\n self.assertEqual(data['success'], True)\n self.assertEqual(\n data['redirect_url'],\n reverse(\n 'projectroles:invite_create',\n kwargs={'project': self.project.sodar_uuid},\n ),\n )", "def acquisition_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = 
validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n acquisition_form = AcquisitionForm()\n return render_to_response('acquisition_form.html', {'form': acquisition_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n acquisition_form = AcquisitionForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if acquisition_form.is_valid():\n aqf = acquisition_form.save(commit=False)\n aqf.company = company\n aqf.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('acquisition_form.html', \n {'form': acquisition_form, 'form_errors': acquisition_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def create_account():\n\n return render_template('account.html')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
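A minimal sketch of how the view above could be wired into a URLconf so that its reverse() calls resolve. Only the URL names 'home' and 'create_org_account' come from the snippet; the route paths, the views module, and the other view functions are hypothetical placeholders.

from django.urls import path
from . import views

urlpatterns = [
    path('', views.home, name='home'),
    path('orgs/<slug:org_slug>/create-account/',
         views.create_org_account, name='create_org_account'),
    path('find-org/', views.find_org_to_create_account,
         name='find_org_to_create_account'),
]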
df is a function of x_i, y_i, beta
def sgd_step(df, alpha, prev_beta, xy_i):
    x_i, y_i = xy_i
    gradient = df(x_i, y_i, prev_beta)
    return [beta_j + alpha * df_j
            for beta_j, df_j in zip(prev_beta, gradient)]
[ "def create_beta_posteriors(df):\n goods = df.num_matured - df.fpd\n df['alpha_p'] = df.alpha + df.fpd\n df['beta_p'] = df.beta + goods\n return df", "def create_beta_priors(df):\n df['alpha'] = np.minimum(np.maximum((1 - df.expected) * np.power(df.expected, 2) / df.variance - df.expected, 0.1), 15)\n df['beta'] = df.alpha / df.expected - df.alpha\n return df", "def smale_beta(f, x0, df, args=()):\n _args = (x0,) + args\n beta = numpy.abs(f(*_args) / df[0](*_args))\n return beta", "def jacb(beta, x):\n xc, yc, r = beta\n xi, yi = x\n\n df_db = empty((beta.size, x.shape[1]))\n df_db[0] = 2 * (xc - xi) # d_f/dxc\n df_db[1] = 2 * (yc - yi) # d_f/dyc\n df_db[2] = -2 * r # d_f/dr\n\n return df_db", "def predict(alpha, beta, x_i):\n return beta * x_i + alpha", "def test_fn(df, fn):\n\n y_pred = []\n y_true = []\n\n for key in df.index:\n y_t, *inputs = df.loc[key]\n y_true.append(y_t)\n y_p = fn(*inputs)\n y_pred.append(y_p)\n\n # linear regression without intercept\n c = np.mean(y_true) / np.mean(y_pred)\n y_pred = np.multiply(y_pred, c)\n\n rmse = np.sqrt(np.mean(np.subtract(y_pred, y_true) ** 2))\n return rmse, y_pred, y_true, c", "def fit_df(df, **kwargs):\n fit_func = kwargs.get('fit_func', single_exp)\n xvar = kwargs.get('xvar', 'hours')\n yvar = kwargs.get('yvar', 'yfp_norm')\n # Reset index just in case a multi-indexed dataframe\n # was passed\n if isinstance(df.index, pd.MultiIndex):\n df.reset_index(inplace=True)\n if (xvar in list(df.columns)) and (yvar in list(df.columns)):\n pass\n else:\n print(f'xvar {xvar} and and yvar {yvar} not found in df columns\\n{df.columns}')\n return df, pd.DataFrame(None)\n x_input = df[xvar]\n y_input = df[yvar]\n # If fitting fails at this step, return None and \n # print out the exception. Usually occur because \n # of bad data or the curve_fit failed to converge\n try:\n popt, pcov = curve_fit(fit_func, x_input, y_input)\n except Exception as e:\n if e == RuntimeError:\n print(f'Runtime isue fitting data using {fit_func}\\n')\n else:\n print('Error encountered:\\n', e)\n return df, pd.DataFrame(None)\n # Increase density of x points for smoothed\n # y predicted\n x_smooth_min = 0\n x_smooth_max = np.max(x_input)*1.2\n x_smooth_delta = x_smooth_max/50\n x_smooth = np.arange(x_smooth_min,\n x_smooth_max,\n x_smooth_delta)\n \n y_pred = fit_func(x_input, *popt)\n y_pred_smooth = fit_func(x_smooth, *popt)\n # Define r_sq and standard error of the estimate (est_std_err)\n residuals = y_input - y_pred\n r_sq = get_r_squared(y=y_input, y_pred=y_pred)\n est_std_err = get_estimate_std_err(y=y_input, y_pred=y_pred)\n shapiro_p = get_shapiro_p(residuals)\n \n dict_keys = ['xvar',\n 'yvar',\n 'x_input',\n 'y_input',\n 'x_smooth',\n 'y_pred_smooth',\n 'y_pred',\n 'residuals',\n 'r_sq',\n 'est_std_err',\n 'shapiro_p']\n \n # Have to set the scope of eval() to local variables\n # only since by default it looks only for global variables\n func_locals = locals()\n dict_vals = [eval(key, func_locals) for key in dict_keys]\n params_dict = dict(zip(dict_keys, dict_vals))\n # Expand popt (params found by curve_fit(x, y)) into\n # single values insteady of the original tuple\n popt_names = list('abcdefghijklmnop')\n for i in range(0, len(popt)):\n name = popt_names[i]\n params_dict[name] = popt[i]\n \n short_df, smooth_df = params_dict_to_df(params_dict)\n # Add back in all the original data in the sampledf\n # to output alogside the fits. 
Will include a bunch\n # of labels like genotype, chase_method, substrate\n # etc.\n for col in df.columns:\n if col not in list(short_df.columns):\n short_df.loc[:, col] = df.loc[:, col]\n # Now add all the labels to the smooth df as well\n for col in short_df.columns:\n if col not in list(smooth_df.columns):\n smooth_df.loc[:, col] = short_df.loc[:, col].unique()[0]\n return short_df, smooth_df", "def calculate_beta(df, ticker_dict, beta_cap_floor=-10.0, beta_cap_ceil=10.0):\n \n df1 = df.copy()\n df_temp = df1.assign(beta=0.)\n df_temp = df_temp.loc[:,['beta']]\n \n for index, row in df_temp.iterrows():\n date_end = index[0] - pd.tseries.offsets.MonthEnd(1) # dates for prior month\n date_begin = date_end - pd.tseries.offsets.MonthBegin(1)\n ticker = index[1]\n\n df2 = ticker_dict[ticker][date_begin:date_end].copy() # select prior month's data\n df2 = df2[df2.index.dayofweek < 5] # remove weekends\n df2 = df2.loc[:,['Adj_Close','QQQ_Adj_Close']] # only need ticker's price and QQQ price\n \n X = df2.values[:,[0]]\n y = df2.values[:,[1]]\n reg = LinearRegression().fit(X, y) # run regression\n df_temp.at[index,'beta'] = reg.coef_ # put in beta\n \n df1 = df1.assign(beta=df_temp.beta.clip(beta_cap_floor, beta_cap_ceil)) # add beta column, with beta clipped\n return df1", "def evaluate_df(self, df):\n ## Check invariant; model inputs must be subset of df columns\n var_diff = set(self.var).difference(set(df.columns))\n if len(var_diff) != 0:\n raise ValueError(\n \"Model inputs not a subset of given columns;\\n\"\n + \"missing var = {}\".format(var_diff)\n )\n\n df_tmp = df.copy().drop(self.out, axis=1, errors=\"ignore\")\n ## Evaluate each function\n for func in self.functions:\n ## Concatenate to make intermediate results available\n df_tmp = concat((df_tmp, func.eval(df_tmp)), axis=1)\n\n return df_tmp[self.out]", "def _lbeta_fwd(x, y):\n return _lbeta_naive_gradient(x, y), (x, y)", "def prod_logistic_predict(df, cutoff, varlist, input_model, beta=1):\n\t\n\n\tmodel = pickle.load(open(input_model , \"rb\"))\n\t\n\tdf_c = sm_api.add_constant(df, has_constant='add')\n\t\n\t#df_c['p_1'] = model.predict(df_c[varlist])\n\t\n\tdf_c['p_1'] = model.predict(df[varlist])\n\t\n\t\t\n\tdf['p_1'] = (beta*df['p_1']) / (beta*df['p_1'] - df['p_1'] +1)\n\t\n\t\n\tconditions = [\n\t\t\t(df_c['p_1'] < cutoff[1]),\n\t\t\t(df_c['p_1'] >= cutoff[1]) & (df_c['p_1'] < cutoff[2]),\n\t\t\t(df_c['p_1'] >= cutoff[2]) & (df_c['p_1'] < cutoff[3]),\n\t\t\t(df_c['p_1'] >= cutoff[3]) & (df_c['p_1'] < cutoff[4]),\n\t\t\t(df_c['p_1'] >= cutoff[4]) & (df_c['p_1'] < cutoff[5]),\n\t\t\t(df_c['p_1'] >= cutoff[5]) & (df_c['p_1'] < cutoff[6]),\n\t\t\t(df_c['p_1'] >= cutoff[6]) & (df_c['p_1'] < cutoff[7]),\n\t\t\t(df_c['p_1'] >= cutoff[7]) & (df_c['p_1'] < cutoff[8]),\n\t\t\t(df_c['p_1'] >= cutoff[8]) & (df_c['p_1'] < cutoff[9]),\n\t\t\t(df_c['p_1'] >= cutoff[9])]\n\t\t\t\n\tchoices = [10,9,8,7,6,5,4,3,2,1]\n\n\tdf_c['decile'] = np.select(conditions, choices, default=0).astype(int)\n\t\n\t\n\tscored_data = df_c[['p_1', 'decile']]\n\n\treturn scored_data", "def generate_x_y_params(self,df):\r\n y=df.iloc[:,-1] \r\n X=df.loc[:, df.columns != y.name]\r\n return X,y", "def eval(self, df):\n df_res = self.func(df)\n return df_res[self.out]", "def error(alpha, beta, x_i, y_i):\n return y_i - predict(alpha, beta, x_i)", "def calc_beta(state):\n vs = state.variables\n settings = state.settings\n vs.beta = update(\n vs.beta,\n at[:, 2:-2],\n 0.5\n * (\n (vs.coriolis_t[:, 3:-1] - vs.coriolis_t[:, 2:-2]) / vs.dyu[2:-2]\n + (vs.coriolis_t[:, 
2:-2] - vs.coriolis_t[:, 1:-3]) / vs.dyu[1:-3]\n ),\n )\n vs.beta = utilities.enforce_boundaries(vs.beta, settings.enable_cyclic_x)", "def squared_error_gradient(x_i, y_i, beta):\n return [-2 * x_ij * error(x_i, y_i, beta) for x_ij in x_i]", "def fit_beta (self, wins_frame):\n win_pct = wins_frame.pct\n \n fitted = scipy.stats.beta.fit(win_pct, floc =0 , fscale = 1)\n return fitted", "def predict(X, beta):\n return np.dot(X, beta)", "def squared_error_gradient(x_i, y_i, beta):\n return [-2 * x_ij * error(x_i, y_i, beta)\n for x_ij in x_i]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
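A usage sketch for the sgd_step document above, assuming its definition is in scope. The predict and squared_error_gradient helpers and the numeric values are illustrative only. Note that sgd_step moves beta in the direction of the gradient it is given, so to minimize squared error you would pass a negative alpha (or a negated gradient).

def predict(x_i, beta):
    return sum(x_ij * b_j for x_ij, b_j in zip(x_i, beta))

def squared_error_gradient(x_i, y_i, beta):
    err = y_i - predict(x_i, beta)            # residual for one example
    return [-2 * x_ij * err for x_ij in x_i]

beta = [0.0, 0.0]
x_i, y_i = [1.0, 2.0], 5.0                    # one (features, target) pair
# negative step size, because sgd_step adds alpha * gradient
beta = sgd_step(squared_error_gradient, -0.01, beta, (x_i, y_i))
print(beta)                                   # roughly [0.1, 0.2]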
Get the color of the mask at position. Using 2 bits as a color.
def get_color(mask: int, position: int):
    return (mask >> (position << 1)) & 3
[ "def mask_color(self):\n return self._mask_color", "def GetOrFindMaskColour(*args, **kwargs):\n return _core_.Image_GetOrFindMaskColour(*args, **kwargs)", "def set_color(mask: int, position: int, color: int):\n return mask | (color << (position << 1))", "def get_color(self, point):\n \n d = point - self._origin\n dist = int(d.dot(d) ** 0.5) % 2\n if dist == 0:\n return self.c1.dup()\n else:\n return self.c2.dup()", "def getColor(self):\n retval = self.pixels[self.x, self.y]\n return Color(retval)", "def get_color(self, _pos):\n return self.__framebuffer[_pos]", "def _mask_get(mask, pos):\n return (mask[pos // MASK_BITSIZE] >> (pos % MASK_BITSIZE)) & 1", "def GetColor(n):\n if n > 0:\n return (98 / 255.0, 162 / 255.0, 209 / 255.0)\n else:\n return (233 / 255.0, 133 / 255.0, 131 / 255.0)", "def _get_color__(number, delta):\n r, g, b = _get_pattern__(number) # which channels to set a value to\n val = 255 - delta * int(number / 8) # the value to set for each color channel\n return r * val, g * val, b * val", "def GetMaskBlue(*args, **kwargs):\n return _core_.Image_GetMaskBlue(*args, **kwargs)", "def get_colour(v):\n v %= 1\n RGB = np.array([ # this will cover the whole RGB spectrum\n [1, 1, 0, 0, 0, 1],\n [0, 1, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 1],\n ])\n RGB = RGB[:, [0, 1, 4, 5]] # Select a colour range that looks good\n nVals = len(RGB[0])\n\n v = (v * nVals) % nVals # value within [0, nVals)\n left = int(v) % nVals\n right = int(v + 1) % nVals\n dec = v % 1 # value within [0, 1)\n\n left_rgb = RGB[:, left]\n right_rgb = RGB[:, right]\n\n shift = right_rgb - left_rgb\n return left_rgb + dec * shift", "def _coloring(self, masks: tf.Tensor) -> tf.Tensor:\n b, h, w, n = utilities.resolve_shape(masks)\n palette = tf.random.uniform((1, n, 3), 0.5, 1.)\n colored = tf.reshape(\n tf.matmul(tf.reshape(masks, (b, -1, n)), palette), (b, h, w, 3))\n return colored", "def getPixelColor(self, n):\n\t\treturn self.leds[n]", "def mask_to_rgb(mask):\n colours = visualize.random_colors(mask.shape[2])\n rgb_mask = np.zeros((mask.shape[0], mask.shape[1], 3))\n\n for i in range(mask.shape[2]):\n for c in range(3):\n rgb_mask[:, :, c] = np.where(mask[:, :, i] != 0, int(colours[i][c] * 255), rgb_mask[:, :, c])\n\n return rgb_mask", "def get_red(x, y, slot = 0):\r\n return __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3]", "def getColor(self, x, y):\n if self._checkRange(x, y, \"getColor\"):\n retval = self.pixels[x, y]\n return Color(retval)", "def get(self, i, j):\n if 0 <= i < self.width and 0 <= j < self.height:\n return color(self[i+j*self.width], base=255)", "def getRGB(self, x, y):\n if self._checkRange(x, y, \"getRGB\"):\n return self.pixels[x, y][:3]", "def get_color(self, point):\n return self._color.dup()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the color of the mask at position. Using 2 bits as a color.
def set_color(mask: int, position: int, color: int):
    return mask | (color << (position << 1))
[ "def get_color(mask: int, position: int):\n return (mask >> (position << 1)) & 3", "def SetMaskColour(*args, **kwargs):\n return _gdi_.Bitmap_SetMaskColour(*args, **kwargs)", "def SetMaskColour(*args, **kwargs):\n return _core_.Image_SetMaskColour(*args, **kwargs)", "def SetMask(self, index: int, mask: float) -> None:\n ...", "def set_red(x, y, value, slot = 0):\r\n __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3] = value", "def set_pixel(framebuf, x, y, color):\n index = (y * framebuf.stride + x) >> 2\n pixel = framebuf.buf[index]\n\n shift = (x & 0b11) << 1\n mask = 0b11 << shift\n color = (color & 0b11) << shift\n\n framebuf.buf[index] = color | (pixel & (~mask))", "def _set_color_mode(self, mode):\n self._write(ST7789_COLMOD, bytes([mode & 0x77]))", "def set_red(self, x, y, newval):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].set_red(newval)", "def set_color(self, color, pos):\n TM16XXBase.send_data(self, ((pos << 1) + 1), color)", "def SetColor(self, p_int, p_int_1, p_int_2, p_int_3):\n ...", "def setPixelColor(self, n, color):\n\t\t#print \"pxl %s = %s\" % (n, color)\n\t\tif isinstance(n, slice):\n\t\t\tself.leds[n] = [color]*len(self.leds[n])\n\t\telse:\n\t\t\tif n >= 0 or n <= self.size:\n\t\t\t\tself.leds[n] = color\n\t\t#pprint(self.leds)", "def set_color(self, n, color):\r\n self.pixel_array[(n - 1) % LED_COUNT] = (color[1]<<16) + (color[0]<<8) + color[2]", "def set_mask(self, mask):\n self.mask = mask", "def __setitem__(self, pos, value):\n\t\t#pprint(pos)\n\t\t#pprint(self.leds.__getitem__(pos))\n\t\t# Handle if a slice of positions are passed in by setting the appropriate\n\t\t# LED data values to the provided values.\n\t\tself.setPixelColor(pos, value)", "def setPixel (self, x, y, colour):\r\n self.image [y][x] = colour", "def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))", "def set(self, pixel, position):\n index = position * 3\n red, green, blue = pixel\n self.lights[index] = self.gamma[green]\n self.lights[index + 1] = self.gamma[red]\n self.lights[index + 2] = self.gamma[blue]", "def set_blue(x, y, value, slot = 0):\r\n __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3 + 2] = value", "def set_pixel(self, x, y, color):\n self._data[y][x] = color" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
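The two mask helpers above pack one 2-bit colour per position; a tiny round-trip (values chosen arbitrarily) makes the bit layout concrete.

mask = 0
mask = set_color(mask, 0, 2)    # bits 0-1 hold colour 2 (binary 10)
mask = set_color(mask, 3, 1)    # bits 6-7 hold colour 1 (binary 01)
assert get_color(mask, 0) == 2
assert get_color(mask, 3) == 1
# set_color only ORs bits in, so replacing a non-zero colour means
# clearing the slot first, e.g. mask &= ~(3 << (position << 1)).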
Create a new ir.Set instance with given attributes. Absolutely all ir.Set instances must be created using this constructor.
def new_set(*, ctx: context.ContextLevel, **kwargs) -> irast.Set:
    ir_set = irast.Set(**kwargs)
    ctx.all_sets.append(ir_set)
    return ir_set
[ "def newChemAtomSet(self, **attrlinks):\n return ChemAtomSet(self, **attrlinks)", "def __init__(self, values=None):\n\n self.dict = {} # each instance of Set has its own dict property\n # which is what we'll use to track memnerships\n if values is not None:\n for value in values:\n self.add(value)", "def new_set_from_set(\n ir_set: irast.Set, *,\n preserve_scope_ns: bool=False,\n path_id: typing.Optional[irast.PathId]=None,\n stype: typing.Optional[s_types.Type]=None,\n ctx: context.ContextLevel) -> irast.Set:\n if path_id is None:\n path_id = ir_set.path_id\n if not preserve_scope_ns:\n path_id = path_id.merge_namespace(ctx.path_id_namespace)\n if stype is None:\n stype = ir_set.stype\n result = new_set(\n path_id=path_id,\n path_scope_id=ir_set.path_scope_id,\n stype=stype,\n expr=ir_set.expr,\n ctx=ctx\n )\n result.rptr = ir_set.rptr\n return result", "def Set(*args, **kwargs):\n return _xrc.XmlResource_Set(*args, **kwargs)", "def __init__(self, attributes_names: list):\r\n self.attributes_names = attributes_names", "def __init__(self):\n self.set = {}", "def set(**attrs):", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def set_hdf5_attributes(dset, attributes):\n for key in attributes.iterkeys():\n dset.attrs[key] = attributes[key]\n\n return dset", "def __init__(self, attributes=None):\n self._attributes = attributes\n self._examples = []", "def __init__(self, set):\n Rule.__init__(self)\n self.__set = set", "def __init__(self, *args):\n _snap.TIntSet_swiginit(self, _snap.new_TIntSet(*args))", "def __init__(self, set_ptr=None):\n\n if set_ptr is None:\n self.set = ipset.ipset_new()\n else:\n self.set = set_ptr", "def __init__(self, name: unicode, set: ghidra.util.graph.KeyIndexableSet):\n ...", "def __new__(cls, variable, expr, **kwargs):\n variable = repack_if_can(sympify(unpack_if_can(variable)))\n for v in pack_if_not(variable):\n if not is_Symbol(v):\n raise TypeError('variable is not a symbol or matrix symbol: %s' % v)\n if not is_Boolean(expr):\n raise TypeError('expression is not boolean or relational: %r' % expr)\n\n return Set.__new__(cls, variable, expr, **kwargs)", "def asSet(self) -> OclWrapper_Set:\r\n return OclWrapper_Set(self._wrapped)", "def construct(self):\n\n newSet = {}\n current_index = 0\n\n for key_1, value_1 in self._sets[self._currentSet].items():\n current_index += 1\n for key_2,value_2 in list(self._sets[self._currentSet].items())[current_index:]:\n # join the 2 tuples\n join = key_1 + key_2\n # remove duplicates\n join = tuple(set(join))\n # get combinations\n combined = tuple(combinations(join, self._currentSet+1))\n # sort combination\n combined = tuple(sorted(combined[0]))\n\n # append new combination to dict\n if len(combined) != 0 :\n newSet[combined] = 0\n\n self._currentSet += 1\n # append the new itemset in the sets dict \n self._sets[self._currentSet] = newSet", "def __init__(self, name=None):\n self.id = id # Unique identifier for the set\n self._next_id = 0 # Holds unique ids for graphs\n self._graphs = {} # Holds graphs, keyed by unique id\n self.name = name # Holds description of graph", "def create_set(self, node, name):\r\n return self._send({'name': 'createSet', 'args': [node, name]})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
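The only thing new_set does beyond construction is record the node on ctx.all_sets. A self-contained illustration of that register-on-construction pattern follows; Context, Node, and make_node are hypothetical stand-ins, not EdgeDB's actual classes.

from dataclasses import dataclass, field
from typing import Any, List

@dataclass
class Context:
    all_sets: List[Any] = field(default_factory=list)

@dataclass
class Node:
    path_id: str

def make_node(*, ctx: Context, **kwargs) -> Node:
    node = Node(**kwargs)
    ctx.all_sets.append(node)   # every node stays reachable from the context
    return node

ctx = Context()
n = make_node(ctx=ctx, path_id='A.b')
assert ctx.all_sets == [n]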
Create a new ir.Set from another ir.Set. The new Set inherits source Set's scope, schema item, expression, and, if preserve_scope_ns is set, path_id. If preserve_scope_ns is False, the new Set's path_id will be namespaced with the currently active scope namespace.
def new_set_from_set( ir_set: irast.Set, *, preserve_scope_ns: bool=False, path_id: typing.Optional[irast.PathId]=None, stype: typing.Optional[s_types.Type]=None, ctx: context.ContextLevel) -> irast.Set: if path_id is None: path_id = ir_set.path_id if not preserve_scope_ns: path_id = path_id.merge_namespace(ctx.path_id_namespace) if stype is None: stype = ir_set.stype result = new_set( path_id=path_id, path_scope_id=ir_set.path_scope_id, stype=stype, expr=ir_set.expr, ctx=ctx ) result.rptr = ir_set.rptr return result
[ "def new_set(*, ctx: context.ContextLevel, **kwargs) -> irast.Set:\n ir_set = irast.Set(**kwargs)\n ctx.all_sets.append(ir_set)\n return ir_set", "def copy_to_set(self) -> Set:\n return self.copy().flatten_to_set(modify=False)", "def copy(self):\n newset = OrderedSet()\n newset.items = self.items[:]\n newset.indices = self.indices.copy()\n newset._setup_quick_lookup_methods()\n return newset", "def generate_set(self, left: 'parser.Source', right: 'parser.Source', kind: 'dsl.Set.Kind') -> 'parser.Source':", "def asSet(self) -> OclWrapper_Set:\r\n return OclWrapper_Set(self._wrapped)", "def to_set(self):\n\n return to_set(self, set)", "def create_set(self, node, name):\r\n return self._send({'name': 'createSet', 'args': [node, name]})", "def _create_record_id_set(\n self, record_id: str, observed_id_set: Optional[Set] = None\n ) -> Set[str]:\n if observed_id_set is None:\n observed_id_set = set()\n\n if record_id in self._groups:\n return self._groups[record_id]\n else:\n db_record = self._database.get_record_by_id(record_id)\n if not db_record:\n logger.warning(\n f\"Record ID set creator could not resolve \"\n f\"lookup for {record_id} in ID set: \"\n f\"{observed_id_set}\"\n )\n return observed_id_set - {record_id}\n\n record_xrefs = db_record.get(\"xrefs\")\n if not record_xrefs:\n return observed_id_set | {db_record[\"concept_id\"]}\n else:\n local_id_set = set(record_xrefs)\n merged_id_set = {record_id} | observed_id_set\n for local_record_id in local_id_set - observed_id_set:\n merged_id_set |= self._create_record_id_set(\n local_record_id, merged_id_set\n )\n return merged_id_set", "def copy(self):\n products_by_target = defaultdict(OrderedSet)\n for key, value in self._products_by_target.items():\n products_by_target[key] = OrderedSet(value)\n return UnionProducts(products_by_target=products_by_target)", "def copy(self):\n r = SubsSet()\n r.rewrites = self.rewrites.copy()\n for expr, var in self.items():\n r[expr] = var\n return r", "def copySet(_session, _set_src, _set_dst, _segment):\n it = _session.create_iterator(_session.sc_constraint_new(sc_constants.CONSTR_3_f_a_a,\n _set_src,\n sc.SC_ARC,\n 0), True)\n \n while not it.is_over():\n# s_el = it.value(2)\n# _idtf = _session.get_idtf(s_el)\n# el = s_el\n# if isSystemId(_idtf):\n# el = _session.create_el(_segment, _session.get_type(s_el))\n createPair(_session, _segment, _set_dst, it.value(2), _session.get_type(it.value(1)))\n it.next()", "def copy(self) -> \"WeightedSet\":\n newset = WeightedSet()\n newset._weights.update(self._weights)\n return newset", "def fixSets(namespace):\n\ttry:\n\t\tset\n\texcept:\n\t\timport sets\n\t\tnamespace[\"set\"] = sets.Set\n\t\tnamespace[\"frozenset\"] = sets.ImmutableSet", "def make_set(node):\n node.parent = node\n node.rank = 0", "def addSet(db, setSymbols, setName, setDescription, setDim):\r\n addedSet = db.add_set(setName, setDim, setDescription)\r\n for symbol in setSymbols:\r\n addedSet.add_record(symbol)\r\n\r\n return addedSet", "def as_set(self):\n from ..sets import Union\n if len(self.free_symbols) == 1:\n return Union(*[arg.as_set() for arg in self.args])\n raise NotImplementedError('Sorry, Or.as_set has not yet been'\n ' implemented for multivariate'\n ' expressions')", "def create_set_graph(self):\n\n # create all TestSet objects\n universal_modes = self.config['modes']\n for set_name, set_info in self.config['series'].items():\n modes = universal_modes + set_info['modes']\n set_obj = TestSet(self.pav_cfg, set_name, set_info['tests'], modes,\n self.config['host'], 
set_info['only_if'],\n set_info['not_if'], self)\n self.test_sets[set_name] = set_obj\n\n # create doubly linked graph of TestSet objects\n for set_name in self.dep_graph:\n self.test_sets[set_name].before.update(self.dep_graph[set_name])\n\n next_list = []\n for s_n in self.dep_graph:\n if set_name in self.dep_graph[s_n]:\n next_list.append(s_n)\n self.test_sets[set_name].after.update(next_list)\n\n return", "def from_sets(cls, set1, set2, universe_size=None):\n if not isinstance(set1, Set):\n set1 = set(set1)\n if not isinstance(set2, Set):\n set2 = set(set2)\n TP = len(set1 & set2)\n FP = len(set2) - TP\n FN = len(set1) - TP\n if universe_size is None:\n TN = 0\n else:\n TN = universe_size - TP - FP - FN\n if TN < 0:\n raise ValueError(\n \"universe_size must be at least as large as set union\")\n return cls(TP, FN, FP, TN)", "def create_rrset_set(zone, rrsets, source=None):\n rrset_set = set()\n for rrset in rrsets:\n name = rrset['name']\n typeStr = rrset['type']\n if typeStr == 'SOA' or (typeStr == 'NS' and name == zone):\n continue\n if source:\n rrset['source'] = source\n rrset_set.add(ResourceRecordSet(**rrset))\n return rrset_set" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ir.Set for a pointer defined as a computable.
def computable_ptr_set( rptr: irast.Pointer, *, unnest_fence: bool=False, same_computable_scope: bool=False, ctx: context.ContextLevel) -> irast.Set: ptrcls = rptr.ptrcls source_set = rptr.source source_scls = source_set.stype # process_view() may generate computable pointer expressions # in the form "self.linkname". To prevent infinite recursion, # self must resolve to the parent type of the view NOT the view # type itself. Similarly, when resolving computable link properties # make sure that we use rptr.ptrcls.derived_from. if source_scls.is_view(ctx.env.schema): source_set = new_set_from_set( source_set, preserve_scope_ns=True, ctx=ctx) source_set.stype = source_scls.peel_view(ctx.env.schema) source_set.shape = [] if source_set.rptr is not None: schema = ctx.env.schema derived_from = source_set.rptr.ptrcls.get_derived_from(schema) if (derived_from is not None and not derived_from.generic(schema) and derived_from.get_derived_from(schema) is not None and ptrcls.is_link_property(schema)): source_set.rptr.ptrcls = derived_from try: qlexpr, qlctx, inner_source_path_id, path_id_ns = \ ctx.source_map[ptrcls] except KeyError: ptrcls_default = ptrcls.get_default(ctx.env.schema) if not ptrcls_default: ptrcls_sn = ptrcls.get_shortname(ctx.env.schema) raise ValueError( f'{ptrcls_sn!r} is not a computable pointer') if isinstance(ptrcls_default, s_expr.ExpressionText): qlexpr = astutils.ensure_qlstmt(qlparser.parse(ptrcls_default)) else: qlexpr = qlast.BaseConstant.from_python(ptrcls_default) qlctx = None inner_source_path_id = None path_id_ns = None if qlctx is None: # Schema-level computable, completely detached context newctx = ctx.detached else: newctx = _get_computable_ctx( rptr=rptr, source=source_set, source_scls=source_scls, inner_source_path_id=inner_source_path_id, path_id_ns=path_id_ns, same_scope=same_computable_scope, qlctx=qlctx, ctx=ctx) if ptrcls.is_link_property(ctx.env.schema): source_path_id = rptr.source.path_id.ptr_path() else: source_path_id = rptr.target.path_id.src_path() path_id = source_path_id.extend( ptrcls, s_pointers.PointerDirection.Outbound, ptrcls.get_target(ctx.env.schema), ns=ctx.path_id_namespace, schema=ctx.env.schema) with newctx() as subctx: subctx.view_scls = ptrcls.get_target(ctx.env.schema) subctx.view_rptr = context.ViewRPtr( source_scls, ptrcls=ptrcls, rptr=rptr) subctx.anchors[qlast.Source] = source_set subctx.empty_result_type_hint = ptrcls.get_target(ctx.env.schema) if isinstance(qlexpr, qlast.Statement) and unnest_fence: subctx.stmt_metadata[qlexpr] = context.StatementMetadata( is_unnest_fence=True) comp_ir_set = dispatch.compile(qlexpr, ctx=subctx) if ptrcls in ctx.pending_cardinality: comp_ir_set_copy = copy.copy(comp_ir_set) specified_card, source_ctx = ctx.pending_cardinality[ptrcls] stmtctx.get_pointer_cardinality_later( ptrcls=ptrcls, irexpr=comp_ir_set_copy, specified_card=specified_card, source_ctx=source_ctx, ctx=ctx) def _check_cardinality(ctx): if ptrcls.singular(ctx.env.schema): stmtctx.enforce_singleton_now(comp_ir_set_copy, ctx=ctx) stmtctx.at_stmt_fini(_check_cardinality, ctx=ctx) comp_ir_set.stype = ptrcls.get_target(ctx.env.schema) comp_ir_set.path_id = path_id comp_ir_set.rptr = rptr rptr.target = comp_ir_set return comp_ir_set
[ "def as_set(self):\n from ..sets import Union\n if len(self.free_symbols) == 1:\n return Union(*[arg.as_set() for arg in self.args])\n raise NotImplementedError('Sorry, Or.as_set has not yet been'\n ' implemented for multivariate'\n ' expressions')", "def as_set(self):\n if len(self.free_symbols) == 1:\n return self.args[0].as_set().complement(S.Reals)\n raise NotImplementedError('Sorry, Not.as_set has not yet been'\n ' implemented for mutivariate'\n ' expressions')", "def getSet(unique_name):", "def asSet(self) -> OclWrapper_Set:\r\n return OclWrapper_Set(self._wrapped)", "def visit_Set(self, node):\n self.generic_visit(node)\n return to_call(to_attribute(self.operator, '__set__'), node.elts)", "def set_of(element: Type) -> SetType:\n return SetType(element)", "def create_closedSet(self):\n # EXAMPLE: return a data structure suitable to hold the set of nodes already evaluated\n return set()", "def generate_set(self, left: 'parser.Source', right: 'parser.Source', kind: 'dsl.Set.Kind') -> 'parser.Source':", "def owningSet(self) -> ghidra.util.graph.KeyIndexableSet:\n ...", "def pointer_to_atoms(self, pointer: MultiValues, size: int, endness: str) -> Set[MemoryLocation]:\n result = set()\n for vs in pointer.values():\n for value in vs:\n atom = self.pointer_to_atom(value, size, endness)\n if atom is not None:\n result.add(atom)\n\n return result", "def set(x):\n pass", "def as_set(self):\n return set(self)", "def can_be_set(self):\n regs = []\n map(regs.extend, map(lambda (a, pops): pops, self.gadgets))\n return set(regs)", "def zset(raw):\n return to_iter(raw, type_='set')", "def generate_set(self, left: sql.Selectable, right: sql.Selectable, kind: dsl.Set.Kind) -> sql.Selectable:\n return self.SET[kind](left, right)", "def sat_generate_candidate_assignments(self):\n # YOUR CODE HERE\n short = min(len(c) for c in self.clauses)\n for c in self.clauses:\n if len(c) == short:\n return set(c.literals)\n # return (set(x.literals) for x in self.clauses if len(x) == min(len(c) for c in self.clauses))", "def create_C1(data_set):\r\n C1 = set()\r\n for t in data_set:\r\n for item in t:\r\n item_set = frozenset([item])\r\n C1.add(item_set)\r\n return C1", "def cfset_to_set(cfset):\n count = cf.CFSetGetCount(cfset)\n buffer = (c_void_p * count)()\n cf.CFSetGetValues(cfset, byref(buffer))\n return set([cftype_to_value(c_void_p(buffer[i])) for i in range(count)])", "def create_C1(data_set):\n C1 = set()\n for t in data_set:\n for item in t:\n item_set = frozenset([item])\n C1.add(item_set)\n return C1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return founder and offspring subset of basename.ped containing only the markers in lcd. lcd contains a sorted list of (chrom, offset, rs) for the common SNPs in all maps. We need to keep genotypes all in the same column order.
def subsetPed(basename="",lcdmap = [],faff='1', ofaff='2'): mf = file('%s.map' % basename,'r').readlines() lmap = [x.strip().split() for x in mf] rscols = {} # lookup marker table colrs = [] # lookup rs from column for i,m in enumerate(lmap): # get columns to keep in the order we want them rscols[m[1]] = i # keep track of where each rs is in this map colrs.append(m[1]) # and keep the list of rs for tracking alleles wewant = [rscols[x[2]] for x in lcdmap] # columns we want to keep print '#Subsetped faff=%s ofaff=%s keeping %d (%s) of potential lcd %d for %s' % \ (faff,ofaff,len(wewant),wewant[:20],len(lcdmap),basename) pf = file('%s.ped' % basename,'r') ogeno = [] # offspring new lines fgeno = [] # founders oped = [] # for pedigrees fped = [] rsadict = {} # keep a count of alleles - seems to be a problem for i,l in enumerate(pf): if (i+1) % 500 == 0: print '%s at line %d' % (basename,i+1) ll = l.strip().split() ped = ll[:6] founder = (ll[2] == '0' and ll[3] == '0') aff = faff if not founder: aff = ofaff ped[5] = aff # adjust as needed if founder: fped.append(ped) else: oped.append(ped) gt = ll[6:] geno = [] for snp in wewant: # columns in order thisrs = colrs[snp] base = snp*2 g1 = gt[base] g2 = gt[base+1] geno.append(g1) geno.append(g2) if not rsadict.get(thisrs,None): rsadict[thisrs] = {} if g1 <> '0': if not rsadict[thisrs].get(g1,None): rsadict[thisrs][g1] = 1 else: rsadict[thisrs][g1] += 1 if g2 <> '0': if not rsadict[thisrs].get(g2,None): rsadict[thisrs][g2] = 1 else: rsadict[thisrs][g2] += 1 keepgt = array.array('c',geno) if founder: fgeno.append(keepgt) else: ogeno.append(keepgt) print '#Subsetped %s %d fgeno %d ogeno' % (basename,len(fgeno),len(ogeno)) return fped,oped,fgeno,ogeno,rsadict
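A small sketch of the .ped column layout the function above relies on, using a toy line (all values illustrative): the first six whitespace-separated fields are pedigree columns, and after that marker i's two alleles sit at gt[2*i] and gt[2*i + 1].

line = 'FAM1 IND1 0 0 1 1  A G  C C  0 0'
ll = line.strip().split()
ped, gt = ll[:6], ll[6:]
assert ped == ['FAM1', 'IND1', '0', '0', '1', '1']
for snp in range(3):                    # three toy markers
    g1, g2 = gt[2 * snp], gt[2 * snp + 1]
    print(snp, g1, g2)                  # (A, G), (C, C), then the missing genotype ('0', '0')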
[ "def mergePed(bnlist=[],faff=[],ofaff=[],newbasename='newped',fo=0):\r\n lcdmap = getLCD(bnlist) # list of chr,offset,rs for all snp common to all files\r\n print 'got %d lcd snps-%s' % (len(lcdmap),lcdmap[:5])\r\n cfped = []\r\n coped = []\r\n cfgeno = []\r\n cogeno = []\r\n allrsa = {}\r\n ignorers = {}\r\n for i,basename in enumerate(bnlist):\r\n fped,oped,fgeno,ogeno,trsadict = subsetPed(basename,lcdmap,faff[i],ofaff[i])\r\n print '%s gave %d fgeno' % (basename,len(fgeno))\r\n for rs in trsadict.keys():\r\n tk = trsadict[rs].keys()\r\n if len(tk) > 2:\r\n print 'for %s, rs %s has alleles %s' % (basename, rs, trsadict[rs])\r\n if not allrsa.get(rs,None):\r\n allrsa[rs] = {}\r\n for a in tk:\r\n if not allrsa[rs].get(a,None):\r\n allrsa[rs][a] = trsadict[rs][a]\r\n else:\r\n allrsa[rs][a] += trsadict[rs][a]\r\n tk = allrsa[rs].keys()\r\n if len(tk) > 2 and not ignorers.get(rs,None): # new\r\n #print 'After merge basename %s, rs %s has alleles %s' % (basename, rs,allrsa[rs])\r\n ignorers[rs] = rs\r\n cfped += fped\r\n coped += oped\r\n cfgeno += fgeno\r\n cogeno += ogeno\r\n print 'after merge all have %d fgeno and %d ogeno' % (len(cfgeno),len(cogeno))\r\n # now have offspring and founder rows in lcdmap order\r\n # write map file\r\n print '### found %d markers > 2 alleles' % (len(ignorers.keys()))\r\n keepmarkers = [x for x in range(len(lcdmap)) if not ignorers.get(lcdmap[x][2],None)]\r\n newmap = ['\\t'.join((lcdmap[x][0],lcdmap[x][2],'0','%d' % lcdmap[x][1])) for x in keepmarkers] # chrom,offset,rs\r\n f = file('%s.map' % newbasename,'w')\r\n f.write('%s\\n' % '\\n'.join(newmap))\r\n f.close()\r\n for i,geno in enumerate(cfgeno): # convert each array into a list and keep the good markers\r\n gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers])\r\n g = array.array('c',gs) # good ones\r\n cfgeno[i] = g # replace\r\n print 'cfgeno converted'\r\n if not fo: # not founders only - note arrays are not lists!\r\n cfped += copy.copy(coped) #\r\n del coped\r\n for i,geno in enumerate(cogeno): # convert each array into a list and keep the good markers\r\n gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers])\r\n g = array.array('c',gs) # good ones\r\n cfgeno.append(g) # extend founders\r\n del cogeno\r\n print 'after if not fo now have %d cfgeno' % (len(cfgeno))\r\n f = file('%s.ped' % newbasename,'w')\r\n for n,ped in enumerate(cfped):\r\n l = ' '.join(ped + list(cfgeno[n]))\r\n if n % 100 == 0 and n > 0:\r\n print 'writing line %d' % n\r\n f.write(l)\r\n f.write('\\n')\r\n f.close()\r\n print 'wrote %d map rows and %d ped rows to %s' % (len(newmap),len(cfped),newbasename)", "def get_file_chromosomes(self) -> List[str]:", "def exonBedSplitAndChangeName(infile,maf_dir,outfile_dir=\"\"):\n chrStrand_linespList_dict={}\n with open(infile,'r') as inf:\n for line in inf:\n linesp=line.rstrip().split(\"\\t\")\n if len(linesp)==6:\n chrStrand=\"{0}{1}\".format(linesp[0],linesp[5])\n if chrStrand not in chrStrand_linespList_dict: chrStrand_linespList_dict[chrStrand]=set()\n # change name to chr_start_end\n linesp[3]=\"{0}_{1}_{2}\".format(linesp[0],linesp[1],linesp[2])\n # convert start,end to int\n linesp[1]=int(linesp[1])\n linesp[2]=int(linesp[2]) \n chrStrand_linespList_dict[chrStrand].add(tuple(linesp))\n #\n for chrStrand in chrStrand_linespList_dict:\n linespList=list(chrStrand_linespList_dict[chrStrand])\n sortedLinespList=sorted(linespList, key=lambda linesp: linesp[1])\n # remove regions that maf already extracted (maf file chr_start_end.maf already 
existed)\n # if maf_dir not exist or not defined, skip this step\n if os.path.isdir(maf_dir):\n sortedLinespList=filterExistedMafRegions(sortedLinespList, maf_dir) \n # split lines into non overlapping list\n nonover_linespList_list=splitIntoNonoverlappingList(sortedLinespList)\n if not outfile_dir:\n outfile_dir=os.path.dirname(infile) \n for index,linespList in enumerate(nonover_linespList_list):\n outfile=os.path.join(outfile_dir,chrStrand.replace(\"+\",\"plus\").replace(\"-\",\"minus\")+\"_\"+str(index)+\".bed\")\n linespList_text=[\"\\t\".join([str(el) for el in linesp]) for linesp in linespList ]\n with open(outfile,'w') as outf:\n outf.write(\"\\n\".join(linespList_text))", "def getStartEndCoords(fileName1, fileName2):\n uniqueNames = dict()\n with open(fileName1, \"r\", encoding=\"utf8\") as f1:\n f1 = csv.reader(f1, delimiter='\\t')\n for ls in f1:\n start = ls[0][4:].strip()\n normStart = norm.normalize_alphabet(start)\n start_reg = ls[1]#.strip().split(\",\")\n startKey = ','.join([normStart] + start_reg.strip().split(\",\"))\n startKey_orig = ','.join([start] + start_reg.strip().split(\",\"))\n end = ls[2][4:].strip()\n normEnd = norm.normalize_alphabet(end)\n end_reg = ls[3]#.strip().split(\",\")\n endKey = ','.join([normEnd] + end_reg.strip().split(\",\"))\n endKey_orig = ','.join([end] + end_reg.strip().split(\",\"))\n\n with open(fileName2, \"r\", encoding=\"utf8\") as jsonFile: \n allData = json.load(jsonFile)\n for d in allData[\"features\"]:\n # populates the uniqueNames dictionary for start and end toponyms\n if not any(x in uniqueNames for x in [startKey, startKey_orig]):\n uniqueNames.update(populateDict(start, start_reg, d))\n if not any(x in uniqueNames for x in [endKey, endKey_orig]):\n uniqueNames.update(populateDict(end, end_reg, d))\n if not any(x in uniqueNames for x in [startKey, startKey_orig]):\n for uri in gv.found_URIs:\n if any(x in uri for x in [startKey, startKey_orig])\\\n and re.match(r'\\d', gv.found_URIs[uri]) == None:\n tmp = {}\n tmp[startKey_orig] = {}\n tmp[startKey_orig]['lat'] = \"null\"\n tmp[startKey_orig]['lon'] = \"null\"\n tmp[startKey_orig]['region'] = start_reg\n tmp[startKey_orig]['cornuUri'] = gv.found_URIs[uri]\n uniqueNames.update(tmp)\n if startKey_orig not in uniqueNames:\n tmp = {}\n tmp[startKey_orig] = {}\n tmp[startKey_orig]['lat']= \"null\"\n tmp[startKey_orig]['lon'] = \"null\"\n tmp[startKey_orig]['region'] = start_reg\n tmp[startKey_orig]['cornuUri'] = \"null\"\n uniqueNames.update(tmp)\n\n if not any(x in uniqueNames for x in [endKey, endKey_orig]):\n for uri in gv.found_URIs:\n if any(x in uri for x in [endKey, endKey_orig])\\\n and re.match(r'\\d', gv.found_URIs[uri]) == None:\n tmp = {}\n tmp[endKey_orig] = {}\n tmp[endKey_orig]['lat'] = \"null\"\n tmp[endKey_orig]['lon'] = \"null\"\n tmp[endKey_orig]['region'] = end_reg\n tmp[endKey_orig]['cornuUri'] = gv.found_URIs[uri]\n uniqueNames.update(tmp)\n if endKey_orig not in uniqueNames:\n tmp = {}\n tmp[endKey_orig] = {}\n tmp[endKey_orig]['lat']= \"null\"\n tmp[endKey_orig]['lon'] = \"null\"\n tmp[endKey_orig]['region'] = end_reg\n tmp[endKey_orig]['cornuUri'] = \"null\"\n uniqueNames.update(tmp)\n return uniqueNames", "def find_alt_loci_in_bc_files(folder, refseq):\n\tmin_total_reads_by_strand = 5\n\tmin_count_allele_by_strand = 1\n\thet = dict()\n\thomoplasmic = dict()\n\tmajority_alt = dict()\n\tbaseToIndexF = {'A': 3, 'T': 4, 'C': 5, 'G': 6}\n\tbaseToIndexR = {'A': 7, 'T': 8, 'C': 9, 'G': 10}\n\tfor fname in glob.glob(folder):\n\t\tbasename = 
os.path.basename(fname).rstrip('.bc')\n\t\thet[basename] = dict()\n\t\thomoplasmic[basename] = dict()\n\t\tmajority_alt[basename] = dict()\n\t\tbcfile = open(fname, 'r')\n\t\tfor li in bcfile:\n\t\t\tq = li.rstrip('\\n').split('\\t')\n\t\t\ts = q[0:3]\n\t\t\tfor i in q[3:11]:\n\t\t\t\ts.append(int(i))\n\t\t\ttotalfor = sum(s[3:7])\n\t\t\ttotalrev = sum(s[7:11])\n\t\t\tif((totalfor < min_total_reads_by_strand)\n\t\t\t\tor (totalrev < min_total_reads_by_strand)):\n\t\t\t\t# This site not is usable.\t\n\t\t\t\tcontinue\n\t\t\tobsAlleles = {'A': 0, 'T': 0, 'C': 0, 'G': 0}\n\t\t\tfor i, a in enumerate(['A', 'T', 'C', 'G'], start=3):\n\t\t\t\tobsAlleles[a] = s[i] + s[4+i]\n\t\t\tsorted_bc = sorted(obsAlleles.items(), key=itemgetter(1))\n\t\t\tmajAllele = sorted_bc[-1][0]\t\n\t\t\tminorAllele = sorted_bc[-2][0]\n\t\t\tif majAllele != refseq[int(s[1])-1] and (\n\t\t\t\tsorted_bc[-2][1] == 0):\n\t\t\t\thomoplasmic[basename][s[1]] = li\n\t\t\tif majAllele != refseq[int(s[1])-1] and (\n\t\t\t\tsorted_bc[-2]):\n\t\t\t\tmajority_alt[basename][s[1]] = li\n\t\t\tif((s[baseToIndexF[majAllele]] >= min_count_allele_by_strand) \n\t\t\tand (s[baseToIndexR[majAllele]] >= min_count_allele_by_strand) \n\t\t\tand (s[baseToIndexF[minorAllele]] >= min_count_allele_by_strand) \n\t\t\tand (s[baseToIndexR[minorAllele]] >= min_count_allele_by_strand)):\n\t\t\t\t# We are heteroplasmic\n\t\t\t\thet[basename][s[1]] = li\n\t\t\telse:\n\t\t\t\t# No heteroplasmy\n\t\t\t\tpass \n\t\tbcfile.close()\n\tprint \"\"\"Het loci: %i, Homo loci: %i, Majority alt loci: %i\"\"\" % (\n\t\tsum([len(het[person]) for person in het]),\n\t\tsum([len(homoplasmic[person]) for person in homoplasmic]),\n\t\tsum([len(majority_alt[person]) for person in majority_alt]))\n\treturn (het, homoplasmic, majority_alt)", "def get_reps_filenames(celltype): \n prefix = os.path.join(os.getcwd(),'peaks',celltype,'MACS2')\n reps = os.listdir(prefix)\n return [os.path.join(prefix,rep) for rep in reps if rep.endswith('sorted.bdg')]", "def get_prefix_suffix(file_path, num_prefix=5, num_suffix=5, suffix_right=True, *cols):\n pres, sufs = [], []\n col1, col2 = cols[0], cols[1]\n with open(file_path, 'r', encoding='utf-8') as f:\n for line in f.readlines():\n tmp = json.loads(line)\n text1 = tmp[col1]\n pres.append(' '.join(text1.split()[: num_prefix]))\n text2 = tmp[col2]\n if len(text2.split()) < num_suffix:\n sufs.append(text2)\n else:\n if suffix_right:\n sufs.append(' '.join(text2.split()[-num_suffix:]))\n else:\n sufs.append(' '.join(text2.split()[: num_suffix]))\n\n return pres, sufs", "def split_per(folderin, folderout, split_col='ECO_ID', colNms=['i_h100','i_cd',\n 'doy','i_wflen','i_acqdate','b1','vcf','ECO_NAME','ECO_ID','BIOME','geometry']):\n\n split_files = glob.glob(folderin + '*.shp')\n\n for filename in split_files:\n print(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n dfa = gpd.read_file(filename)\n df = dfa.astype({split_col: 'int32'}) \n ecoNames = list(np.unique(df[split_col]))#get list of unique ecoregions \n \n for eco in ecoNames:\n #create new df with just columns I want\n df2 = gpd.GeoDataFrame(df, columns=colNms)\n ID = str(eco)\n df_eco = df.loc[df2[split_col]==eco, colNms]\n df_eco.to_file(folderout + '/{}_eco_{}.shp'.format(basename, ID))", "def F_subset_OMHCHO(self,path):\n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n 
os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('OMI-Aura_L2-OMHCHO_'+DATE.strftime(\"%Ym%m%d\")+'t*.he5')\n l2_list = l2_list+flist\n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n maxsza = self.maxsza\n maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n maxMDQF = self.maxMDQF\n maxEXTQF = self.maxEXTQF\n \n data_fields = ['AMFCloudFraction','AMFCloudPressure','AirMassFactor','Albedo',\\\n 'ReferenceSectorCorrectedVerticalColumn','ColumnUncertainty','MainDataQualityFlag',\\\n 'PixelCornerLatitudes','PixelCornerLongitudes','FittingRMS']\n data_fields_l2g = ['cloud_fraction','cloud_pressure','amf','albedo',\\\n 'column_amount','column_uncertainty','MainDataQualityFlag',\\\n 'PixelCornerLatitudes','PixelCornerLongitudes','FittingRMS']\n geo_fields = ['Latitude','Longitude','TimeUTC','SolarZenithAngle',\\\n 'TerrainHeight','XtrackQualityFlagsExpanded']\n geo_fields_l2g = ['latc','lonc','TimeUTC','SolarZenithAngle',\\\n 'terrain_height','XtrackQualityFlagsExpanded']\n swathname = 'OMI Total Column Amount HCHO'\n \n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading'+fn_dir)\n outp_he5 = self.F_read_he5(fn_dir,swathname,data_fields,geo_fields,data_fields_l2g,geo_fields_l2g)\n f1 = outp_he5['SolarZenithAngle'] <= maxsza\n f2 = outp_he5['cloud_fraction'] <= maxcf\n f3 = outp_he5['MainDataQualityFlag'] <= maxMDQF \n f4 = outp_he5['latc'] >= south\n f5 = outp_he5['latc'] <= north\n tmplon = outp_he5['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_he5['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_he5['UTC_matlab_datenum'] <= self.end_matlab_datenum\n f10 = outp_he5['XtrackQualityFlagsExpanded'] <= maxEXTQF\n validmask = f1 & f2 & f3 & f4 & f5 & f6 & f7 & f8 & f9 & f10\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n \n l2g_data0 = {}\n \n Lat_lowerleft = outp_he5['PixelCornerLatitudes'][0:-1,0:-1][validmask]\n Lat_upperleft = outp_he5['PixelCornerLatitudes'][1:,0:-1][validmask]\n Lat_lowerright = outp_he5['PixelCornerLatitudes'][0:-1,1:][validmask]\n Lat_upperright = outp_he5['PixelCornerLatitudes'][1:,1:][validmask] \n Lon_lowerleft = outp_he5['PixelCornerLongitudes'][0:-1,0:-1][validmask]\n Lon_upperleft = outp_he5['PixelCornerLongitudes'][1:,0:-1][validmask]\n Lon_lowerright = outp_he5['PixelCornerLongitudes'][0:-1,1:][validmask]\n Lon_upperright = outp_he5['PixelCornerLongitudes'][1:,1:][validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_he5.keys():\n if key not in {'MainDataQualityFlag','PixelCornerLatitudes',\\\n 'PixelCornerLongitudes','TimeUTC','XtrackQualityFlagsExpanded'}:\n l2g_data0[key] = outp_he5[key][validmask]\n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def split_decode_file():\n # split files by chromosome\n header = []\n current_chrom = 'chr1'\n # file_template = decode_folder + '/{}.deCODE_2019.GRCh38.txt'\n file_template = decode_folder + '/{}.deCODE_2019_hg19.txt'\n 
decode_file = decode_folder + '/aau1043_DataS3_hg19_liftOver.bed'\n w = open(file_template.format(current_chrom), 'a')\n print('NOTE: appending to map files, not overwriting. may cause duplicates')\n with open(decode_file, 'r') as f:\n for line in f:\n # save the header info\n if line.startswith('#'):\n header.append(line)\n # save the column labels\n elif line.startswith('Chr'):\n header.append('# ' + line)\n # write header to first file now\n w.write(''.join(header))\n # the remaining lines are data\n else:\n # get the chromosome for the current line\n ch = line.split()[0]\n # if the chromosome matches the open file, write to it\n if ch == current_chrom:\n w.write(line)\n # if a new chromosome arises, switch to a new writefile\n else:\n w.close()\n current_chrom = ch\n w = open(file_template.format(current_chrom), 'a')\n # write header to file\n w.write(''.join(header))\n w.write(line)\n\n # close the last open file\n w.close()", "def subFarms(self,partition,full_name=None):\n if self.inUse.value() is None: self.load()\n got = []\n for i in xrange(len(self.inUse.data)):\n if self.inUse.data[i]==partition:\n if full_name:\n got.append(self.name+'_'+self.subfarms.data[i])\n else:\n got.append(self.subfarms.data[i])\n return got", "def F_subset_S5PHCHO(self,path): \n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('S5P_OFFL_L2__HCHO___'+DATE.strftime(\"%Y%m%d\")+'T*.nc')\n l2_list = l2_list+flist\n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n maxsza = self.maxsza\n maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n min_qa_value = self.min_qa_value\n \n # absolute path of useful variables in the nc file\n # not sure about cloud fraction\n # the time_utc string is empty?! why are you doing this to the user!\n data_fields = ['/PRODUCT/SUPPORT_DATA/INPUT_DATA/cloud_fraction_crb',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/surface_albedo',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time',\\\n '/PRODUCT/delta_time',\\\n '/PRODUCT/formaldehyde_tropospheric_vertical_column',\\\n '/PRODUCT/formaldehyde_tropospheric_vertical_column_precision'] \n # standardized variable names in l2g file. 
should map one-on-one to data_fields\n data_fields_l2g = ['cloud_fraction','latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','albedo','latc','lonc','qa_value','time','delta_time',\\\n 'column_amount','column_uncertainty']\n self.logger.info('Read, subset, and store level 2 data to l2g_data')\n self.logger.info('Level 2 data are located at '+l2_dir)\n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading '+fn)\n outp_nc = self.F_read_S5P_nc(fn_dir,data_fields,data_fields_l2g)\n f1 = outp_nc['SolarZenithAngle'] <= maxsza\n f2 = outp_nc['cloud_fraction'] <= maxcf\n # ridiculously, qa_value has a scale_factor of 0.01. so error-prone\n f3 = outp_nc['qa_value'] >= min_qa_value \n f4 = outp_nc['latc'] >= south\n f5 = outp_nc['latc'] <= north\n tmplon = outp_nc['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_nc['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_nc['UTC_matlab_datenum'] <= self.end_matlab_datenum\n validmask = f1 & f2 & f3 & f4 & f5 & f6 & f7 & f8 & f9\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n l2g_data0 = {}\n # yep it's indeed messed up\n Lat_lowerleft = np.squeeze(outp_nc['latitude_bounds'][:,:,0])[validmask]\n Lat_upperleft = np.squeeze(outp_nc['latitude_bounds'][:,:,3])[validmask]\n Lat_lowerright = np.squeeze(outp_nc['latitude_bounds'][:,:,1])[validmask]\n Lat_upperright = np.squeeze(outp_nc['latitude_bounds'][:,:,2])[validmask]\n Lon_lowerleft = np.squeeze(outp_nc['longitude_bounds'][:,:,0])[validmask]\n Lon_upperleft = np.squeeze(outp_nc['longitude_bounds'][:,:,3])[validmask]\n Lon_lowerright = np.squeeze(outp_nc['longitude_bounds'][:,:,1])[validmask]\n Lon_upperright = np.squeeze(outp_nc['longitude_bounds'][:,:,2])[validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_nc.keys():\n if key not in {'latitude_bounds','longitude_bounds','time_utc','time','delta_time'}:\n l2g_data0[key] = outp_nc[key][validmask]\n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def genomes_to_fingerprint_db(genome_files, model, fs, nfft, noverlap, only_genomes, min_peak_amplitude, peak_fan, max_hash_time, outputdir=\"\"):\n\n fingerprint_database = defaultdict(dict)\n fingerprint_database_comphmm = defaultdict(dict)\n revcomp_fingerprint_database = defaultdict(dict)\n revcomp_fingerprint_database_temphmm = defaultdict(dict)\n template_model_dict = dict([(a[0], a[1]) for a in model[0]])\n comp_model_dict = dict([(a[0], a[1]) for a in model[1]])\n nt_bases = set('ATCG')\n genome_ind = 0\n genome_files = list(set(genome_files)) #ensures unique entries in file list\n for g in genome_files:\n genome_ind +=1\n if len(genome_files) < 20:\n print g\n fname = g.split(\".gz\") \n if len(fname) > 1: \n gzf = gzip.open(g, \"rb\")\n lines = gzf.read().split(\"\\n\")\n gzf.close()\n fname = fname[0]\n else:\n fname = fname[0]\n with open(fname, \"r\") as gf:\n lines = gf.readlines()\n chr_ind = [i for i,s in enumerate(lines) if s.startswith('>')]\n for i in range(len(chr_ind)):\n if i+1 == len(chr_ind):\n seq = \"\".join(lines[(1+chr_ind[i]):])\n else:\n seq = \"\".join(lines[(1+chr_ind[i]):chr_ind[i+1]])\n seq = seq.replace('\\n', '').upper()\n kmer_temphmm = []\n 
kmer_comphmm = []\n kmer_comp_comphmm = []\n kmer_comp_temphmm = []\n for i in range(0, len(seq)-4):\n if all((seq_i in nt_bases) for seq_i in seq[i:i+5]):\n kmer_temphmm.append(template_model_dict[seq[i:i+5]])\n kmer_comphmm.append(comp_model_dict[seq[i:i+5]])\n kmer_comp_comphmm.append(comp_model_dict[comp(seq[i:i+5])])\n kmer_comp_temphmm.append(template_model_dict[comp(seq[i:i+5])])\n kmer_comp_comphmm.reverse() # reverse in order to keep same offset as the template\n kmer_comp_temphmm.reverse()\n fp = fingerprint_events(kmer_temphmm, plot_spectro_name=\"fp_events_%s.png\"%(fname), plot_name=\"fp_peaks_%s.png\"%(fname), fs=fs, nfft=nfft, noverlap=noverlap, min_peak_amplitude=min_peak_amplitude, peak_fan=peak_fan, max_hash_time=max_hash_time)\n g_fp = {key:value for (key, value) in fp} # if key not in g_fp else key.append[value]}\n #g_fp = list_to_dict(fp)\n\n fp_comphmm = fingerprint_events(kmer_comphmm, plot_spectro_name=\"fp_events_comphmm_%s.png\"%(fname), plot_name=\"fp_peaks_comphmm_%s.png\"%(fname), fs=fs, nfft=nfft, noverlap=noverlap, min_peak_amplitude=min_peak_amplitude, peak_fan=peak_fan, max_hash_time=max_hash_time)\n g_fp_comphmm = {key:value for (key,value) in fp_comphmm}\n #g_fp_comphmm = list_to_dict(fp_comphmm)\n\n fp_comp = fingerprint_events(kmer_comp_comphmm, plot_spectro_name=\"fp_events_comp_%s.png\"%(fname), plot_name=\"fp_peaks_comp_%s.png\"%(fname), fs=fs, nfft=nfft, noverlap=noverlap, min_peak_amplitude=min_peak_amplitude, peak_fan=peak_fan, max_hash_time=max_hash_time)\n g_fp_comp = {key:value for (key, value) in fp_comp}\n #g_fp_comp = list_to_dict(fp_comp)\n\n fp_comp_temphmm = fingerprint_events(kmer_comp_temphmm, plot_spectro_name=\"fp_events_comp_temphmm_%s.png\"%(fname), plot_name=\"fp_peaks_comp_temphmm_%s.png\"%(fname), fs=fs, nfft=nfft, noverlap=noverlap, min_peak_amplitude=min_peak_amplitude, peak_fan=peak_fan, max_hash_time=max_hash_time)\n g_fp_comp_temphmm = {key:value for (key, value) in fp_comp_temphmm}\n #g_fp_comp_temphmm = list_to_dict(fp_comp_temphmm)\n if only_genomes:\n pickle.dump(g_fp, open(outputdir+fname+\"_genome_forward.p\", \"wb\"))\n pickle.dump(g_fp_comphmm, open(outputdir+fname+\"_genome_forward_comphmm.p\", \"wb\"))\n pickle.dump(g_fp_comp, open(outputdir+fname+\"_genome_complement.p\", \"wb\"))\n pickle.dump(g_fp_temphmm, open(outputdir+fname+\"_genome_complement_temphmm.p\", \"wb\"))\n else: \n fingerprint_database[fname] = g_fp\n fingerprint_database_comphmm[fname] = g_fp_comphmm\n revcomp_fingerprint_database[fname] = g_fp_comp\n revcomp_fingerprint_database_temphmm[fname] = g_fp_comp_temphmm\n return (fingerprint_database, fingerprint_database_comphmm, revcomp_fingerprint_database, revcomp_fingerprint_database_temphmm)", "def combine_mappings(fasta_fh, mapping_fh, denoised_seqs_fh, otu_picker_otu_map_fh, out_dir):\n\n #read in mapping from split_library file\n labels = imap(lambda (a,b): a, MinimalFastaParser(fasta_fh))\n #mapping from seq_id to sample_id\n sample_id_mapping = extract_read_to_sample_mapping(labels)\n\n denoiser_mapping = read_denoiser_mapping(mapping_fh)\n #read in cd_hit otu map\n # and write out combined otu_picker+denoiser map \n otu_fh = open(out_dir+\"/denoised_otu_map.txt\",\"w\")\n for otu_line in otu_picker_otu_map_fh:\n otu_split = otu_line.split()\n \n otu = otu_split[0]\n ids = otu_split[1:]\n \n get_sample_id = sample_id_mapping.get\n #concat lists\n #make sure the biggest one is first for pick_repr\n all_ids = sort_ids(ids, denoiser_mapping)\n all_ids.extend(sum([denoiser_mapping[id] for id in 
ids], []))\n try:\n otu_fh.write(\"%s\\t\" % otu +\n \"\\t\".join(map(get_sample_id, all_ids))+\"\\n\")\n except TypeError:\n #get returns Null if denoiser_mapping id not present in sample_id_mapping\n print \"Found id in denoiser output, which was not found in split_libraries \"+\\\n \"output FASTA file. Wrong file?\"\n exit()\n\n fasta_out_fh = open(out_dir+\"/denoised_all.fasta\",\"w\")\n for label, seq in MinimalFastaParser(denoised_seqs_fh):\n id = label.split()[0]\n newlabel = \"%s %s\" %(sample_id_mapping[id], id)\n fasta_out_fh.write(Sequence(name= newlabel, seq=seq).toFasta()+\"\\n\")", "def mafExtractCmds(mafsInRegion_bin,bed_files_dir,maf_dir,extracted_chr_start_end_maf_dir, gzInput=True ,verbLevel=1):\n splitted_bed_files=glob.glob(os.path.join(bed_files_dir,\"*.bed\"))\n chrN_cmd_list={}\n for bed_file in splitted_bed_files:\n headPath, tailFN = os.path.split(bed_file)\n if tailFN.find(\"plus\")>=0:\n chrN=tailFN.split(\"plus\")[0]\n else:\n chrN=tailFN.split(\"minus\")[0]\n cmd=\"{0} {1} -outDir {2} {3}\".format(mafsInRegion_bin, bed_file, extracted_chr_start_end_maf_dir, os.path.join(maf_dir,chrN+\".maf\"))\n if verbLevel>0:\n cmd=\"echo 'Working on: {0}'\\n\".format(bed_file) +cmd\n if chrN not in chrN_cmd_list: chrN_cmd_list[chrN]=[]\n chrN_cmd_list[chrN].append(cmd)\n # combine and add gunzip support\n combined_cmd_list=[]\n for chrN in chrN_cmd_list:\n cmd_list=chrN_cmd_list[chrN]\n if gzInput:\n gunzip_cmd=\"gunzip -c {0} > {1}\".format(os.path.join(maf_dir,chrN+\".maf.gz\"),os.path.join(maf_dir,chrN+\".maf\"))\n rm_cmd=\"rm {0}\".format(os.path.join(maf_dir,chrN+\".maf\"))\n cmd_list=[gunzip_cmd,] + cmd_list + [rm_cmd]\n combined_cmd_list+=cmd_list\n return combined_cmd_list", "def split_by_chr(file):\n chr_to_file = {}\n fh = open(file) # Open your file whatever it is\n fh.readline() \n for line in fh:\n dirname = os.path.basename(sys.argv[1]) + '_By_chromosome'\n if not os.path.isdir(os.path.dirname(sys.argv[1]) + '/' + dirname + '/'): # if By_chromosome directory doesn't exist...\n os.mkdir(os.path.dirname(sys.argv[1]) + '/' + dirname + '/') # ...make it!\n LineElements = line.split() # Split your current line\n chromosome = LineElements[0]\n if chromosome not in chr_to_file:\n OutFile = os.path.dirname(sys.argv[1]) + '/' + dirname + '/' + 'chr' + chromosome + '.gff'\n chr_to_file[chromosome] = open(OutFile, 'w')\n chr_to_file[chromosome].write(\"Chr\\tReads\\tContext\\tStart\\tEnd\\tMC\\tStrand\\tReading_frame\\n\")\n chr_to_file[chromosome].write(line)\n\n for chromosome in chr_to_file:\n chr_to_file[chromosome].close()\n return None", "def F_subset_S5PNO2(self,path): \n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('S5P_RPRO_L2__NO2____'+DATE.strftime(\"%Y%m%d\")+'T*.nc')\n l2_list = l2_list+flist\n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n maxsza = self.maxsza\n maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n min_qa_value = self.min_qa_value\n \n # absolute path of useful variables in the nc file\n data_fields = 
['/PRODUCT/SUPPORT_DATA/DETAILED_RESULTS/cloud_fraction_crb_nitrogendioxide_window',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/surface_albedo_nitrogendioxide_window',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time_utc',\\\n '/PRODUCT/nitrogendioxide_tropospheric_column',\\\n '/PRODUCT/nitrogendioxide_tropospheric_column_precision'] \n # standardized variable names in l2g file. should map one-on-one to data_fields\n data_fields_l2g = ['cloud_fraction','latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','albedo','latc','lonc','qa_value','time_utc',\\\n 'column_amount','column_uncertainty']\n self.logger.info('Read, subset, and store level 2 data to l2g_data')\n self.logger.info('Level 2 data are located at '+l2_dir)\n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading '+fn)\n outp_nc = self.F_read_S5P_nc(fn_dir,data_fields,data_fields_l2g)\n f1 = outp_nc['SolarZenithAngle'] <= maxsza\n f2 = outp_nc['cloud_fraction'] <= maxcf\n # ridiculously, qa_value has a scale_factor of 0.01. so error-prone\n f3 = outp_nc['qa_value'] >= min_qa_value \n f4 = outp_nc['latc'] >= south\n f5 = outp_nc['latc'] <= north\n tmplon = outp_nc['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_nc['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_nc['UTC_matlab_datenum'] <= self.end_matlab_datenum\n validmask = f1 & f2 & f3 & f4 & f5 & f6 & f7 & f8 & f9\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n l2g_data0 = {}\n # yep it's indeed messed up\n Lat_lowerleft = np.squeeze(outp_nc['latitude_bounds'][:,:,0])[validmask]\n Lat_upperleft = np.squeeze(outp_nc['latitude_bounds'][:,:,3])[validmask]\n Lat_lowerright = np.squeeze(outp_nc['latitude_bounds'][:,:,1])[validmask]\n Lat_upperright = np.squeeze(outp_nc['latitude_bounds'][:,:,2])[validmask]\n Lon_lowerleft = np.squeeze(outp_nc['longitude_bounds'][:,:,0])[validmask]\n Lon_upperleft = np.squeeze(outp_nc['longitude_bounds'][:,:,3])[validmask]\n Lon_lowerright = np.squeeze(outp_nc['longitude_bounds'][:,:,1])[validmask]\n Lon_upperright = np.squeeze(outp_nc['longitude_bounds'][:,:,2])[validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_nc.keys():\n if key not in {'latitude_bounds','longitude_bounds','time_utc','time','delta_time'}:\n l2g_data0[key] = outp_nc[key][validmask]\n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def read_files():\n with open(\"CvixLerC9.loc\") as loc, open(\"CvixLerC9.qua\") as qua:\n qua_file = (qua.read().split('\\n'))\n qua_file = qua_file[8:-1]\n new_qua = []\n for q in qua_file:\n new_qua.append(q.split('\\t')) # [['1', '1.279502474'], ['3', '0.303712231']....]\n\n new_loc = {}\n header = ''\n read = False\n for i in loc:\n i = i.replace(\"\\n\", '')\n if read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n if \"(a,b)\" in i:\n header = i\n read = 
True\n else:\n read = False\n\n elif read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n\n return new_loc, new_qua", "def subcatsMWfootprint_diagnostics(catname='Skelton',plotdir='/Users/kschmidt/work/MUSE/MWv2_analysis/continuum_source_selection/',\n skeltonwhitakermag='814',xrange=None,bins=None,verbose=True):\n\n ids = np.array([])\n mags = np.array([])\n magnames = np.array([])\n\n if (catname.lower() == 'skelton') or (catname.lower() == 'skelton_goodss') or (catname.lower() == 'all'):\n photcat_goodss = '/Users/kschmidt/work/catalogs/MUSE_GTO/goodss_3dhst.v4.1_inMUSEWideFootprint.fits'\n photdat_goodss = afits.open(photcat_goodss)[1].data\n ids = np.append(ids,photdat_goodss['id']+1100000000)\n if skeltonwhitakermag in ['775','606']:\n magcol = 'f_F'+skeltonwhitakermag+'W'\n else:\n magcol = 'f_F'+skeltonwhitakermag+'Wcand'\n mags = np.append(mags,25.0-2.5*np.log10(photdat_goodss[magcol]))\n magnames = np.append(magnames,magcol.replace('_','\\_'))\n\n if (catname.lower() == 'skelton') or (catname.lower() == 'skelton_cosmos') or (catname.lower() == 'all'):\n photcat_cosmos = '/Users/kschmidt/work/catalogs/MUSE_GTO/cosmos_3dhst.v4.1_inMUSEWideFootprint.fits'\n photdat_cosmos = afits.open(photcat_cosmos)[1].data\n ids = np.append(ids,photdat_cosmos['id']+2100000000)\n magcol = 'f_F'+skeltonwhitakermag+'W'\n mags = np.append(mags,25.0-2.5*np.log10(photdat_cosmos[magcol]))\n magnames = np.append(magnames,magcol.replace('_','\\_'))\n\n if (catname.lower() == 'whitaker') or (catname.lower() == 'all'):\n photcat = '/Users/kschmidt/work/catalogs/MUSE_GTO/hlsp_hlf_hst_60mas_goodss_v2.0_catalog_inMUSEWideFootprint.fits'\n photdat = afits.open(photcat)[1].data\n ids = np.append(ids,photdat['id']+1200000000)\n magcol = 'f_f'+skeltonwhitakermag+'w'\n mags = np.append(mags,25.0-2.5*np.log10(photdat[magcol]))\n magnames = np.append(magnames,magcol.replace('_','\\_'))\n\n if (catname.lower() == 'laigle') or (catname.lower() == 'all'):\n photcat = '/Users/kschmidt/work/catalogs/MUSE_GTO/cosmos2015_laigle_v1.1_candelsregion_inMUSEWideFootprint.fits'\n photdat = afits.open(photcat)[1].data\n ids = np.append(ids,photdat['NUMBER']+2200000000)\n magcol = 'V_MAG_ISO'\n mags = np.append(mags,photdat[magcol])\n magnames = np.append(magnames,magcol.replace('_','\\_'))\n\n if len(ids) == 0:\n sys.exit('No IDs available for \"catname='+str(catname)+'\"')\n\n goodent = np.where((mags < 40) & (mags > 5) & np.isfinite(mags))[0]\n mags_good = mags[goodent]\n ids_good = ids[goodent]\n\n Nbad = len(ids) - len(ids_good)\n Ncosmos = len(np.where(ids_good > 1.9e9)[0])\n Ngoodss = len(np.where(ids_good < 1.9e9)[0])\n Ntotal = Ngoodss+Ncosmos\n\n if verbose: print(' - Read the catalog selection \"'+catname+'\" finding the following number of sources:')\n if verbose: print(' (discarding '+str(Nbad)+' sources for not being finite or having poor mags)')\n if verbose: print(' Total : '+str(Ntotal))\n if verbose: print(' GOODS-S : '+str(Ngoodss))\n if verbose: print(' COSMOS : '+str(Ncosmos))\n\n # - - - - - - - - - - - - - - - - - - - - PLOTTING - - - - - - - - - - - - - - - - - - - -\n if catname.lower() == 'all':\n magext = 'm'+skeltonwhitakermag\n else:\n magext = magcol\n plotname = plotdir+'mag_histogram_'+catname.lower()+'_'+magext+'.pdf'\n if verbose: print(' - Setting up and generating histogram of MUSE-Wide sources in \\n '+plotname)\n fig = plt.figure(figsize=(5, 4))\n fig.subplots_adjust(wspace=0.1, hspace=0.1,left=0.2, right=0.95, 
bottom=0.2, top=0.95)\n Fsize = 14\n lthick = 1.5\n marksize = 3\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif',size=Fsize)\n plt.rc('xtick', labelsize=Fsize)\n plt.rc('ytick', labelsize=Fsize)\n plt.clf()\n plt.ioff()\n #plt.title('M^\\star',fontsize=Fsize)\n\n if xrange is None:\n xrange = [np.min(mags_good),np.max(mags_good)]\n\n if (bins is None):\n bin_dz = 0.1\n bins = np.arange(np.min(mags_good),np.max(mags_good)+bin_dz,bin_dz)\n if xrange is not None:\n bins = np.arange(np.min(xrange),np.max(xrange)+bin_dz,bin_dz)\n\n\n magranges = [[0,24],[24,25],[25,26],[26,99],[0,26]]\n colors = ['blue','green','orange','red','black']\n\n for mm, magrange in enumerate(magranges):\n goodent = np.where((mags_good > magrange[0]) & (mags_good <= magrange[1]))[0]\n Ngood = len(goodent)\n\n if Ngood>1:\n goodIDs = ids[goodent]\n goodmag = mags_good[goodent]\n goodcolor = colors[mm]\n magmin = np.min(goodmag)\n magmax = np.max(goodmag)\n\n infostr = ' Histinfo:'\n\n percent = float(Ngood)/float(Ntotal)*100.\n label = str(magrange[0])+'$<$mag$<=$'+str(magrange[1])+' \\n('+str(Ngood)+' obj; '+str('%.2f' % percent)+'\\%)'\n\n if mm < len(magranges)-1:\n fillval = True\n linest = '-'\n else:\n fillval = False\n linest = ':'\n hist = plt.hist(goodmag,color=goodcolor,bins=bins,histtype=\"step\",lw=lthick,label=label,ls=linest,\n fill=fillval,fc=goodcolor,alpha=0.8)\n\n plt.xlim(xrange)\n plt.xlabel('AB magnitude \\n('+', '.join(list(magnames))+')', fontsize=Fsize)\n\n #plt.ylim(yrange)\n plt.ylabel(catname.replace('_','\\_')+' catalog objects\\nover MUSE-Wide 100 field footprint', fontsize=Fsize)\n\n #--------- LEGEND ---------\n anchorpos = (0.5, 1.2)\n leg = plt.legend(fancybox=True,numpoints=1, loc='upper left',prop={'size':Fsize-3},ncol=1)#,\n #bbox_to_anchor=anchorpos) # add the legend\n leg.get_frame().set_alpha(0.7)\n #--------------------------\n\n plt.savefig(plotname)\n plt.clf()\n plt.close('all')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
take a list of basenames, get lcd and merge set founder affection according to faff flag and offspring according to ofaff flag
def mergePed(bnlist=[],faff=[],ofaff=[],newbasename='newped',fo=0): lcdmap = getLCD(bnlist) # list of chr,offset,rs for all snp common to all files print 'got %d lcd snps-%s' % (len(lcdmap),lcdmap[:5]) cfped = [] coped = [] cfgeno = [] cogeno = [] allrsa = {} ignorers = {} for i,basename in enumerate(bnlist): fped,oped,fgeno,ogeno,trsadict = subsetPed(basename,lcdmap,faff[i],ofaff[i]) print '%s gave %d fgeno' % (basename,len(fgeno)) for rs in trsadict.keys(): tk = trsadict[rs].keys() if len(tk) > 2: print 'for %s, rs %s has alleles %s' % (basename, rs, trsadict[rs]) if not allrsa.get(rs,None): allrsa[rs] = {} for a in tk: if not allrsa[rs].get(a,None): allrsa[rs][a] = trsadict[rs][a] else: allrsa[rs][a] += trsadict[rs][a] tk = allrsa[rs].keys() if len(tk) > 2 and not ignorers.get(rs,None): # new #print 'After merge basename %s, rs %s has alleles %s' % (basename, rs,allrsa[rs]) ignorers[rs] = rs cfped += fped coped += oped cfgeno += fgeno cogeno += ogeno print 'after merge all have %d fgeno and %d ogeno' % (len(cfgeno),len(cogeno)) # now have offspring and founder rows in lcdmap order # write map file print '### found %d markers > 2 alleles' % (len(ignorers.keys())) keepmarkers = [x for x in range(len(lcdmap)) if not ignorers.get(lcdmap[x][2],None)] newmap = ['\t'.join((lcdmap[x][0],lcdmap[x][2],'0','%d' % lcdmap[x][1])) for x in keepmarkers] # chrom,offset,rs f = file('%s.map' % newbasename,'w') f.write('%s\n' % '\n'.join(newmap)) f.close() for i,geno in enumerate(cfgeno): # convert each array into a list and keep the good markers gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers]) g = array.array('c',gs) # good ones cfgeno[i] = g # replace print 'cfgeno converted' if not fo: # not founders only - note arrays are not lists! cfped += copy.copy(coped) # del coped for i,geno in enumerate(cogeno): # convert each array into a list and keep the good markers gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers]) g = array.array('c',gs) # good ones cfgeno.append(g) # extend founders del cogeno print 'after if not fo now have %d cfgeno' % (len(cfgeno)) f = file('%s.ped' % newbasename,'w') for n,ped in enumerate(cfped): l = ' '.join(ped + list(cfgeno[n])) if n % 100 == 0 and n > 0: print 'writing line %d' % n f.write(l) f.write('\n') f.close() print 'wrote %d map rows and %d ped rows to %s' % (len(newmap),len(cfped),newbasename)
[ "def subsetPed(basename=\"\",lcdmap = [],faff='1', ofaff='2'):\r\n mf = file('%s.map' % basename,'r').readlines()\r\n lmap = [x.strip().split() for x in mf]\r\n rscols = {} # lookup marker table\r\n colrs = [] # lookup rs from column\r\n for i,m in enumerate(lmap): # get columns to keep in the order we want them\r\n rscols[m[1]] = i # keep track of where each rs is in this map\r\n colrs.append(m[1]) # and keep the list of rs for tracking alleles\r\n wewant = [rscols[x[2]] for x in lcdmap] # columns we want to keep\r\n print '#Subsetped faff=%s ofaff=%s keeping %d (%s) of potential lcd %d for %s' % \\\r\n (faff,ofaff,len(wewant),wewant[:20],len(lcdmap),basename)\r\n pf = file('%s.ped' % basename,'r')\r\n ogeno = [] # offspring new lines\r\n fgeno = [] # founders\r\n oped = [] # for pedigrees\r\n fped = []\r\n rsadict = {} # keep a count of alleles - seems to be a problem\r\n for i,l in enumerate(pf):\r\n if (i+1) % 500 == 0:\r\n print '%s at line %d' % (basename,i+1)\r\n ll = l.strip().split()\r\n ped = ll[:6]\r\n founder = (ll[2] == '0' and ll[3] == '0') \r\n aff = faff\r\n if not founder:\r\n aff = ofaff\r\n ped[5] = aff # adjust as needed\r\n if founder:\r\n fped.append(ped)\r\n else:\r\n oped.append(ped)\r\n gt = ll[6:]\r\n geno = []\r\n for snp in wewant: # columns in order\r\n thisrs = colrs[snp]\r\n base = snp*2\r\n g1 = gt[base]\r\n g2 = gt[base+1]\r\n geno.append(g1)\r\n geno.append(g2)\r\n if not rsadict.get(thisrs,None):\r\n rsadict[thisrs] = {}\r\n if g1 <> '0':\r\n if not rsadict[thisrs].get(g1,None):\r\n rsadict[thisrs][g1] = 1\r\n else:\r\n rsadict[thisrs][g1] += 1 \r\n if g2 <> '0':\r\n if not rsadict[thisrs].get(g2,None):\r\n rsadict[thisrs][g2] = 1\r\n else:\r\n rsadict[thisrs][g2] += 1\r\n keepgt = array.array('c',geno)\r\n if founder:\r\n fgeno.append(keepgt)\r\n else:\r\n ogeno.append(keepgt)\r\n print '#Subsetped %s %d fgeno %d ogeno' % (basename,len(fgeno),len(ogeno))\r\n return fped,oped,fgeno,ogeno,rsadict", "def default_flags(self, flist):\n flags = ''\n if flist:\n for f in flist:\n flags += f[1].upper() if f[2] else f[1]\n return flags", "def get_files_suffix_list(suffixes, flist, Lshow=False, Ldir=False):\n matched_files=[]\n dirs=[]\n files=[]\n for fname in flist:\n if os.path.isdir(fname):\n dirs.append(fname)\n else:\n files.append(fname)\n for suff in suffixes:\n for fname in files:\n #print(f\" {suff} in {fname} ?\")\n if fname.endswith(suff):\n matched_files.append(fname)\n matched_files.extend(dirs) \n return matched_files", "def readrf(dirpath,egain=1,dlgain=1,dlen=[1,1],magtype='bb',\r\n bbfile=r'c:\\Peacock\\PHD\\BIRRP\\bbconv.txt',ffactor=1):\r\n impfn=[]\r\n tipfn=[]\r\n for filename in os.listdir(dirpath):\r\n if fnmatch.fnmatch(filename, '*.1r1.rf'):\r\n impfn.append(filename)\r\n elif fnmatch.fnmatch(filename, '*.1r2.rf'):\r\n impfn.append(filename)\r\n elif fnmatch.fnmatch(filename, '*.2r1.rf'):\r\n impfn.append(filename)\r\n elif fnmatch.fnmatch(filename, '*.2r2.rf'):\r\n impfn.append(filename)\r\n elif fnmatch.fnmatch(filename, '*.3r1.rf'):\r\n tipfn.append(filename)\r\n elif fnmatch.fnmatch(filename, '*.3r2.rf'):\r\n tipfn.append(filename)\r\n if len(impfn)!=4:\r\n raise ValueError('Somethings a miss, did not find all .rf files') \r\n if len(tipfn)==2:\r\n print 'Got Tipper files'\r\n \r\n \r\n #get ofil from filenames\r\n ofilloc=impfn[0].find('.')\r\n ofil=impfn[0][0:ofilloc]\r\n z=[]\r\n period=[]\r\n freq=[]\r\n tip=[]\r\n for ll in range(4):\r\n impfid=file(os.path.join(dirpath,impfn[ll]),'r')\r\n implines=impfid.readlines()\r\n 
period=[]\r\n freq=[]\r\n zijreal=[]\r\n zijimag=[]\r\n zijerr=[]\r\n for ii in range(len(implines)):\r\n line=implines[ii]\r\n line=line.rstrip()\r\n impstr=line.split(' ')\r\n implst=[]\r\n for kk in range(len(impstr)):\r\n if len(impstr[kk])>=3:\r\n if impstr=='NaN':\r\n implst.append(0.0)\r\n elif impstr=='+Inf':\r\n implst.append(0.0)\r\n else:\r\n implst.append(float(impstr[kk]))\r\n period.append(implst[0])\r\n freq.append(implst[1])\r\n zijreal.append(implst[2])\r\n zijimag.append(implst[3])\r\n zijerr.append(implst[4])\r\n z.append([zijreal,zijimag,zijerr])\r\n try:\r\n z=np.array(z)*ffactor\r\n #convert electric channels to microV/m\r\n z[0:2,:,:]=z[0:2,:,:]*float(dlgain)/(float(dlen[0])*float(egain))\r\n z[2:,:,:]=z[2:,:,:]*float(dlgain)/(float(dlen[1])*float(egain))\r\n except ValueError:\r\n raise ValueError('BIRRP has output uneven file lengths, try running '+\r\n 'again with slight change in parameters.')\r\n\r\n #get tipper\r\n if len(tipfn)==2: \r\n for tfn in tipfn:\r\n tipfid=file(os.path.join(dirpath,tfn),'r')\r\n tiplines=tipfid.readlines()\r\n tipijreal=[]\r\n tipijimag=[]\r\n tipijerr=[]\r\n for ii in range(len(tiplines)):\r\n line=tiplines[ii]\r\n line=line.rstrip()\r\n tipstr=line.split(' ')\r\n tiplst=[]\r\n for kk in range(len(tipstr)):\r\n if len(tipstr[kk])>=3:\r\n if tipstr=='NaN':\r\n tiplst.append(0.0)\r\n elif impstr=='+Inf':\r\n tiplst.append(0.0)\r\n else:\r\n tiplst.append(float(tipstr[kk]))\r\n tipijreal.append(tiplst[2])\r\n tipijimag.append(tiplst[3])\r\n tipijerr.append(tiplst[4])\r\n tip.append([tipijreal,tipijimag,tipijerr])\r\n tip=np.array(tip)\r\n \r\n #flip array so frequency is decreasing, period is increasing\r\n freq=np.array(freq)\r\n period=np.array(period)\r\n if freq[0]<freq[-1]:\r\n freq=freq[::-1]\r\n period=period[::-1]\r\n z=z[:,:,::-1]\r\n if type(tip)!=list:\r\n tip=tip[:,:,::-1]\r\n \r\n #convert magnetics\r\n if magtype.lower()=='bb':\r\n zconv=bbconvz(z,freq,bbfile,dlgain)\r\n if magtype.lower()=='lp':\r\n zconv=lpconvz(z,dlgain)\r\n \r\n \r\n return ofil,period,freq,zconv,tip", "def make_master_flats(dc):\n\n\t## Make EXTcheck: is there always the same number of extensions in each file\n\tprint \"Making master flats\"\n\t\n\t## Choose extensions you are using\n\t\n\tfor flat_type in ['FFS']: # Currently FFD is unsupported. If you have FFDs, add them to the list but you must have ONLY FFDs or ONLY FFSs in the dir. 
Otherwise the first element in the list will get overwritten!\n\t\t#~ print \"\\n\", flat_type, \"\\n\"\n\t\tfor i in dc:\n\t\t\tTRIM, TRIM1, VR, PS, PS1, OS, OS1 = CCD_sections((i[0], i[1]))\n\t\t\tfilelist = []\n\t\t\tfor f in glob.glob(RAW+'*'+flat_type+'*fits'):\n\t\t\t\tccd_conf = []\n\t\t\t\theader0 = fits.getheader(f)\n\t\t\t\theader1 = fits.getheader(f, ext=1)\n\t\t\t\tif header0['OBSMODE']==flat_type:\n\t\t\t\t\tfor KW in ['BINX', 'BINY']:\n\t\t\t\t\t\tccd_conf.append(header0[KW])\n\t\t\t\t\tfor KW in ['NAXIS1', 'NAXIS2']:\n\t\t\t\t\t\tccd_conf.append(header1[KW])\n\t\t\t\t\t\tif tuple(ccd_conf)==i:\n\t\t\t\t\t\t\tfilelist.append(f)\n\t\t\tlfl = len(filelist)\n\t\t\tif lfl > 0:\n\t\t\t\tBIN=CD+'/'+str(i[0])+'x'+str(i[1])+'/'\n\t\t\t\tWD=BIN+str(i[-2])+'x'+str(i[-1])+'/' # Bottom level dir with calibrated and master frames\n\t\t\t\tB=check_exist(WD, 'MF.fits', i)\n\t\t\t\tif B=='n':\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\thdul = fits.HDUList()\n\t\t\t\t\thdul.append(fits.ImageHDU())\n\t\t\t\t\t#~ MB = fits.open(WD+'MB.fits')\n\t\t\t\t\tx = np.array(range(0,i[-1]))\n\t\t\t\t\tfor EXT in (extensions):\n\t\t\t\t\t\tprint \"##################################################\"\n\t\t\t\t\t\tprint \"Stacking \"+`lfl`+' '+`i[-2]`+'x'+`i[-1]`+' channel '+`EXT`+' flat frames!'\n\t\t\t\t\t\tif EXT==1:\n\t\t\t\t\t\t\tPSC=PS1\n\t\t\t\t\t\t\tOSC=OS1\n\t\t\t\t\t\t\tTR=TRIM1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tPSC=PS1\n\t\t\t\t\t\t\tOSC=OS\n\t\t\t\t\t\t\tTR=TRIM\n\t\t\t\t\t\tsc = -1 # counts how many flats have mean>limit\n\t\t\t\t\t\tfor n, fn in enumerate(filelist):\n\t\t\t\t\t\t\tprint \"Files left:\",`lfl-n`+'/'+`lfl`\n\t\t\t\t\t\t\tim = fits.getdata(fn, ext=EXT)\n\t\t\t\t\t\t\tmeanval = np.mean(im[VR[0]:VR[1], TR[0]:TR[1]])\n\t\t\t\t\t\t\t#~ maxval = np.max(im[VR[0]:VR[1], TR[0]:TR[1]])\n\t\t\t\t\t\t\tmaxval = stats.scoreatpercentile(im[VR[0]:VR[1], TR[0]:TR[1]], 90)\n\t\t\t\t\t\t\texptime = fits.getheader(fn)['EXPTIME']\n\t\t\t\t\t\t\t#~ if meanval > 15000. and meanval < 40000. and maxval < 50000. and exptime>5.:\n\t\t\t\t\t\t\tif meanval > 16000. and meanval < 40000. 
and exptime>=5.:\n\t\t\t\t\t\t\t\tsc+=1\n\t\t\t\t\t\t\t\t#~ im[im<1]=1\n\t\t\t\t\t\t\t\tmscrow, sigmarow = median_row(OSC, PSC, TR, im)\n\t\t\t\t\t\t\t\tsh = np.shape(im)\n\t\t\t\t\t\t\t\tfor y in range(0, sh[0]):\n\t\t\t\t\t\t\t\t\tim[y] = im[y]-mscrow[y]\n\t\t\t\t\t\t\t\tF=im\n\t\t\t\t\t\t\t\tnorm = np.median(F[VR[0]:VR[1], TR[0]:TR[1]])\n\t\t\t\t\t\t\t\tF = F/norm #+np.min(F)+0.0001\n\t\t\t\t\t\t\t\tif sc==0:\n\t\t\t\t\t\t\t\t\tstack_arr = F\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tstack_arr = np.dstack((stack_arr, F))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprint \"****************************************\"\n\t\t\t\t\t\t\t\tprint \"Rejected\", fn, \"AVG =\", meanval, \"EXPTIME =\", exptime\n\t\t\t\t\t\t\t\tprint \"****************************************\"\n\t\t\t\t\t\tprint 'Will stack a total of', np.shape(stack_arr)[2], 'flats'\n\t\t\t\t\t\tMF = np.median(stack_arr, axis=2)\n\t\t\t\t\t\thdul.append(fits.ImageHDU(MF))\n\t\t\t\t\t\thdul[EXT].header.set(\"NAXIS1\", np.shape(MF)[1])\n\t\t\t\t\t\thdul[EXT].header.set(\"NAXIS2\", np.shape(MF)[0])\n\t\t\t\t\thdul[0].header.set(\"CALIBR\", \"T\")\n\t\t\t\t\thdul[0].header.set(\"INSTRUME\", \"MAIA\")\n\t\t\t\t\thdul[0].header.set(\"BINX\", i[0])\n\t\t\t\t\thdul[0].header.set(\"BINY\", i[1])\n\t\t\t\t\thdul[0].header.set(\"CALMODE\", \"MASTER FLAT\")\n\t\t\t\t\thdul.writeto(WD+\"MF.fits\", clobber=True)\n\t\t\t\t\tprint \"############################################################\"\n\tprint \"Completed master flats\"", "def enf_featlist(filelist):\n\n # first place the input through the same requirements of any filelist\n filelist = enf_filelist(filelist)\n new_filelist = []\n feat_types = ['shp']\n\n for filename in filelist:\n ext=filename[-3:]\n\n if os.path.isfile(filename):\n for feat_type in feat_types:\n if ext == feat_type:\n new_filelist.append(filename)\n\n return(new_filelist)", "def get_light_sbc(filenames, onoff=True):\n if onoff:\n param = \"on\"\n else:\n param = \"off\"\n return filter_filenames(filenames, [param])", "def gen_antmodelfiles(inpfileL=LOFAR_HAdata_dir+'DefaultCoeffLBA.cc',\n inpfileH=LOFAR_HAdata_dir+'DefaultCoeffHBA.cc',\n outfileL=HA_LBAfile_default,\n outfileH=HA_HBAfile_default\n ):\n \n #Adding nominal frequency channels. 
The HA model for LOFAR has two BANDS\n #while data recording S/W has 3 intervals based on sampling frequency,\n #namely (0,100), (100,200), (200,300), each with 512 channels.\n #Here I concatenate the two latter intervals.\n\n channels = numpy.linspace(0., sampfreq, nr_channels, endpoint=False)\n convLOFARcc2HA(inpfileL, outfileL, channels)\n inpfileL = outfileL\n convHA2DPE(inpfileL, DP_LBAfile_default)\n channels = numpy.linspace(sampfreq, 3*sampfreq, 2*nr_channels, endpoint=False)\n convLOFARcc2HA(inpfileH, outfileH, channels)\n inpfileH = outfileH\n convHA2DPE(inpfileH, DP_HBAfile_default)", "def combine_mappings(fasta_fh, mapping_fh, denoised_seqs_fh, otu_picker_otu_map_fh, out_dir):\n\n #read in mapping from split_library file\n labels = imap(lambda (a,b): a, MinimalFastaParser(fasta_fh))\n #mapping from seq_id to sample_id\n sample_id_mapping = extract_read_to_sample_mapping(labels)\n\n denoiser_mapping = read_denoiser_mapping(mapping_fh)\n #read in cd_hit otu map\n # and write out combined otu_picker+denoiser map \n otu_fh = open(out_dir+\"/denoised_otu_map.txt\",\"w\")\n for otu_line in otu_picker_otu_map_fh:\n otu_split = otu_line.split()\n \n otu = otu_split[0]\n ids = otu_split[1:]\n \n get_sample_id = sample_id_mapping.get\n #concat lists\n #make sure the biggest one is first for pick_repr\n all_ids = sort_ids(ids, denoiser_mapping)\n all_ids.extend(sum([denoiser_mapping[id] for id in ids], []))\n try:\n otu_fh.write(\"%s\\t\" % otu +\n \"\\t\".join(map(get_sample_id, all_ids))+\"\\n\")\n except TypeError:\n #get returns Null if denoiser_mapping id not present in sample_id_mapping\n print \"Found id in denoiser output, which was not found in split_libraries \"+\\\n \"output FASTA file. Wrong file?\"\n exit()\n\n fasta_out_fh = open(out_dir+\"/denoised_all.fasta\",\"w\")\n for label, seq in MinimalFastaParser(denoised_seqs_fh):\n id = label.split()[0]\n newlabel = \"%s %s\" %(sample_id_mapping[id], id)\n fasta_out_fh.write(Sequence(name= newlabel, seq=seq).toFasta()+\"\\n\")", "def parse_affixes():\n start_time = time.time()\n\n files = []\n for resource in resources.AFFIX_TABLES:\n table_files = paths.DB / resource\n files.extend(glob.glob(str(table_files), recursive=True))\n\n logging.info(f\"Found {len(files)} affix table files.\")\n\n # The affix tables will determine what gear an affix can be applied to.\n affix_tables: dict[str, set] = {}\n affix_files: set[Path] = set()\n for dbr in files:\n table = read(dbr)\n\n # Use the filename to determine what equipment this table is for:\n file_name = os.path.basename(dbr).split(\"_\")\n table_type = get_affix_table_type(file_name[0])\n\n # For each affix in this table, create an entry:\n for field, affix_dbr in table.items():\n if not field.startswith(\"randomizerName\") or not affix_dbr.exists():\n continue\n\n # Add this file as discovered, this will determine what affixes are actually parsed\n affix_files.add(affix_dbr)\n\n if affix_dbr not in affix_tables:\n affix_tables[affix_dbr] = {table_type}\n elif table_type not in affix_tables[affix_dbr]:\n affix_tables[affix_dbr].add(table_type)\n\n logging.info(f\"Found {len(affix_files)} affix files.\")\n\n affixes = {\"prefixes\": {}, \"suffixes\": {}}\n for dbr in affix_files:\n affix = parse(dbr)\n\n # Tinkerer needs a little custom love because it has no properties, but a special text:\n if affix[\"tag\"] == \"x3tagSuffix01\":\n affix[\"properties\"] = {\"description\": texts.get(\"x3tagextrarelic\")}\n\n # Assign the table types to this affix:\n if dbr not in affix_tables:\n # 
Affix can occur on all equipment:\n affix[\"equipment\"] = {\"none\"}\n else:\n affix[\"equipment\"] = affix_tables[dbr]\n\n # Add affixes to their respective pre- or suffix list.\n if \"Prefix\" in affix[\"tag\"] and \"suffix\" not in dbr.parts:\n affixType = \"prefixes\"\n else:\n affixType = \"suffixes\"\n\n affixTag = affix.pop(\"tag\")\n\n # Either add the affix or add its properties as an alternative\n if affixTag in affixes[affixType]:\n affix_result = affixes[affixType][affixTag]\n # Skip duplicate affix properties:\n if is_duplicate_affix(affix_result, affix):\n continue\n\n # Create a list if it wasn't already one\n if not isinstance(affix_result[\"properties\"], list):\n affix_result[\"properties\"] = [affix_result[\"properties\"], affix[\"properties\"]]\n else:\n affix_result[\"properties\"].append(affix[\"properties\"])\n affix_result[\"equipment\"].update(affix[\"equipment\"])\n else:\n # Make sure to copy here since we alter some properties after looping\n affixes[affixType][affixTag] = affix.copy()\n\n # Parse the equipment & properties one last time to standardize the formats\n for _, v in affixes.items():\n for _, affix in v.items():\n affix[\"equipment\"] = \",\".join(list(affix[\"equipment\"]))\n affix[\"properties\"] = (\n affix[\"properties\"] if isinstance(affix[\"properties\"], list) else [affix[\"properties\"]]\n )\n\n # Log and reset the timer:\n logging.info(f\"Parsed affixes in {time.time() - start_time:.2f} seconds.\")\n\n return affixes", "def getPrefices(fileList):\n # Format:\n # prefix_dictionary[surl] = [oldPrefix, newPrefix]\n # Note: this function returns oldPrefix, newPrefix, prefix_dictionary\n # old/newPrefix are the fixed prefices defined in copysetup[in]\n # In case copyprefix[in] can be used, ie if it is set, it may contain a list of copyprefices that can sort out\n # more complicated cases\n\n prefix_dictionary = {}\n\n # get the file access info (only old/newPrefix are needed here)\n useCT, oldPrefix, newPrefix, useFileStager, directIn = getFileAccessInfo()\n\n # get the copyprefices\n copyprefix = readpar('copyprefixin')\n if copyprefix == \"\":\n copyprefix = readpar('copyprefix')\n\n # should we fall back to copyprefix or use the faxredirector? 
(this is the case for FAX test jobs since they reset old/newPrefix)\n if oldPrefix == \"\" or newPrefix == \"\" or not (oldPrefix and newPrefix):\n\n # special case for FAX on sites that are not setup for direct i/o in the normal way\n if (readpar('copytoolin').lower() == \"fax\") or (readpar('copytoolin') == \"\" and readpar('copytool').lower() == \"fax\"):\n if \"dummy\" in copyprefix:\n # try to construct the TURL using the copyprefix and the faxredirector\n prefix, dummy = copyprefix.split(\"^\")\n faxredirector = readpar('faxredirector')\n if faxredirector != \"\":\n tolog(\"Using copyprefix and faxredirector for old/newPrefix\")\n oldPrefix = prefix\n newPrefix = faxredirector\n else:\n tolog(\"WARNING: faxredirector not set, do not know how to construct old/newPrefix\")\n else:\n if not \"^\" in copyprefix:\n tolog(\"WARNING: Will default to using lcg-getturls\")\n \n # in case of less complex copyprefix\n if \"^\" in copyprefix and not \",\" in copyprefix and not \"dummy\" in copyprefix:\n prefices = copyprefix.split(\"^\")\n oldPrefix = prefices[0]\n newPrefix = prefices[1]\n\n # in case of more complex copyprefix (the case of copyprefix lists)\n if \"^\" in copyprefix and \",\" in copyprefix and not \"dummy\" in copyprefix:\n\n # handle copyprefix lists\n pfroms, ptos = getCopyprefixLists(copyprefix)\n tolog(\"Copyprefix lists: %s, %s\" % (str(pfroms), str(ptos)))\n\n if not \"\" in pfroms and not \"dummy\" in pfroms and not \"\" in ptos and not \"dummy\" in ptos:\n # create a prefix dictionary for all the files\n for surl in fileList:\n # first get the proper old/newPrefices\n oldPrefix, newPrefix = matchCopyprefixReplica(surl, pfroms, ptos)\n # then fill the dictionary\n prefix_dictionary[surl] = [oldPrefix, newPrefix]\n else:\n if oldPrefix != \"\" and newPrefix != \"\":\n # Use the same prefices for all surls\n for surl in fileList:\n prefix_dictionary[surl] = [oldPrefix, newPrefix]\n\n else: # old/newPrefix are set\n\n # handle copyprefix lists\n pfroms, ptos = getCopyprefixLists(copyprefix)\n tolog(\"Copyprefix lists: %s, %s\" % (str(pfroms), str(ptos)))\n\n if not \"\" in pfroms and not \"dummy\" in pfroms and not \"\" in ptos and not \"dummy\" in ptos:\n # create a prefix dictionary for all the files\n for surl in fileList:\n # first get the proper old/newPrefices\n oldPrefix, newPrefix = matchCopyprefixReplica(surl, pfroms, ptos)\n # then fill the dictionary\n prefix_dictionary[surl] = [oldPrefix, newPrefix]\n else:\n if oldPrefix != \"\" and newPrefix != \"\":\n # Use the same prefices for all surls\n for surl in fileList:\n prefix_dictionary[surl] = [oldPrefix, newPrefix]\n \n if oldPrefix != \"\" and newPrefix != \"\":\n tolog(\"Will use oldPrefix=%s and newPrefix=%s for SURL to TURL conversion\" % (oldPrefix, newPrefix))\n else:\n tolog(\"WARNING: old/newPrefix not known\")\n\n return oldPrefix, newPrefix, prefix_dictionary", "def Parse_folder_to_multi_faa(target_dir,faa_filename):\n os.chdir(target_dir)\n output_handle = open(faa_filename, \"w\")\n for gbk_filename in FileGen(target_dir):\n with open(gbk_filename, \"r\") as input_handle:\n for seq_record in SeqIO.parse(input_handle, \"genbank\") :\n print(\"Dealing with GenBank record %s\" % seq_record.id)\n for seq_feature in seq_record.features :\n if seq_feature.type==\"CDS\" :\n assert len(seq_feature.qualifiers['translation'])==1\n try:\n name = seq_feature.qualifiers['locus_tag'][0]\n except KeyError:\n name = seq_feature.qualifiers['product'][0]\n output_handle.write(\">%s from %s\\n%s\\n\" % (\n name,\n 
gbk_filename.split(\"/\")[-1],\n seq_feature.qualifiers['translation'][0])) \n output_handle.close()", "def masterFlat(flat_list, master_dark_fname, normalize = 'median', local_sig_bad_pix = 3, \\\n global_sig_bad_pix = 9, local_box_size = 11, hotp_map_fname = None, verbose=False,\n output_dir = None,min_flux=1000):\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n\n if verbose:\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open all files into a 3D array\n #foo = np.empty((dark_shape[0],dark_shape[1],len(flat_list)))\n foo = []\n\n #Open first flat file to check exposure time and filter\n first_flat_hdu = f.open(flat_list[0])\n flat_exp_time = first_flat_hdu[0].header['EXPTIME']\n\n\n\n if dark_exp_time != flat_exp_time:\n print(\"The master dark file doesn't have the same exposure time as the flats. We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = flat_exp_time/dark_exp_time\n else:\n factor = 1.\n\n #We've already read it, so we'll stick it in foo\n\n print(\"Combining flat files\")\n for i in range(0,len(flat_list)):\n try: \n #subtract dark for each file, then normalize by mode\n hdu = f.open(flat_list[i],ignore_missing_end=True)\n d_sub = hdu[0].data - factor*master_dark\n if np.nanmedian(d_sub) < min_flux:\n #print(\"Skipping file {}, because its flux is lower than {}\".format(flat_list[i],min_flux))\n continue\n #normalize\n if normalize == 'mode':\n d_sub = d_sub/mode(d_sub, axis = None, nan_policy = 'omit')\n elif normalize == 'median':\n d_sub = d_sub/np.nanmedian(d_sub)\n #foo[:,:,i] = d_sub\n foo.append(d_sub)\n except:\n print(\"Some error. Skipping file {}\".format(i)) \n #Median combine frames\n flat = np.median(foo, axis = 0)\n\n #Filter bad pixels\n #bad_px = sigma_clip(flat, sigma = sig_bad_pix) #old and bad\n ###Major update here: do sigma clipping on the pix-to-pix flat with the large scale vignette removed\n ###Also add local sigma clipping\n def stddevFilter(img, box_size):\n \"\"\" from\n https://stackoverflow.com/questions/28931265/calculating-variance-of-an-image-python-efficiently/36266187#36266187\n This function compute the standard deviation of an image in a\n moving box of a given size. 
The pixel i,j of the output is the\n standard deviation of the pixel value in the box_size x box_size box\n around the i,j pixel in the original image.\n \"\"\"\n wmean, wsqrmean = (cv2.boxFilter(x, -1, (box_size, box_size), \\\n borderType=cv2.BORDER_REFLECT) for x in (img, img*img))\n return np.sqrt(wsqrmean - wmean*wmean)\n\n #median flat\n median_flat = median_filter(flat, local_box_size) #arbitrary size, shouldn't matter as long as it's big enough\n #standard deviation image\n stddev_im = stddevFilter(flat, local_box_size)\n\n #Local clipping\n local_bad_pix = np.abs(median_flat - flat) > local_sig_bad_pix*stddev_im\n\n #Global clipping here to reject awful pixels and dust, bad columns, etc\n pix_to_pix = flat/median_flat\n global_bad_px = sigma_clip(pix_to_pix, sigma = global_sig_bad_pix).mask #9 seems to work best\n\n #also set all 0 and negative pixels in flat as bad\n non_positive = flat <= 0\n\n #logic combine\n bad_px = np.logical_or(global_bad_px, local_bad_pix)\n\n #also add non_positive pixels\n bad_px = np.logical_or(bad_px, non_positive)\n\n #Normalize good pixel values\n if normalize == 'median':\n norm_flat = flat/np.nanmedian(flat[~bad_px])\n elif normalize == 'mode':\n norm_flat = flat/mode(flat, axis = None, nan_policy = 'omit')\n #Stick it back in the last hdu\n hdu[0].data = norm_flat\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created master flat by median combining the following:\"\n for i in range(len(flat_list)):\n hdu[0].header['HISTORY'] = flat_list[i]\n hdu[0].header['HISTORY'] = \"Normalized to the median of the master flat\"\n hdu[0].header['HISTORY'] = \"Performed bad pixel local and global sigma clipping with {}, {}sigmas\".format(local_sig_bad_pix, global_sig_bad_pix)\n hdu[0].header['HISTORY'] = \"############################\"\n\n #Parse the last fileanme\n if output_dir is not None:\n flat_outname = flat_list[-1].rsplit('.',1)[0]+\"_master_flat.fits\"\n flat_outname = flat_outname.rsplit('/',1)[-1]\n flat_outname = output_dir+flat_outname\n else:\n flat_outname = flat_list[-1].rsplit('.',1)[0]+\"_master_flat.fits\"\n\n #Write the fits file\n if verbose:\n print((\"Writing master flat to {}\".format(flat_outname)))\n hdu.writeto(flat_outname, overwrite=True)\n\n #If there's already a hot pixel map then we'll add to it.\n if hotp_map_fname != None:\n #read in the existing bp map\n #hdu = f.open(hotp_map_fname)\n #hdu[0].data += np.array(bad_px.mask, dtype=float)\n #hdu[0].data = np.logical_or(hdu[0].data.astype(bool), bad_px) #use logical or to combine bad pixel maps\n #bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n print(\"Will deal with hot pixel map from dark frames in the calibrate function\")\n\n #else:\n #Parse the last fileanme\n if output_dir is not None:\n bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n bp_outname = bp_outname.rsplit('/',1)[-1]\n bp_outname = output_dir+bp_outname\n else:\n bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n\n ##### Now write the bad pixel map\n hdu[0].data = bad_px.astype(int)#np.array(bad_px.mask, dtype=float)\n #Parse the last fileanme\n # bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n\n #Add history keywords\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created bad pixel map by sigma clipping on 
pixel-to-pixel flat{}\".format(flat_outname)\n hdu[0].header['HISTORY'] = \"Bad pixel cutoffs: local sigma = {} and global sigma = {} for clipping\".format(local_sig_bad_pix, global_sig_bad_pix)\n #hdu[0].header['HISTORY'] = \"Bad pixel cutoff of {}sigma\".format(sig_bad_pix)\n hdu[0].header['HISTORY'] = \"A pixel value of 1 indicates a bad pixel\"\n hdu[0].header['HISTORY'] = \"############################\"\n\n if verbose:\n print((\"Writing bad pixel map to {}\".format(bp_outname)))\n #Write the fits file\n hdu.writeto(bp_outname, overwrite=True)\n\n return flat_outname, bp_outname", "def cat_sff_files(list_of_file_handles):\r\n # mimicks lazy_parse_sff_handle on multiple files\r\n # Move to cogent???\r\n if (list_of_file_handles == []):\r\n return [], None\r\n try:\r\n flowgrams_and_headers = map(\r\n lazy_parse_sff_handle,\r\n list_of_file_handles)\r\n except ValueError:\r\n raise FileFormatError('Wrong flogram file format. Make sure you pass the sff.txt format ' +\r\n 'produced by sffinfo. The binary .sff will not work here.')\r\n\r\n flowgram_iterators = [a for a, b in flowgrams_and_headers]\r\n return chain(*flowgram_iterators), flowgrams_and_headers[0][1]", "def create_F1_F2_cols(col_base_list, output='both'):\n F12_cols = []\n for x in col_base_list:\n pref = x[:3]\n if output == 'both':\n if pref =='FM_':\n F12_cols.append('FM_F1_'+ x[3:])\n F12_cols.append('FM_F2_' + x[3:])\n else:\n F12_cols.append('F1_' + x)\n F12_cols.append('F2_' + x)\n elif output =='F1':\n if pref =='FM_':\n F12_cols.append('FM_F1_'+ x[3:])\n else:\n F12_cols.append('F1_' + x)\n elif output =='F2':\n if pref =='FM_':\n F12_cols.append('FM_F2_'+ x[3:])\n else:\n F12_cols.append('F2_' + x)\n return F12_cols", "def uniform_filenames(prefs, dry_run=False):\n from glob import glob1\n for pref in prefs:\n dirname, p = os.path.split(pref)\n filters = '_'.join(p.split('_')[1:])\n print dirname, p, filters\n fake, = glob1(dirname, '*{}*fake'.format(filters))\n match, = glob1(dirname, '*{}*match'.format(filters))\n param, = glob1(dirname, '*{}*param'.format(filters))\n ufake = '_'.join(fake.split('_')[1:]).replace('_gst.fake1',\n '.gst').lower()\n umatch = '_'.join(match.split('_')[1:]).lower()\n uparam = param.replace('.param', '.gst.param').lower()\n for old, new in zip([fake, match, param], [ufake, umatch, uparam]):\n cmd = 'mv {dir}/{old} {dir}/{new}'.format(dir=dirname, old=old,\n new=new)\n logger.info(cmd)\n if not dry_run:\n os.system(cmd)", "def concatenate_detected_verified(fasta_name, PATH_FASTA_DETECTED, PATH_FASTA_VERIFIED, INFO_folder, PATH_FASTA_CONCATENATED):\n\n\tprint \"\\n#################\"\n\tprint \"# Concatetaned file\"\n\tprint \"#################\\n\"\n\n\t# NOTE Dictionaire avec en clef l'id espèce/système et en value une liste\n\t# NOTE [\"l'id espèce/système du verifié qui correspond\", [liste des sequences ATPase, IM ...]]\n\tdict_remove = {}\n\n\tprint \"\\n------------------------------------------\"\n\tprint \"| First read : Creation of the dictionnary\"\n\tprint \"------------------------------------------\\n\"\n\n\tfor fasta_file in fasta_name :\n\t\tverified_fasta=os.path.join(PATH_FASTA_VERIFIED, fasta_file)\n\t\tdetected_fasta=os.path.join(PATH_FASTA_DETECTED, fasta_file)\n\t\tconcatenated_fasta=os.path.join(PATH_FASTA_CONCATENATED, fasta_file)\n\n\t\tlist_seq_verified = list(SeqIO.parse(verified_fasta, \"fasta\"))\n\t\tlist_id_verified = [seq.id for seq in list_seq_verified]\n\t\tlist_seq_verified = [seq.seq for seq in list_seq_verified]\n\n\t\tseq_parser = 
SeqIO.parse(detected_fasta, \"fasta\")\n\t\tnumber_seq = len(list(seq_parser))\n\t\tprogression = 1\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\n\t\t# IDEA Il faut tester au moins une fois pour voir si lors de la concatenation, je ne me retrouve pas avec des systems ou je n'ai pas tous enlevé. Exemple l'ATPase de X n'est pas la même que celle de Y mais l'IMplatform l'ai si c'est le cas X est a enlevé aussi pour son ATPase\n\t\t# IDEA Si idea précédente vrai alors il faut faire des fichiers temporaires des sequences que l'on garde et concatener par \"cat\" à la fin le fichier temporaire et son homonyme en verifié.\n\n\t\t# NOTE Il y avait un problème : le nom/id de l'epèce + système ne doit pas contenir le _NumX_ car ce Num fait référence au nombre de duplicat de la protéine (exemple deux ATPase gspE)\n\t\t# NOTE Quelques systèmes on des sequences qui sont similaire pour toutes les protéines sauf une exemple ESCO3 et NC_011993 qui sont identique pour tous sauf ATPase (98% seulement)\n\n\t\tfor seq in seq_parser :\n\n\t\t\tsys.stdout.write(\"File : {} -> {:.2f}% : {}/{} sequences detected read\\r\".format(fasta_file, progression/float(number_seq)*100, progression,number_seq))\n\t\t\tsys.stdout.flush()\n\t\t\tprogression += 1\n\n\t\t\tid_seq=seq.id.split(\"_\")\n\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"D\")]))\n\n\t\t\tif id_seq in dict_remove :\n\t\t\t\tcontinue\n\n\t\t\telif seq.seq in list_seq_verified :\n\t\t\t\tindex=list_seq_verified.index(seq.seq)\n\n\t\t\t\tid_seq_verif = list_id_verified[index].split(\"_\")\n\t\t\t\tid_seq_verif = re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq_verif[:id_seq_verif.index(\"V\")]))\n\n\t\t\t\t# NOTE dans le dictionnaire je met le système vérifié en premier, toutes les séquences du système identitique en deuxième et la séquence qui en est la cause en troisème\n\t\t\t\tdict_remove[id_seq]=[id_seq_verif,[], seq.id]\n\n\t\tprint\n\t\tprint(\"File : {} -> Done!\".format(fasta_file))\n\n\tprint \"\\n-----------------------------\"\n\tprint \"| Second read : Writing files\"\n\tprint \"-----------------------------\\n\"\n\n\tfor fasta_file in fasta_name :\n\t\tverified_fasta=os.path.join(PATH_FASTA_VERIFIED, fasta_file)\n\t\tdetected_fasta=os.path.join(PATH_FASTA_DETECTED, fasta_file)\n\t\tconcatenated_fasta=os.path.join(PATH_FASTA_CONCATENATED, fasta_file)\n\n\t\tos.system('cat \"{}\" > \"{}\"'.format(verified_fasta, concatenated_fasta))\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\t\tnumber_seq = len(list(seq_parser))\n\t\tprogression = 1\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\n\t\twith open(concatenated_fasta, \"a\") as w_file :\n\t\t\tfor seq in seq_parser :\n\n\t\t\t\tsys.stdout.write(\"File : {} -> {:.2f}% : {}/{} sequences detected read\\r\".format(fasta_file, progression/float(number_seq)*100, progression,number_seq))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tprogression += 1\n\n\t\t\t\tid_seq=seq.id.split(\"_\")\n\t\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"D\")]))\n\n\t\t\t\tif id_seq in dict_remove :\n\t\t\t\t\tdict_remove[id_seq][1].append(seq)\n\n\t\t\t\telse :\n\t\t\t\t\tSeqIO.write(seq, w_file, \"fasta\")\n\t\tprint\n\t\tprint(\"File : {} -> Done!\".format(fasta_file))\n\n\t# NOTE Dict remove complete and all concatenate write\n\twrite_remove_concatenate(dict_remove, INFO_folder)\n\n\treturn", "def flux_calibrate(obj_list, location, default_path, CCD, prefix_string='F_'):\n # Obsolete\n # if CCD.ccd == \"HFOSC\":\n # 
command_file_path = os.path.join(default_path, 'Database/database',\n # 'setst_HFOSC')\n # elif CCD.ccd == \"HFOSC2\":\n # command_file_path = os.path.join(default_path, 'Database/database',\n # 'setst_HFOSC2')\n\n iaoextinct_path = os.path.join(default_path, 'Database/database',\n 'iaoextinct.dat')\n\n if location != '':\n iraf.cd(os.path.join(os.getcwd(), location))\n\n # Check files are wavelength calibrated and separate object files and\n # standard.\n\n # star files\n obj_stars = []\n std_stars = []\n for file_name in obj_list:\n file_name_chk = os.path.join(location, file_name)\n hdul = fits.open(file_name_chk) # HDU_List\n hdr = hdul[0].header # Primary HDU header\n OBJECT = hdr['OBJECT']\n aperture = hdr['APERTUR']\n try:\n # checking weather Wavelength is done\n Wavelength_cal = hdr['WAVELENG']\n if Wavelength_cal == 'done':\n if aperture == '2 1340 l':\n std_stars.append(file_name)\n elif aperture == '8 167 l':\n obj_stars.append(file_name)\n else:\n print(\"Header error for \"+str(file_name)+\" Please check header term aperture\")\n else:\n print(\"File \"+str(file_name)+\" is not wavelength calibrated.\")\n except:\n pass\n print(\"stars :\", obj_stars)\n print(\"standards:\", std_stars)\n\n # Setting Indian Astronomical Observatory, Hanle\n iraf.observatory(command='list', obsid='set', observatory='iao')\n\n star_list = list(set(obj_stars).union(std_stars))\n\n # This became obsolete\n # for file_name in star_list:\n #\n # # Calculating ST and adding in the header\n # print(file_name, command_file_path)\n # iraf.astutil.asthedit(images=file_name, commands=command_file_path,\n # update='yes')\n #\n # # Setting Airmass to all files before flux calibration.\n # # (ST should be there in the header)\n # iraf.noao.imred.specred.setairmass(images=file_name, observa='iao')\n\n for filename in star_list:\n airmass(filename=filename)\n print(\"Airmass correction is done for all stars\")\n # print(\"Press enter to continue\")\n # raw_input()\n message = \"Press enter 'Yes' to continue\"\n choices = ['Yes']\n options(message, choices)\n\n # Running standard task in IRAF\n file_name = std_stars[0]\n\n std = {'feige34': 'onedstds$spec50cal/',\n 'feige66': 'onedstds$spec50cal/',\n 'feige110': 'onedstds$spec50cal/',\n 'bd284211': 'onedstds$spec50cal/',\n 'hz44': 'onedstds$spec50cal/'}\n\n # standard_star_name = raw_input(\"Type standard star name to continue :\")\n message = \"Select standard star and 'Enter' to continue\"\n choices = std.keys()\n standard_star_name = options(message, choices)\n caldir = std[standard_star_name]\n\n # standard_star_name = 'feige34'\n # Need to set an option to change this for different std stars\n # mag = 11.18 # Magnitude of standard star.\n\n standard_data_file = os.path.splitext(file_name)[0]\n iraf.imred.specred.standard(input=file_name, output=standard_data_file,\n extinct=iaoextinct_path, caldir=caldir,\n observa='iao', star_nam=standard_star_name)\n # , mag = float(mag), magband = 'V'\n # fnuzero= ? 
(Absolute flux zero point), teff= ?\n # mag = float(mag)Magnitude Of The Standard Star\n # magband = 'V' Magnitude Band\n\n # Running Sensfunc task in IRAF\n iraf.imred.specred.sensfunc(standard=standard_data_file,\n sensitiv=str(standard_data_file)+'sens',\n extinct=iaoextinct_path, observa='iao')\n # extinct='onedstds$ctioextinct.dat'\n\n # Running calibrate task in IRAF\n for file_name in obj_stars:\n iraf.imred.specred.calibrate(input=file_name,\n output=str(prefix_string)+str(file_name),\n extinct='yes', flux='yes',\n extinction=iaoextinct_path, observa='iao',\n sensiti=str(standard_data_file)+'sens')\n # extinct='onedstds$ctioextinct.dat'", "def CenterSpecific_ACANAF():\n\ttry:\n\t\tinVCF = sys.argv[1]\n\t\tCenterName = sys.argv[2]\n\texcept:\n\t\tprint CenterSpecific_ACANAF.__doc__\n\t\tsys.exit(-1)\n\tfin = return_file_handle(inVCF)\n\tfor line in fin:\n\t\tline = line.strip().split('\\t')\n\t\tif re.search(r'^(\\d+|X|Y)|^chr(\\d+|X|Y)', line[0]):\n\t\t\tflag_AF = 0\n\t\t\tflag_AC = 0\n\t\t\tflag_AN = 0\n\t\t\tcommon_AF_flag = 'N'\n\t\t\tgt_data = []\n\t\t\tsamples = len(line[9:])\n\t\t\tfor val in line[7].split(';'):\n\t\t\t\tif val.split('=')[0] == 'AF':\n\t\t\t\t\tcname_AF = CenterName + '_AF=' + val.split('=')[1] + ';'\n\t\t\t\t\tflag_AF = 1\n\t\t\t\t\tif ',' in val.split('=')[1] :\n\t\t\t\t\t\tif min(val.split('=')[1].split(',')) > 0.1 :\n\t\t\t\t\t\t\tcommon_AF_flag = 'Y'\n\t\t\t\t\telif float(val.split('=')[1]) > 0.1 :\n\t\t\t\t\t\tcommon_AF_flag = 'Y'\n\t\t\t\t\tcname_common_flag = CenterName + '_common_AF=' + common_AF_flag + ';'\n\t\t\t\telif val.split('=')[0] == 'AC':\n\t\t\t\t\tcname_AC = CenterName + '_AC=' + val.split('=')[1] + ';'\n\t\t\t\t\tflag_AC = 1\n\t\t\t\telif val.split('=')[0] == 'AN':\n\t\t\t\t\tan = int(val.split('=')[1])/float(2)\n\t\t\t\t\tcr = an / float(samples)\n\t\t\t\t\tcname_AN = CenterName + '_AN=' + val.split('=')[1] + ';'\n\t\t\t\t\tflag_AN = 1\n\t\t\t\t\tcname_CR = CenterName + '_CR=' + str(round(float(cr),5)) + ';'\n\t\t\tif flag_AF == 1 and flag_AC == 1 and flag_AN == 1 :\n\t\t\t\tline[7] = cname_AC + cname_AF + cname_AN + cname_CR + cname_common_flag + line[7]\n\t\t\t\tprint '\\t'.join(line)\n\t\t\telse:\n\t\t\t\tprint '\\t'.join(line)\n\n\n\t\t\t\tfor gt_dat in line[9:]:\n\t\t\t\t\tgt_data.append(gt_dat.split(':')[0])\n\t\t\t\tif all_same(gt_data):\n\t\t\t\t\tline[7] = line[7] + CenterName + '_AC=0;' + CenterName + '_AN=0;' + CenterName + '_AF=0'\n\t\t\t\t\tprint '\\t'.join(line)\n\t\t\t\telse:\n\t\t\t\t\tprint \"Missing AC, AN or AF values for the variant\",'\\t'.join(line[0:6])\n\t\t\t\t\tsys.exit(-1)\n\t\telif re.search(r'^#CHROM',line[0]):\n\t\t\tprint '##INFO=<ID=' + CenterName + '_AC,Number=A,Type=Integer,Description=\"Center specific Allele count in genotypes, for each ALT allele, in the same order as listed\">'\n\t\t\tprint '##INFO=<ID=' + CenterName +'_AF,Number=A,Type=Float,Description=\"Center specific Allele Frequency, for each ALT allele, in the same order as listed\">'\n\t\t\tprint '##INFO=<ID=' + CenterName + '_AN,Number=1,Type=Integer,Description=\"Center specific Total number of alleles in called genotypes\">'\n\t\t\tprint '##INFO=<ID=' + CenterName + '_CR,Number=1,Type=Float,Description=\"Center specific call rate\">'\n\t\t\tprint '\\t'.join(line)\n\t\telse:\n\t\t\tprint '\\t'.join(line)\n\tfin.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test list secrets when not connected to any cluster.
def test_secrets_list_server_not_reachable(): message = "REANA client is not connected to any REANA cluster." reana_token = "000000" runner = CliRunner() result = runner.invoke(cli, ["secrets-list", "-t", reana_token]) assert result.exit_code == 1 assert message in result.output
[ "def test_list_secrets(self):\n pass", "def test_list_secrets(self):\n self.fail(\"test not implemented\")", "async def list_secrets(self):\n pass", "def test_read_namespaced_secret_list_secrets(self):\n pass", "def test_secrets_list_server_no_token():\n message = \"Please provide your access token\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n runner = CliRunner(env=env)\n result = runner.invoke(cli, [\"secrets-list\"])\n assert result.exit_code == 1\n assert message in result.output", "def test_check_secrets(self):\n secrets.check_secrets([], argparse.Namespace())", "def secrets(self): # pylint: disable=no-self-use\n return []", "def ListSecrets(self):\r\n return self._secrets.keys()", "def test_service_envelope_encryption_for_secrets(self):\n print(\"AWS - EKS - Service - Secret Encryption\")\n global CLUSTER_DESCRIPTION # pylint: disable=global-statement\n envelope_encryption_config_found = False\n for item in CLUSTER_DESCRIPTION[\"cluster\"][\"encryptionConfig\"][0][\"resources\"]:\n if item == \"secrets\":\n envelope_encryption_config_found = True\n self.assertTrue(envelope_encryption_config_found)", "def secrets(self):\n return k8s_object.ListAsDictionaryWrapper(\n self._volumes,\n self._volume_class,\n value_field='secret',\n filter_func=lambda volume: volume.secret is not None)", "def _all_secrets(cls, *, secretsmanager_client):\n return secretsmanager_client.list_secrets()['SecretList']", "def test_get_secret(self):\n pass", "def test_get_secrets() -> None:\n print(\"Environment variables:\")\n for env_variable, value in os.environ.items():\n print(\"{}: {}\".format(env_variable, value))\n secrets_handler = SecretsHandling(project_root=fixed_paths.repository_root_directory())\n secrets = secrets_handler.get_secrets_from_environment_or_file(SECRETS_IN_ENVIRONMENT)\n for name in SECRETS_IN_ENVIRONMENT:\n assert name in secrets, \"No value found for {}\".format(name)\n assert secrets[name] is not None, \"Value for {} is empty\".format(name)\n # Variable names should automatically be converted to uppercase when using get_secret:\n assert secrets_handler.get_secret_from_environment(name=name.lower()) is not None\n no_such_variable = \"no_such_variable\"\n with pytest.raises(ValueError):\n secrets_handler.get_secret_from_environment(name=no_such_variable)\n assert secrets_handler.get_secret_from_environment(name=no_such_variable, allow_missing=True) is None", "def secrets(self):\n return k8s_object.ListAsDictionaryWrapper(\n self._m,\n self._item_class,\n key_field=self._key_field,\n value_field=self._value_field,\n filter_func=lambda mount: mount.name in self._volumes.secrets)", "def test_secret_exists(self):\n pass", "def secrets():\n click.echo(STEP_PATH / \"secrets\")", "def test_nonexisting_secret(self):\n assert docker.secret(\"NONEXISTING\", self.secrets_path) is None", "def contains_secrets(self) -> bool:\n return pulumi.get(self, \"contains_secrets\")", "def secrets(namespace=\"default\", **kwargs):\n cfg = _setup_conn(**kwargs)\n try:\n api_instance = kubernetes.client.CoreV1Api()\n api_response = api_instance.list_namespaced_secret(namespace)\n\n return [\n secret[\"metadata\"][\"name\"] for secret in api_response.to_dict().get(\"items\")\n ]\n except (ApiException, HTTPError) as exc:\n if isinstance(exc, ApiException) and exc.status == 404:\n return None\n else:\n log.exception(\"Exception when calling CoreV1Api->list_namespaced_secret\")\n raise CommandExecutionError(exc)\n finally:\n _cleanup(**cfg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test list secrets when access token is not set.
def test_secrets_list_server_no_token(): message = "Please provide your access token" env = {"REANA_SERVER_URL": "localhost"} runner = CliRunner(env=env) result = runner.invoke(cli, ["secrets-list"]) assert result.exit_code == 1 assert message in result.output
[ "def test_list_secrets(self):\n pass", "def test_list_secrets(self):\n self.fail(\"test not implemented\")", "def test_read_namespaced_secret_list_secrets(self):\n pass", "def test_check_secrets(self):\n secrets.check_secrets([], argparse.Namespace())", "def test_secrets_list_server_not_reachable():\n message = \"REANA client is not connected to any REANA cluster.\"\n reana_token = \"000000\"\n runner = CliRunner()\n result = runner.invoke(cli, [\"secrets-list\", \"-t\", reana_token])\n assert result.exit_code == 1\n assert message in result.output", "async def list_secrets(self):\n pass", "def test_get_secrets() -> None:\n print(\"Environment variables:\")\n for env_variable, value in os.environ.items():\n print(\"{}: {}\".format(env_variable, value))\n secrets_handler = SecretsHandling(project_root=fixed_paths.repository_root_directory())\n secrets = secrets_handler.get_secrets_from_environment_or_file(SECRETS_IN_ENVIRONMENT)\n for name in SECRETS_IN_ENVIRONMENT:\n assert name in secrets, \"No value found for {}\".format(name)\n assert secrets[name] is not None, \"Value for {} is empty\".format(name)\n # Variable names should automatically be converted to uppercase when using get_secret:\n assert secrets_handler.get_secret_from_environment(name=name.lower()) is not None\n no_such_variable = \"no_such_variable\"\n with pytest.raises(ValueError):\n secrets_handler.get_secret_from_environment(name=no_such_variable)\n assert secrets_handler.get_secret_from_environment(name=no_such_variable, allow_missing=True) is None", "def test_nonexisting_secret(self):\n assert docker.secret(\"NONEXISTING\", self.secrets_path) is None", "def test_get_secret(self):\n pass", "def test_secret_exists(self):\n pass", "def test_list_o_auth_access_token(self):\n pass", "def _all_secrets(cls, *, secretsmanager_client):\n return secretsmanager_client.list_secrets()['SecretList']", "def test_secret(service_client, arn, token):\n # This is where the secret should be tested against the service\n current = service_client.get_secret_value(SecretId=arn, VersionStage=\"AWSPENDING\")\n if len(current['SecretBinary']) == 16:\n logger.info(\"testSecret: Tested succesfully with version %s for secret %s.\" % (token, arn))\n else:\n raise ValueError(\"Rotation test failed\")", "def secrets(self): # pylint: disable=no-self-use\n return []", "def _secret_not_in_order():\n pecan.abort(400, u._(\"Secret metadata expected but not received.\"))", "def test_secrets() -> Secrets:\n from dotenv import load_dotenv\n from os import getenv\n from pathlib import Path\n env_path = Path('.') / '.env.testing'\n load_dotenv(dotenv_path=env_path)\n return Secrets(\n google_id_token=getenv(\"GOOGLE_ID_TOKEN\"),\n google_user_id=getenv(\"GOOGLE_USER_ID\")\n )", "def test_token_auth_does_not_use_falsey_values(self):\n bad_tokens = [None, \"\"]\n req = requests.Request(\"GET\", \"https://api.github.com/\")\n for token in bad_tokens:\n s = self.build_session()\n s.token_auth(token)\n pr = s.prepare_request(req)\n assert \"Authorization\" not in pr.headers", "def test_get_invalid_secret(self):\n response = self.client.get(\n reverse(\n 'projectroles:api_remote_get', kwargs={'secret': build_secret()}\n )\n )\n self.assertEqual(response.status_code, 401)", "def test_get_invalid_secret(self):\n\n response = self.client.get(\n reverse(\n 'projectroles:api_remote_get', kwargs={'secret': build_secret()}\n )\n )\n\n self.assertEqual(response.status_code, 401)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test adding secrets with wrong format.
def test_secrets_add_wrong_format(secret): reana_token = "000000" env = {"REANA_SERVER_URL": "localhost"} runner = CliRunner(env=env) message = 'For literal strings use "SECRET_NAME=VALUE" format' result = runner.invoke(cli, ["secrets-add", "-t", reana_token, "--env", secret]) assert result.exit_code == 1 assert message in result.output
[ "def test_add_secret_key_value(self):\n pass", "def test_check_secrets(self):\n secrets.check_secrets([], argparse.Namespace())", "def test_list_secrets(self):\n pass", "def test_create_secret(self):\n pass", "def test_list_secrets(self):\n self.fail(\"test not implemented\")", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def test_secret_exists(self):\n pass", "def test_invalid_secrets(self):\n s = SecretsChecker(stage='dev')\n # Override the email field obtained from terraform\n s.email = ['nonsense']\n with self.assertRaises(ValueError):\n s.run()", "def test_update_secret_key_value(self):\n pass", "def test_get_secret_5(self):\n\n # notice space between SECRET and immediately following\n # curly bracket.\n self.assertIsNone(\n get_secret(\"SECRET { ...}\")\n )\n\n # typo in keyword SECRET\n self.assertIsNone(\n get_secret(\"SECRIT { ...}\")\n )\n\n # missing closing bracket\n self.assertIsNone(\n get_secret(\"SECRET { ...\")\n )\n\n # curly brackets missing\n self.assertIsNone(\n get_secret(\"SECRET ...\")\n )", "def test_get_secret(self):\n pass", "def test_get_secrets() -> None:\n print(\"Environment variables:\")\n for env_variable, value in os.environ.items():\n print(\"{}: {}\".format(env_variable, value))\n secrets_handler = SecretsHandling(project_root=fixed_paths.repository_root_directory())\n secrets = secrets_handler.get_secrets_from_environment_or_file(SECRETS_IN_ENVIRONMENT)\n for name in SECRETS_IN_ENVIRONMENT:\n assert name in secrets, \"No value found for {}\".format(name)\n assert secrets[name] is not None, \"Value for {} is empty\".format(name)\n # Variable names should automatically be converted to uppercase when using get_secret:\n assert secrets_handler.get_secret_from_environment(name=name.lower()) is not None\n no_such_variable = \"no_such_variable\"\n with pytest.raises(ValueError):\n secrets_handler.get_secret_from_environment(name=no_such_variable)\n assert secrets_handler.get_secret_from_environment(name=no_such_variable, allow_missing=True) is None", "def test_create_failure_secret_key_no_hex(self):\n\n url = reverse('api_key_secret')\n\n data = {\n 'api_key_id': self.test_api_key_obj.id,\n 'secret_id': self.test_secret_obj.id,\n 'secret_key': 'a123X',\n 'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',\n 'title': 'a123',\n 'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',\n }\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_read_namespaced_secret_list_secrets(self):\n pass", "def test_secret_put_put_get_success(secret):\n secret.put(SECRET_STRING)\n secret.put('stuff')\n secret_value = secret.get()\n assert secret_value['SecretString'] == 'stuff'", "def test_secret(service_client, arn, token):\n # This is where the secret should be tested against the service\n current = service_client.get_secret_value(SecretId=arn, VersionStage=\"AWSPENDING\")\n if len(current['SecretBinary']) == 16:\n logger.info(\"testSecret: Tested succesfully with version %s for secret %s.\" % (token, arn))\n else:\n raise ValueError(\"Rotation test failed\")", "def test_secret_exists_success(secret):\n secret.put(SECRET_STRING)\n assert secret.exists()", "def test_nonexisting_secret(self):\n assert 
docker.secret(\"NONEXISTING\", self.secrets_path) is None", "def test_create_failure_secret_not_exist(self):\n\n url = reverse('api_key_secret')\n\n data = {\n 'api_key_id': self.test_api_key_obj.id,\n 'secret_id': 'b9362b80-224e-4c9a-8ccd-85a84b1e3739',\n 'secret_key': 'a123',\n 'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',\n 'title': 'a123',\n 'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',\n }\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test adding secrets when they already exist.
def test_secrets_add_already_exist(): status_code = 409 reana_token = "000000" env = {"REANA_SERVER_URL": "localhost"} message = "One of the secrets already exists. No secrets were added." mock_http_response = Mock( status_code=status_code, reason="Conflict", json=Mock(return_value={"message": "Conflict"}), ) rs_api_client_mock = Mock() rs_api_client_mock.api.add_secrets = Mock(side_effect=HTTPError(mock_http_response)) runner = CliRunner(env=env) with runner.isolation(): with patch("reana_client.api.client.current_rs_api_client", rs_api_client_mock): result = runner.invoke( cli, ["secrets-add", "-t", reana_token, "--env", "USER=reanauser"] ) assert message in result.output assert result.exit_code == 1
[ "def test_secret_exists(self):\n pass", "def test_secret_exists_success(secret):\n secret.put(SECRET_STRING)\n assert secret.exists()", "def test_add_secret_key_value(self):\n pass", "def test_nonexisting_secret(self):\n assert docker.secret(\"NONEXISTING\", self.secrets_path) is None", "def test_check_secrets(self):\n secrets.check_secrets([], argparse.Namespace())", "def test_list_secrets(self):\n pass", "def test_create_secret(self):\n pass", "def test_get_secrets() -> None:\n print(\"Environment variables:\")\n for env_variable, value in os.environ.items():\n print(\"{}: {}\".format(env_variable, value))\n secrets_handler = SecretsHandling(project_root=fixed_paths.repository_root_directory())\n secrets = secrets_handler.get_secrets_from_environment_or_file(SECRETS_IN_ENVIRONMENT)\n for name in SECRETS_IN_ENVIRONMENT:\n assert name in secrets, \"No value found for {}\".format(name)\n assert secrets[name] is not None, \"Value for {} is empty\".format(name)\n # Variable names should automatically be converted to uppercase when using get_secret:\n assert secrets_handler.get_secret_from_environment(name=name.lower()) is not None\n no_such_variable = \"no_such_variable\"\n with pytest.raises(ValueError):\n secrets_handler.get_secret_from_environment(name=no_such_variable)\n assert secrets_handler.get_secret_from_environment(name=no_such_variable, allow_missing=True) is None", "def test_list_secrets(self):\n self.fail(\"test not implemented\")", "def test_create_failure_secret_not_exist(self):\n\n url = reverse('api_key_secret')\n\n data = {\n 'api_key_id': self.test_api_key_obj.id,\n 'secret_id': 'b9362b80-224e-4c9a-8ccd-85a84b1e3739',\n 'secret_key': 'a123',\n 'secret_key_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',\n 'title': 'a123',\n 'title_nonce': 'B52032040066AE04BECBBB03286469223731B0E8A2298F26DC5F01222E63D0F5',\n }\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_update_secret_key_value(self):\n pass", "def test_secrets_add_wrong_format(secret):\n reana_token = \"000000\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n runner = CliRunner(env=env)\n message = 'For literal strings use \"SECRET_NAME=VALUE\" format'\n\n result = runner.invoke(cli, [\"secrets-add\", \"-t\", reana_token, \"--env\", secret])\n assert result.exit_code == 1\n assert message in result.output", "def test_create_container_w_duplicate_secret_refs(self):\n\n secret_resp = self.secret_behaviors.create_secret_from_config()\n secret_refs = [SecretRef(name='1', ref=secret_resp.ref),\n SecretRef(name='2', ref=secret_resp.ref)]\n\n container_resp = self.behaviors.create_container(\n 'name', 'generic', secret_refs)\n\n self.assertEqual(container_resp.status_code, 400)", "def test_secret_put_put_get_success(secret):\n secret.put(SECRET_STRING)\n secret.put('stuff')\n secret_value = secret.get()\n assert secret_value['SecretString'] == 'stuff'", "def test_get_secret(self):\n pass", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def test_create_seed_secrets(self):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n name = 'job-type-post-test-secret'\n manifest['job']['name'] = name\n manifest['job']['interface']['settings'] = [\n {\n 'name': 'VERSION',\n 
'secret': True\n },\n {\n 'name': 'DB_HOST',\n 'secret': True\n },\n {\n 'name': 'DB_PASS',\n 'secret': True\n }\n ]\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': False,\n 'max_scheduled': 1,\n 'docker_image': 'my-job-1.0.0-seed:1.0.0',\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n with patch.object(SecretsHandler, '__init__', return_value=None), \\\n patch.object(SecretsHandler, 'set_job_type_secrets', return_value=None) as mock_set_secret:\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n job_type = JobType.objects.filter(name=name).first()\n\n results = json.loads(response.content)\n self.assertEqual(results['id'], job_type.id)\n\n # Secrets sent to Vault\n secrets_name = '-'.join([results['name'], results['version']]).replace('.', '_')\n secrets = json_data['configuration']['settings']\n mock_set_secret.assert_called_once_with(secrets_name, secrets)\n\n #Secrets scrubbed from configuration on return\n self.assertEqual(results['configuration']['settings'], {})", "def test_invalid_secrets(self):\n s = SecretsChecker(stage='dev')\n # Override the email field obtained from terraform\n s.email = ['nonsense']\n with self.assertRaises(ValueError):\n s.run()", "def test_get_secret_5(self):\n\n # notice space between SECRET and immediately following\n # curly bracket.\n self.assertIsNone(\n get_secret(\"SECRET { ...}\")\n )\n\n # typo in keyword SECRET\n self.assertIsNone(\n get_secret(\"SECRIT { ...}\")\n )\n\n # missing closing bracket\n self.assertIsNone(\n get_secret(\"SECRET { ...\")\n )\n\n # curly brackets missing\n self.assertIsNone(\n get_secret(\"SECRET ...\")\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Optimized version of the generic paginate_query_across_partitioned_databases for case schedules. queue_schedule_instances uses a lock to ensure that the same case_id cannot be queued within one hour of another instance. The celery tasks handle_case_alert_schedule_instance and handle_case_timed_schedule_instance both use locks to ensure only one task is operating on a case at one time. Each task also checks if the schedule is still valid on this case before processing it further. Assumes that q_expression includes active = True.
def _paginate_query_across_partitioned_databases(model_class, q_expression, load_source): from corehq.messaging.scheduling.scheduling_partitioned.models import ( CaseAlertScheduleInstance, CaseTimedScheduleInstance, ) if model_class not in (CaseAlertScheduleInstance, CaseTimedScheduleInstance): raise TypeError("Expected CaseAlertScheduleInstance or CaseTimedScheduleInstance") db_names = get_db_aliases_for_partitioned_query() for db_name in db_names: for row in _paginate_query(db_name, model_class, q_expression, load_source): yield row
[ "def schedule_fetch():\n for instance_group_manager in models.InstanceGroupManager.query():\n if instance_group_manager.url:\n if not utils.enqueue_task(\n '/internal/queues/fetch-instances',\n 'fetch-instances',\n params={\n 'key': instance_group_manager.key.urlsafe(),\n },\n ):\n logging.warning(\n 'Failed to enqueue task for InstanceGroupManager: %s',\n instance_group_manager.key,\n )", "def schedule_metadata_tasks():\n # Some metadata tasks will abort if higher precedence tasks are in\n # progress. Avoid scheduling these tasks. The priority here is to\n # get the result of an in-progress metadata operation if one exists.\n for instance in models.Instance.query():\n queue = None\n if instance.active_metadata_update:\n if instance.active_metadata_update.url:\n # Enqueue task to check the in-progress metadata operation.\n queue = 'check-instance-metadata-operation'\n else:\n # Enqueue task to start a metadata operation.\n queue = 'update-instance-metadata'\n elif instance.pending_metadata_updates:\n # Enqueue task to compress a list of desired metadata updates.\n queue = 'compress-instance-metadata-updates'\n if queue:\n utilities.enqueue_task(queue, instance.key)", "def acquire_schedules(self, scheduler_id: str, limit: int) -> List[Schedule]:", "def paginate_query_across_partitioned_databases(model_class, q_expression, annotate=None, query_size=5000,\n values=None, load_source=None):\n db_names = get_db_aliases_for_partitioned_query()\n for db_name in db_names:\n for row in paginate_query(db_name, model_class, q_expression, annotate, query_size, values, load_source):\n yield row", "async def acquire_schedules(self, scheduler_id: str, limit: int) -> list[Schedule]:", "async def acquire_schedules(self, scheduler_id: str, limit: int) -> List[Schedule]:", "def apply_object_jobs(cls, object):\n methods = get_active_discovery_methods()\n # Get current schedules\n current = {} # name -> (interval, failed interval)\n for d in cls._get_collection().find({\n \"key\": object.id,\n \"jcls\": {\n \"$in\": methods\n }\n }, {\"jcls\": 1, \"schedule\": 1}):\n current[d[\"jcls\"]] = (d[\"schedule\"][\"interval\"],\n d[\"schedule\"].get(\"failed_interval\"))\n # Get effective schedules\n bulk = cls._get_collection().initialize_unordered_bulk_op()\n n = 0\n p = object.object_profile\n now = datetime.datetime.now()\n for m in methods:\n if not getattr(p, \"enable_%s\" % m):\n continue\n interval = (\n getattr(p, \"%s_max_interval\" % m),\n getattr(p, \"%s_min_interval\" % m)\n )\n if m in current:\n if current[m] != interval:\n # Change schedule\n logger.debug(\"[%s] changing %s interval %s -> %s\",\n object.name, m, current[m], interval)\n bulk.find({\"key\": object.id, \"jcls\": m}).update({\n \"$set\": {\n \"schedule.interval\": interval[0],\n \"schedule.failed_interval\": interval[1]\n }\n })\n n += 1\n else:\n # Create schedule\n logger.debug(\"[%s] creating schedule for %s\",\n object.name, m)\n bulk.insert({\n \"jcls\": m,\n \"key\": object.id,\n \"s\": \"W\",\n \"data\": None,\n \"ts\": now,\n \"schedule\": {\n \"interval\": interval[0],\n \"failed_interval\": interval[1],\n \"offset\": random.random()\n }\n })\n n += 1\n # Delete stale schedules\n stale = set(current) - set(methods)\n if stale:\n logger.debug(\"[%s] deleting stale schedules: %s\",\n object.name, \", \".join(stale))\n bulk.find({\n \"key\": object.id,\n \"$jcls\": {\n \"$in\": list(stale)\n }\n }).remove()\n n += 1\n if n:\n logger.debug(\"Bulk update schedule\")\n bulk.execute({\"w\": 0})", "def get_schedules(self):\n target_date 
= self.target_datetime.date()\n course_duration = get_expected_duration(self.course_id)\n schedules = Schedule.objects.select_related('enrollment').filter(\n self.experience_filter,\n enrollment__is_active=True,\n enrollment__course_id=self.course_id,\n enrollment__user__is_active=True,\n start_date__gte=target_date - course_duration,\n start_date__lt=target_date,\n )\n\n template_context = get_base_template_context(self.site)\n for schedule in schedules:\n course = schedule.enrollment.course\n # We don't want to show any updates if the course has ended so we short circuit here.\n if course.end and course.end.date() <= target_date:\n return\n\n # Next Section Updates are only for Self-paced courses since it uses Personalized\n # Learner Schedule logic. See CourseUpdateResolver for Instructor-paced updates\n if not course.self_paced:\n continue\n\n user = schedule.enrollment.user\n start_date = max(filter(None, (schedule.start_date, course.start)))\n LOG.info('Received a schedule for user {} in course {} for date {}'.format(\n user.username, self.course_id, target_date,\n ))\n\n try:\n week_highlights, week_num = get_next_section_highlights(user, course.id, start_date, target_date)\n # (None, None) is returned when there is no section with a due date of the target_date\n if week_highlights is None:\n continue\n except CourseUpdateDoesNotExist as e:\n log_message = self.log_prefix + ': ' + str(e)\n LOG.warning(log_message)\n # continue to the next schedule, don't yield an email for this one\n continue\n unsubscribe_url = None\n if (COURSE_UPDATE_SHOW_UNSUBSCRIBE_WAFFLE_SWITCH.is_enabled() and\n 'bulk_email_optout' in settings.ACE_ENABLED_POLICIES):\n unsubscribe_url = reverse('bulk_email_opt_out', kwargs={\n 'token': UsernameCipher.encrypt(user.username),\n 'course_id': str(course.id),\n })\n\n template_context.update({\n 'course_name': course.display_name,\n 'course_url': _get_trackable_course_home_url(course.id),\n 'week_num': week_num,\n 'week_highlights': week_highlights,\n # This is used by the bulk email optout policy\n 'course_ids': [str(course.id)],\n 'unsubscribe_url': unsubscribe_url,\n })\n template_context.update(_get_upsell_information_for_schedule(user, schedule))\n\n yield (user, course.closest_released_language, template_context)", "def schedule_pending_deletion():\n for instance in models.Instance.query():\n if instance.pending_deletion:\n if not utils.enqueue_task(\n '/internal/queues/delete-instance-pending-deletion',\n 'delete-instance-pending-deletion',\n params={\n 'key': instance.key.urlsafe(),\n },\n ):\n logging.warning('Failed to enqueue task for Instance: %s', instance.key)", "def _enqueue_refresh_tasks(db: Session) -> None:\n logger.debug(\"Enqueuing image vulnerabilities refresh tasks\")\n all_images = db.query(Image.user_id, Image.id, Image.digest).all()\n\n if not all_images:\n logger.debug(\"No images in the system to refresh\")\n return\n\n queue_messages = GrypeDBFeed._create_refresh_tasks(\n all_images, MESSAGE_BATCH_SIZE\n )\n q_client = internal_client_for(SimpleQueueClient, None)\n errors = []\n for task in queue_messages:\n try:\n q_client.enqueue(name=IMAGE_VULNERABILITIES_QUEUE, inobj=task.to_json())\n except Exception as err:\n errors.append((task.to_json_str(), err))\n\n if len(errors) > 0:\n logger.error(\n f\"Failed to create/enqueue %d/%d refresh tasks.\",\n len(errors),\n len(queue_messages),\n )\n raise RefreshTaskCreationError(errors)\n\n logger.debug(\n \"Queued %d task(s) for %d image(s)\", len(queue_messages), len(all_images)\n )", "def 
reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in ['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)", "def queue_instances(instances):\n for instance_id in instances:\n node = _get_node(instance_id=instance_id)\n __utils__[\"cloud.cache_node\"](node, _get_active_provider_name(), __opts__)", "def get_queue(queue_limits):\n\n queues, limits = queue_limits.items()\n queues.pop('')\n\n while(True): \n \n queued_jobs = qstat_plain()\n jobs = {queue : [j for j in queued_jobs if j.queue == queue] for queue in queues} \n jobs[''] = [j for j in queued_jobs if j.queue not in queues]\n\n for queue in queues:\n if len(jobs[queue]) < queue_limits[queue]:\n yield queue\n else:\n time.sleep(30)", "def instance_backup_schedule_update(self, context, instance_uuid,\n schedule):\n metadata = self._instance_metadata(context, instance_uuid)\n schedule_key = meta.BACKUP_SCHEDULE_KEY\n active_key = meta.BACKUP_ACTIVE_KEY\n if schedule and len(schedule) > 0:\n # Sort items by frequency\n sorted_schedule = sorted(schedule,\n key=lambda item: item[meta.SCHEDULE_FREQUENCY_KEY])\n metadata[schedule_key] = jsonutils.dumps(sorted_schedule)\n metadata[active_key] = True # This lingers forever, on purpose.\n self._instance_metadata_update(context, instance_uuid, metadata)\n return sorted_schedule\n else:\n metadata[schedule_key] = jsonutils.dumps([])\n self._instance_metadata_update(context, instance_uuid, metadata)\n return []", "def 
rebalance_in_with_queries(self):\n if not self.atomicity:\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n\n num_views = self.input.param(\"num_views\", 5)\n is_dev_ddoc = self.input.param(\"is_dev_ddoc\", True)\n reproducer = self.input.param(\"reproducer\", False)\n num_tries = self.input.param(\"num_tries\", 10)\n iterations_to_try = (1, num_tries)[reproducer]\n ddoc_name = \"ddoc1\"\n prefix = (\"\", \"dev_\")[is_dev_ddoc]\n\n query = dict()\n query[\"connectionTimeout\"] = 60000\n query[\"full_set\"] = \"true\"\n\n views = list()\n tasks = list()\n\n if self.test_abort_snapshot:\n self.log.info(\"Creating sync_write abort scenarios for all vbs\")\n for server in self.cluster_util.get_kv_nodes(self.cluster):\n ssh_shell = RemoteMachineShellConnection(server)\n cbstats = Cbstats(server)\n replica_vbs = cbstats.vbucket_list(\n self.cluster.buckets[0].name,\n \"replica\")\n load_gen = doc_generator(self.key, 0, 5000,\n target_vbucket=replica_vbs)\n success = self.bucket_util.load_durable_aborts(\n ssh_shell, [load_gen], self.cluster,\n self.cluster.buckets[0], self.durability_level,\n DocLoading.Bucket.DocOps.UPDATE, \"all_aborts\")\n if not success:\n self.log_failure(\"Simulating aborts failed\")\n ssh_shell.disconnect()\n\n self.validate_test_failure()\n\n for bucket in self.cluster.buckets:\n temp = self.bucket_util.make_default_views(self.default_view,\n self.default_view_name,\n num_views, is_dev_ddoc,\n different_map=reproducer)\n temp_tasks = self.bucket_util.async_create_views(\n self.cluster.master, ddoc_name, temp, bucket)\n views += temp\n tasks += temp_tasks\n\n timeout = None\n if self.active_resident_threshold == 0:\n timeout = max(self.wait_timeout * 4, len(self.cluster.buckets) * self.wait_timeout * self.num_items / 50000)\n\n for task in tasks:\n self.task.jython_task_manager.get_task_result(task)\n\n for bucket in self.cluster.buckets:\n for view in views:\n # run queries to create indexes\n self.bucket_util.query_view(\n self.cluster.master, prefix + ddoc_name, view.name, query,\n bucket=bucket.name)\n\n active_tasks = self.cluster_util.async_monitor_active_task(\n self.cluster.servers[:self.nodes_init], \"indexer\",\n \"_design/\" + prefix + ddoc_name, wait_task=False)\n for active_task in active_tasks:\n self.task.jython_task_manager.get_task_result(active_task)\n self.assertTrue(active_task.result)\n\n expected_rows = self.num_items\n if self.max_verify:\n expected_rows = self.max_verify\n query[\"limit\"] = expected_rows\n query[\"stale\"] = \"false\"\n\n for bucket in self.cluster.buckets:\n result = self.bucket_util.perform_verify_queries(\n self.cluster.master, num_views, prefix, ddoc_name,\n self.default_view_name, query, expected_rows,\n bucket=bucket, wait_time=timeout)\n self.assertTrue(result, \"Failure in view query\")\n\n for i in xrange(iterations_to_try):\n servs_in = self.cluster.servers[self.nodes_init:self.nodes_init + self.nodes_in]\n rebalance = self.task.async_rebalance(self.cluster, servs_in, [])\n self.sleep(self.wait_timeout / 5)\n\n # See that the result of view queries are same as\n # the expected during the test\n for bucket in self.cluster.buckets:\n result = self.bucket_util.perform_verify_queries(\n self.cluster.master, num_views, prefix, ddoc_name,\n self.default_view_name, query, expected_rows,\n bucket=bucket, wait_time=timeout)\n self.assertTrue(result, \"Failure in view query\")\n\n self.task.jython_task_manager.get_task_result(rebalance)\n self.assertTrue(rebalance.result, \"Rebalance 
Failed\")\n self.sleep(60)\n # verify view queries results after rebalancing\n for bucket in self.cluster.buckets:\n result = self.bucket_util.perform_verify_queries(\n self.cluster.master, num_views, prefix, ddoc_name,\n self.default_view_name, query, expected_rows,\n bucket=bucket, wait_time=timeout)\n self.assertTrue(result, \"Failure in view query\")\n\n if not self.atomicity:\n self.bucket_util.verify_cluster_stats(\n self.cluster, self.num_items, timeout=self.wait_timeout)\n\n if reproducer:\n rebalance = self.task.async_rebalance(self.cluster, [],\n servs_in)\n self.task.jython_task_manager.get_task_result(rebalance)\n self.assertTrue(rebalance.result, \"Rebalance Failed\")\n self.cluster.nodes_in_cluster = list(set(self.cluster.nodes_in_cluster) - set(servs_in))\n self.sleep(self.wait_timeout)\n self.bucket_util.verify_unacked_bytes_all_buckets(self.cluster)", "def queue_schedules(cls, *schedules):\n return cls._action_on_schedules(\"Queue up selected Schedules to run now\", schedules)", "def ActiveSchedulesQuery():\n\n query = db.Query(ActivitySchedule)\n utils.AddFilter(query, 'deleted =', 0)\n\n return query", "def execute(self):\n\n execute_date = dt.datetime.utcnow()\n if self.keep_alive_duration is not None:\n execute_until = execute_date + freq_to_timedelta(self.keep_alive_duration)\n logger.debug(('Job will continue executing until %s as it has a keep'\n 'alive duration of %s'), execute_until, self.keep_alive_duration)\n else:\n execute_until = execute_date\n\n EngineLogging.finish_setup_log()\n\n # process continuously until execute_until date is reached\n # after time is up, job will be end. An external scheduler will create\n # a new one to replace it.\n # catlog code changes are recognised during execution\n\n execution_counter = 0\n constants = {}\n while execute_date <= execute_until:\n\n tenant_id = self.get_payload_param('tenant_id', None)\n\n EngineLogging.start_run_log(tenant_id, self.get_payload_name())\n logger.debug(('Starting execution number: %s with execution date: %s'), execution_counter, execute_date)\n\n # evalute all candidate schedules that were indentified when\n # the job controller was initialized.\n # The resulting dictionary contains a dictionary of status items\n # about each schedule\n try:\n schedule_metadata = self.evaluate_schedules(execute_date)\n except BaseException as e:\n meta = {'execution_date': execute_date, 'previous_execution_date': None, 'next_future_execution': None}\n # can't recover from errors evaluating the schedule\n self.handle_failed_start(meta, exception=e, stage_name='evaluate_schedules', raise_error=True,\n # force an error to be raised\n **meta)\n\n # look for schedules that were flagged 'is_due'.\n # These will be executed.\n\n for (schedule, meta) in list(schedule_metadata.items()):\n\n chunks = []\n can_proceed = True\n exception = None\n\n if not meta['is_due']:\n try:\n self.log_schedule_not_due(schedule=schedule, schedule_metadata=meta)\n except BaseException as e:\n logger.warning('Error logging non-execution data: %s', e)\n exception = e\n\n can_proceed = False\n\n if can_proceed:\n try:\n self.log_start(meta, status='running')\n\n (preload_stages, cols) = self.get_stages(stage_type='preload', granularity=None,\n available_columns=set(), exclude_stages=[])\n except BaseException as e:\n msg = 'Aborted execution. 
Error getting preload stages'\n self.handle_failed_execution(meta, message=msg, exception=e, stage_name='get_stages(\"preload\")',\n raise_error=False)\n # preload stages are considered critical to successful\n # execution. If a preload stage is optional, handle the\n # error inside the preload stage\n can_proceed = False\n exception = e\n\n # the output of a preload stage is a boolean column # until we retrieve data, it has nowhere to go, # for now we will declare it as a constant\n\n if can_proceed and len(preload_stages) != 0:\n\n logger.debug('Executing preload stages:')\n try:\n (df, can_proceed, has_no_data) = self.execute_stages(preload_stages,\n start_ts=meta['preload_from'],\n end_ts=meta['end_date'], df=None,\n granularity='preload')\n except BaseException as e:\n msg = 'Aborted execution. Error getting preload stages'\n can_proceed = self.handle_failed_execution(meta, message=msg, exception=e,\n stage_name='execute_stages(\"preload\")')\n df = None\n can_proceed = False\n exception = e\n\n else:\n\n for c in cols:\n constants[c] = True\n logger.debug('Preload stages complete')\n\n if can_proceed:\n\n # build a job specification\n try:\n job_spec = self.build_job_spec(schedule=schedule, subsumed=meta['mark_complete'])\n if self.get_payload_param('_get_job_spec', False):\n print(job_spec)\n can_proceed = False\n\n except BaseException as e:\n self.handle_failed_execution(meta, message='Failed when building job spec', exception=e,\n raise_error=None, stage_name='build_job_spec)')\n can_proceed = False\n exception = e\n\n if can_proceed:\n\n # job spec may include one or more skipped stages, e.g. invalid dependencies\n # skipped stages must be accounted for\n\n abort_on_error = False\n\n for s in job_spec['skipped_stages']:\n items = self.get_stage_output_list(s, raise_error=True)\n inputs = self.get_stage_input_set(s, raise_error=True)\n skip_parms = {'skipped_items': items, 'required_inputs': inputs}\n self.handle_failed_stage(stage=s, df=None, message=s.build_status,\n exception=StageException(s.build_status), raise_error=False,\n **skip_parms)\n if not abort_on_error:\n abort_on_error = self.get_stage_param(s, '_abort_on_fail', None)\n\n if len(job_spec['skipped_stages']) > 0:\n if not abort_on_error:\n abort_on_error = self.get_payload_param('_abort_on_fail', False)\n\n if abort_on_error:\n can_proceed = False\n\n if can_proceed:\n # divide up the date range to be processed into chunks\n try:\n chunks = self.get_chunks(start_date=meta['start_date'], end_date=meta['end_date'],\n round_hour=meta['round_hour'], round_min=meta['round_min'],\n schedule=schedule)\n except BaseException as e:\n self.handle_failed_execution(meta, message='Error identifying chunks', exception=e,\n raise_error=False, stage_name='get_chunks)')\n can_proceed = False\n exception = e\n\n for i, (chunk_start, chunk_end) in enumerate(chunks):\n\n # execute the job spec for each chunk.\n # add the constants that were produced by\n # the preload stages\n\n if can_proceed:\n\n kwargs = {'chunk': i, 'start_date': chunk_start, 'end_date': chunk_end}\n\n if len(chunks) > 1:\n self.trace_add('Processing in chunks', log_method=logger.debug, **kwargs)\n else:\n self.trace_add('Processing as a single chunk', log_method=logger.debug, **kwargs)\n\n # execute input level stages\n\n try:\n (df, can_proceed, has_no_data) = self.execute_stages(stages=job_spec['input_level'],\n start_ts=chunk_start, end_ts=chunk_end,\n df=None, constants=constants,\n granularity=None)\n except BaseException as e:\n 
self.handle_failed_execution(meta, message='Error executing stage', exception=e,\n raise_error=False)\n can_proceed = False\n exception = e\n else:\n if not has_no_data:\n auto_index_name = self.get_payload_param('auto_index_name', '_auto_index_')\n df = reset_df_index(df, auto_index_name=auto_index_name)\n else:\n can_proceed = True\n\n for (grain, stages) in list(job_spec.items()):\n\n if not has_no_data and can_proceed and grain is not None and grain not in ['input_level',\n 'skipped_stages',\n 'preload']:\n\n if self.get_payload_param('aggregate_complete_periods', True):\n\n try:\n if isinstance(grain, str):\n grain_dict = self.get_payload_param('_granularities_dict', {})\n granularity = grain_dict.get(grain)\n else:\n granularity = grain\n\n (grain_df, revised_date) = granularity.align_df_to_start_date(df=df,\n min_date=chunk_start)\n\n except BaseException as e:\n msg = 'Error aligning input data to granularity %s' % grain\n self.trace_add(msg=msg, log_method=logger.warning, error=e)\n grain_df = df\n exception = e\n\n else:\n msg = 'Aligned input data to granularity %s' % grain\n self.trace_add(msg=msg, revised_date=revised_date)\n else:\n\n grain_df = df\n\n try:\n (result, can_proceed, has_no_data) = self.execute_stages(stages=stages,\n start_ts=chunk_start,\n end_ts=chunk_end, df=grain_df,\n granularity=grain)\n except BaseException as e:\n self.handle_failed_execution(meta, message='Error executing stage', exception=e,\n raise_error=False)\n can_proceed = False\n exception = e\n\n if has_no_data and exception is None:\n can_proceed = True\n\n # write results of this execution to the log\n\n if not meta['is_due']:\n status = 'skipped'\n elif can_proceed:\n status = 'complete'\n else:\n status = 'aborted'\n try:\n self.log_completion(metadata=meta, status=status)\n except BaseException as e:\n # an error writing to the job log could invalidate\n # future runs. Abort on error.\n self.handle_failed_execution(meta, message='Error writing execution results to log', exception=e,\n raise_error=True)\n exception = e\n\n if status == 'aborted':\n\n raise_error = self.get_payload_param('_abort_on_fail', False)\n if raise_error:\n stack_trace = self.get_stack_trace()\n if stack_trace is None:\n msg = 'Execution was aborted. 
Unable to retrieve stack trace for exception: %s' % exception\n else:\n msg = 'Execution was aborted: %s\\n %s' % (exception, stack_trace)\n raise RuntimeError(msg)\n\n try:\n next_execution = self.get_next_future_execution(schedule_metadata)\n except BaseException as e:\n self.handle_failed_execution(meta, message='Error getting next future scheduled execution', exception=e,\n stageName='get_next_future_execution', raise_error=True)\n meta['next_future_execution'] = next_execution\n\n # if there is no future execution that fits withing the timeframe\n # of this job, no need to hang around and wait\n\n if next_execution is not None and next_execution < execute_until:\n self.sleep_until(next_execution)\n else:\n logger.debug(('Ending job normally as there are no scheduled executions '\n ' due before execution end time'))\n self.trace_end()\n break\n\n execution_counter += 1\n execute_date = dt.datetime.utcnow()", "def _reclaim_queued_deletes(self, context):\n interval = CONF.reclaim_instance_interval\n if interval <= 0:\n LOG.debug(\"CONF.reclaim_instance_interval <= 0, skipping...\")\n return\n\n # TODO(comstud, jichenjc): Dummy quota object for now See bug 1296414.\n # The only case that the quota might be inconsistent is\n # the cloud node died between set instance state to SOFT_DELETED\n # and quota commit to DB. When cloud node starts again\n # it will have no idea the reservation is committed or not or even\n # expired, since it's a rare case, so marked as todo.\n quotas = objects.Quotas.from_reservations(context, None)\n\n filters = {'vm_state': vm_states.SOFT_DELETED,\n 'task_state': None,\n 'host': self.host}\n instances = objects.InstanceList.get_by_filters(\n context, filters,\n expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS,\n use_slave=True)\n for instance in instances:\n if self._deleted_old_enough(instance, interval):\n bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(\n context, instance.uuid)\n LOG.info(_LI('Reclaiming deleted instance'), instance=instance)\n try:\n self._delete_instance(context, instance, bdms, quotas)\n except Exception as e:\n LOG.warning(_LW(\"Periodic reclaim failed to delete \"\n \"instance: %s\"),\n e, instance=instance)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method is used to terminate a job with the specified job_id or job_name, or a group of jobs, in a given cluster.
def delete(cls, cluster, job, group=None): try: if group is not None: # get the job ids from the db arguments = {'cluster': cluster, 'group': group} db_jobs = cls.cm.find('batchjob', **arguments) list1 = [] for i in db_jobs: list1.append(db_jobs[i]['job_id']) # read active jobs active_jobs = json.loads(cls.queue(cluster)) list2 = [] for i in active_jobs: list2.append(active_jobs[i]['jobid']) # find intersection res = set(list1).intersection(set(list2)) if res is not None: for j in res: cmd = 'scancel {}'.format(str(j)) Shell.ssh(cluster, cmd) print("Deleted {}".format(j)) return "All jobs for group {} killed successfully".format(group) else: args = 'scancel ' if job.isdigit(): args += job else: args += "-n {}".format(job) Shell.ssh(cluster, args) return "Job {} killed successfully".format(job) except Exception as ex: print("in exceptio") print(ex) return ex
[ "def stop_cluster(self, args):\n client = self.get_client()\n client.terminate_job_flows(JobFlowIds=[args['CLUSTERID']])\n\n return [{\"cm\": {\"cloud\": \"aws\",\n \"kind\": \"emr stop cluster request\",\n \"name\": args['CLUSTERID']},\n 'data': {\"name\": args['CLUSTERID']}}]", "def terminate():\n try:\n job_id = flask.request.args[\"id\"]\n except KeyError:\n message = {\"error\": \"no 'id' argument in request\"}\n return flask.jsonify(message)\n\n result = MANAGER.kill_job(job_id)\n return flask.render_template(\"terminate.html\", result=result)", "def kill_job(self, job):\n try:\n job = int(job)\n result = \"job id not found\"\n what_to_kill = \"id\"\n except ValueError:\n result = \"no jobs of this service\"\n what_to_kill = \"service\"\n self.lock.acquire()\n try:\n for job_id, (job_process, data, _, _) in self.running_jobs.items():\n if what_to_kill == \"id\" and job_id == job:\n if job_process.is_alive():\n job_process.terminate()\n result = \"job terminated\"\n else:\n result = \"job no longer running\"\n break\n if data[\"service\"] == job:\n if job_process.is_alive():\n job_process.terminate()\n result = \"job(s) terminated\"\n finally:\n self.lock.release()\n return result", "def terminate_emr(**kwargs):\n task_instance = kwargs[\"ti\"]\n cluster_id = task_instance.xcom_pull(task_ids=\"create_cluster\")\n emr.terminate_cluster(cluster_id)", "def kill_job(self , index):\n job = self.jobs.__getitem__( index )\n if job:\n job.kill()", "def terminate():\n with open (f\"{CLUSTER_FOLDER}/uuid\", \"r\") as f:\n uuid = f.read().strip()\n\n start_time = time.time() \n cluster = delete_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], uuid) \n if(not cluster):\n log(\"Failed to terminate cluster via API.\")\n exit(1)\n\n log(f\"Started termination of cluster '{cluster['id']}'. 
Waiting for cluster to be terminated...\")\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'TERMINATED' and not cluster['status']['failed']):\n time.sleep(5)\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n\n if(cluster['status']['failed']):\n log(\"Cluster termination failed.\")\n exit(1)\n\n if(TIMEOUT_SECONDS <= (time.time()-start_time)):\n log(\"Timeout while launching cluster.\")\n exit(1)\n\n log(f\"Cluster '{cluster['id']}' is terminated.\")", "def delete_job(self, name: str) -> None:\n batch_client = self.k8s_client.BatchV1Api()\n self.logger.info(\"Deleting job {} in namespace {}\".format(name, self.namespace))\n\n try:\n batch_client.delete_namespaced_job(\n name=name,\n namespace=self.namespace,\n body=self.k8s_client.V1DeleteOptions(propagation_policy=\"Foreground\"),\n )\n except self.k8s_client.rest.ApiException:\n self.logger.exception(\n \"Error attempting to delete job {} in namespace {}\".format(\n name, self.namespace\n )\n )", "def delete_job(api_instance, job_name):\n api_response = api_instance.delete_namespaced_job(\n name=job_name,\n namespace=\"default\",\n body=client.V1DeleteOptions(\n propagation_policy=\"Foreground\", grace_period_seconds=5\n ),\n )\n logger.info(\"Job deleted with status='%s'\" % str(api_response.status))", "def stop(self) -> None:\n self._client.terminate_job(jobId = self.id, reason = self.STOP_REASON)", "def stop_batch_job(self, name, error_on_stopped=False):\n if name not in self.batch_jobs:\n raise ValueError(\"job {} doesn't exists\".format(name))\n if name not in self.jobs:\n if error_on_stopped:\n raise ValueError(\"job {} doesn't exists\".format(name))\n return\n self.remove_job(name)\n args,kwargs=self._batch_jobs_args.pop(name)\n cleanup=self.batch_jobs[name][1]\n if cleanup:\n cleanup(*args,**kwargs)", "def stop_batch_job(self, name, error_on_stopped=False):\n if name not in self.batch_jobs:\n raise ValueError(\"job {} doesn't exists\".format(name))\n if name not in self.jobs:\n if error_on_stopped:\n raise ValueError(\"job {} doesn't exists\".format(name))\n return\n self.remove_job(name)\n _,args,kwargs,cleanup=self._batch_jobs_args.pop(name)\n if cleanup:\n cleanup(*args,**kwargs)", "def delete_job(context, job_id=None):\n endpoint = job_endpoint(context, job_id)\n context.response = requests.delete(endpoint)", "def kill_job(job_pid, term_signal):\n raise exceptions.NoImplementedException(\n message=\"kill_job is still not supported\")", "def cli(ctx, job_id):\n return ctx.gi.jobs.cancel_job(job_id)", "def _delete_job(self, job):", "async def stop(self):\n self.log.info(f'Stopping cluster with name {self.clustername()}')\n if await self.exists(self.clustername()):\n result = self.dataproc_client.delete_cluster(\n project_id=self.project,\n region=self.region,\n cluster_name=self.clustername())\n return result\n self.log.info(f'No cluster with name {self.clustername()}')\n return None", "def delete_job(jobId=None, force=None):\n pass", "def _kill_canceling(self, job):\n pidrecord = os.path.join(job.output_dir, \"jobpid\")\n if os.path.exists(pidrecord):\n with open(pidrecord, 'r') as f:\n pgid = int(f.read())\n self.logger.info(\"Signalling SIGTERM to process group: %d\", pgid)\n try:\n os.killpg(pgid, signal.SIGTERM)\n except OSError as e:\n self.logger.info(\"Unable to kill process group %d: %s\", pgid, e)\n os.unlink(pidrecord)", "def 
stop_labeling_job(LabelingJobName=None):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to set the tolerance.
def set_tolerance(self, tol): self.tolerance = tol
[ "def setTolerance(self, tolerance = 0.001):\n \"\"\"tolerance: absolute tolerance value\"\"\"\n \n self.h.cvode.atol(tolerance)", "def set_tolerance(self, value):\n\n self._tolerance = value", "def set_tolerance(self, tol):\n self.precision = tol\n return", "def set_tol(self, tol : float):\n self.tol = tol", "def set_tolerance(rel_tolerance=1e-09, abs_tolerance=0.0):\n global REL_TOLERANCE, ABS_TOLERANCE\n REL_TOLERANCE = rel_tolerance\n ABS_TOLERANCE = abs_tolerance", "def set_tolerance(tolerance):\n\treturn dsslib.SolutionF(ctypes.c_int32(9), ctypes.c_double(tolerance))", "def _set_tolerances(self, atol=None, rtol=None, maxiter=None):\n atol = self.atol if atol is None else atol\n rtol = self.rtol if rtol is None else rtol\n maxiter = self.maxiter if maxiter is None else maxiter\n # BUG: PETSc misses rtol requirement by ~10-20X -> Report to petsc4py\n self.ksp.setTolerances(atol=None, rtol=rtol/50, max_it=maxiter)", "def set_tol(self, reltol=1e-6, abstol=1e-6):\n self.reltol = reltol\n self.abstol = abstol\n\n if self.has_integrator():\n if self.parallel:\n #self.integrator.set_options(reltol, abstol)\n pass\n else:\n self.integrator.integrator.set_scalar_tolerances(reltol, abstol)", "def modify_tolerances(self):\r\n\t\tpw.Database.setModelSize(1)\r\n\t\tpw.Grid.setNodeTolerance(9.9999999999999995e-08)\r\n\t\tpw.Grid.setConnectorTolerance(9.9999999999999995e-08)\r\n\t\tpw.Grid.setGridPointTolerance(1e-10)\r\n\t\tpw.Application.markUndoLevel('Modify Tolerances')", "def tolerance(self):\n return self.__tolerance*100.", "def tolerance(self):\n return self.params['tolerance']", "def set_particle_tolerance(self, value):\n\n self._particle_tolerance = value", "def tolerance(self):\n return self._tolerance", "def test_tol(self):\n m = Magnet()\n self.assertEqual(m.tol, 0.05)\n m.tol = 'a'\n self.assertEqual(m.tol, 0.05)\n m.tol = 1\n self.assertEqual(m.tol, 0.05)\n m.tol = 0.1\n self.assertEqual(m.tol, 0.1)", "def SetCoordinateTolerance(self, _arg: 'double const') -> \"void\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVISS3IULL3_Superclass_SetCoordinateTolerance(self, _arg)", "def set_tol(self):\n rtol_scalar = isinstance(self.rtol, (int, float))\n atol_scalar = isinstance(self.atol, (int, float))\n if (rtol_scalar and (not atol_scalar)):\n self.rtol = [self.rtol] * self.neq # Wrap to vector\n elif (atol_scalar and (not rtol_scalar)):\n self.atol = [self.atol] * self.neq # Wrap to vector\n # itol is a flag to indicate whether tolerance parameters are input\n # as scalars or vectors\n self.itol = int(not(atol_scalar & rtol_scalar))", "def SetTolerance(self, toleranceType, toleranceExpr):\n callResult = self._Call(\"SetTolerance\", toleranceType, toleranceExpr)", "def SetCoordinateTolerance(self, _arg: 'double const') -> \"void\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC3IULL3_Superclass_SetCoordinateTolerance(self, _arg)", "def set_abs_tolerance(self, value):\n\n self._abs_tolerance = value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to revert the direction of the current line. Returns
def revert(self): reverted = Line(l=self) reverted.direction *= -1.0 return reverted
[ "def fliped(self):\n return Line(self.end, self.start, self)", "def revert(self, *args, **kwargs):", "def _backup_line(self):\n if self._orig_line is None:\n self._orig_line = self._line", "def reverseDirection(self):\n if self.extendedDirection == c.Directions.UP:\n self.extendedDirection = c.Directions.DOWN\n elif self.extendedDirection == c.Directions.DOWN:\n self.extendedDirection = c.Directions.UP\n elif self.extendedDirection == c.Directions.LEFT:\n self.extendedDirection = c.Directions.RIGHT\n else:\n self.extendedDirection = c.Directions.LEFT", "def previous_line():\r\n set_point(point().previous_line())", "def revert_step(self) -> NoReturn:\n if(len(self._trajectory)>1):\n self._trajectory.pop()\n self._currentState = self._trajectory[-1]\n self._update_current_vars_from_current_state()\n else:\n warnings.warn(\"Could not revert step, as only 1 step is in the trajectory!\")", "def revertmove(self):\n self.new_angle = self.angle\n self.moverotate = False # will not rotate to move\n self.revert = True\n newangle = self.setrotate()\n for subunit in self.subunit_sprite:\n subunit.new_angle = newangle", "def reverse_direction(self) -> None:\n if self._direction == 'N':\n self._direction = 'S'\n elif self._direction == 'S':\n self._direction = 'N'\n elif self._direction == 'E':\n self._direction = 'W'\n else:\n self._direction = 'E'", "def transposeCurrentAndPreviousLines(self):\r\n self.SendScintilla(QsciScintilla.SCI_LINETRANSPOSE)", "def backward_character():\r\n set_point(point().offset(-1))", "def pre_revert(self):", "def flip(self, bev_direction: str = 'horizontal') -> None:\n pass", "def mirrorHoriz():", "def Reverse(self):\n if (self.translated == False):\n self.alignment = self.alignment[:,::-1]\n self.Show(self.displayedColumn)\n self.BackupAlignment()\n else:\n self.AlertMessage(\"Can't reverse protein sequences.\", 'medium')", "def reverse(self):\n self.pos = 1.0 - np.flip( self.pos )\n self.color = np.flip( self.color, axis=0 )\n self.stopsCache = {}", "def reverse(self):\n self._path = self._path.bezierPathByReversingPath()", "def down(self, wrap = None):\n len_current = self.line_length()\n \n # If there is line wrapping\n if wrap:\n wraps_current = int(len_current / wrap)\n columns_current = len_current % wrap\n \n # If the position is not in the bottom wrap of the line move it down a\n # wrap. Take into account shorter wraps below.\n if len_current > wrap and self.pos < wraps_current * wrap:\n pos_wrap = int(self.pos / wrap)\n if pos_wrap + 1 == wraps_current and self.pos % wrap > columns_current:\n self.pos = (wraps_current * wrap) + columns_current\n else:\n self.pos = self.pos + wrap\n \n # If the position is in the bottom wrap move it to the first wrap of\n # the next line. Take into acount shorter lines below.\n elif self.line < self.buffer.size() - 1:\n len_next = self.line_length(1)\n self.line += 1\n if self.pos % wrap > len_next:\n self.pos = len_next\n else:\n self.pos = self.pos % wrap\n \n # If no wrapping is being done move the line down one and adjust the\n # position if the next line is shorter.\n elif self.line < self.buffer.size() - 1:\n len_next = self.line_length(1)\n self.line += 1\n if self.pos > len_next:\n self.pos = len_next", "def reverse(self):\n self.x = -self.x\n self.y = -self.y\n\n return self", "def flip(self):\n self._start, self._end = self._end, self._start" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to get the abscissa of a point with respect to a line. The abscissa is 0 if the projection of the point and the projection of the frame origin on the line are the same point.
def get_abscissa(self, p): return np.dot(p - self.zero, self.direction)
[ "def _line_x(line, y):\n p1 = line[0]\n p2 = line[1]\n if p2[0] == p1[0]:\n return p1[0]\n m = (p2[1] - p1[1]) / (p2[0] - p1[0])\n if m == 0:\n if p1[1] == y:\n return p1[0]\n return None\n x = p1[0] + (y - p1[1]) / m\n return x", "def get_line_to(self, point):\n\n b = ((self.x - point.x)*point.y - (self.y - point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b", "def line_mx_plus_b(\n line: shapely.geometry.LineString,\n) -> Tuple[float, float]:\n y2, y1 = line.coords[1][1], line.coords[0][1]\n x2, x1 = line.coords[1][0], line.coords[0][0]\n if x2 - x1 == 0:\n return (1, None)\n m_slope = (y2 - y1) / (x2 - x1)\n\n point_on_line = line.coords[0]\n p_x, p_y = point_on_line\n # solve line eqn for b given a known point on the line\n b_intercept = p_y - m_slope * p_x\n return (m_slope, b_intercept)", "def slope(line: Line) -> float:\n rise = (line.y2 - line.y1)\n run = (line.x2 - line.x1)\n # We're trading off a tiny bit of accuracy for our program not crashing.\n if run == 0:\n run = 0.000000001\n return rise / run", "def _dist_point2line(self, point: ndarray,\n line: Tuple[ndarray, ndarray]) -> ndarray:\n\n assert isinstance(line, tuple)\n point1, point2 = line\n d = abs(np.cross(point2 - point1, point - point1)) / (\n norm(point2 - point1) + 1e-8)\n return d", "def get_slope(point_a: Point, point_b: Point) -> decimal.Decimal:\n return (point_b.y - point_a.y) / (point_b.x - point_a.x)", "def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = (provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)", "def determine_position_from_line(line_point_left: Tuple,\n line_point_right: Tuple,\n point: Tuple) -> int:\n lx, ly = line_point_left\n rx, ry = line_point_right\n px, py = point\n return (px-lx)*(ry-ly) - (py-ly)*(rx-lx)", "def _line_y(line, x):\n p1 = line[0]\n p2 = line[1]\n if p2[0] == p1[0]:\n if p1[0] == x:\n return p1[1]\n return None\n m = (p2[1] - p1[1]) / (p2[0] - p1[0])\n y = p1[1] + m * (x - p1[0])\n return y", "def slope(self):\n if self.b == 0:\n return None\n else:\n return (-1) * self.a/self.b", "def determine_angle_slope(line, ax):\n x, y = line.get_data()\n\n sp1 = ax.transData.transform_point((x[0],y[0]))\n sp2 = ax.transData.transform_point((x[-1],y[-1]))\n\n rise = (sp2[1] - sp1[1])\n run = (sp2[0] - sp1[0])\n\n return degrees(atan(rise/run))", "def distance_to_line(self, point, line):\n px, py = point\n x1, y1, x2, y2 = line\n x_diff = x2 - x1\n y_diff = y2 - y1\n num = abs(y_diff * px - x_diff * py + x2 * y1 - y2 * x1)\n den = math.sqrt(y_diff**2 + x_diff**2)\n return num / den", "def calc_line_x(y, slope, intercept):\n return int((y - intercept)/slope)", "def slope_from_origin(self):\n\n return self.y / self.x", "def slope_from_origin(self):\n\n return (self.y / self.x)", "def distance_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n pa = subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector(cross_vectors(pa, pb))\n l_ab = length_vector(ab)\n return l / l_ab", "def center_point(polyline):\n\tpts = unique(polyline.points)\n\treturn sum(pts) / len(pts)", "def transform_line(line):\n (x1, y1, x2, y2) = line[0][:4]\n if x1 == x2 or y1 == y2:\n return None\n else:\n slope = (y2 - y1) / (x2 - x1)\n\n intercept = y1 - (slope * x1)\n return np.around(slope, 2), np.around(intercept, 2)", "def midpoint_line(line):\n return midpoint_point_point(*line)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the status code as per ttype and it's status_val
def get_status_code(self, ttype, status_val) -> str: # get the status code from __status_code or __default_code pass
[ "def status_type(self):\n return self._status_type", "def ensure_status_value(status: T.Union[int, BaseStatusEnum]) -> int:\n if isinstance(status, BaseStatusEnum):\n return status.value\n else:\n return status", "def get_status(self):\n if self.is_void:\n return u'void'\n\n return self.status_detail", "def get_status(self):\n\n resp = self.sendcmd('TS', '?', expect_response=True, retry=10)\n errors = int(resp[0:4], 16)\n state = resp[4:]\n\n assert len(state) == 2\n\n return errors, state", "def get_status_int(self, tup=None):\n if tup is None:\n tup = self.get_status_bits()\n return int(''.join([str(x) for x in tup]), base=2)", "def get_status(self):\n response_dict = self.response_dict()\n return response_dict.status", "def _get_status_code(response: Response) -> int:\n status_code = response.status_code\n if isinstance(status_code, HTTPStatus):\n return status_code.value\n else:\n return status_code", "def status(self, value):\r\n if isinstance(value, (int, long)):\r\n if 100 <= value <= 999:\r\n st = _RESPONSE_STATUSES.get(value, '')\r\n if st:\r\n self._status = '%d %s' % (value, st)\r\n else:\r\n self._status = str(value)\r\n else:\r\n raise ValueError('Bad response code: %d' % value)\r\n elif isinstance(value, basestring):\r\n if isinstance(value, unicode):\r\n value = value.encode('utf-8')\r\n if _RE_RESPONSE_STATUS.match(value):\r\n self._status = value\r\n else:\r\n raise ValueError('Bad response code: %s' % value)\r\n else:\r\n raise TypeError('Bad type of response code.')", "def get_status_code(self, response):\n if hasattr(response, 'status_int'):\n return response.status_int\n return response.status", "def get_status_code(self, response):\r\n if hasattr(response, 'status_int'):\r\n return response.status_int\r\n else:\r\n return response.status_code", "def getStatus(self):\n if self.status == 0:\n return _xml.STATUS_OK\n elif self.status == 1:\n return _xml.STATUS_WARN\n elif self.status == 2:\n return _xml.STATUS_ERROR\n elif self.status == 'baseError1':\n raise TransportError()\n elif self.status == 'baseError2':\n return _xml.STATUS_DUFF", "def status_to_event_code(status: str):\n return {\n \"sent\": \"txSent\",\n \"pending\": \"txPool\",\n \"pending-simulation\": \"txPoolSimulation\",\n \"stuck\": \"txStuck\",\n \"confirmed\": \"txConfirmed\",\n \"failed\": \"txFailed\",\n \"speedup\": \"txSpeedUp\",\n \"cancel\": \"txCancel\",\n \"dropped\": \"txDropped\",\n }[status]", "def _get_status_code(response):\n return response.status_code", "def status(self, value):\n if isinstance(value, (long, int)):\n if 100 <= value <= 900:\n status = _RESPONSE_STATUSES.get(value, '')\n if status:\n self._status = '%d %s' % (value, status)\n else:\n self._status = str(value)\n else:\n raise ValueError('Bad response code: %d' % value)\n elif isinstance(value, basestring):\n if isinstance(value, unicode):\n value = value.encode('utf-8')\n if _RE_RESPONSE_STATUS.match(value):\n self._status = value\n else:\n raise ValueError('Bad response code: %d' % value)\n else:\n raise TypeError('Bad type of response code.')", "def _GetStatusFromOp(op):\n for prop in op.response.additionalProperties:\n if prop.key == 'status':\n return prop.value.string_value\n return 'UNKNOWN'", "def status_enum_to_msg_constant(status):\n if status == py_trees.common.Status.INVALID:\n return py_trees_msgs.Behaviour.INVALID\n elif status == py_trees.common.Status.RUNNING:\n return py_trees_msgs.Behaviour.RUNNING\n elif status == py_trees.common.Status.SUCCESS:\n return py_trees_msgs.Behaviour.SUCCESS\n elif status == 
py_trees.common.Status.FAILURE:\n return py_trees_msgs.Behaviour.FAILURE\n else:\n return 0 # unknown status", "def convert_status(status):\n if status:\n return \"Open\"\n return \"Closed\"", "def rtt_get_status(self):\n status = structs.JLinkRTTerminalStatus()\n res = self.rtt_control(enums.JLinkRTTCommand.GETSTAT, status)\n return status", "def status_str(status):\n\n status_map = {\n 0: 'MATCH',\n 10: 'OK',\n 15: 'SKIP',\n 20: 'FAIL',\n 30: 'CRASH'\n }\n return status_map.get(status, 'UNKNOWN')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
To check if payload to be processed with this lambda
def apply_filter(self, payload: dict, ainfos) -> (dict, dict): # check if needs to process by this lambda pass
[ "def verify_payload():\n return True", "def lambda_response_ok(response: dict) -> bool:\n # import pdb; pdb.set_trace()\n failed = response.get(\"FunctionError\")\n if failed:\n print(response[\"Payload\"].read().decode(\"utf-8\"), file=sys.stderr)\n return failed is None", "def payload_check(self, payload, command):\n if payload is None:\n self.send_error('payload', '%s: expected payload' % command)\n return False\n return True", "async def validate(self, payload: Dict[str, Any]) -> None:", "def has_payload(self):\n\n if self._payload:\n return True\n return False", "def check_if_executed(\n self, payload_data: Optional[str | bytes] = None\n ) -> bool:\n payload = (\n payload_data.encode('utf-8')\n if isinstance(payload_data, str)\n else payload_data\n )\n return self.validator.is_executed(payload)", "def _check_payload(self, token): \n try:\n payload = jwt_decode_handler(token)\n except jwt.ExpiredSignature:\n msg = _('Signature has expired.')\n raise serializers.ValidationError(msg)\n except jwt.DecodeError:\n msg = _('Error decoding signature.')\n raise serializers.ValidationError(msg)\n return payload", "def __handover_payload(self, plugin_executor, payload):\n\n #Initialize the executor class of the plugin\n executor = plugin_executor()\n\n #Check if the plugin has a valid payload handler or not and then handover\n #the payload to the handler\n plugin_methods = dir(executor)\n\n if 'handle_payload' not in plugin_methods:\n return False\n\n executor.handle_payload(payload)\n return True", "def lambda_handler(event, context):\n return", "def handle_payload(self, request, payload, **kwargs): \n value_dict = self.parse_payload(payload)\n try:\n consumer = Consumer.objects.get(email=value_dict['email'])\n if not consumer.is_email_verified:\n consumer.is_email_verified = True\n consumer.save()\n if kwargs.get('opting', False):\n consumer.email_subscription.add(1)\n build_session_from_user(request, consumer)\n except (Consumer.DoesNotExist, KeyError):\n pass\n return value_dict", "def is_ready(self) -> bool:\n return self.payload is not None", "def validate_message_payload(payload):\n return objects_module.messages.validate_message_payload(payload)", "def validate_request(event):\n if 'input' in event and len(event['input']) > 1:\n return event['input']\n else:\n raise InvalidEventError('Lambda event does not contain valid input field!')", "def check_message_payload(dequeued_item):\n key_array = [\"dateTime\",\n \"payload\",\n \"messageType\"]\n\n # Note that the \"ttl\" key (and others) may be present but its not checked here!\n\n for key in key_array:\n if key not in dequeued_item.keys():\n return False\n\n key_array = [\"zoomR\",\n \"spatial\",\n \"circuitID\",\n \"reputationEnabled\",\n \"assetID\",\n \"temporal\",\n \"outageTime\",\n \"company\",\n \"votes\",\n \"zoomT\",\n \"longitude\",\n \"latitude\"]\n for key in key_array:\n if key not in dequeued_item[\"payload\"].keys():\n return False\n return True", "def payload_handle(self, payload, mail):\n\t\tif self.payload_is_handleable(payload):\n\t\t\tif self.export_payload:\n\t\t\t\tself.payload_pipe(payload, mail)\n\t\t\tif self.reduce_payload:\n\t\t\t\t# Mark email as deleted:\n\t\t\t\tself.delete_marked.append(self.payload_index(payload, mail))", "def validate_request_body(self):\n try:\n self.data = json.loads(self.request.body.decode('UTF-8'))\n except:\n return False, {'error': 'provide a valid json.'}\n return True,", "def is_empty(payload):\r\n return len(payload) == 0", "def _check_payload(self, check_dict=None):\n 
request_body = self.request.get_json()\n payload_objects = []\n\n if not check_dict:\n return payload_objects\n elif not request_body:\n raise OppError(\"Missing payload!\")\n\n for obj in check_dict:\n try:\n payload_obj = request_body[obj['name']]\n if obj['is_list']:\n if not isinstance(payload_obj, list):\n raise OppError(\"'%s' object should be in\"\n \" list form!\" % obj['name'])\n else:\n if isinstance(payload_obj, list):\n raise OppError(\"'%s' object should not be \"\n \"in list form!\" % obj['name'])\n payload_objects.append(payload_obj)\n except KeyError:\n if obj['required']:\n raise OppError(\"Required payload object '%s'\"\n \" is missing!\" % obj['name'])\n else:\n if obj['is_list']:\n payload_objects.append([])\n else:\n payload_objects.append({})\n\n return payload_objects", "def isLambda(self):\r\n return self._wrap(type(self.obj) is LambdaType)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use ansi code on 'string' if the output is the terminal of a not Windows platform
def isSpecial(ansiCode,string): if IS_TERMINAL and not IS_WIN32: return ansiCode+string+ANSI_END else: return string
[ "def ansi(self) -> str:\n # noinspection PyTypeChecker\n return str(self.read(), ANSI) # type: ignore", "def ansi_sequence(self):\n codes = \";\".join(str(c) for c in self.ansi_codes)\n return f\"\\033[{codes}m\" if codes else \"\"", "def ansi(key):\n global _ansi\n return _ansi[key]", "def _handle_ansi_color_codes(self, s):\r\n def ansi_code_to_css(code):\r\n return ' '.join(['ansi-%s' % c for c in code.split(';')])\r\n return '<span>' +\\\r\n HtmlReporter._ANSI_COLOR_CODE_RE.sub(\r\n lambda m: '</span><span class=\"%s\">' % ansi_code_to_css(m.group(1)), s) +\\\r\n '</span>'", "def ansi_sequence(self):\n return \"\\033[\" + \";\".join(map(str, self.ansi_codes)) + \"m\"", "def ansi(*args):\n code = Term.ESCAPE_START\n code += ';'.join(args)\n code += Term.ESCAPE_END\n return code", "def TerminalSupportsAnsiColors():\n return (sys.stdout.isatty() and sys.platform[:3] != \"win\")", "def _ansi_equivalent(self, s: str) -> str:\r\n color_id = self._color_id_regexp.search(s).groups()[0]\r\n\r\n # TODO: Replace this with a class the handles dynamic color configuration!\r\n return {\r\n '0': '\\u001b[37m',\r\n '1': '\\u001b[32m',\r\n '2': '\\u001b[31m',\r\n '3': '\\u001b[33m',\r\n '4': '\\u001b[34m',\r\n '5': '\\u001b[36m',\r\n '6': '\\u001b[37m',\r\n '7': '\\u001b[35m',\r\n '8': '\\u001b[30m',\r\n '.': '\\u001b[0m',\r\n }[color_id]", "def get_ansi_string(color=AnsiColor.RESET):\n CSI = '\\033['\n colors = AnsiColor()\n if color not in colors:\n color = AnsiColor.RESET\n return CSI + str(color) + 'm'", "def filter_ANSI (line_input):\n\n line, i, imax = '', 0, len(line_input)\n handling_ansi_sequence = False\n for character in line_input:\n if handling_ansi_sequence:\n if character.lower() in 'abcdhsujkm':\n handling_ansi_sequence = False\n else:\n if character in string.printable and ord(character) != 13:\n line = line + character\n elif ord(character) == 27:\n handling_ansi_sequence = True\n elif ord(character) == 8:\n line = line[:-1]\n return line", "def _ansi_wrap(self, text, fg, bg):\n codes = []\n\n if fg is not None:\n codes.append(30 + self._to_code(fg))\n\n if bg is not None:\n codes.append(40 + self._to_code(bg))\n\n if fg is not None and 'i' in fg:\n codes.append(1) # Bold\n\n if bg is not None and 'i' in bg:\n codes.append(4) # Underscore\n\n return \"\\033[\" + \";\".join([str(code) for code in codes]) + \"m\" + text + \"\\033[0m\"", "def termcolor(code):\n def wrapper(text):\n return u\"\\033[{}m{}\\033[0m\".format(code, text)\n return wrapper", "def bold(string: str) -> str:\n return f\"\\033[1m{string}\\033[0m\"", "def _color_string(string, color):\n if color is None:\n return string\n else:\n return color + string + '\\033[0m'", "def string_filter_ansi(cls, colored_string):\n return cls.ANSI_REG.sub(\"\", colored_string)", "def test_asciitable_m_pretty_ansi(self):\n input = '''\n┏━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━┓ \n┃\\x1b[1m \\x1b[0m\\x1b[1mReleased \\x1b[0m\\x1b[1m \\x1b[0m┃\\x1b[1m \\x1b[0m\\x1b[1mTitle \\x1b[0m\\x1b[1m \\x1b[0m┃\\x1b[1m \\x1b[0m\\x1b[1m Box Office\\x1b[0m\\x1b[1m \\x1b[0m┃ \n┡━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━┩ \n│\\x1b[36m \\x1b[0m\\x1b[36mDec 20, 2019\\x1b[0m\\x1b[36m \\x1b[0m│\\x1b[35m \\x1b[0m\\x1b[35mStar Wars: The Rise of Skywalker \\x1b[0m\\x1b[35m \\x1b[0m│\\x1b[32m \\x1b[0m\\x1b[32m $952,110,690\\x1b[0m\\x1b[32m \\x1b[0m│ \n│\\x1b[36m \\x1b[0m\\x1b[36mMay 25, 2018\\x1b[0m\\x1b[36m \\x1b[0m│\\x1b[35m \\x1b[0m\\x1b[35mSolo: A Star Wars Story \\x1b[0m\\x1b[35m \\x1b[0m│\\x1b[32m 
\\x1b[0m\\x1b[32m $393,151,347\\x1b[0m\\x1b[32m \\x1b[0m│ \n│\\x1b[36m \\x1b[0m\\x1b[36mDec 15, 2017\\x1b[0m\\x1b[36m \\x1b[0m│\\x1b[35m \\x1b[0m\\x1b[35mStar Wars Ep. V111: The Last Jedi\\x1b[0m\\x1b[35m \\x1b[0m│\\x1b[32m \\x1b[0m\\x1b[32m$1,332,539,889\\x1b[0m\\x1b[32m \\x1b[0m│ \n│\\x1b[36m \\x1b[0m\\x1b[36mDec 16, 2016\\x1b[0m\\x1b[36m \\x1b[0m│\\x1b[35m \\x1b[0m\\x1b[35mRogue One: A Star Wars Story \\x1b[0m\\x1b[35m \\x1b[0m│\\x1b[32m \\x1b[0m\\x1b[32m$1,332,439,889\\x1b[0m\\x1b[32m \\x1b[0m│ \n└──────────────┴───────────────────────────────────┴────────────────┘ \n'''\n expected = [\n {\n \"released\": \"Dec 20, 2019\\nMay 25, 2018\\nDec 15, 2017\\nDec 16, 2016\",\n \"title\": \"Star Wars: The Rise of Skywalker\\nSolo: A Star Wars Story\\nStar Wars Ep. V111: The Last Jedi\\nRogue One: A Star Wars Story\",\n \"box_office\": \"$952,110,690\\n$393,151,347\\n$1,332,539,889\\n$1,332,439,889\"\n }\n ]\n\n self.assertEqual(jc.parsers.asciitable_m.parse(input, quiet=True), expected)", "def convert_ansi(self, paramstring, command):\n if self.convert:\n params = self.extract_params(command, paramstring)\n self.call_win32(command, params)", "def ansi(self) -> str:\n return humanize_list([i.ansi for i in self])", "def ANSIIcode(color='black', style='normal'):\n\n colorCode = colorCodes[color]\n styleCode = styleCodes[style]\n\n return '\\033[' + styleCode + colorCode + 'm'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sort list of TKey by their names ignoring the case
def keyListSort(keyList): keyList.sort(key=lambda y: y.GetName().lower())
[ "def sortCaseInsensitive(*args, **kwargs)->List[AnyStr]:\n pass", "def sort_by_name(list_to_sort):\n return sorted(\n list_to_sort,\n key=lambda k: k['Name'].lower()\n )", "def sortedNames(self):\n \n names = [(item.nicename, item.name) for item in self.values()]\n names.sort()\n return [name[1] for name in names]", "def dictsort_ignorecase(value, arg):\n\n def lower_if_string(object):\n try:\n return object.lower()\n except AttributeError:\n return object\n\n var_resolve = template.Variable(arg).resolve\n decorated = [(lower_if_string(var_resolve(item)), item) for item in value]\n decorated.sort()\n return [item[1] for item in decorated]", "def sort_names_by_values(self, key_list, reverse=False):\n # use last ASCII character to say deficit value should come at last.\n # maybe i should change this later using the `cmp` function.\n deficit = chr(255)\n\n def key(name):\n vals = []\n for keystr in key_list:\n keytuple = self.parse_key(keystr)\n vals.append(self._table.get(keytuple, {}).get(name, deficit))\n return tuple(vals)\n self._name.sort(key=key, reverse=reverse)", "def sort_names(li, by_which):\n \n if by_which == 'first':\n li.sort(key = Name.first)\n elif by_which == 'last':\n li.sort(key = Name.last)", "def titleSort(dictList):\n\tres = sorted(dictList, key=lambda k: getSortTitle(k))\n\treturn res", "def _sort_alphabetically(self):\n self.countries = sorted(self.countries, key=attrgetter('country'))", "def alpha_case_insensitive():\n# fill it out\n return sorted(STRING_LIST, key=lambda s: s.lower())", "def tupleListSort(tupleList):\n tupleList.sort(key=lambda y: y[0].lower())", "def _order_keys(self, keys):\n sorted_keys = list(sorted(keys))\n if self.baseline_label in keys:\n sorted_keys.remove(self.baseline_label)\n sorted_keys.append(self.baseline_label)\n return sorted_keys", "def _sort_by_name(bam_fn):", "def sorted(cls, tags: list, reverse: bool = False) -> list:\n return sorted(tags, key=lambda x: x.name.lower(), reverse=reverse)", "def sortTermsAlphabetically(terms):\n # Tutorial for sorting credit:\n # https://www.geeksforgeeks.org/ways-sort-list-dictionaries-values-python-using-lambda-function/\n sorted_list = sorted(terms, key=lambda i: (i[\"term_header\"], i[\"rating\"]))\n return sorted_list", "def _sort(self, groups):\n return sorted(groups, key=lambda group: (group.name.lower(), group.pubid))", "def sort(results, key):", "def order_by_keys(dict):\n from collections import OrderedDict\n return OrderedDict(sorted(dict.items(), key=lambda s: s[0].lower()))", "def sort_words_case_insensitively(words):\n\n w = [word for word in words if str(word)[0].isalpha()]\n d = [word for word in words if str(word)[0].isdigit()]\n\n\n\n return sorted(w,key=lambda val: val.lower()) + sorted(d,key=lambda val: val.lower())", "def sort_case_insensitive(value: Tuple[str, Item]) -> str:\n return value[0].lower()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sort list of tuple by their first elements ignoring the case
def tupleListSort(tupleList): tupleList.sort(key=lambda y: y[0].lower())
[ "def sort_case_sensitive(value: Tuple[str, Item]) -> str:\n return value[0]", "def sort_case_insensitive(value: Tuple[str, Item]) -> str:\n return value[0].lower()", "def sort_list_of_tuples(list):\n list.sort(key=lambda x: x[0])\n return list", "def sortCaseInsensitive(*args, **kwargs)->List[AnyStr]:\n pass", "def foldsort(seq):\n return sorted(seq, cmp=lambda x, y: cmp(x.lower(), y.lower()))", "def _natsort_key_case_insensitive(item):\n # added the lower() call to allow for case-insensitive sorting\n item = str(item).lower()\n\n try:\n chunks = split('(\\d+(?:\\.\\d+)?)', item)\n except TypeError:\n # if item is a tuple or list (i.e., indexable, but not a string)\n # work with the first element\n chunks = split('(\\d+(?:\\.\\d+)?)', item[0])\n for ii in range(len(chunks)):\n if chunks[ii] and chunks[ii][0] in '0123456789':\n if '.' in chunks[ii]:\n numtype = float\n else:\n numtype = int\n # wrap in tuple with '0' to explicitly specify numbers come first\n chunks[ii] = (0, numtype(chunks[ii]))\n else:\n chunks[ii] = (1, chunks[ii])\n return (chunks, item)", "def __sort_tuple(self, tup):\n\n return tuple(sorted(tup))", "def tuple_sorted(a):\r\n if ((isinstance(a, int) == True) or (isinstance(a, str) == True)):\r\n return a\r\n if ((isinstance(a[0], int) == True) or (isinstance(a[0], str) == True)):\r\n return sorted(a)\r\n else:\r\n w = []\r\n for b in a:\r\n w.append(tuple(tuple_sorted(b)))\r\n return tuple(sorted(tuple(w)))", "def sort_words_case_insensitively(words):\n #temp = sorted(words, key=lambda test_str: test_str[:1].lower() + test_str[1:])\n temp = sorted(words, key=str.lower)\n temp1 = []\n for index, word in enumerate(temp):\n if not word[0].isdigit():\n temp1.append(temp[index])\n for index, word in enumerate(temp):\n if word[0].isdigit():\n temp1.append(temp[index])\n return temp1", "def problem4_1(wordlist):\r\n print(wordlist)\r\n wordlist.sort(key=str.lower)\r\n print(wordlist)", "def alpha_case_insensitive():\n# fill it out\n return sorted(STRING_LIST, key=lambda s: s.lower())", "def keyListSort(keyList):\n keyList.sort(key=lambda y: y.GetName().lower())", "def langsort_tuples (lst, index, lang=None):\n\n reset_locale = _set_lang_locale(lang)\n lst.sort(lambda x, y: locale.strcoll(x[index], y[index]))\n reset_locale()", "def sortval(x):\n y = list(x) # list of strings of length 1\n z = sorted(y,cmp=slp_cmp)\n ans = ''.join(z)\n return ans", "def sortTuple(lstTuples, element):\n\n lstTuples.sort(key=lambda x: x[element-1])\n return lstTuples", "def test_natsort_case_insensitive(self):\r\n\r\n # string with alpha and numerics sort correctly\r\n s = [\r\n 'sample1',\r\n 'sample2',\r\n 'sample11',\r\n 'sample12',\r\n 'SAmple1',\r\n 'Sample2']\r\n\r\n # expected values\r\n exp_natsort = ['SAmple1', 'Sample2', 'sample1', 'sample2', 'sample11',\r\n 'sample12']\r\n exp_natsort_case_insensitive = ['sample1', 'SAmple1', 'sample2',\r\n 'Sample2', 'sample11', 'sample12']\r\n\r\n # test natsort\r\n self.assertEqual(natsort(s), exp_natsort)\r\n # test natsort_case_insensitive\r\n self.assertEqual(natsort_case_insensitive(s),\r\n exp_natsort_case_insensitive)\r\n\r\n s.reverse()\r\n # test natsort\r\n self.assertEqual(natsort(s), exp_natsort)\r\n # test natsort_case_insensitive\r\n self.assertEqual(natsort(list('cbaA321')), list('123Aabc'))\r\n\r\n # strings with alpha only sort correctly\r\n self.assertEqual(natsort_case_insensitive(list('cdBa')), list('aBcd'))\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort_case_insensitive(['11', '2', '1', '0']),\r\n ['0', 
'1', '2', '11'])\r\n\r\n # strings of floats sort correctly\r\n self.assertEqual(natsort_case_insensitive(['1.11', '1.12', '1.00',\r\n '0.009']), ['0.009', '1.00',\r\n '1.11', '1.12'])\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort_case_insensitive([('11', 'A'), ('2', 'B'),\r\n ('1', 'C'), ('0', 'D')]),\r\n [('0', 'D'), ('1', 'C'),\r\n ('2', 'B'), ('11', 'A')])", "def sort_words_case_insensitively(words):\n\n w = [word for word in words if str(word)[0].isalpha()]\n d = [word for word in words if str(word)[0].isdigit()]\n\n\n\n return sorted(w,key=lambda val: val.lower()) + sorted(d,key=lambda val: val.lower())", "def sort_1(l):\n pass", "def sort_by_name(list_to_sort):\n return sorted(\n list_to_sort,\n key=lambda k: k['Name'].lower()\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use sys.stdout.write to write the string with an indentation equal to indent and specifying the end character
def write(string,indent=0,end=""): sys.stdout.write(" "*indent+string+end)
[ "def _indent_print(str, file, indent=4):\n # type: (six.text_type, TextIO, int) -> None\n file.write(\" \" * indent)\n print(str, file=file)", "def maybePrintIndent(self):\n if self.lasttoken[0] != lex.Token.NEWLINE:\n return\n\n if len(self.lasttoken[1]) > 0:\n self.buffer.scopeline(\"__io.write('\" + self.lasttoken[1] + \"')\")", "def testIndentOutdentWithMargin(self):\n s = MultilineString(indent='\\t', margin='>>> ')\n s.append('test1')\n s.indent()\n s.append('test2')\n s.outdent()\n s.append('test3')\n self.assertEqual('>>> test1\\n>>> \\ttest2\\n>>> test3', str(s))", "def out_indent(indent, *args):\n s = \"\"\n s += indent * \" \"\n s += \" \".join(args)\n return s", "def testIndentOutdent(self):\n s = MultilineString()\n s.append('test1')\n s.indent()\n s.append('test2')\n s.outdent()\n s.append('test3')\n self.assertEqual('test1\\n test2\\ntest3', str(s))", "def _indent(self, indent_per_level=4):\n self._write(\" \" * indent_per_level * len(self._opened_blocks))", "def try_print_indent(self):\n if self.lasttoken[0] != lex.Token.NEWLINE:\n return\n\n if len(self.lasttoken[1]) > 0:\n self.buffer.scope_line(\"__io.write(u'\" + self.lasttoken[1] + \"')\")", "def test_indentOneLine(self):\n result = indent('string', 0)\n self.assertEquals(result, 'string')", "def indent(depth):\n return DELIM * 2 * depth", "def writeText(self, token):\n self.maybePrintIndent()\n self.buffer.writescope(\"__io.write('\")\n self.buffer.write(token)\n self.buffer.writeline(\"')\")", "def test_with_custom_indent(self):\n self.assertEqual(indent('foo', 3), ' foo')", "def write_text(self, token):\n self.try_print_indent()\n self.buffer.write_scope(\"__io.write(u'\")\n self.buffer.write(token)\n self.buffer.write_line(\"')\")", "def printIndent(s,lvl) :\n for line in s.split('\\n') :\n print('%s%s' % (' '*lvl,line))", "def reindent(src, indent):\n return src.replace(\"\\n\", \"\\n\" + \" \"*indent)", "def writeIndentedLines(self,text):\r\n for line in text.splitlines():\r\n self.write(self.oneIndent*self.indentLevel + line + '\\n')", "def console_print(out, *args, **kwargs):\n const_charset = stream_encoding(out)\n out.write(' '.join([a.encode(cons_charset, 'replace') for a in args]))\n if kwargs.get('newline', True):\n out.write('\\n')", "def indent(string, depth=1):\n return re.sub(r'(^|\\n)([^\\n]+)', '\\g<1>' + (' ' * depth) + '\\g<2>', string)", "def indent(startstr='', spc = ' ', indent=col1):\n return spc * ( indent - len(startstr) )", "def indent(str, level):\n if level == 0: return str\n return \"\\n\".join(\"\\t\" * level + line for line in str.splitlines())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print informations given by keyList with a rools style choosen with optDict
def roolsPrint(keyList,optDict,indent=0): if optDict['long'] or optDict['tree']: \ roolsPrintLongLs(keyList,optDict,indent) else: roolsPrintSimpleLs(keyList,indent)
[ "def print_options(show_opts):\n select_dict = {}\n select_number = 1\n\n for key, value in show_opts.items():\n select_dict.update({str(select_number): key})\n description = value[0] if value[0] is not None else \"\"\n print(\"\\t{} - {}{}\".format(select_number, key, description))\n \n select_number += 1\n\n return select_dict", "def printDict(myDict):\n for key in myDict:\n print(f\"Version: --> {myDict[key]['version']} \")\n print(f\"Accuracy: --> {myDict[key]['accuracy']}\")\n print(f\"Time --> {myDict[key]['time_per_target']}\")\n print(f\"Penalty --> {myDict[key]['target_w_penalty']}\")\n print(f\"ID --> {myDict[key]['assessed_by']}\")\n print(f\"# --> {myDict[key]['attempt']}\")\n\n print()", "def print_string_attrs(mydict, mylist):\n for c in mylist:\n print(mydict.get(c, False))", "def print_pair(pw_dict, key):\n print(\"Name:\", White + key + TR,\n \"Username:\", Green + pw_dict[key][0] + TR,\n \"Password:\", Red + pw_dict[key][1] + TR)", "def print_nested_keys_from(dictionary, *subkeys):\n dictionary.pop('license', None)\n for cli, command in nested_keys_from(dictionary, subkeys[0]):\n print(\" {}\".format(cli) + CPURP + \" => \" + CWHITE + \" {}\".format(command) + CEND)", "def show(list_of_dicts, key):\n print(\"\\nHere are the stocks I have considered for you:\")\n for i in list_of_dicts: # iterates through list_of_dicts and prints Name and Market Cap\n print(f\" - {i['Name']} - {key} is {i[key]} \")", "def pdict(dic): # nice dictionary print\n for i in dic.keys() :\n print \" \",i,\" = \",dic[i]", "def display_all(this_dict):\r\n for key in this_dict:\r\n print(\"%s is the capital city of %s\" % (this_dict[key].title(), key.title()))", "def printPicnic(itemsDict: dict, leftWidth: int, rightWidth: int) -> None:\n print('PICNIC ITEMS'.center(leftWidth + rightWidth, '-'))\n for k, v in itemsDict.items():\n print(k.ljust(leftWidth, '.') + str(v).rjust(rightWidth))", "def print_dict(pw_dict):\n # Add eleven to account for color\n mln = len(max(pw_dict.keys(), key=len)) + 11\n mlu = len(max([pw_dict[i][0] for i in pw_dict.keys()], key=len)) + 11\n mlp = len(max([pw_dict[i][1] for i in pw_dict.keys()], key=len)) + 11\n total = mln + mlu + mlp - 22\n print('{0:{1}} {2:{3}} {4:{5}}'.format(\n (White + \"Name\" + TR), mln,\n (Green + \"Username\" + TR), mlu,\n (Red + \"Password\" + TR), mlp))\n print(total * \"-\")\n for key in pw_dict.keys():\n print('{0:{1}} {2:{3}} {4:{5}}'.format(\n (White + key + TR), mln,\n (Green + pw_dict[key][0] + TR), mlu,\n (Red + pw_dict[key][1] + TR), mlp))", "def run_print_dict_examples():\n print()\n print_dict_keys(NAME_DICT)\n print()\n print_dict_items(NAME_DICT)", "def dictPrinter(inD):\n \n # First we find the longest key\n m = 0\n for var in inD:\n size = len(str(var))\n if size>m:\n m = size\n \n # Then we print out pairs so colons align\n \n list1=list(inD) #Since the list of a dict gives us all the keys\n list1.sort() \n \n #sort them in ascending order, then use the for loop on the list\n for var in list1:\n keyStr = str(var)\n size = len(keyStr)\n print( keyStr + \" \"*(m-size) + \" : \" + str(inD[var]) )\n print(\"\")", "def print_option_set(option_set, leader):\n for option in option_set:\n labels = \",\".join(option['labels'])\n option_set = leader + labels + \" \"*(20-len(labels)) + \"- \" + option['description']\n print(option_set)", "def print_options(val, cur_matches):\n print val\n\n #skip one to print none at end\n for i,v in enumerate(cur_matches[1:]):\n print \"[%i] %s : %s \"%(i+1, v[0], v[1])\n print \"[%i] %s : %s \" % (0, 
cur_matches[0][0], cur_matches[0][1])\n\n print \n print 'Choice?'", "def print_key_map(key):\n key_sz = len(key)\n for i in range(key_sz):\n if key[i] is None:\n s = '*'\n else:\n s = 'X'\n if i < (key_sz - 1):\n print(s, end = '')\n else:\n print(s)", "def print_options(self, clear = False):\n if clear:\n clear_screen()\n for key, value in self.options.items():\n print(f\"[{key}] {value}\")", "def print_dict(self, dictonary, output):\n sformat = \"%30s: %8d\\n\"\n total = 0\n count = 0\n for key, value in sorted(dictonary.items(), key=itemgetter(1),\n reverse=True):\n total += value\n count += 1\n output.write(sformat % (key, value))\n output.write(\"%40s\\n\" % \"----------\")\n output.write(sformat % (\"total\", total))\n output.write(sformat % (\"count\", count))", "def build_config_dict_display(lines: List[str], config_dict: Dict[str, Any], level: int = 0):\n prefix: str = \" \" * level\n for k, v in config_dict.items():\n if isinstance(v, Dict):\n lines.append(f\"{prefix}{k}:\")\n build_config_dict_display(lines, v, level + 1)\n else:\n lines.append(f\"{prefix}{k}: {v}\")", "def customprint(dico,dico2,key):\r\n \r\n print(key+':',len(dico[key])) # print the key + number of values\r\n for value in dico[key]: # for each value in the dictionnary's key\r\n print(' ',dico2[value]) # the second value is the value of the second dictionnary, which is the value of the previous\r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a Producer queue instance
def getProducer(): # get the config and a producer config = ecommerce.config.getConfig() return ecommerce.queue.queue(config, queuePrefix)
[ "def get_queue():\n\n return multiprocessing.Queue()", "def get_queue():\n watcher = Watcher()\n watcher.connect()\n queue = watcher.get_queue()\n return queue", "def get_queue(self):\n return Queue(self.name, connection=self.connection, serializer=self.serializer)", "def get(queue_name: str, **kwargs) -> Queue:\n return Queue(queue_name, **kwargs)", "def producer(self):\n return Producer(app=self.app, client=self.client)", "def make_queue(self):\n return mp.Queue()", "def get_queue(queue_type):\n con = get_redis_connection()\n queue_type = QueueType(queue_type)\n\n queue = WorkflowQueue(queue_type.value, connection=con)\n\n return queue", "def create_queue(self):\n return queue.Queue()", "def new_queue() -> Queue:\n return multiprocessing.Queue()", "def get_worker(queue_name='default', using='default', worker_name='worker', pulse=1.0):\n cacheq = CacheQ(name=queue_name, using=using)\n return Worker(cacheq=cacheq, pulse=pulse, worker_name=worker_name)", "def get_queue(self):\n return self.queue", "def get_mq():\n if 'mq' not in g:\n credentials = PlainCredentials(\n username=current_app.config['MQ_USER'],\n password=current_app.config['MQ_PASS'])\n connection_params = ConnectionParameters(\n host=current_app.config['MQ_HOST'],\n virtual_host=current_app.config['MQ_VHOST'],\n credentials=credentials)\n try:\n g.mq = BlockingConnection(connection_params)\n\n current_app.logger.debug(\n 'Connected to message queue on %s' %\n (current_app.config['MQ_HOST'], ))\n except (AMQPConnectionError, OSError, gaierror):\n current_app.logger.error(\n 'Unable to connect to message queue on %s' %\n (current_app.config['MQ_HOST'], ))\n raise UnableToConnectToMQ(current_app.config['MQ_HOST'])\n\n return g.mq", "def get_queue(self):\n if self.queue is not None:\n return self.queue\n state = self.get_state()\n self.queue = state.get_queue()\n # print(\"IQ\", self.queue)\n return self.queue", "def create(self):\n topic = self.__conn__.create_topic(self.__topic__)\n return topic.get_producer(*self.__args__, **self.__kargs__)", "def get_queue(self, name):\n\t\treturn StyxQueue(self.pool, name)", "def producer(self,topic_name):\n return PyMomProducer(self.config.bootstrap_brokers(),topic_name)", "def get_kafka_producer(self):\n return self.kafka_producer_class()", "def get_new_queue(self):\n\n queue = PrivateQueue(self)\n\n for item in self.to_array():\n queue.put(item)\n\n self.queues.append(queue)\n\n return queue", "def get_queue(name):\n try:\n queue = sqs.get_queue_by_name(QueueName=name)\n except ClientError as error:\n raise error\n else:\n return queue" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the list of all entities of a given type from DB
def getEntityIds(type, subtype = None): # get a cursor conn = ecommerce.db.getConnection() cursor = conn.cursor() # decide the query to execute if type not in entityQueries: return [ ] # execute the query qparams = (type, ) if subtype is not None: qparams = (type, subtype) cursor.execute(entityQueries[type], qparams) # fetch the ids elist = [ ] row = cursor.fetchone() while row is not None: elist.append(int(row[0])) row = cursor.fetchone() cursor.close() return elist
[ "def __get_entities(self, etype):\n r = fapi.get_entities(self.namespace, self.name,\n etype, self.api_url)\n fapi._check_response_code(r, 200)\n return [Entity(e['entityType'], e['name'], e['attributes'])\n for e in r.json()]", "def getAll(self, tipo):\n \n objects = []\n try:\n objects = self.session.query(tipo)\\\n .order_by(Vendedor.nombre).all()\n except sqlalchemy.exc.DBAPIError, e:\n if self.session is not None:\n self.session.rollback()\n print(\"Error!\", e)\n finally:\n if self._DAOAlchemy__cerrarSesion:\n self.session.close()\n return objects", "def get_all_of_type(self, type, ids_only=False):\n LOGGER.debug('Getting all records of type %s.', type)\n # Allow_filtering() is, as a rule, inadvisable; if speed becomes a\n # concern, an id - type query table should be easy to set up.\n query = (schema.Record.objects.filter(type=type)\n .allow_filtering().values_list('id', flat=True))\n if ids_only:\n for id in query:\n yield str(id)\n else:\n for record in self.get_many(query):\n yield record", "def getAll(self, tipo):\n \n objects = []\n try:\n objects = self.session.query(tipo).filter(Product.activo==True)\\\n .order_by(asc(collate(Product.name, 'NOCASE'))).all()\n except sqlalchemy.exc.DBAPIError, e:\n if self.session is not None:\n self.session.rollback()\n print(\"Error!\", e)\n finally:\n if self._DAOAlchemy__cerrarSesion:\n self.session.close()\n return objects", "def get_all_for_entity(\n cls,\n entity_type: feconf.TranslatableEntityType,\n entity_id: str,\n entity_version: int\n ) -> Sequence[EntityTranslationsModel]:\n return cls.query(\n cls.entity_type == entity_type.value,\n cls.entity_id == entity_id,\n cls.entity_version == entity_version\n ).fetch()", "def get_types():\n\tentries = session.query(TableEntities.NodeType).all()\n\treturn {'data': [entry.as_dict() for entry in entries]}", "def get_all(class_name):\n result = class_name.query.all()\n return result", "def get_transaction_by_type(cls, type):\r\n data_list = Transactions.query.filter_by(type=type).all()\r\n return data_list", "def fetch_all_sensor_types():\n query = db.session.query(\n TypeClass.id,\n TypeClass.sensor_type,\n )\n sensor_types = db.session.execute(query).fetchall()\n sensor_types = query_result_to_array(sensor_types)\n sensor_types = [st for st in sensor_types if is_valid_sensor_type(st[\"id\"])]\n return sensor_types", "def get_all_product_type():\n product_types = ProductType.objects.all().order_by('-product_type_id')\n return product_types", "def get_all_by_type(self, type):\n # Validation\n TrainerManager._str_validator(type)\n\n # Database Query\n session = self._db_session()\n if type == 'Regular Trainer':\n trainer_query = session.query(RegularTrainer).filter(\n RegularTrainer.type == \"Regular Trainer\").all()\n if type == 'Gym Leader':\n trainer_query = session.query(GymLeader).filter(\n GymLeader.type == \"Gym Leader\").all()\n session.close()\n\n return trainer_query", "def select_all_typesOr_all_attributes(self):\n sql_string = \"SELECT * FROM TYPE_ORGANISM\"\n dalObj = DAL(self.db_name, sql_string)\n results = dalObj.executeSelect()\n return results", "def get_all_types():\n cnx, cursor = connect_db()\n query = \"\"\"select a.name, b.`order` from types a, types b\n where a.parent=b.guid\"\"\"\n cursor.execute(query)\n result = cursor.fetchall()\n result = pd.DataFrame(result, columns=['type', 'order'])\n cnx.close()\n return result", "def fetch_all_sensors(sensor_type):\n query = db.session.query(\n SensorClass.id,\n SensorClass.aranet_code,\n SensorClass.name,\n 
).filter(SensorClass.type_id == sensor_type)\n sensors = db.session.execute(query).fetchall()\n sensors = query_result_to_array(sensors)\n sensors = {s[\"id\"]: s for s in sorted(sensors, key=lambda x: x[\"id\"])}\n return sensors", "def get_entities_by_component_type(component_type):\n entitiesList = []\n for entityID in components:\n if component_type in components[entityID]:\n entitiesList.append(entities[entityID])\n return entitiesList", "def get_objectsbyid(self, identifier, type='code'):\n parameters = {'type': type, 'value': identifier}\n r = self.get(\n self.entityurl+\"/entities/by-identifier\",\n params=parameters)\n tree = etree.parse(BytesIO(r.content))\n root = tree.getroot()\n objects = []\n for ent in root.findall('.//Entity', namespaces=root.nsmap):\n object = self.get_object(ent.text)\n objects.append(object)\n return objects", "def type_index(context, request):\n\n return {'types': db.DBSession.query(db.Type).order_by(db.Type.id).all()}", "def _do_get_all_of_type(self, types, ids_only=False, id_pool=None):\n if isinstance(types, utils.Negation):\n filter_func = schema.Record.type.notin_\n types = types.arg\n else:\n filter_func = schema.Record.type.in_\n\n query = (self.session.query(schema.Record.id)\n .filter(filter_func(types)))\n if id_pool is not None:\n query = query.filter(schema.Record.id.in_(id_pool))\n\n if ids_only:\n for record_id in query.all():\n yield str(record_id[0])\n else:\n filtered_ids = (str(x[0]) for x in query.all())\n for record in self.get(filtered_ids):\n yield record", "def get_objects_by_type(_type):\n temp = []\n for obj in Object._all_objects:\n if isinstance(obj, _type):\n temp.append(obj)\n return temp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mark the entities modified before a specific date as processed
def mark_processed_entities(entity_type, max_date): try: # get a connection and cursor conn = ecommerce.db.getConnection() cursor = conn.cursor() # execute the query cursor.execute(""" UPDATE Stage0_Delta SET FlagUpdated = 0 WHERE EntityType = ? AND FlagUpdated = 1 AND LastUpdate <= TO_DATE(?, 'YYYY-MM-DD HH24:MI:SS') """, (entity_type, max_date) ) # commit changes conn.commit() except: conn.rollback() pass
[ "def visitBefore(self, date):\n raise NotImplementedError()", "def is_before(self,other_date):", "def modified(self):\r\n\t\treturn self.last_modified > self.last_processed", "def mark_as_processed(oea_msg_obj):\n oea_msg_obj.processed = timezone.now()\n oea_msg_obj.save()", "def modified(self):\n\t\treturn self.last_modified > self.last_processed", "def modified_date(self, modified_date):\n \n self._modified_date = modified_date", "def modified_date(self, modified_date):\n\n self._modified_date = modified_date", "def modified_object(obj, event):\n now = datetime.now(tz=_zone)\n obj.modification_date = now", "def modify_date(self, modify_date):\n self._modify_date = modify_date", "def set_modified_since(self, data):\n self.add_payload('modifiedSince', data)", "def process_date(self, process_date):\n\n self._process_date = process_date", "def is_modified_since(thing, action, date):\r\n from pylons import g\r\n\r\n prop = 'last_' + action\r\n if not hasattr(thing, prop):\r\n last_modified = make_last_modified()\r\n setattr(thing, prop, last_modified)\r\n thing._commit()\r\n else:\r\n last_modified = getattr(thing, prop)\r\n\r\n if not date or date < last_modified:\r\n return last_modified\r\n \r\n #if a date was passed in and it's equal to last modified\r\n return True", "def modify_date(self):\n return self._modify_date", "def date_modified(self, date_modified):\n \n self._date_modified = date_modified", "def modify_date(self, modify_date):\n\n self._modify_date = modify_date", "def test_modification_date(self):\n form_data = {'seo_title': 'New Title',\n 'seo_title_override:int': 1,\n 'form.submitted:int': 1}\n\n md_before = self.my_doc.modification_date\n self.publish(path=self.mydoc_path+'/@@seo-context-properties',\n basic=self.basic_auth, request_method='POST',\n stdin=StringIO(urllib.urlencode(form_data)))\n md_after = self.my_doc.modification_date\n\n self.assertNotEqual(md_before, md_after)", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def created_before(self, date: datetime):\n return self.created_search(date, search_type=\"before\")", "def marked_date(self, marked_date):\n\n self._marked_date = marked_date" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A generator that can be used to iterate over all of the message handlers that belong to this instance.
def iter_message_handlers(self): for name in dir(self): attr = getattr(self, name) if isinstance(attr, MessageHandler): yield attr
[ "def __iter__(self):\n for handlers in self._handlers.values():\n for h in handlers:\n yield h", "def get_message_handlers(self):\n\t\treturn self.message_handlers", "def get_message_handlers(self):\n return [\n (\"normal\", self.message),\n ]", "def get_handlers(self):\n return []", "def walk(self):\n for msg in self.msg.walk():\n yield MessageWrapper(msg)", "def handles(self):\r\n for entity_space in self:\r\n for handle in entity_space:\r\n yield handle", "def _handlers(self):\n if not self.__handlers:\n handlers = {}\n for key in dir(self):\n # Underscores are protected\n if key.startswith('_'):\n continue\n attr = getattr(self, key)\n # Tree syntax\n if issubclass(type(attr), Handler) and attr != self:\n for name, handler in attr._handlers.iteritems():\n name = '%s.%s' % (key, name)\n handlers[name] = handler\n # Normal syntax\n elif hasattr(attr, '__call__'):\n handlers[key] = attr\n self.__handlers = handlers\n return self.__handlers", "def handlers(self, handlers):\n return self._set_list_field(\"handlers\", handlers)", "def GetHandlers(self):\n return self._reg_handler.values()", "def find_handlers(self):\n return list(logging._handlerList)", "def get_handlers(self):\n svs = []\n paths = self.get_paths()\n for p in paths:\n s = re.sub(r\"(?<={)\\w+}\", \".*\", p).replace(\"{\", \"\")\n o = re.sub(r\"(?<=<)\\w+\", \"\", s).replace(\"<\", \"\").replace(\">\",\"\").replace(\"&\", \"\").replace(\"?\", \"\")\n svs.append((o, self))\n\n return svs", "def _dispatch_messages(self):\n while True:\n select_obj = (yield)\n if select_obj == self._message_queue.selobj:\n msg = self._message_queue.get_nowait()\n if msg is not None:\n msg_type = msg.get('type', None)\n if msg_type is not None:\n msg_handler = self._message_handlers.get(msg_type, None)\n if msg_handler is not None:\n msg_handler(msg['data'])", "def event_handlers(self):\n if self._event_handlers is not None:\n return self._event_handlers\n\n # Get event handlers for self\n ordered = []\n unordered = []\n cls = type(self)\n for cls_name in dir(cls):\n cls_item = getattr(cls, cls_name, None)\n if isinstance(cls_item, HandlerDecorator):\n bound_handler = getattr(self, cls_name)\n if cls_item.priority is not None:\n ordered.append((cls_item, bound_handler))\n else:\n unordered.append((cls_item, bound_handler))\n ordered.sort(key=lambda h: h[0].priority)\n\n # get parent event handlers\n try:\n parent = self.parent.acquire.event_handlers\n except AttributeError:\n parent = []\n\n # Combine, cache and return\n handlers = [*ordered, *unordered, *parent]\n self._event_handlers = handlers\n return handlers", "def get_all_messages(self):\n while not self._message_queue.empty():\n yield self._message_queue.get()", "def get_handler_registry():\n return HANDLERS", "def getSimulationEventHandlers(self): \r\n return self.__eventHandlers.values()", "def signal_callbacks(self):\n for name in self.lookup_dict[self.__class__]:\n yield name, getattr(self, name)", "def get_registered_handlers(self):\n return list(self._registry.values())", "def handlers(self, event):\n if event in self._handler_dict:\n return self._handler_dict[event]\n return []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the given service's message handlers to our managed message handlers.
def register_service(self, service): for message_handler in service.iter_message_handlers(): self.message_handlers[message_handler.name] = message_handler
[ "def add_handlers(self, logger, handlers):\n for h in handlers:\n try:\n logger.addHandler(self.config['handlers'][h])\n except StandardError as e:\n raise ValueError('Unable to add handler %r: %s' % (h, e))", "def register_websock_handlers(self, service, new_client, new_message, close_client):\n if service in self.websock_handlers:\n L.error(\"Error: service:\" + service + \" is already registered\")\n return False\n handlers = {\n \"new_client\":new_client,\n \"new_message\":new_message,\n \"close_client\":close_client\n }\n self.websock_handlers[service] = handlers\n return True", "def add_msv_handler(self, handler):\n if handler not in self._msv_handlers:\n self._msv_handlers.append(handler)", "def add_handlers(self, host_pattern, host_handlers):\n pass", "def addAllStatics(self, module=None):\n module = module or sys.modules[self.__module__]\n\n servicehandler_classes = inspect.getmembers(module, is_ServiceHandler)\n for servicehandler in servicehandler_classes:\n self.add(servicehandler[1])", "def add_handler(self, handler: ActionHandler) -> None:\n self._handlers.append(handler)", "def fileHandlers(self, handlers):\n for handler in handlers:\n self.logger.addHandler(handler)", "def add_handler(self, handler):\n pass", "def AddHandler(self, handler):\r\n if not handler in self._handlers:\r\n self._handlers.append(handler)", "def addHandlers(self, handlers):\n self._eventHandlers.update(handlers)\n keys = self._eventHandlers.keys()\n pygame.event.set_allowed(keys)", "def register_message_handlers(journal):\n journal.dispatcher.register_message_handler(\n QuorumTransactionBlockMessage,\n transaction_block_message.transaction_block_message_handler)", "def _set_handlers(self):\n for handler_name in self.handler_names:\n\n if self.config[handler_name]['active'] is False:\n continue\n\n handler = self._get_handler(handler_name)\n\n level = self.config[handler_name].get('level')\n formatter = self.config[handler_name].get('formatter')\n self._config_handler(handler, formatter=formatter, level=level)\n\n if not self._handler_exist(handler):\n self.logger.addHandler(handler)", "def addHandler( self, msgType, handler ):\n \n \tassert len(msgType) == 4\n \tself.__handlers[ msgType ] = handler", "def add_service(self, *servs):\n for s in servs:\n self.services.append(s)\n self.iid_manager.assign(s)\n s.broker = self\n for c in s.characteristics:\n self.iid_manager.assign(c)\n c.broker = self", "def _register_handlers(self):\n import handlers as th\n import inspect\n for name, class_type in inspect.getmembers(th, predicate=inspect.isclass):\n if class_type is th.ZMQTopicHandlerBase:\n continue\n handler = class_type()\n topic = handler.get_topic()\n if topic in self._topic_handlers:\n self._topic_handlers.append(handler)\n else:\n self._topic_handlers[topic] = [handler]", "def add_handler(self, handler):\r\n pass", "def register_message_handlers(journal):\n journal.dispatcher.register_message_handler(\n DevModeTransactionBlockMessage,\n transaction_block_message.transaction_block_message_handler)", "def add(self, handler, name=None, exception_handlers=()):\n self.route.append((name, handler, exception_handlers))", "def register_message_handlers(journal):\n journal.dispatcher.register_message_handler(\n DumpJournalBlocksMessage,\n _dumpjournalblockshandler)\n journal.dispatcher.register_message_handler(\n DumpJournalValueMessage,\n _dumpjournalvaluehandler)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Invokes the correct message handler for the given message.
def handle_message(self, sender, message):
    self.logger.debug('handle_message(%r, %r)', sender, message.handler)
    message_handler = self.message_handlers.get(message.handler)
    if message_handler is None:
        self.logger.warning("sender=%r, No handler found: '%s'", sender, message.handler)
        return
    message_handler(sender, message)
[ "def handle_message(**payload):\n handler_instance = message.MessageHandler(payload)\n handler_instance.handle()", "def _PushHandlerMessage(self, message):\n\n # We only accept messages of type MESSAGE.\n if message.type != rdf_flows.GrrMessage.Type.MESSAGE:\n raise ValueError(\"Unexpected message type: %s\" % type(message))\n\n if not message.session_id:\n raise ValueError(\"Message without session_id: %s\" % message)\n\n # Assume the message is authenticated and comes from this client.\n message.source = self.client_id\n\n message.auth_state = \"AUTHENTICATED\"\n session_id = message.session_id\n\n handler_name = message_handlers.session_id_map.get(session_id, None)\n if handler_name is None:\n raise ValueError(\"Unknown well known session id in msg %s\" % message)\n\n logging.info(\"Running message handler: %s\", handler_name)\n handler_cls = handler_registry.handler_name_map.get(handler_name)\n handler_request = rdf_objects.MessageHandlerRequest(\n client_id=self.client_id,\n handler_name=handler_name,\n request_id=message.response_id,\n request=message.payload)\n\n handler_cls().ProcessMessages([handler_request])", "def handle(self, message: Message) -> None:", "def handle_message(self, msg):\n\n if msg.error != None:\n return\n else:\n try:\n method = self.get_service_method(msg.method_name)\n params = getattr(msg, 'params', None)\n msg.result = self.execute_method(method, params)\n except (MethodNotFoundError, InvalidParamsError, ServerError), ex:\n logging.error(ex)\n msg.error = ex\n except Exception, ex:\n logging.error(ex)\n ex = InternalError(\"Error executing service method\")\n ex.data = ''.join(traceback.format_exception(*sys.exc_info()))\n msg.error = ex", "def _messageHandler(self, kind, message):\n if kind not in txcurrentcost.MessageKinds:\n logging.error(\"Invalid message kind \\'%s\\' not in kinds: %s\" % txcurrentcost.MessageKinds)\n return\n\n if kind == txcurrentcost.PeriodicUpdateMsg:\n self._parsePeriodicUpdate(message)\n\n elif kind == txcurrentcost.HistoryUpdateMsg:\n self._parseHistoryUpdate(message)", "def _handle_message(self, message):\n\t\tif \"error\" in message:\n\t\t\t# This message is a reply to a request.\n\t\t\ttry:\n\t\t\t\tthread_id = self._request_queue.get(timeout=1)\n\t\t\texcept Empty:\n\t\t\t\traise MPVCommunicationError(\"got a response without a pending request\")\n\n\t\t\tself._response_queues[thread_id].put(message)\n\n\t\telif \"event\" in message:\n\t\t\t# This message is an asynchronous event.\n\t\t\tself._event_queue.put(message)\n\n\t\telse:\n\t\t\traise MPVCommunicationError(\"invalid message %r\" % message)", "async def process(self, message):\n return await self.dispatcher.dispatch(message)", "def _handler(self, message):\n\n data = pickle.loads(message['data'])\n\n if not data[2]:\n # empty method call; bail out\n return\n\n # call the function and respond to the proxy object with return value\n uuid = data[0]\n proxy = data[1]\n func = getattr(self, data[2])\n result = (uuid, func(*data[3], **data[4]))\n self._redis.publish('proxy:%s' % proxy, pickle.dumps(result))", "async def handle(self, message: discord.Message):\n raise NotImplementedError()", "def obey(self, message):\n if message is None or message not in self.commands:\n return\n # execute the bound method if a valid message was received.\n self.commands[message]()", "def _handle_message(self):\n logger.debug(\"Handle incoming message\")\n s = MPI.Status()\n data = comm.recv(status=s, source=MPI.ANY_SOURCE, tag=0)\n logger.debug(\"Received message: %s\", data)\n mname = 
data[0]\n record = self._recids[data[1]]\n method = getattr(record, mname)\n method(*data[2:])\n if mname == 'complete' or mname == 'cancel' or mname == 'kill':\n logger.debug(\"Re-queueing worker\")\n self._workers.append(s.source)\n self.ping()", "def _forward_message(self, name, message):\n unhashed = self.message_hashes[repr(name)]\n if unhashed in self.handlers:\n for handler in self.handlers[unhashed]:\n handler(message)", "def process(self, method, message, proxy):\n\n continue_processing, res = self._run_incoming_middlewares(message)\n if continue_processing:\n self._route_message_by_type(method, res, proxy)", "def get_message_handler(self, taxii_message):\n raise NotImplementedError()", "def message(self, msg):\n if (AZMessage.is_agilezen_xmpp_message(msg)):\n try:\n az_message = AZMessage(msg)\n except (MessageCreationException, api.APIException) as ex:\n print ex\n return None\n for handler in self.handlers:\n handler.handle(az_message)", "def _handleMessage(self):\r\n raise NotImplementedError", "def message_handler(message):\n print(f\"MY HANDLER: '{message.get('data')}\")\n json_message = None\n message_data = message.get('data')\n\n if message_data:\n json_message = json.loads(message_data) # converts to JSON type\n function_getter_from_JSON(json_message)\n global_json = json_message", "def _handleMessage(self):\r\n call = self._onBeforeCall()\r\n ## execute incoming RPC\r\n d = maybeDeferred(self._callProcedure, call)\r\n ## register callback and errback with extra argument call\r\n d.addCallbacks(self._onAfterCallSuccess,\r\n self._onAfterCallError,\r\n callbackArgs = (call,),\r\n errbackArgs = (call,))", "def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a time.struct_time as returned by feedparser into a datetime.date (the time part is dropped).
def _convert_struct_time_to_dt(stime):
    return date.fromtimestamp(mktime(stime))
[ "def time_struct_to_datetime(struct_time_object):\n return datetime.datetime(*struct_time_object[:6])", "def _strptime_time(data_string, format=\"%a %b %d %H:%M:%S %Y\"):\n tt = _strptime(data_string, format)[0]\n return time.struct_time(tt[:time._STRUCT_TM_ITEMS])", "def parse_time(block_time):\n return datetime.strptime(block_time, timeFormat)", "def parse_time(s):\n return time.gmtime(float(s))", "def dehydrate_time(value):\n if isinstance(value, Time):\n nanoseconds = int(value.ticks * 1000000000)\n elif isinstance(value, time):\n nanoseconds = (3600000000000 * value.hour + 60000000000 * value.minute +\n 1000000000 * value.second + 1000 * value.microsecond)\n else:\n raise TypeError(\"Value must be a neotime.Time or a datetime.time\")\n if value.tzinfo:\n return Structure(ord(b\"T\"), nanoseconds, value.tzinfo.utcoffset(value).seconds)\n else:\n return Structure(ord(b\"t\"), nanoseconds)", "def parse_time_record(self, record):\n\n time_record = TIME_RECORD_MATCHER.match(record)\n if not time_record:\n time_data = None\n else:\n time_data = struct.unpack(TIME_FORMAT, \n time_record.group(0)[0:TIME_RECORD_SIZE])\n\n return time_data", "def _to_datetime(ts: time.struct_time) -> datetime:\n return datetime.fromtimestamp(timegm(ts), tz=timezone.utc)", "def struct_time(self) -> time.struct_time:\n year, month, day, hour, minute, second, weekday, _, _ = self.current_time\n # Bluetooth weekdays count from 1. struct_time counts from 0.\n return time.struct_time(\n (year, month, day, hour, minute, second, weekday - 1, -1, -1)\n )", "def _astropy_time(time):\n return time if isinstance(time, astropy.time.Time) else astropy.time.Time(parse_time(time))", "def get_time(text_time):\n # return Observer.datetime_to_astropy_time(dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M'))\n the_time = dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M')\n return Time(the_time.strftime('%Y-%m-%d %H:%M'))\n #date = [int(i) for i in date.split('/')]", "def _time_to_date(parsed_time):\n if not parsed_time:\n return parsed_time\n return datetime.fromtimestamp(calendar.timegm(parsed_time), tz=timezone.utc)", "def struct_time(self):\n _, month, day, hour, minute, second, weekday, _, _ = self.current_time\n # Bluetooth weekdays count from 1. 
struct_time counts from 0.\n return time.struct_time((month, day, hour, minute, second, weekday - 1, -1))", "def _astropy_time(time):\n return time if isinstance(time, astropy.time.Time) else astropy.time.Time(\n parse_time(time))", "def time_convert(timestr):\n \n try:\n # Analyse given time str to seperate elements.\n struct_time = time.strptime(timestr[:-4], \"%a, %d %b %Y %H:%M:%S\")\n # Convert given time by secend unit.\n t = time.mktime(struct_time) \n # Re-construct time to isotime format.\n isot = time.strftime(\"%Y-%m-%d\", time.gmtime(t))\n return isot\n \n except:\n return ''", "def directive_to_struct_time_item(directive, value):\n if directive == DIRECTIVES.YEAR:\n # Return YEAR as TM_YEAR.\n return STRUCT_TIME.TM_YEAR, value\n elif directive == DIRECTIVES.YEAR_NO_CENTURY:\n # Return YEAR_NO_CENTURY as TM_YEAR.\n # Assume that a two-digit year is relative to the year 2000.\n return STRUCT_TIME.TM_YEAR, value + 2000\n elif directive == DIRECTIVES.MONTH:\n # Return MONTH as TM_MON.\n return STRUCT_TIME.TM_MON, value\n elif directive == DIRECTIVES.ABBREV_MONTH_NAME:\n # Return ABBREV_MONTH_NAME as TM_MON.\n return STRUCT_TIME.TM_MON, ABBREVIATED_MONTH_NAMES.index(value)\n elif directive == DIRECTIVES.MONTH_NAME:\n # Return MONTH_NAME as TM_MON.\n return STRUCT_TIME.TM_MON, MONTH_NAMES.index(value)\n elif directive == DIRECTIVES.DAY_OF_MONTH:\n # Return DAY_OF_MONTH as TM_MDAY\n return STRUCT_TIME.TM_MDAY, value\n elif directive == DIRECTIVES.HOUR_24:\n # Return HOUR_24 as TM_HOUR\n return STRUCT_TIME.TM_HOUR, value\n elif directive == DIRECTIVES.HOUR_12:\n # Return HOUR_12 as 0-based TM_HOUR\n return STRUCT_TIME.TM_HOUR, 0 if value == 12 else value\n elif directive == DIRECTIVES.MINUTE:\n # Return MINUTE as TM_MIN\n return STRUCT_TIME.TM_MIN, value\n elif directive == DIRECTIVES.SECOND:\n # Return SECOND as TM_SEC\n return STRUCT_TIME.TM_SEC, value\n elif directive == DIRECTIVES.DAY_OF_WEEK:\n # Return DAY_OF_WEEK as TM_WDAY\n return STRUCT_TIME.TM_WDAY, value\n elif directive == DIRECTIVES.ABBREV_WEEKDAY_NAME:\n # Return ABBREV_WEEKDAY_NAME as TM_WDAY\n return STRUCT_TIME.TM_WDAY, ABBREVIATED_WEEKDAY_NAMES.index(value)\n elif directive == DIRECTIVES.WEEKDAY_NAME:\n # Return WEEKDAY_NAME as TM_WDAY\n return STRUCT_TIME.TM_WDAY, WEEKDAY_NAMES.index(value)\n elif directive == DIRECTIVES.DAY_OF_YEAR:\n # Return DAY_OF_YEAR as TM_YDAY\n return STRUCT_TIME.TM_YDAY, value\n elif directive == DIRECTIVES.TIME_ZONE:\n # Take no action for TIME_ZONE.\n return None\n elif directive == DIRECTIVES.TIME_ZONE_OFFSET:\n # Return TIME_ZONE_OFFSET as TM_MIN - to be subtracted from any\n # existing minute value to arrive at UTC.\n return STRUCT_TIME.TM_MIN, -value\n elif directive == DIRECTIVES.AM_PM:\n # Return AM_PM as TM_HOUR\n # If value = 'PM' return +12 to update hour value to 24-hour format.\n return STRUCT_TIME.TM_HOUR, 12 if value == 'PM' else 0\n elif directive == DIRECTIVES.PERCENT:\n # Take no action for PERCENT.\n return None\n else:\n raise NotImplementedError(\n 'struct_time conversion not defined for directive: {}'\n .format(directive)\n )", "def parseTimeV(data:Series)->Series:\n if data.name=='Recording timestamp' or data.name=='Begin Time - ss.msec':\n return pandas.to_timedelta(data, unit='s')\n #TODO numpy.vectorize\n else:\n return pandas.to_datetime(data.astype(str), infer_datetime_format=True)-date.today()", "def __resolve_time (self, tt):\n fmts = [\"%Y-%m-%dT%H:%M:%S\", # correct time format for bbx files\n \"%Y%m%d_%H%M%S\"] # additional format found in LoFASM I files\n 
startt_repr = tt[:-8] if 'T' in tt else tt \n time_obj = None\n for fmt in fmts:\n try:\n time_obj = datetime.datetime.strptime(startt_repr, fmt)\n break\n except ValueError:\n pass\n else:\n print ( \"Cannot parse start time header field {}\".format(tt) )\n return time_obj", "def ReadTime(b):\n\tt = b.get_timestamp()\n\td = t.d\n\tst = time.strptime('%d:%d:%d:%d:%d'%(d['y'],d['d'],d['h'],d['m'],d['s']),'%y:%j:%H:%M:%S')\n\ttepoch = calendar.timegm(st)\n\ttepoch += d['ss']*1.e-8\n\n\treturn tepoch", "def decode_time(data):\n time = _unpack_dictionary(data, YMDS_TIME)\n ms = time[\"milliseconds\"]\n ms &= 0x3FF\n # assume UTC for now\n # dst = (time[\"milliseconds\"] & 0x400) == 0x400\n utc = (time[\"milliseconds\"] & 0x800) == 0x800\n # local_dst = (time[\"milliseconds\"] & 0x1000) == 0x1000\n\n try:\n t = dt.datetime(time[\"year\"], time[\"month\"], time[\"day\"]) + dt.timedelta(\n seconds=time[\"seconds\"],\n milliseconds=ms,\n )\n if utc:\n t = t.replace(tzinfo=dt.timezone.utc)\n return t\n except ValueError:\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use feedparser to parse PyBites RSS feed. Return a list of Entry namedtuples (date = date, drop time part)
def get_feed_entries(feed=FEED) -> list:
    f = feedparser.parse(feed)
    entry_list = []
    for entry in f.entries:
        date = _convert_struct_time_to_dt(entry["published_parsed"])
        title = entry["title"]
        link = entry["link"]
        tags = [tag["term"].lower() for tag in entry["tags"]]
        entry_list.append(Entry(date=date, title=title, link=link, tags=tags))
    return entry_list
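Entry is not defined in this row; from the fields used here and in the tag filter below, it is presumably a namedtuple along these lines (an editorial assumption, not taken from the source):

    from collections import namedtuple
    Entry = namedtuple('Entry', 'date title link tags')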
[ "def get_feed_entries(feed=FEED):\n d = feedparser.parse(feed)\n entries = d.entries\n \n all_entries =[]\n for entry in entries:\n title = entry.title\n link = entry.link\n date = entry.published_parsed\n tags = entry.tags\n tags = [t.get('term').lower() for t in tags]\n\n date = _convert_struct_time_to_dt(date)\n\n\n entry = Entry(date,title,link,tags)\n all_entries.append(entry)\n\n return all_entries", "def parse_feed(feed, last_update, entry, get_updated = lambda e: e.updated_parsed[:6]):\n\n entries = []\n for e in feed.entries:\n if datetime(*get_updated(e)) > last_update:\n new = entry(e)\n if new != None:\n entries.append(new)\n return entries", "def atom_parse(\n soup: BeautifulSoup) -> LinkedList: # pragma: no cover\n feed = LinkedList()\n tag = soup.feed\n for entry in tag.find_all(\"entry\"):\n feed_dict = {}\n for title in entry.find_all(\"title\"):\n for string in title.find_all(string=True):\n feed_dict[\"RSS_String\"] = string\n feed_dict[\"RSS_String\"] = truncate(feed_dict[\"RSS_String\"])\n for link in entry.find_all(re.compile(\"link?\")):\n feed_dict[\"Link\"] = link.get('href')\n feed.add_list_item(feed_dict)\n return feed", "def get_entries(feeds):\n entries = []\n for feed in feeds:\n\n try:\n feed_xml = feedparser.parse(feed.feed_link)\n except:\n continue\n\n # Only parse feeds with correct status code, feed details, and at least one feed entry\n if feed_xml == None \\\n or feed_xml.get('status', 404) != 200 \\\n or not feed_xml.get('feed', None) \\\n or feed_xml.get('entries', []) == []:\n continue\n\n # Get feed name and description. Fall back to feed name in database if it\n # is not present in the rss feed.\n feed_details = feed_xml.feed\n feed.name = feed_details.get('title', feed.name)\n feed.description = feed_details.get('subtitle', '')\n\n for entry_xml in feed_xml.entries:\n # Only parse feed entries with a title and link\n if not entry_xml.get('title', '') \\\n or not entry_xml.get('link', ''):\n continue\n\n entry_name = entry_xml.title\n entry_link = entry_xml.link\n entry_description = entry_xml.get('summary', '')\n entry_date = parser.parse(entry_xml.get('published', ''))\n entry = FeedEntry(feed, entry_name, entry_link, entry_description, entry_date)\n entries.append(entry)\n\n return entries", "def parse_feed(feed, link_keyword):\n data = [(x[link_keyword], x[\"title\"], \\\n time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", \\\n x[\"published_parsed\"])) \\\n for x in feed[\"entries\"]]\n return(data)", "def from_rss(\n cls, entry: feedparser.FeedParserDict, feed: feedparser.FeedParserDict\n ) -> \"FeedEntry\":\n try:\n time = datetime.datetime(*entry.published_parsed[:6]).isoformat()\n except (AttributeError, TypeError):\n time = datetime.datetime.now().isoformat()\n return cls(\n title=entry.get(\"title\"),\n summary=entry.get(\"summary\"),\n link=sanitise_url(entry.get(\"link\")),\n time=time,\n feed=feed.feed.get(\"title\"),\n source=feed.get(\"href\"),\n )", "def rss_parse(\n soup: BeautifulSoup) -> LinkedList: # pragma: no cover\n feed = LinkedList()\n tag = soup.rss\n tag = tag.channel\n channel_dict = {\"RSS_String\": tag.title.string, \"Link\": tag.link.string}\n feed.add_list_item(channel_dict)\n for item in tag.find_all(re.compile(\"item?\")):\n feed_dict = {}\n for title in item.find_all(re.compile(\"title?\")):\n for entry in title.find_all(string=True):\n feed_dict[\"RSS_String\"] = entry\n feed_dict[\"RSS_String\"] = truncate(feed_dict[\"RSS_String\"])\n for link in item.find_all(re.compile(\"link?\")):\n for entry in 
link.find_all(string=True):\n feed_dict[\"Link\"] = entry\n feed.add_list_item(feed_dict)\n return feed", "def fetch_entries(self):\n entries = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n [entries.append(FeedDocument(entry.get('title', ''), entry.get('summary', ''))) for entry in feed.get('entries', [])]\n return entries", "def parse_rss(self, soup):\n for entry in soup.find_all('item')[::-1]:\n fse = FeedSporaEntry()\n fse.title = entry.find('title').text\n fse.link = entry.find('link').text\n fse.content = entry.find('description').text\n kw = set()\n kw = kw.union({keyword.text.replace(' ', '_').strip()\n for keyword in entry.find_all('category')})\n kw = kw.union({word[1:]\n for word in fse.title.split()\n if word.startswith('#')})\n fse.keywords = kw\n yield fse", "def parse_rss_feed(url):\n\n # Parse RSS feed items into dictionaries.\n # @NOTE: Requirement 1.\n feed = feedparser.parse(url)\n\n # Parallelize fetches.\n parallelize = Pool(15)\n entries = parallelize.map(parse_metadata, feed['entries'])\n\n return entries", "def produce_entries(self):\n # Fetch the HTML source, tidy it up, parse it.\n src = urlopen(self.SCRAPE_URL).read()\n tidy_src = tidy_string(src)\n doc = NonvalidatingReader.parseString(tidy_src, self.SCRAPE_URL)\n\n entries = []\n\n # Iterate through the parts identified as feed entry nodes.\n for entry_node in doc.xpath(self.ENTRIES_XPATH, self.NSS):\n\n # For each entry attribute path, attempt to extract the value\n data = {}\n for k,v in self.ENTRY_XPATHS.items():\n nodes = entry_node.xpath(v, self.NSS)\n vals = [x.nodeValue for x in nodes if x.nodeValue]\n data[k] = \" \".join(vals)\n \n # Create and append the FeedEntryDict for this extraction\n entries.append(FeedEntryDict(data, self.date_fmt))\n \n return entries", "def parse_rss(database, feed, depth=1):\n # Get the updates article count, and article urls and publish dates.\n rss_a = rss_feed(feed)\n \n # Get all (article urls, publish dates) pairs\n articles = []\n pairs = rss_a[1].items()\n for url, pubdate in pairs: \n articles += crawl_url(database, url, date=pubdate, depth=depth)\n \n return articles", "def scrape_rss(self):\n return self.scrape(self.RSS_ENTRY_TMPL, \n self.RSS_FEED_TMPL, self.RSS_DATE_FMT)", "def rss_feed(rss_url):\n try:\n # Use feedparser to analyze given RSS feed, if it is valid RSS.\n d = feedparser.parse(rss_url)\n except:\n return \"Sorry, invalid RSS feed. Please check and try again later.\"\n \n total = len(d['entries'])\n updates = dict()\n for index, item in enumerate(d['entries']):\n # Convert publish time from ctime format to iso-time format.\n a_time = time_convert(item.published)\n # Set article url ad dictionary key, with publish date as value. 
\n updates[str(item.link)] = a_time \n return (total, updates)", "def get_news(url):\r\n \r\n # parse RSS feed into list of dictionaries\r\n feed = feedparser.parse(url)\r\n\r\n # no RSS feed articles for url\r\n if len(feed['entries']) == 0:\r\n return []\r\n \r\n # get first ten articles from the RSS feed\r\n news = []\r\n i = 0\r\n while True:\r\n if i == len(feed['entries']) or i > 30:\r\n break\r\n \r\n try:\r\n # get link to article\r\n link = feed[\"entries\"][i][\"link\"]\r\n\r\n # get title of article\r\n title = feed[\"entries\"][i][\"title\"]\r\n \r\n try:\r\n # get raw summary of article\r\n summary_raw = feed[\"entries\"][i][\"summary\"]\r\n \r\n # format summary\r\n summary = \"\"\r\n for c in summary_raw:\r\n if c == \"<\":\r\n summary += \"...\"\r\n break\r\n summary += c\r\n except KeyError as e:\r\n logging.error(\"no summary for RSS feed article: {}\".format(link))\r\n summary = \"read more here...\"\r\n \r\n # get raw date \r\n date_raw = feed[\"entries\"][i][\"published_parsed\"]\r\n \r\n if date_raw is None:\r\n date = feed[\"entries\"][i][\"published\"]\r\n \r\n else:\r\n # format date\r\n year = str(date_raw.tm_year)\r\n months = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\r\n month = months[date_raw.tm_mon - 1]\r\n day = str(date_raw.tm_mday)\r\n weekdays = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\r\n wday = weekdays[date_raw.tm_wday]\r\n hour = str(date_raw.tm_hour)\r\n hour = \"{:2}\".format(hour).format(' ','0')\r\n min = str(date_raw.tm_min)\r\n min = \"{:2}\".format(min).replace(' ','0')\r\n date = hour + \":\" + min + \" - \" + wday + \" \" + month + \" \" + day + \", \" + year\r\n \r\n # compile entry and append to news list\r\n entry = {\"link\":link, \"title\":title, \"date\":date, \"summary\":summary}\r\n \r\n # sanitize entry\r\n for key in entry:\r\n # apostrophe\r\n entry[key] = entry[key].replace(\"&#39;\", \"'\")\r\n # right single quotation mark\r\n entry[key] = entry[key].replace(\"’\", \"&#8217;\")\r\n # left single quotation mark\r\n entry[key] = entry[key].replace('\"', \"&#8216;\")\r\n # right double quotation mark\r\n entry[key] = entry[key].replace(\"'\", \"&#8221;\")\r\n # left double quotation mark\r\n entry[key] = entry[key].replace(\"'\", \"&#8220;\")\r\n # Weird ampersand formatting\r\n entry[key] = entry[key].replace(\"&amp;\", \"&\")\r\n \r\n # prepare entry for sqlite queries\r\n entry[key] = surround(entry[key])\r\n \r\n # add entry to news list\r\n news.append(entry)\r\n \r\n # max 10 entries\r\n if len(news) == 10:\r\n break\r\n i += 1\r\n \r\n except Exception as e:\r\n logging.error(e)\r\n i += 1\r\n pass\r\n \r\n # success\r\n return news", "def get_tech_crunch_data() -> List[Dict[str, str]]:\n data = feedparser.parse(RSS_URL)\n if data.status != 200:\n return None\n return data.entries", "def feed2fields(file):\r\n import feedparser\r\n d = feedparser.parse(file)\r\n for entry in d.entries:\r\n date = (time.strftime(\"%Y-%m-%d %H:%M\", entry.updated_parsed)\r\n if hasattr(entry, \"updated_parsed\") else None)\r\n author = entry.author if hasattr(entry, \"author\") else None\r\n tags = [e['term'] for e in entry.tags] if hasattr(entry, \"tags\") else None\r\n\r\n slug = slugify(entry.title)\r\n kind = 'article'\r\n yield (entry.title, entry.description, slug, date, author, [], tags,\r\n kind, \"html\")", "def get_new_entries(since_datetime, feed_uri):\n entries = 
feedparser.get_feed_entries_from(feed_uri)\n\n entry_count = 0\n for entry in entries:\n t = entry.date_parsed\n entry_date = datetime(\n year = t[0],\n month = t[1],\n day = t[2],\n hour = t[3],\n minute = t[4],\n second = t[5],\n microsecond = t[6]\n # FIXME:\n #tzinfo = t[7]\n )\n\n if entry_date > since_datetime:\n entry_count += 1\n\n t = entries[0].date_parsed\n last_entry = datetime(\n year = t[0],\n month = t[1],\n day = t[2],\n hour = t[3],\n minute = t[4],\n second = t[5],\n microsecond = t[6]\n )\n\n import pdb; pdb.set_trace()\n\n return (entry_count, last_entry)", "def feed_parser(text):\n items = []\n xml = BeautifulSoup(text, \"xml\")\n\n for item in xml.find_all('item'):\n new_item = parse_item(item)\n if not new_item:\n continue\n\n items.append(new_item)\n\n return {'feed': items}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if search matches any tags as stored in the Entry namedtuple (case insensitive, only whole, not partial string matches).
def filter_entries_by_tag(search, entry) -> bool:
    tags = entry.tags
    search_words = search.strip().translate(str.maketrans("&|", "  ")).split()
    if "&" in search:
        search_type = "AND"
    else:
        search_type = "OR"
    for word in search_words:
        if word.lower() in tags:
            if search_type == "OR":
                return True
        elif search_type == "AND":
            return False
    if search_type == "OR":
        return False
    else:
        return True
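A short usage sketch of the tag filter (the values are invented, using the Entry shape sketched above):

    entry = Entry(date=None, title="Intro to Python", link="https://example.com/intro", tags=["python", "beginner"])
    filter_entries_by_tag("python&beginner", entry)   # True  -- AND search, both tags present
    filter_entries_by_tag("python|golang", entry)     # True  -- OR search, at least one tag present
    filter_entries_by_tag("golang&beginner", entry)   # False -- AND search, 'golang' missing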
[ "def filter_entries_by_tag(search, entry):\n \n entry_tags = entry.tags\n if '&' in search:\n splits = search.split('&')\n\n return all(split.lower() in entry_tags for split in splits)\n elif '|' in search:\n splits = search.split('|')\n return any(split.lower() in entry_tags for split in splits)\n else:\n return search.lower() in entry_tags", "def matchSearch(self, attr, searchArguments):\n\t\treturn all([s in attr.lower() for s in searchArguments])", "def match_in_entry_insensitive(self, key):\n return key in self.entry.lower()", "def match_insensitive(self, key):\n return key in self.entry.lower() or key in self.desc.lower()", "def findtags(conf, dlcs, *tags, **opts):\n\n for tag in cached_tags(conf, dlcs, opts['keep_cache'])['tags']:\n tag = tag['tag']\n\n for findtag in tags:\n if opts['ignore_case']:\n if tag.lower().find(findtag.lower()) > -1:\n print tag\n\n elif tag.find(findtag) > -1:\n print tag", "def _SearchStringInRow(self, entry, string):\n return any(list(filter(lambda x:bool(re.search(string,x.lower())), entry)))", "def tag_contains(self, key, value):\n if key not in self:\n return False\n\n wildcard_search = value == '*'\n\n if key in self.STRING_FIELDS:\n if wildcard_search and len(self[key]) > 0:\n return True\n return value in self[key].casefold()\n\n if key == 'rank':\n searchme = flatten([self['rank'][g] for g in self['rank']])\n if wildcard_search and len(list(searchme)) > 0:\n return True\n else:\n searchme = self[key]\n if wildcard_search:\n return True\n\n for searchval in searchme:\n if value in searchval.casefold():\n return True\n\n return False", "def test_search_tags(self):\n page = self.page1\n page.search_tags = \"Chutes, Ladders\"\n page.save_revision().publish()\n taglist = page.clean_search_tags\n for name in [\"Chutes\", \"Ladders\"]:\n self.assertIn(name, taglist)", "def hasname(self, tag: str) -> bool:\n for key in self.formal_names:\n if key in tag.lower():\n return True\n\n # Exit case if key -> value not in mapping \n return False", "def match(self,search_filter):\n if search_filter in self.memo or search_filter in self.tags:\n return True\n else:\n return False", "async def search(self, ctx: \"IceTeaContext\", *, query):\n response_list = await ctx.guild_data.search_tags(query)\n if len(response_list) > 0:\n response_message = \"\\n\".join([tag.title for tag in response_list])\n await ctx.send(f\"Found these tags:\\n{response_message}\")\n else:\n await ctx.send(\"No similar tags found\")", "def search(user, entry):\n user_words = split(user)\n for uword in user_words:\n for e in entry:\n if e.startswith(uword):\n break\n else:\n return False\n return True", "async def search_tags(self, ctx: Context, *, tag_name: Annotated[str, TagName]) -> None:\n\n assert ctx.guild is not None # guild only\n\n potential_tags = await search_tags(self.bot.database, ctx.guild.id, tag_name)\n\n if potential_tags:\n formatted_results = '\\n'.join(x.name for x in potential_tags)\n await ctx.send(f'Found the following tags... 
\\n{formatted_results}')\n else:\n await ctx.send(f'No tags found with name `{tag_name}`.')", "def match(self, query):\n return query in self.memo or query in self.tags", "def testSearchWithMatchesIsCaseInsensitive(self):\n objectID1 = uuid4()\n objectID2 = uuid4()\n objectID3 = uuid4()\n yield self.index.update({objectID1: {u'test/tag': u'VALUE'},\n objectID2: {u'test/tag': u'value'},\n objectID3: {u'test/tag': u'VaLuE'},\n uuid4(): {u'test/tag': u'devalue'}})\n yield self.index.commit()\n query = parseQuery(u'test/tag matches \"vAlUe\"')\n result = yield self.index.search(query)\n self.assertEqual(set([objectID1, objectID2, objectID3]), result)", "def search_specific_tags(package, tag_search):\n return search_tags(\n package_name=package,\n tag_search=tag_search\n )", "def asset_matches(asset, search, regex_matching):\n if regex_matching:\n if asset['label'] and re.search(search, asset['label']):\n return True\n if asset['name'] and re.search(search, asset['name']):\n return True\n elif search in (asset['label'], asset['name']):\n return True\n return False", "def search(self, search_text, contests_ids=[]):\r\n\r\n if contests_ids:\r\n if any(x == self.id for x in contests_ids):\r\n #not in list of contest ids\r\n return True\r\n else:\r\n return False\r\n else:\r\n search = search_text.lower()\r\n\r\n return search in self.name.lower() or \\\r\n search in self.description.lower() or \\\r\n any(search in contestant.name.lower() for contestant in self.contestants) or \\\r\n any(search in contestant.description.lower() for contestant in self.contestants) or \\\r\n any(search in tag_value.lower() for tag_value in self.tag_values())", "def search(self, term):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gather the top 10 words by highest (descending) likelihoods for each class
def top10_likelihoods(likelihoods, vocab, classes):
    resultDict = {}
    for cls in classes:
        results = []
        for word in vocab:
            results.append((word, likelihoods[cls][word]))
        resultDict[cls] = results
    # Sort and return top 10 for each class
    for key in resultDict:
        results = resultDict[key]
        resultDict[key] = map(lambda x: x[0], sorted(results, key=lambda x: x[1], reverse=True))[:10]
    return resultDict
[ "def top10_odds_ratio(likelihoods, vocab, classes):\r\n results = []\r\n for word in vocab:\r\n highestOddsRatio = None\r\n for c1 in classes:\r\n for c2 in classes:\r\n # Skip self TODO: Is this right?\r\n # if c1 == c2:\r\n # continue\r\n oddsRatio = odds_ratio(likelihoods, c1, c2, word)\r\n if oddsRatio > highestOddsRatio or highestOddsRatio == None:\r\n highestOddsRatio = oddsRatio\r\n results.append((word, highestOddsRatio))\r\n # Sort and return top 10\r\n return map(lambda x: x[0], sorted(results, key=lambda x: x[1], reverse=True))[:10]", "def top_words(filtered_words):\n\n fdist = FreqDist(filtered_words) #finds the words qrequency disctribution\n top_ten = fdist.most_common(10) #finds top 10 of them \n print(\"\"\"Top 10 frequent words\\n_____________________\\nword – count\\n____________\"\"\")\n for i in top_ten: print('{0} – {1}'.format(i[0], i[1]))", "def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]", "def printTopWords(self, N):\n topWords = []\n for i in range(self.MAX_RATING):\n topWords.append(dict(sorted(self.dictionary.items(), key=lambda x: x[1].tfidf[i+1], reverse=True)[:N]))\n\n outputFormat = \"{:>16} - {:<30}\"\n for i in range(len(topWords)):\n print(\"Top \" + str(N) + \" words for class rating \" + str(i + 1))\n print(\"--------------------------------------\")\n for j in topWords[i]:\n print(outputFormat.format(j, self.dictionary[j].tfidf[i + 1]))\n print()", "def print_top_words(model, feature_names, n_top_words):\n for topic_idx, topic in enumerate(model.components_):\n message = \"Topic #%d: \" % topic_idx\n message += \" \".join([feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]])\n print(message)\n print()", "def print_top_words(freq_dist_text):\n print('=====')\n print(freq_dist_text.most_common(10))\n print('=====')", "def print_top10(vectorizer, clf, class_labels):\n feature_names = vectorizer.get_feature_names()\n for i, class_label in enumerate(class_labels):\n top10 = np.argsort(clf.coef_[i])[-15:]\n print(\"%s: %s\" % (class_label,\n \" \".join(feature_names[j] for j in top10)))", "def get_top_10(word_counts):\n return sorted(word_counts.iteritems(), key=operator.itemgetter(1))[-10:]", "def print_top_words(components, feature_names, n_top_words: int = 10):\n for topic_idx, topic in enumerate(components):\n message = \"Topic #%d: \" % topic_idx\n message += \" \".join(\n [feature_names[i] for i in topic.argsort()[: -n_top_words - 1 : -1]]\n )\n print(message)\n print()", "def getTopKWordsByLikelihood(self, K):\n pseudocounts = copy.deepcopy(self.n_vts)\n normalizer = np.sum(pseudocounts, (1, 2))\n pseudocounts /= normalizer[:, np.newaxis, np.newaxis]\n for t in range(self.numTopics):\n for s in range(self.numSentiments):\n topWordIndices = pseudocounts[:, t, s].argsort()[-1:-(K + 1):-1]\n vocab = self.vectorizer.get_feature_names()\n # print(t, s, [vocab[i] for i in topWordIndices])", "def print_top10(vectorizer, clf, class_labels):\n\tfeature_names = vectorizer.get_feature_names()\n\t\n\tfor i, class_label in enumerate(class_labels):\n# output the original index of the top 10 coef\n\t\tif class_label == 1:\n\t\t\ttop10 = np.argsort(clf.coef_[0])[-10:]\n\t\telse:\n\t\t\ttop10 = np.argsort(clf.coef_[0])[:10]\n\n\t\tprint(\"%s: %s\" % (class_label,\n\t\t\t \", \".join(feature_names[j] for j in top10)))", "def top_words(name):\n row = wiki[wiki['name'] == name]\n word_count_table = 
row[['word_count']].stack('word_count', new_column_name=['word','count'])\n return word_count_table.sort('count', ascending = False)", "def top10(self) -> List[Word]:\n return self._top10", "def top_summary_words(args, examples, word_dict):\n word_count = Counter()\n for ex in examples:\n for w in ex['summary'].tokens:\n w = Vocabulary.normalize(w)\n if w in word_dict:\n word_count.update([w])\n return word_count.most_common(args.tune_partial)", "def get_topwords(self,n_words=10):\n i2w = {i: w for i, w in enumerate(self.topics_word)}\n w2i = {i2w[i]: i for i in range(len(self.topics_word))}\n\n d = np.array([self.topics_word[i2w[i]] for i in range(len(self.topics_word))])\n n_docs, n_topics = d.shape\n with open(\"TopicWords%f.txt\"%(self.Z*10),mode='w') as f:\n for k in range(n_topics):\n string = 'Topic %d: %s' % (k, ' '.join([i2w[i] for i in np.argsort(d[:, k])[-n_words:]]))\n print(string)\n f.write(string+'\\n')\n return True", "def get_top_words(self, k):\n # use argmax for Phi\n\n top_words = []\n for i in range(self.num_topics):\n #indices = np.argpartition(10 - self.Phi[:, i], k)\n #indices_sorted = np.argsort(10 - self.Phi[indices[:k], i])\n #final_inds = indices[indices_sorted]\n final_inds = np.argsort(10. - self.Phi[:, i])\n tmp = []\n for ind in final_inds[:k]:\n tmp.append(self.vocab[ind])\n top_words.append(tmp)\n\n return top_words\n #pass", "def top_keywords(n, doc, corpus):\n d = {}\n for word in set(doc):\n d[word] = tfidf(word, doc, corpus)\n sorted_d = sorted(d.items(), key=operator.itemgetter(1))\n sorted_d.reverse()\n return [w[0] for w in sorted_d[:n]]", "def print_top_misclassified(test_docs, test_labels, X_test, clf, n):\n ###TODO\n #pass\n result = []\n predicted = clf.predict(X_test)\n predicted_prob = clf.predict_proba(X_test)\n \n for i in range(len(predicted)):\n res = {}\n if predicted[i] != test_labels[i]:\n if predicted[i] != 0:\n res ={'truth' : test_labels[i], 'predicted': predicted[i], 'probability': predicted_prob[i][1], 'test':test_docs[i]}\n else:\n res ={'truth' : test_labels[i], 'predicted': predicted[i], 'probability': predicted_prob[i][0], 'test':test_docs[i]}\n\n result.append(res)\n \n result = sorted(result, key=lambda x: (x['probability']), reverse=True)[:n]\n \n for r in result:\n print('truth=%d predicted=%d proba=%.6f'%(r['truth'], r['predicted'], r['probability']))\n print(r['test'])\n print(\"\\n\")", "def get_relevant_words(vis,lam=0.3,topn=20): \n \n a = vis.topic_info\n a['relevance'] = a['logprob']*lam+(1-lam)*a['loglift'] # this calculates the relevance value as in plLDAvis\n a = a.loc[:,['Category','Term','relevance']].groupby(['Category'])\\\n .apply(lambda x: x.sort_values(by='relevance',ascending=False).head(topn))\n a = a.loc[:,'Term'].reset_index().loc[:,['Category','Term']]\n a = a[a['Category']!='Default']\n topics = a.Category.tolist()\n terms = a.Term.tolist()\n d = defaultdict(list)\n for top, term in zip(topics,terms):\n d[top].append(term)\n relevent_words = pd.DataFrame([d]).T.reset_index()\n relevent_words.columns = ['Topic','Relevant_words']\n\n token_percent = vis.topic_coordinates.sort_values(by='topics').loc[:,['topics','Freq']].reset_index().rename(columns={'topic':'topic_id'})\n return relevent_words.join(token_percent,how='left').drop('topics',axis=1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gather the top 10 words by highest (descending) odds ratios
def top10_odds_ratio(likelihoods, vocab, classes):
    results = []
    for word in vocab:
        highestOddsRatio = None
        for c1 in classes:
            for c2 in classes:
                # Skip self TODO: Is this right?
                # if c1 == c2:
                #     continue
                oddsRatio = odds_ratio(likelihoods, c1, c2, word)
                if oddsRatio > highestOddsRatio or highestOddsRatio == None:
                    highestOddsRatio = oddsRatio
        results.append((word, highestOddsRatio))
    # Sort and return top 10
    return map(lambda x: x[0], sorted(results, key=lambda x: x[1], reverse=True))[:10]
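odds_ratio is referenced above but not defined in this row; a minimal sketch under the usual definition (ratio of the two class-conditional word likelihoods) — the exact formula is an assumption:

    def odds_ratio(likelihoods, c1, c2, word):
        # Assumed definition: P(word | c1) / P(word | c2); the original helper may differ.
        return likelihoods[c1][word] / likelihoods[c2][word]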
[ "def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]", "def top_words(filtered_words):\n\n fdist = FreqDist(filtered_words) #finds the words qrequency disctribution\n top_ten = fdist.most_common(10) #finds top 10 of them \n print(\"\"\"Top 10 frequent words\\n_____________________\\nword – count\\n____________\"\"\")\n for i in top_ten: print('{0} – {1}'.format(i[0], i[1]))", "def get_top_10(word_counts):\n return sorted(word_counts.iteritems(), key=operator.itemgetter(1))[-10:]", "def print_top_words(freq_dist_text):\n print('=====')\n print(freq_dist_text.most_common(10))\n print('=====')", "def printTopWords(self, N):\n topWords = []\n for i in range(self.MAX_RATING):\n topWords.append(dict(sorted(self.dictionary.items(), key=lambda x: x[1].tfidf[i+1], reverse=True)[:N]))\n\n outputFormat = \"{:>16} - {:<30}\"\n for i in range(len(topWords)):\n print(\"Top \" + str(N) + \" words for class rating \" + str(i + 1))\n print(\"--------------------------------------\")\n for j in topWords[i]:\n print(outputFormat.format(j, self.dictionary[j].tfidf[i + 1]))\n print()", "def test_get_top_n_words_same_frequency(self):\n expected = ['happy', 'man']\n actual = get_top_n_words({'happy': 2, 'man': 2}, 2)\n self.assertEqual(expected, actual)\n expected = ['happy']\n actual = get_top_n_words({'happy': 2, 'man': 2}, 1)\n self.assertEqual(expected, actual)", "def top10(self) -> List[Word]:\n return self._top10", "def top_words(name):\n row = wiki[wiki['name'] == name]\n word_count_table = row[['word_count']].stack('word_count', new_column_name=['word','count'])\n return word_count_table.sort('count', ascending = False)", "def print_top_ten(word_dict):\n stopwords = ['of', 'and', 'the', 'in', 'to', 'a', 'that', 'as', 'have', 'be', 'is', 'on', 'by', 'which', 'or', 'are', 'it', 'with', 'for', 'from', 'we', 'this', 'been', 'not', 'but', 'their', 'i', 'at', 'has', 'they', 'will', 'an', 'all', 'same', 'other', 'some', 'more', 'so', 'would', 'may', 'each', 'these', 'many', 'any', 'can', 'if', 'when', 'its', 'than', 'most', 'no', 'one', 'two', 'between', 'thus', 'very', 'there']\n words = list(word_dict.items())\n words.sort(key=lambda tup: tup[1], reverse=True)\n filtered_words = [word for word in words if word[0] not in stopwords]\n for i in range(min(10, len(filtered_words))):\n print(filtered_words[i])", "def top_summary_words(args, examples, word_dict):\n word_count = Counter()\n for ex in examples:\n for w in ex['summary'].tokens:\n w = Vocabulary.normalize(w)\n if w in word_dict:\n word_count.update([w])\n return word_count.most_common(args.tune_partial)", "def top_words(self, document):\r\n totalstring = \" \".join(paragraph.text for paragraph in document.paragraphs)\r\n word_text = TextBlob(totalstring)\r\n word_dict = word_text.word_counts\r\n sorted_dict = sorted(word_dict, key=word_dict.get, reverse=True)\r\n return sorted_dict[0:9]", "def top_sentences(query, sentences, idfs, n):\n\n ranking = []\n for sentence, words in sentences.items():\n idf = 0\n matches = 0\n for word in query:\n if word in words:\n matches += 1\n idf += idfs[word]\n\n density = matches / len(words)\n ranking.append((sentence, idf, density))\n\n # Sort by idf and term density\n ranking.sort(key=lambda x: (x[1], x[2]), reverse=True)\n return [entry[0] for entry in ranking[:n]]", "def format_top_n(self, n=10):\n output = []\n for t, c in self._freq.most_common(n):\n files_, sents_ = 
self.fetch_index(t)\n word = t + ' (' + str(c) + ')'\n output.append([word, ','.join(files_), \"\\n\".join(sents_)])\n\n return output", "def print_top_words(components, feature_names, n_top_words: int = 10):\n for topic_idx, topic in enumerate(components):\n message = \"Topic #%d: \" % topic_idx\n message += \" \".join(\n [feature_names[i] for i in topic.argsort()[: -n_top_words - 1 : -1]]\n )\n print(message)\n print()", "def get_top_10_docs(W, topic):\n\n docs = np.argsort(W[:, topic])[::-1][:10]\n strengths = W[:, topic].flatten()[docs]\n\n return [(doc, strength) for doc, strength in zip(docs, strengths)]", "def top_similar(self, n):\n return text_with_recipes", "def top_n_words(self, words, n):\n \n return {k : v for k, v in sorted(words.items(), key=lambda item :\n item[1], reverse=True)[:n]}", "def get_top_n_words(topic_words_dict, n):\n score_wordlist = topic_words_dict.items()\n score_wordlist.sort(key=lambda x: x[1], reverse=True)\n return [word for (word,score) in score_wordlist[:n]]", "def top(wordList, number):\n histo = Counter()\n for word in (wordList):\n histo[word] += 1\n return histo.most_common(number)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Estimate the priors for a class
def calculate_priors(trainingLabels):
    sum = 0
    priors = {}
    totalSamples = len(trainingLabels)
    classes = set(trainingLabels)
    for cls in classes:
        numCls = len(filter(lambda x: x == cls, trainingLabels))
        sum += numCls
        priors[cls] = float(numCls) / float(totalSamples)
    # Sanity check: valid partitioning
    assert(sum == totalSamples)
    return priors
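A tiny worked example of the function above (labels invented for illustration; note the len(filter(...)) call is Python 2 style — under Python 3 it would need list(filter(...))):

    calculate_priors(['spam', 'ham', 'spam', 'spam'])
    # -> {'spam': 0.75, 'ham': 0.25}; the assert holds because 3 + 1 == 4 total samples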
[ "def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs", "def class_probabilities(self, sample):\n\t\tpass", "def get_priors(self):\n return self._priors", "def class_distribution(self):\n total_particles = self.number_of_particles()\n unique_classes = self.unique_classes()\n\n particles_per_class = {}\n percentage_per_class = {}\n\n for class_idx in unique_classes:\n particles_per_class[class_idx] = np.sum(self['ref'] == class_idx)\n percentage_per_class[class_idx] = 100 * (particles_per_class[class_idx] / total_particles)\n\n return particles_per_class, percentage_per_class", "def priors(self):\n\n return self._priors", "def _calc_prob_class(self, clazz):\n clazz_count = self.training_set.get_label_value_counts()[clazz]\n return float(clazz_count) / self.training_set.num_samples()", "def estimate_log_class_priors(data):\r\n ones, zeroes, responses = 0, 0, [x[0] for x in data]\r\n\r\n for j in responses:\r\n if j == 1.0:\r\n ones += 1\r\n elif j == 0.0:\r\n zeroes += 1\r\n else:\r\n print('Error: Data entry not a one or zero.')\r\n\r\n ones_prior = ones / (ones + zeroes)\r\n zeroes_prior = zeroes / (ones + zeroes)\r\n\r\n log_class_priors = np.array([np.log(zeroes_prior), np.log(ones_prior)])\r\n\r\n return log_class_priors", "def P(self, class_partition, C):\r\n count = np.count_nonzero(class_partition == C)\r\n if class_partition.size == 0:\r\n proportion = 0\r\n else:\r\n proportion = count / class_partition.size\r\n proportion += 1\r\n #print(proportion)\r\n return proportion", "def checkPriors(para):\n\t\n\t# extract parameters\n\tA = para[0]\n\tw = para[1]\n\tp = para[2]\n\t\n\t# check them\n\tif (A<0.01 or A>10.0): A = s.uniform.rvs(0.01,10.)\n\t\n\tif (w<0.01 or w>10.0): w = s.uniform.rvs(0.01,10.)\n\t\t\n\tif ( p<0. 
or p>2*np.pi): p = s.uniform.rvs(0.0,2*np.pi)\n\t\n\treturn np.array([A,w,p])", "def p_class(self, c):\n if c not in self.class_frequencies:\n freq = 0\n else:\n freq = self.class_frequencies[c]\n freq = freq\n p = freq / self.n\n p = p + EPSILON\n verbose(\"class\\t\" + str(c) + \"\\t\" + str(freq) + \"\\t\" + str(self.n) + \"\\t\" + str(p))\n return p", "def get_class_rep(imin,imax):\n res=torch.zeros(10)\n nb=imax-imin\n \n if(nb<=0):\n exit(\"imax should be bigger than imin\")\n \n _,classes,_=load_data(data_path)\n for i in range(10):\n counted=0\n for k in range(imin,imax):\n if classes[k]==i:\n counted+=1 \n res[i]=counted/nb*100\n return res", "def prob_class(self, tweet_class):\n \n count = self.get_tweet_class_count(tweet_class)\n total = self.get_total_count()\n\n return float(count/total)", "def class_probability(self, x):\n # permutation before softmax b x a x c x spatial dims --> b x c x a x spatial dims\n # as expected by PyTorch Softmax the class axis = 1 \n return self._class_prob(x.permute([0, 2, 1, 3, 4]))", "def class_probabilities(parameters, features,class_prob):\n # create dictionary to store probability P(features|Y) for all values of Y\n final_class_prob = dict()\n # calculate numerator for P(y|all features)\n for class_value, class_parameters in parameters.items():\n # Initialize numerator = P(Y)\n if class_value == True:\n final_class_prob[class_value] = class_prob[0]\n else:\n final_class_prob[class_value] = class_prob[1]\n\n # Calculate P(feature|Y) for all features\n for i in range(len(class_parameters)):\n if class_parameters[i][0] == 'real':\n # if parameter is for real valued feature\n # use gaussian distribution with given mu and sigma to find probabilty\n mean = class_parameters[i][1]\n stdev = class_parameters[i][2]\n exponent = np.exp(-((features[i] - mean) ** 2 / (2 * stdev ** 2)))\n probability = min(1.0,((1 / (np.sqrt(2 * np.pi) * stdev)) * exponent))\n else:\n # if parameter is for discrete valued feature\n # get stored probabilty directly\n type, prob_true, prob_false = class_parameters[i]\n if features[i] == True:\n probability = min(1.0,prob_true)\n else:\n probability = min(1.0,prob_false)\n # multiply P(feature|Y) to numerator\n final_class_prob[class_value] *= probability\n\n\n denominator = 0\n # calculate denominator for P(y|all features)\n # summation of P(features|Y) for all Y\n for class_value,class_prob in final_class_prob.items():\n if class_value == True:\n denominator += (np.prod(class_prob))\n else:\n denominator += (np.prod(class_prob))\n\n # divide numerator with denominator to get final P(features|Y) for all Y\n for class_value, class_prob in final_class_prob.items():\n final_class_prob[class_value] /= denominator\n\n # return probability for each each class label\n return final_class_prob", "def _estimate_priors(self):\n\n # Estimate the log UMI count turning point between cells and 'empties'.\n self.priors['log_counts_crossover'] = \\\n np.mean(np.log1p([self.priors['cell_counts'],\n self.priors['empty_counts']])).item()\n\n # Estimate prior for the scale param of LogNormal for d.\n if self.model_name != \"simple\":\n self.priors['d_std'] = (np.log1p(self.priors['cell_counts'])\n - self.priors['log_counts_crossover']) / 5\n else:\n self.priors['d_std'] = 0.2 # This is a reasonable prior in log space.\n\n # Priors for models that include empty droplets:\n if self.model_name != \"simple\":\n # Estimate fraction of trimmed dataset that contains cells.\n # cell_prob = self.priors['n_cells'] / self.analyzed_barcode_inds.size\n 
cell_prob = (1 - self.fraction_empties) \\\n * (self.priors['n_cells'] / self.analyzed_barcode_inds.size)\n self.priors['cell_prob'] = cell_prob\n\n assert cell_prob > 0, f\"Fraction of trimmed dataset \" \\\n f\"containing cells should be > 0, \" \\\n f\"but is {cell_prob}.\"\n\n assert cell_prob <= 1, f\"Fraction of trimmed dataset \" \\\n f\"containing cells should be at most 1, \" \\\n f\"but is {cell_prob}.\"\n\n # Turn cell probability into logit.\n self.priors['cell_logit'] = np.log(cell_prob / (1 - cell_prob)).item()\n\n # Estimate the ambient gene expression profile.\n self.priors['chi_ambient'], self.priors['chi_bar'] = \\\n estimate_chi_from_dataset(self)", "def build_class_priors(\n labels,\n class_priors=None,\n weights=None,\n positive_pseudocount=1.0,\n negative_pseudocount=1.0,\n):\n if class_priors is not None:\n return class_priors\n\n N, C = labels.size()\n\n weighted_label_counts = (weights * labels).sum(0)\n\n weight_sum = weights.sum(0)\n\n class_priors = torch.div(\n weighted_label_counts + positive_pseudocount,\n weight_sum + positive_pseudocount + negative_pseudocount,\n )\n\n return class_priors", "def classes_calculations(input):\n counts, _ = np.histogram(input, bins=int(\n input.max() + 1), range=(0, int(input.max())))\n return np.nonzero(counts)[0]", "def __init__(self, N=40):\n self._primes = []\n self.find_primes(N)", "def priming(self):\n return self.__priming" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the likelihoods for multinomial
def calculate_likelihoods_multinomial(data, labels, vocab):
    likelihoods = {}
    counts = {}
    words = {}
    classes = set(labels)
    vocabLen = len(vocab)
    for cls in classes:
        # Initialize
        counts[cls] = {}
        words[cls] = 0
    # Perform counts
    line = 0
    for doc in data:
        cls = labels[line]
        wordCounts = counts[cls]
        for (word, count) in doc:
            if word not in wordCounts:
                wordCounts[word] = 0
            wordCounts[word] += count
            words[cls] += count
        line += 1
    # Compute likliehoods
    for cls in counts:
        wordCounts = counts[cls]
        likelihoods[cls] = {}
        wordsInClass = words[cls]
        for word in wordCounts:
            likelihoods[cls][word] = laplace_smooth(wordCounts[word], wordsInClass, vocabLen)
        # Add all training words:
        for word in vocab:
            if word not in likelihoods[cls]:
                likelihoods[cls][word] = laplace_smooth(0, wordsInClass, vocabLen)
        # Special laplace smoothing for words not found in training data
        likelihoods[cls][None] = laplace_smooth(0, wordsInClass, vocabLen)
    return likelihoods
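laplace_smooth is called above but not defined in this row; a minimal sketch, assuming standard add-one (Laplace) smoothing over the vocabulary — the real helper may use a different smoothing constant:

    def laplace_smooth(wordCount, totalWordsInClass, vocabSize):
        # Assumed add-one smoothing: (count + 1) / (total words in class + |V|)
        return float(wordCount + 1) / float(totalWordsInClass + vocabSize)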
[ "def log_multinomial_coefficient(n, x):\n return gammaln(n + 1) - gammaln(x + 1).sum()", "def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):", "def multinomial_gen_log_prob(x, n, p):\n if n != 1:\n raise NotImplementedError()\n log_prob = np.sum(x * np.log(p))\n return log_prob\n # TODO(trandustin): need rewrite rules to handle n > 1\n # log_prob = np.sum(x * np.log(p))\n # log_prob -= np.sum(special.gammaln(x + 1))\n # log_prob += np.sum(special.gammaln(n + 1))\n # return log_prob", "def likelihood(a,b,d):\n # a + b = N\n a = float(a)\n b = float(b)\n if d == 1: # A replicated\n def p(r):\n return a * b / (a + b) * 1. / (a + b * r)\n elif d == -1: # B replicated\n def p(r):\n return a * b / (a + b) * r / (a + b * r)\n elif d == 0: # population stationary\n def p(r):\n return (a*a + b*b*r) / (a + b) * 1. / (a + b * r)\n return p", "def calc_likelihood(self):\r\n res = np.sum(self.weights)/self.PARTICLES_NUM\r\n self.log_likelihood += log(res)", "def var_multinomial(probs):\n var = probs * (1 - probs)\n return var", "def likelihood(self):\n \n raise NotImplementedError()", "def multinomial_logistic_loss(m, A, Y):\r\n cost = np.sum(-np.max(A * Y) + np.log(np.sum(np.exp(A))))\r\n return cost", "def multinomial(self):\n return multinomial(self.values())", "def likelihood(self, data, hypo):\n tagged, n, k = data\n if hypo < tagged + n - k:\n return 0\n\n p = tagged / hypo\n like = thinkbayes.eval_binomial_pmf(k, n, p)\n return like", "def log_likelihood(data, probs):\n # Assume data is given as counts\n return _np.sum([nlogp(n, p) for n, p in zip(data, probs) if n > 0])", "def multinomial_prob(counts, probs):\n return nCkarray(*counts.values) * (probs ** counts).prod()", "def GSM_log_likelihood(X, model):\n D, M = X.shape\n k = model.mix.shape[0]\n log_likelihood = 0\n for i in range(M):\n logpdf_X = 0\n for j in range(k):\n mvn = multivariate_normal(cov=model.cov[j, :])\n logpdf_X = mvn.logpdf(x=X[:, i]) * model.mix[j]\n log_likelihood += logpdf_X\n return log_likelihood", "def multinomial_log_probs(category_log_probs, trials, query_counts):\n # Multinomial probability = n! / (x1!...xk!) * p1^x1 * ... pk^xk\n # Log prob = log(n!) - (log(x1!) ... + log(xk!)) + x1log(p1) ... 
+ xklog(pk)\n trials, query_counts = trials.float(), query_counts.float()\n log_n_fact = torch.lgamma(trials + 1)\n log_counts_fact = torch.lgamma(query_counts + 1)\n log_counts_fact_sum = torch.sum(log_counts_fact, dim=-1)\n log_prob_pows = category_log_probs * query_counts # Elementwise sum\n log_prob_pows_sum = torch.sum(log_prob_pows, dim=-1)\n\n return log_n_fact - log_counts_fact_sum + log_prob_pows_sum", "def likelihood(sentence):\n word_arr = Counter(\n sentence.translate(punct).lower().split(\" \")) # All words to lower-case and split words to Counter\n prob_positive = prob_negative = 1 # Sets sum of positive and negative to 1, to prevent dividing by 0 and accumulate\n for w in word_arr: # Loops through words to calculate possibilities\n prob_word_positive = naive_bayes(w, pos) # Calculates probability of word occurring in positive review\n prob_positive = prob_positive * prob_word_positive # Multiplies the probabilities together to find probability of whole sentence\n prob_word_negative = naive_bayes(w, neg) # Repeat same process for negative.\n prob_negative = prob_negative * prob_word_negative\n return prob_positive, prob_negative # return both probabilities", "def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = self.conditional_prob[label][feature][value]\n\t\t\tif cp > 0: #condition check for values < 0 because log(0) is undefined and math domain error occurs\n\t\t\t\tlogJoint[label] += math.log(cp) #summing up\n\t\t\t\t\n\treturn logJoint", "def llikelihood( self, p):\n # template class\n return 1.0", "def likelihoods(self, alleles):\n\n models = self.models_dict[len(alleles)]\n\n F = self.joint_frequencies_combo(alleles)\n\n ### BPH ###\n (((A0, A1),((B0,),)),) = models['BPH'][1].items()\n\n BPH = (A0 / A1) * F[B0]\n\n\n BPH += sum( sum(F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['BPH'][2].items())\n\n if len(alleles)>2:\n BPH += sum( sum(F[B0] * sum( F[B1] * F[B2] for (B1, B2) in C[B0]) for B0 in C) * A0 / A1\n for (A0, A1), C in models['BPH'][3].items())\n\n ### SPH ###\n (((A0, A1),((B0,),)),) = models['SPH'][1].items()\n SPH = (A0 / A1) * F[B0]\n\n SPH += sum( sum(F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['SPH'][2].items())\n\n ### DIPLOIDY ###\n (((A0, A1),((B0,),)),) = models['DISOMY'][1].items()\n DISOMY = (A0 / A1) * F[B0]\n\n DISOMY += sum( sum( F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['DISOMY'][2].items())\n\n ### MONOSOMY ###\n ((B0,),) = models['MONOSOMY'][1][(1,1)]\n MONOSOMY = F[B0]\n\n result = likelihoods_tuple(MONOSOMY, DISOMY, SPH, BPH)\n return result", "def get_log_likelihood(phi, pred, t, dot_product, weight, reg= 1):\n prior = -0.5* np.sum(np.multiply(weight, weight))\n likelihood = np.multiply(t, np.log(pred+TOLERANCE)) + np.multiply(1.0- t, np.log(1.0-pred+TOLERANCE))\n likelihood = np.sum(likelihood)\n\n return prior + likelihood" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract the known vocabulary from our training data
def get_vocab(trainingData):
    return set(reduce(lambda x,y: x+y, map(lambda x: map(lambda y: y[0], x), trainingData), []))
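For the nested map/reduce above to yield words, trainingData must be a list of documents where each document is a list of (word, count) pairs — the same shape iterated by calculate_likelihoods_multinomial above. A small illustration with invented data (Python 2 semantics, where map returns a list):

    trainingData = [[("spam", 2), ("free", 1)], [("hello", 1), ("spam", 1)]]
    get_vocab(trainingData)  # -> set(['spam', 'free', 'hello'])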
[ "def get_vocab(self):\n\n\t\tself.parse_transcript() \n\t\tself.purge_words()\n\t\tself.analyze_words()\n\t\tself.sort_word_analysis()", "def get_vocab(self):\n return list(self.all_words.keys())", "def test_get_transcription_vocabulary(self):\n pass", "def get_vocab(self):\n return self.vocab.idx2word", "def load_vocabulary(self):\n vocab_file = open(vocabulary_path, \"r\")\n self.vocab_list = vocab_file.read().split(\"\\n\")\n vocab_file.close()\n print(\"[INFO] Reading vocabulary...\")\n print(self.vocab_list[0:15])", "def build_vocab(self):\n if self.test_file is None:\n print('test_file is None')\n file_list = [self.train_file, self.dev_file]\n else:\n file_list = [self.train_file, self.dev_file, self.test_file]\n\n examples = []\n for file_name in file_list:\n examples += ParseExample.load_data(file_name)\n\n sents = []\n for example in examples:\n warrant0, warrant1, reason, claim, debate_meta_data, negclaim = example.get_six(type=WORD_TYPE)\n sents.append(warrant0)\n sents.append(warrant1)\n sents.append(reason)\n sents.append(claim)\n sents.append(debate_meta_data)\n\n vocab = data_utils.build_word_vocab(sents)\n\n return vocab", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences)) # 实际没用到\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # 加入 <UNK>\n vocabulary_inv.insert(0, '</s>')\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def load_vocab(self):\n\n if self.vocabulary_path: \n # For now, the file format is derived from the file extension.\n if self.vocabulary_path.endswith('csv'):\n self.logger.info(\"Filter spymaster vocabulary by csv-file: {}\".format(self.vocabulary_path))\n with open(self.vocabulary_path, 'r') as fin:\n reader = csv.reader(fin)\n header = next(reader)\n for row in reader:\n word = row[1].lower()\n self.update_vocab(word) \n elif self.vocabulary_path.endswith('txt'):\n self.logger.info(\"Filter spymaster vocabulary by txt-file: {}\".format(self.vocabulary_path))\n with open(self.vocabulary_path, 'r') as fin:\n for line in fin:\n word = line.strip()\n self.update_vocab(word)\n else:\n raise ValueError(\"Unknown file format for filter spymaster vocabulary.\") \n else:\n self.logger.info(\"Load spymaster vocabulary from gensim.models.KeyedVectors.\")\n self.vocab = self.model.vocab\n self.vocab_size = len(self.vocab)\n\n self.logger.info(\"Spymaster vocabulary size is {}\".format(self.vocab_size))", "def load_vocab():\n # vocab loaded internally at google\n unused = r.sp_model\n del unused\n return r", "def gen_vocab(data, max_tokens = 200000):\n vectorizer = TextVectorization(max_tokens=max_tokens, output_sequence_length=200)\n text_ds = tf.data.Dataset.from_tensor_slices(data).batch(128)\n vectorizer.adapt(text_ds)\n vocab = vectorizer.get_vocabulary()\n return vocab, vectorizer", "def getVocabList2():\r\n\r\n## Read the fixed vocabulary list\r\n vocab_list = []\r\n f = open('vocab2.txt','r')\r\n str = f.read()\r\n vocab_list = str.split(' ')\r\n f.close()\r\n return vocab_list", "def get_vocabulary(collection):\n vocabulary = []\n\n for key in tqdm(collection):\n for word in collection[key]:\n if word not in vocabulary:\n vocabulary.append(word)\n\n return vocabulary", "def get_input_vocab():\n vocab = set()\n vocab.update(list(string.ascii_letters))\n vocab.update(list(string.digits))\n vocab.update(list(string.punctuation))\n 
vocab.update(list(string.whitespace))\n vocab.update(['<unk>', '<pad>'])\n return dict(zip(sorted(vocab), list(range(len(vocab)))))", "def __get_vocab(self) -> List[List[str]]:\n vocab = list()\n for doc in self.data:\n unique_tokens = list(set(doc))\n bigram_tokens = bigrams(doc) # Returns list of tupels\n bigram_tokens = [' '.join(token) for token in bigram_tokens]\n trigram_tokens: list = trigrams(doc) # Returns list of tupels\n trigram_tokens: list = [' '.join(token) for token in trigram_tokens]\n all_tokens = reduce(operator.concat, [unique_tokens, bigram_tokens, trigram_tokens])\n vocab.append(all_tokens)\n del unique_tokens, bigram_tokens, trigram_tokens, all_tokens\n return vocab", "def extract_vocabulary(documents):\n vocabulary = set()\n\n for document in documents:\n tokens = analyze(document)\n vocabulary.update(tokens)\n\n return vocabulary", "def get_vocab(model, n):\n labels = []\n tokens = []\n\n i = 0\n\n items = list(model.wv.vocab.items())\n random.shuffle(items)\n for word, _ in items:\n tokens.append(model[word])\n labels.append(word)\n\n i += 1\n if i >= n:\n break\n\n return labels, tokens", "def load_data(self):\n self.train_data, self.dev_data, self.test_data = tr.simplified_data(700, 100, 200) #load train/dev/test data\n\n # build vocab from training data\n self.vocab = Vocab() #get a Vocab object with many functions.e.g. construct,add...\n train_sents = [t.get_words() for t in self.train_data]\n self.vocab.construct(list(itertools.chain.from_iterable(train_sents))) #calling construct of vocab to add sentences. # chain('ABC', 'DEF') --> A B C D E F", "def _filter_model_words(current_words):\n return [word for word in current_words if word in model.wv.vocab]", "def getVocabulary(self): # real signature unknown; restored from __doc__\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return elements from the iterable until it is exhausted. Then repeat the sequence indefinitely. cycle(seq) ==> seq[0], seq[1], ..., seq[n-1], seq[0], seq[1], ...
def cycle(seq, n=None): if n is not None: return Iter(_ncycle(n, seq)) return Iter(itertools.cycle(seq))
[ "def cycle(iterator: Iterable[Any]) -> Iterable[Any]:\n while True:\n yield from iterator", "def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def _seq():\n i = 0\n while True:\n yield i\n i += 1", "def sequence(start=0):\n while True:\n yield start\n start += 1", "def cyclic(iterable):\n iterable = tuple(iterable)\n n = len(iterable)\n yield from (tuple(iterable[i - j] for i in range(n)) for j in range(n))", "def repeat_n(iterable, n_iter):\n for i in iterable:\n for _ in range(n_iter):\n yield i", "def initCycle(container, seq_func, n=1):\n return container(func() for _ in xrange(n) for func in seq_func)", "def simple_seq(seq):\n for i in seq:\n yield i", "def repeatedly(func, /, *args, **kwargs):\n func = to_callable(func)\n try:\n while True:\n yield func(*args, **kwargs)\n except StopIteration as e:\n yield from stop_seq(e)", "def cycle_next():\n next1 = next(ITER)\n next2 = next(ITER)\n global CUR_ITER\n CUR_ITER = [next1, next2]\n return CUR_ITER", "def forever():\n\n def animate(thing):\n thing = list(thing)\n yield from repeat(thing)\n return animate", "def cycle(self, *args):\r\n if not args:\r\n raise TypeError('no items for cycling given')\r\n return args[self.index0 % len(args)]", "def circular_shifts(iterable):\n lst = list(iterable)\n return take(len(lst), windowed(cycle(lst), len(lst)))", "def take(n, seq):\n seq = iter(seq)\n result = []\n try:\n for i in range(n):\n result.append(seq.next())\n except StopIteration:\n pass\n return result", "def delayed_iter(iterable: Iterable[T], delay: Optional[int]) -> Iterable[T]:\n\n cache: Deque[T] = collections.deque([], maxlen=delay)\n\n for item in iterable:\n if delay is not None and len(cache) >= delay:\n yield cache.popleft()\n cache.append(item)\n\n while len(cache):\n yield cache.popleft()", "def repeating_iterator(finite_list):\n idx, values_count = 0, len(finite_list)\n\n while True:\n yield finite_list[(idx % values_count)]\n idx += 1", "def loop_list(iterable):\n iterable = tuple(iterable)\n n = len(iterable)\n num = 0\n while num < n:\n yield iterable[num]\n num += 1\n if num >= n:\n num = 0", "def color_cycle():\n while True:\n for color in colors:\n yield color" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the object for the specified number of times. If not specified, returns the object endlessly.
def repeat(obj, times=None): if times is None: return Iter(itertools.repeat(obj)) return Iter(itertools.repeat(obj, times))
[ "def repeat(self, count):\n return self.Sequence((self,) * count)", "def repeater(at_least, timeout):\n timer = Timer(timeout)\n repeat = 0\n while repeat < at_least or timer.remaining():\n yield repeat\n repeat += 1", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "def repeat(num_times):\n\n def decorator_repeat(func):\n \"\"\"\n defines wrapper_repeat(*args, **kwargs)\n\n :returns: wrapper_repeat\n \"\"\"\n\n @functools.wraps(func)\n def wrapper_repeat(*args, **kwargs):\n \"\"\"\n func(*args, **kwargs) num_times\n\n :return: last return value\n \"\"\"\n for _ in range(num_times):\n value = func(*args, **kwargs)\n return value\n\n return wrapper_repeat\n\n return decorator_repeat", "def timeit(self, number=default_number):\n it = itertools.repeat(None, number)\n gcold = gc.isenabled()\n gc.disable()\n try:\n timing = self.inner(it, self.timer)\n finally:\n if gcold:\n gc.enable()\n return timing", "def repeat(N, fn):\n return MultiSequential(*[fn() for _ in range(N)])", "def taking(n):\n if n <= 0:\n raise ValueError('taking() requires a positive value.')\n\n @coroutine\n def gen(target):\n for _ in range(n):\n x = (yield)\n target.send(x)\n\n raise StopConsumption()\n\n return gen", "def limit(iterable, n):\n for count, element in enumerate(iterable):\n if count >= n: break\n else: yield element", "def take_every(n, iterable):\n return islice(iterable, 0, None, n)", "def initRepeat(container, func, n):\n return container(func() for _ in xrange(n))", "def do_ten_times(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n for i in range(10):\n func(*args, **kwargs)\n\n return wrapper", "def next_n(self, n: int, fast_forward=False):\n data = []\n while len(data) < n:\n try:\n record = self.queue.get(True, self.wait)\n data.append(record)\n except Empty:\n raise StopIteration\n return data", "def start_n_new(self, n: int) -> None:\n\n for _ in range(n):\n self.start_new()", "def get_first_n_crawled_chunks(self, number):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT * FROM crawler WHERE c_task = 'crawled' ORDER BY index LIMIT %s;\", (number,))\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)", "def forever(shard):\n def repeat(*args, **kwargs):\n while True:\n for delay in shard(*args, **kwargs):\n yield delay\n return repeat", "def _Repeat(fn):\n def wrapper(*args, **kwargs):\n for i in ITERATIONS:\n fn(*args, **kwargs)\n return wrapper", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def test_orm_full_objects_chunks(n):\n\n sess = Session(engine)\n for obj in sess.query(Customer).yield_per(100).limit(n):\n print(obj.name)", "def create_n_items(n):\n total_objects = models.Item.objects.all().count()\n for i in range(n):\n models.Item.objects.create(\n name=\"Randomly generated object {}\".format(i+total_objects),\n value=random.random() * 1000000\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make infinite calls to a function with the given arguments. End sequence if func() raises StopIteration.
def repeatedly(func, /, *args, **kwargs): func = to_callable(func) try: while True: yield func(*args, **kwargs) except StopIteration as e: yield from stop_seq(e)
[ "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def iterate(func, start):\n while True:\n yield start\n start = func(start)", "def iterate(f, x):\n while True:\n yield x\n x = f(x)", "def iterate1(f, x):\n while True:\n yield x\n x = f(x)", "def loop(func):\n def wrapper(*a, **b):\n while True:\n func(*a, **b)\n return wrapper", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "def infinite_loop(func):\n @wraps(func) # Preserve target's metadata\n def wrapper(*args, **kwargs):\n while True:\n try:\n func(*args, **kwargs)\n except KeyboardInterrupt:\n break\n return wrapper", "def iterate(f, *args, **kwargs):\n x = Values(*args, **kwargs)\n while True:\n yield x\n x = f(*x.rets, **x.kwrets)\n if not isinstance(x, Values):\n raise TypeError(f\"Expected a `Values`, got {type(x)} with value {repr(x)}\")", "def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item", "def whileloop(cond, func, do_stopiteration=none, do_exhaust=none):\n while cond():\n try:\n func()\n except StopIteration:\n do_stopiteration()\n return\n do_exhaust()\n return", "def iter_except(func, exception):\n try:\n while True:\n yield func()\n except exception:\n pass", "def _resume(self, func, *args):\n try:\n value = func(*args)\n except StopIteration as e:\n self._generator.close()\n self.set_result(e.value)\n except Exception as e:\n self._generator.close()\n self.set_exception(e)\n else:\n assert isinstance(value, Future)\n value.callbacks.append(partial(self._resume, self._generator.send))\n value.errbacks.append(partial(self._resume, self._generator.throw))", "def make_func_repeater(f, x):\n\n def repeat(i, x=x):\n if i == 0:\n return x\n else:\n return repeat(i-1, f(x))\n return repeat", "def RunCoroutineOrFunction(function, args=[]):\r\n if inspect.isgeneratorfunction(function):\r\n coroutine = function(*args)\r\n response = yield coroutine.next()\r\n while True:\r\n response = yield coroutine.send(response)\r\n else:\r\n function(*args)", "def chained(func):\n def wrapper(*args, **kwargs):\n for xs in func(*args, **kwargs):\n for x in xs:\n yield x\n return wrapper", "def repeat_with(self, f: Callable[[], T]):\n self.stream = repeat_with(f)\n self._infinite = True\n return self", "def iter_except(function, exception):\r\n try:\r\n while True:\r\n yield function()\r\n except exception:\r\n return", "def unfold(func, seed):\n try:\n elem = func(seed)\n while elem is not None:\n seed, x = elem\n yield x\n elem = func(seed)\n except StopIteration as e:\n yield from stop_seq(e)", "def fib(*s, **kw):\n fn = kw.pop('fn', sum)\n if kw: raise TypeError(str.format(\"fib: unknown arguments {kw}\", kw=seq2str(kw.keys())))\n s = list(s)\n while True:\n s.append(fn(s))\n yield s.pop(0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Invert a fold. Similar to iterate, but expects a function of seed -> (seed', x). The second value of the tuple is included in the resulting sequence, while the first is used to seed func in the next iteration. Stops iteration if func returns None or raises StopIteration.
def unfold(func, seed): try: elem = func(seed) while elem is not None: seed, x = elem yield x elem = func(seed) except StopIteration as e: yield from stop_seq(e)
[ "def foldr(func, iterable):\n return fold(func, reversed(iterable))", "def flip(func):\n def flipped(*args):\n return func(*args[::-1])\n return flipped", "def fold(iterable, func, base):\n acc = base\n for element in iterable:\n acc = func(acc, element)\n return acc", "def foldl(func, start, itr):\n return _foldl(func, start, iter(itr))", "def flip(func):\n if not callable(func):\n raise TypeError(\"First argument to flip must be callable\")\n \n def flipped_func(*args, **kwargs):\n return func(*reversed(args), **kwargs)\n return flipped_func", "def flip(f):\n return lambda *args, **kwargs: f(*args[::-1], **kwargs)", "def foldr(func, start, itr):\n return _foldr(func, start, iter(itr))", "def foldr(function, acc, xs):\n return reduce(lambda x, y: function(y, x), reversed(xs), acc)", "def foldl2(link, fn, z):\n def step(x, g):\n \"*** YOUR CODE HERE ***\"\n return foldr(link, step, identity)(z)", "def interleave(inter, f, seq):\n seq = iter(seq)\n try:\n f(next(seq))\n except StopIteration:\n pass\n else:\n for x in seq:\n inter()\n f(x)", "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def invert(x, out=None, **kwargs):\n return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)", "def foldl(function, acc, xs):\n return reduce(function, xs, acc)", "def cofold(function, initial, iterator):\n acc = [initial]\n\n def handleAcc(newAcc):\n acc[0] = newAcc\n\n def dofold(item):\n return function(acc[0], item)\n\n d = _CoFunCaller(dofold, resultCollector=handleAcc).coiterate(iterator)\n d.addCallback(lambda _: acc[0])\n return d", "def ifilter_c(func):\n return functools.partial(ifilter, func)", "def clump(seq, fn=None):\n seq = iter(seq)\n try:\n v = next(seq)\n except StopIteration:\n return\n xs = [v]\n if fn is not None: v = fn(v)\n for x in seq:\n v_ = (x if fn is None else fn(x))\n if v_ == v:\n xs.append(x)\n else:\n yield xs\n v = v_\n xs = [x]\n yield xs", "def inverse(self, x, *args, **kwargs):\n if self.list_of_inverses is None:\n utils.print_warning(\"inverses were not given\")\n return\n for i in range(len(self.list_of_inverses)):\n x = self.list_of_inverses[i](x, *args, **kwargs)\n return x", "def _apply_sbox_inverse(state):\n for i in range(8):\n x = state[i]\n state[i] = _sbox_i(x & 0xF) | _sbox_i((x & 0xF0) >> 4) << 4\n\n return state", "def imap_c(func):\n return functools.partial(imap, func)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create iterator from sequence of numbers.
def from_sequence(self, seq): return Iter(self._from_sequence(seq))
[ "def numeric_sequence_iteration(self) -> global___Statement.Iteration.NumericSequenceIteration:", "def _seq():\n i = 0\n while True:\n yield i\n i += 1", "def range(*args) -> \"Iter[int]\":\n return Iter(_range(*args))", "def simple_seq(seq):\n for i in seq:\n yield i", "def get_number_sequence(a: int, b: int) -> Iterable[int]:\n n = 0\n while True:\n yield n**2 + a*n + b\n n += 1", "def integers():\n i = 1\n while True:\n yield i\n i = i + 1", "def __iter__(self):\n return iter(self._sequence)", "def sequence(start=0):\n while True:\n yield start\n start += 1", "def c(sequence):\n c.starts += 1\n for item in sequence:\n c.items += 1\n yield item", "def get_numbers(sequence):\r\n\r\n new_list = []\r\n for element in sequence:\r\n if isinstance(element, numbers.Number) == True:\r\n new_list.append(element)\r\n\r\n return new_list", "def seq_ints(n, start=0, step=1):\n return list(range(start, start + n*abs(step), step))", "def c(sequence):\n Debugger.starts += 1\n for item in sequence:\n Debugger.items += 1\n yield item", "def __iter__(self):\n if self.findsymbols():\n raise TypeError(\"Not numeric: %s\" % self)\n for subrange in self.subranges:\n start, step, stop = subrange\n if step > 0:\n # positive direction\n\n if start <= stop:\n # first value\n yield start\n\n # iterate over next values\n val = start + step\n while val <= stop:\n yield val\n val += step\n elif step == 0:\n # no direction\n\n if start == stop:\n # only value\n yield start\n elif step < 0:\n # negative direction\n\n if start >= stop:\n # first value\n yield start\n\n # iterate over next values\n val = start + step\n while val >= stop:\n yield val\n val += step", "def fromSequence(self, sequence):\n for aVal, bVal in sequence:\n self.add(aVal, bVal)\n\n return self", "def gen_natural_numbers(start = 0, end = -1, incr = 1):\n i = start\n sign = incr // abs(incr)\n while end < 0 or i*sign <= end*sign:\n yield i\n i += incr", "def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item", "def get_nums():\n num = -1\n while num != 0:\n num = int(input())\n yield num", "def xrange1(value):\n try:\n i = int(value)\n return [x+1 for x in xrange(i)]\n except:\n return []", "def xrange(*args):\n len_args = len(args)\n if len_args == 1:\n stop = int(args[0])\n start = 0\n step = 1\n elif len_args == 2:\n start = int(args[0])\n stop = int(args[1])\n step = 1\n elif len_args == 3:\n start = int(args[0])\n stop = int(args[1])\n step = int(args[2])\n else:\n raise TypeError(\"xrange() requires 1-3 int arguments\")\n if step < 0:\n bcmp = operator.gt\n elif step > 0:\n bcmp = operator.lt\n else:\n raise StopIteration\n act = int(start)\n while bcmp(act, stop):\n yield act\n act += step" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create iterator from slice object.
def from_slice(self, slice): start = 0 if slice.start is None else slice.start step = 1 if slice.step is None else slice.step return self.count(start, step, stop=slice.stop)
[ "def __iter__(self):\n return _IndexedComponent_slice_iter(self)", "def islice(iterable, *args) -> \"Iter\":\n return Iter(itertools.islice(iterable, *args))", "def from_sequence(self, seq):\n return Iter(self._from_sequence(seq))", "def iter_slices(\n len_seq: int,\n chunk_size: int,\n start: int = 0,\n) -> typing.Iterator[slice]:\n return (\n slice(pos, pos + chunk_size)\n for pos in range(start, len_seq, chunk_size)\n )", "def with_slices(it: Iterator[np.ndarray]\n ) -> Iterator[Tuple[FixedSlice, np.ndarray]]:\n start_idx = 0\n for d in it:\n end_idx = start_idx + d.shape[0]\n yield FixedSlice(start_idx, end_idx), d\n start_idx = end_idx", "def SliceView(sequence, start=None, stop=None, step=1):\n start, stop, step = slice(start, stop, step).indices(len(sequence))\n for i in range(start, stop, step):\n yield sequence[i]", "def _transform_slice_to_indices(i):\n if type(i) is slice:\n if i.step is None:\n step = 1\n else:\n step = i.step\n idxs = list(range(i.start, i.stop, step))\n else:\n idxs = [i]\n return idxs", "def _slice(self, start, stop, step=None):\n\n slices = [slice(None)] * self.data.ndim\n slices[self.axis] = slice(start, stop, step)\n return tuple(slices)", "def _slice_generator(self, index):\n start, stop, step = index.indices(len(self))\n for i in range(start, stop, step):\n yield self.get_blob(i)", "def gen_slices(self, chunk):\n for i in range(1, len(chunk)+1): # the current subset size\n for j in range(0, len(chunk)-i+1): # the current subset offset\n yield self.SubSlice(j, j+i)", "def __getslice__(self, *args):\n return self.__class__(super(IOVSet, self).__getslice__(*args))", "def _conv_slice_to_list(slice_obj, start_def=0, stop_def=100, step_def=1):\n if slice_obj.start is None:\n start = start_def\n else:\n start = slice_obj.start\n if slice_obj.stop is None:\n stop = stop_def\n else:\n stop = slice_obj.stop\n if slice_obj.step is None:\n step = step_def\n else:\n step = slice_obj.step\n return list(range(start, stop, step))", "def slicer(self) -> None:\n if self.slice_str:\n slice_start_str, slice_end_str = self.slice_str.split(':', maxsplit=1)\n if slice_start_str:\n self.slice_start = int(slice_start_str)\n if slice_end_str:\n self.slice_end = int(slice_end_str)\n\n if not self.slice_start is None or not self.slice_end is None:\n # standard parsers UTF-8 input\n if isinstance(self.data_in, str):\n data_in_iter = self.lazy_splitlines(self.data_in)\n\n # positive slices\n if (self.slice_start is None or self.slice_start >= 0) \\\n and (self.slice_end is None or self.slice_end >= 0):\n\n self.data_in = '\\n'.join(islice(data_in_iter, self.slice_start, self.slice_end))\n\n # negative slices found (non-lazy, uses more memory)\n else:\n self.data_in = '\\n'.join(list(data_in_iter)[self.slice_start:self.slice_end])\n\n # standard parsers bytes input\n elif isinstance(self.data_in, bytes):\n utils.warning_message(['Cannot slice bytes data.'])\n\n # streaming parsers UTF-8 input\n else:\n # positive slices\n if (self.slice_start is None or self.slice_start >= 0) \\\n and (self.slice_end is None or self.slice_end >= 0) \\\n and self.data_in:\n\n self.data_in = islice(self.data_in, self.slice_start, self.slice_end)\n\n # negative slices found (non-lazy, uses more memory)\n elif self.data_in:\n self.data_in = list(self.data_in)[self.slice_start:self.slice_end]", "def slice(self, obj):\n return obj[self.start : self.end + 1]", "def _get_slice(self, slice):\n start = slice.start or 0\n stop = slice.stop\n\n if start < 0:\n raise ValueError(\"Collection slices must have a 
nonnegative \"\n \"start point.\")\n if stop < 0:\n raise ValueError(\"Collection slices must have a definite, \"\n \"nonnegative end point.\")\n\n existing_representation = self._wadl_resource.representation\n if (existing_representation is not None\n and start < len(existing_representation['entries'])):\n # An optimization: the first page of entries has already\n # been loaded. This can happen if this collection is the\n # return value of a named operation, or if the client did\n # something like check the length of the collection.\n #\n # Either way, we've already made an HTTP request and\n # gotten some entries back. The client has requested a\n # slice that includes some of the entries we already have.\n # In the best case, we can fulfil the slice immediately,\n # without making another HTTP request.\n #\n # Even if we can't fulfil the entire slice, we can get one\n # or more objects from the first page and then have fewer\n # objects to retrieve from the server later. This saves us\n # time and bandwidth, and it might let us save a whole\n # HTTP request.\n entry_page = existing_representation['entries']\n\n first_page_size = len(entry_page)\n entry_dicts = entry_page[start:stop]\n page_url = existing_representation.get('next_collection_link')\n else:\n # No part of this collection has been loaded yet, or the\n # slice starts beyond the part that has been loaded. We'll\n # use our secret knowledge of lazr.restful to set a value for\n # the ws.start variable. That way we start reading entries\n # from the first one we want.\n first_page_size = None\n entry_dicts = []\n page_url = self._with_url_query_variable_set(\n self._wadl_resource.url, 'ws.start', start)\n\n desired_size = stop - start\n more_needed = desired_size - len(entry_dicts)\n\n # Iterate over pages until we have the correct number of entries.\n while more_needed > 0 and page_url is not None:\n page_get = self._root._browser.get(page_url)\n if isinstance(page_get, binary_type):\n page_get = page_get.decode('utf-8')\n representation = loads(page_get)\n current_page_entries = representation['entries']\n entry_dicts += current_page_entries[:more_needed]\n more_needed = desired_size - len(entry_dicts)\n\n page_url = representation.get('next_collection_link')\n if page_url is None:\n # We've gotten the entire collection; there are no\n # more entries.\n break\n if first_page_size is None:\n first_page_size = len(current_page_entries)\n if more_needed > 0 and more_needed < first_page_size:\n # An optimization: it's likely that we need less than\n # a full page of entries, because the number we need\n # is less than the size of the first page we got.\n # Instead of requesting a full-sized page, we'll\n # request only the number of entries we think we'll\n # need. 
If we're wrong, there's no problem; we'll just\n # keep looping.\n page_url = self._with_url_query_variable_set(\n page_url, 'ws.size', more_needed)\n\n if slice.step is not None:\n entry_dicts = entry_dicts[::slice.step]\n\n # Convert entry_dicts into a list of Entry objects.\n return [resource for resource\n in self._convert_dicts_to_entries(entry_dicts)]", "def __getslice__(self, *args):\n return ObjectProxy(Container.__getslice__(self, *args),\n self.parent)", "def test_getitem_slice(self):\n ds = Dataset()\n ds.CommandGroupLength = 120 # 0000,0000\n ds.CommandLengthToEnd = 111 # 0000,0001\n ds.Overlays = 12 # 0000,51B0\n ds.LengthToEnd = 12 # 0008,0001\n ds.SOPInstanceUID = '1.2.3.4' # 0008,0018\n ds.SkipFrameRangeFlag = 'TEST' # 0008,9460\n ds.add_new(0x00090001, 'PN', 'CITIZEN^1')\n ds.add_new(0x00090002, 'PN', 'CITIZEN^2')\n ds.add_new(0x00090003, 'PN', 'CITIZEN^3')\n ds.add_new(0x00090004, 'PN', 'CITIZEN^4')\n ds.add_new(0x00090005, 'PN', 'CITIZEN^5')\n ds.add_new(0x00090006, 'PN', 'CITIZEN^6')\n ds.add_new(0x00090007, 'PN', 'CITIZEN^7')\n ds.add_new(0x00090008, 'PN', 'CITIZEN^8')\n ds.add_new(0x00090009, 'PN', 'CITIZEN^9')\n ds.add_new(0x00090010, 'PN', 'CITIZEN^10')\n ds.PatientName = 'CITIZEN^Jan' # 0010,0010\n ds.PatientID = '12345' # 0010,0010\n ds.ExaminedBodyThickness = 1.223 # 0010,9431\n ds.BeamSequence = [Dataset()] # 300A,00B0\n ds.BeamSequence[0].PatientName = 'ANON'\n\n # Slice all items - should return original dataset\n assert ds[:] == ds\n\n # Slice starting from and including (0008,0001)\n test_ds = ds[0x00080001:]\n assert 'CommandGroupLength' not in test_ds\n assert 'CommandLengthToEnd' not in test_ds\n assert 'Overlays' not in test_ds\n assert 'LengthToEnd' in test_ds\n assert 'BeamSequence' in test_ds\n\n # Slice ending at and not including (0009,0002)\n test_ds = ds[:0x00090002]\n assert 'CommandGroupLength' in test_ds\n assert 'CommandLengthToEnd' in test_ds\n assert 'Overlays' in test_ds\n assert 'LengthToEnd' in test_ds\n assert 0x00090001 in test_ds\n assert 0x00090002 not in test_ds\n assert 'BeamSequence' not in test_ds\n\n # Slice with a step - every second tag\n # Should return zeroth tag, then second, fourth, etc...\n test_ds = ds[::2]\n assert 'CommandGroupLength' in test_ds\n assert 'CommandLengthToEnd' not in test_ds\n assert 0x00090001 in test_ds\n assert 0x00090002 not in test_ds\n\n # Slice starting at and including (0008,0018) and ending at and not\n # including (0009,0008)\n test_ds = ds[0x00080018:0x00090008]\n assert 'SOPInstanceUID' in test_ds\n assert 0x00090007 in test_ds\n assert 0x00090008 not in test_ds\n\n # Slice starting at and including (0008,0018) and ending at and not\n # including (0009,0008), every third element\n test_ds = ds[0x00080018:0x00090008:3]\n assert 'SOPInstanceUID' in test_ds\n assert 0x00090001 not in test_ds\n assert 0x00090002 in test_ds\n assert 0x00090003 not in test_ds\n assert 0x00090004 not in test_ds\n assert 0x00090005 in test_ds\n assert 0x00090006 not in test_ds\n assert 0x00090008 not in test_ds\n\n # Slice starting and ending (and not including) (0008,0018)\n assert ds[(0x0008, 0x0018):(0x0008, 0x0018)] == Dataset()\n\n # Test slicing using other acceptable Tag initialisations\n assert 'SOPInstanceUID' in ds[(0x00080018):(0x00080019)]\n assert 'SOPInstanceUID' in ds[(0x0008, 0x0018):(0x0008, 0x0019)]\n assert 'SOPInstanceUID' in ds['0x00080018':'0x00080019']", "def create_slice(*, stop : Optional[int] = None, start : Optional[int] = None, step : Optional[int] = None) -> slice:\n return slice(start, 
stop, step)", "def __next__(self) -> StreamSlice:\n\n if self._start_date >= self._end_date:\n raise StopIteration()\n if not self._range_adjusted:\n self._current_range = self.MAX_RANGE_DAYS\n next_start_date = min(self._end_date, self._start_date + pendulum.Duration(days=self._current_range))\n slice = StreamSlice(start_date=self._start_date, end_date=next_start_date)\n self._prev_start_date = self._start_date\n self._start_date = next_start_date\n self._range_adjusted = False\n return slice" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a sequence of n evenly spaced numbers from a to b.
def evenly_spaced(self, a: Real, b: Real, n: int) -> Iter: return Iter(_evenly_spaced(a, b, n))
[ "def get_number_sequence(a: int, b: int) -> Iterable[int]:\n n = 0\n while True:\n yield n**2 + a*n + b\n n += 1", "def distribute(N, b):\n return [div + mod for div, mod in izip_longest(repeat(N//b, b), repeat(1, N%b), fillvalue=0)]", "def linspace(a,b,nsteps):\n ssize = float(b-a)/(nsteps-1)\n return [a + i*ssize for i in range(nsteps)]", "def arithmeticSequence(self, n: int, b: int):\r\n # 使用最少的操作次数,将这几个数构造成一个等差数列。\r\n ...", "def SEQUENCE(n):\r\n return [i for i in range(1, n+1)]", "def randints(a, b, n) :\n return [randint(a, b) for _ in range(n)]", "def evenly_spaced_BDs_OLD(BDs, n):\n BDs = BDs.iloc[:,0].tolist()\n BD_min = min(BDs)\n BD_max = max(BDs)\n return np.linspace(BD_min, BD_max, n)", "def seq_ints(n, start=0, step=1):\n return list(range(start, start + n*abs(step), step))", "def groups_of(seq, n):\n for i in range(0, len(seq), n):\n yield seq[i : (i + n)]", "def takespread(sequence, num):\n length = float(len(sequence))\n for i in range(num):\n yield sequence[int(np.ceil(i * length / num))]", "def takespread(sequence, num):\n length = float(len(sequence))\n for i in range(num):\n yield sequence[int(ceil(i * length / num))]", "def linspace(start, stop, n):\n step = (stop-start)/(n-1)\n return [start+step*(i) for i in range(n)]", "def stones(n, a, b):\n # Any two consecutive stones' numbers differ by one of two values, a and b\n\n stones = [[0]]\n for _ in range(n-1): # add the remaining stones\n new_stone = [] # possible values for the new stone\n for step_size in (a,b):\n new_stone.extend([val + step_size for val in stones[-1]])\n\n stones.append(sorted(list(set(new_stone))))\n\n return stones[-1]", "def batches(l, n):\n for i in range (0, l, n):\n yield range (i, min (l, i + n))", "def shingle(iterable, n):\n num_shingles = max(1, len(iterable) - n + 1)\n return [iterable[i:i + n] for i in range(num_shingles)]", "def split(a, n):\n n = min(n, len(a))\n k, m = divmod(len(a), n)\n return [a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n)]", "def list1ToN(n):\n num = 0\n return_value = []\n for int in range(1, n + 1):\n return_value.append(num)\n num = num + 1\n return return_value", "def list0toN(n):\n num = 0\n return_value = []\n for int in range(0, n + 1):\n return_value.append(num)\n num = num + 1\n return return_value", "def divisor_pairs(n):\n if n == 0:\n yield (0, 0)\n return\n a = 1\n while True:\n (b, r) = divmod(n, a)\n if a > b: break\n if r == 0: yield (a, b)\n a += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert int to string without using builtin str()
def int_to_string(num): if num < 0: num, is_neg = -num, True else: is_neg = False s = [] while num > 0: s.append(chr(ord('0') + num%10)) num //= 10 return ('-' if is_neg else '') + ''.join(reversed(s))
[ "def to_str(num: int) -> str:\n return str(num)", "def int_to_str(source: int) -> str:\n return str(source)", "def int_to_str(i):\n digits = '0123456789'\n if i == 0:\n return '0'\n result = ''\n while i > 0:\n result = digits[i%10] + result\n i = i//10\n return result", "def _to_str(num):\n if num < 10:\n return '0' + str(num)\n else:\n return str(num)", "def _int_to_string(v):\n \n if not isinstance(v,int):\n raise InstrumentParameterException('Value %s is not an int.' % str(v))\n else:\n return '%i' % v", "def intToString(*args):\n return _libsbml.SBO_intToString(*args)", "def to_str(variable):\n try:\n int(variable)\n return str(variable)\n except ValueError:\n return variable", "def ints_to_string(iterable):\n return ''.join([chr(i) for i in iterable])", "def num2str(num):\n require_type(is_number(num), 'parameter of number->string must be a number')\n return tostr(num)", "def primitive_number_str(number: numbers.Number) -> strings.String:\n return strings.create(str(number.value))", "def base2str(self, int_number):\r\n return self.format_base % (float(int_number) / self.mult_base)", "def SBO_intToString(*args):\n return _libsbml.SBO_intToString(*args)", "def _int_to_formatted_str(an_int):\n\tabs_int = abs(an_int)\n\tint_str = str(abs_int)\n\n\tif 0 <= abs_int and abs_int <= 9:\n\t\tint_str = _ZERO_STR + int_str\n\n\treturn int_str", "def process_int(integer: int) -> str:\n\n return str(integer) if integer else Presenter.DEFAULT", "def int2str(number, length):\n number = \"{0:b}\".format(number)\n return \"0\" * (length - len(number)) + number", "def convert_int_to_str(int_note):\n if type(int_note) is not int:\n raise Exception('Note type exception')\n elif int_note not in note_int_to_str_dict:\n raise Exception('Unknown note exception')\n else:\n return note_int_to_str_dict[int_note]", "def __rank_from_int_to_str(rank: int) -> str:\n return str(rank + 1)", "def serialise_number(i, context=None):\n return (\"%d\" % i).encode('ascii')", "def int2dec(n: int) -> str:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot a template over a detected stream, with picks corrected by lagcalc.
def plot_repicked(template, picks, det_stream, size=(10.5, 7.5), save=False, savefile=None, title=False): # _check_save_args(save, savefile) fig, axes = plt.subplots(len(det_stream), 1, sharex=True, figsize=size) if len(template) > 1: axes = axes.ravel() mintime = det_stream.sort(['starttime'])[0].stats.starttime template.sort(['network', 'station', 'starttime']) lengths = [] lines = [] labels = [] n_templates_plotted = 0 for i, tr in enumerate(det_stream.sort(['starttime'])): # Cope with a single channel template case. if len(det_stream) > 1: axis = axes[i] else: axis = axes tr_picks = [pick for pick in picks if pick.waveform_id.station_code == tr.stats.station and pick.waveform_id.channel_code[0] + pick.waveform_id.channel_code[-1] == tr.stats.channel[0] + tr.stats.channel[-1]] if len(tr_picks) > 1: msg = 'Multiple picks on channel %s' % tr.stats.station + ', ' + \ tr.stats.channel raise NotImplementedError(msg) if len(tr_picks) == 0: msg = 'No pick for chanel %s' % tr.stats.station + ', ' + \ tr.stats.channel print(msg) else: pick = tr_picks[0] pick_delay = pick.time - mintime delay = tr.stats.starttime - mintime y = tr.data # Normalise if len(tr_picks) > 0 and template: y /= max(abs(y[int(pick_delay/tr.stats.delta):int(pick_delay/tr.stats.delta) + len(template[0])])) else: y /= max(abs(y)) x = np.linspace(0, (len(y) - 1) * tr.stats.delta, len(y)) x += delay axis.plot(x, y, 'k', linewidth=1.5) axis.set_ylim(-max(abs(y)), max(abs(y))) if template.select(station=tr.stats.station, channel=tr.stats.channel): btr = template.select(station=tr.stats.station, channel=tr.stats.channel)[0] bdelay = pick.time - mintime by = btr.data by /= max(abs(by)) bx = np.linspace(0, (len(by) - 1) * btr.stats.delta, len(by)) bx += bdelay if len(tr_picks) > 0: # Heads up for the x - 0.1 fudge factor here accounting for template pre-pick time template_line, = axis.plot(bx - 0.1, by, 'r', linewidth=1.6, label='Template') if not pick.phase_hint: pcolor = 'k' label = 'Unknown pick' elif 'P' in pick.phase_hint.upper(): pcolor = 'red' label = 'P-pick' elif 'S' in pick.phase_hint.upper(): pcolor = 'blue' label = 'S-pick' else: pcolor = 'k' label = 'Unknown pick' pdelay = pick.time - mintime ccval = pick.comments[0].text.split('=')[-1] line = axis.axvline(x=pdelay, color=pcolor, linewidth=2, linestyle='--', label=label) axis.text(pdelay, max(by), ccval, fontsize=12) if label not in labels: lines.append(line) labels.append(label) if n_templates_plotted == 0: lines.append(template_line) labels.append('Template') n_templates_plotted += 1 lengths.append(max(bx[-1], x[-1])) else: lengths.append(bx[1]) axis.set_ylabel('.'.join([tr.stats.station, tr.stats.channel]), rotation=0, horizontalalignment='right') axis.yaxis.set_ticks([]) if len(det_stream) > 1: axis = axes[len(det_stream) - 1] else: axis = axes axis.set_xlabel('Time (s) from %s' % mintime.datetime.strftime('%Y/%m/%d %H:%M:%S.%f')) plt.figlegend(lines, labels, 'upper right') if title: if len(template) > 1: axes[0].set_title(title) else: axes.set_title(title) else: plt.subplots_adjust(top=0.98) plt.tight_layout() plt.subplots_adjust(hspace=0) if not save: plt.show() plt.close() else: plt.savefig(savefile) plt.close() return fig
[ "def plotTemplate(self,ax=None):\n if self.templatefilename is None:\n return\n doshow = False\n if ax is None:\n doshow = True\n fig = figure()\n ax = fig.add_subplot(111)\n #ax.plot(self.sptemp.data,'k')\n ax.plot(self.sptemp.mpw,self.sptemp.data[self.sptemp.mpw],'g')\n ax.plot(self.sptemp.opw,self.sptemp.data[self.sptemp.opw],'r')\n ax.set_xlim(0,np.max(self.sptemp.bins))\n ax.set_ylim(-0.05,1.05)\n ax.set_xlabel(\"Phase bins\")\n ax.set_ylabel(\"Normalized Intensity\")\n if doshow:\n show()\n return ax", "def picker_plot(stream, picks, types, show=True, savefile=''):\n import matplotlib.pyplot as plt\n import datetime as dt\n import matplotlib.dates as mpdates\n import numpy as np\n stream.filter('highpass',freq=2.0, corners=2, zerophase=True)\n plotno=1\n fig=plt.figure(num=1, dpi=100, facecolor='w', edgecolor='k')\n fig.suptitle('Picks')\n for tr in stream:\n dates=range(0,len(tr.data))\n dates=[date/tr.stats.sampling_rate for date in dates]\n ax=fig.add_subplot(len(stream),1, plotno)\n ax.plot(dates,tr.data,'k-')\n plt.text(-0.001*len(stream[0].data),np.mean(tr.data),tr.stats.station+' '+tr.stats.channel)\n if types[plotno-1]=='P': #or types[plotno-1][1]=='P':\n ax.plot((picks[plotno-1], picks[plotno-1]), \\\n (min(tr.data)-10, max(tr.data)+10), 'r-', linewidth=2.0)\n elif types[plotno-1]=='S':\n ax.plot((picks[plotno-1], picks[plotno-1]), \\\n (min(tr.data)-10, max(tr.data)+10), 'b-', linewidth=2.0)\n plotno+=1\n fig.subplots_adjust(hspace=0)\n plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)\n plt.setp([a.get_yticklabels() for a in fig.axes[:]], visible=False)\n plt.xlabel('Time [s]')\n if show:\n plt.show()\n else:\n plt.savefig(savefile)\n return", "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin", "def visualize():", "def plot_trajectory(self, poses_gt, poses_result, seq):\n plot_keys = [\"Ground Truth\", \"Ours\"]\n fontsize_ = 20\n\n poses_dict = {}\n poses_dict[\"Ground Truth\"] = poses_gt\n poses_dict[\"Ours\"] = poses_result\n\n color_list = {\"Ground Truth\": 'k',\n \"Ours\": 'lime'}\n linestyle = {\"Ground Truth\": \"--\",\n \"Ours\": \"-\"}\n\n fig = plt.figure()\n ax = plt.gca()\n ax.set_aspect('equal')\n\n for key in plot_keys:\n pos_xyz = []\n frame_idx_list = sorted(poses_dict[\"Ours\"].keys())\n for frame_idx in frame_idx_list:\n # pose = np.linalg.inv(poses_dict[key][frame_idx_list[0]]) @ poses_dict[key][frame_idx]\n pose = 
poses_dict[key][frame_idx]\n pos_xyz.append([pose[0, 3], pose[1, 3], pose[2, 3]])\n pos_xyz = np.asarray(pos_xyz)\n plt.plot(pos_xyz[:, 0], pos_xyz[:, 2], label=key, c=color_list[key], linestyle=linestyle[key])\n\n # Draw rect\n if key == 'Ground Truth':\n rect = mpl.patches.Rectangle((pos_xyz[0, 0]-5, pos_xyz[0, 2]-5), 10,10, linewidth=2, edgecolor='k', facecolor='none')\n ax.add_patch(rect)\n\n plt.legend(loc=\"upper right\", prop={'size': fontsize_})\n plt.xticks(fontsize=fontsize_)\n plt.yticks(fontsize=fontsize_)\n plt.xlabel('x (m)', fontsize=fontsize_)\n plt.ylabel('z (m)', fontsize=fontsize_)\n plt.grid(linestyle=\"--\")\n fig.set_size_inches(10, 10)\n png_title = \"sequence_{}\".format(seq)\n fig_pdf = self.plot_path_dir + \"/\" + png_title + \".pdf\"\n plt.savefig(fig_pdf, bbox_inches='tight', pad_inches=0)\n plt.close(fig)", "def plot_all_profiles(self):\n tensor = self.y_tensor.copy()\n nb_times = tensor.shape[0]\n\n for k in range(5):\n tensor[...,k] = tensor[...,k]*(self.maxs[k] - self.mins[k]) + self.mins[k]\n \n fig, ax = plt.subplots()\n \n for t in range(0, nb_times, 20):\n #ax.scatter(np.arange(0, 10*len(tensor[t, :, 2]), 10),tensor[t, :, 2], c='b',marker='+',s=1.0)\n ax.plot(np.arange(0, 10*len(tensor[t, :, 2]), 10),tensor[t, :, 2], color='blue', linewidth=0.5)\n \n return fig, ax", "def plot_Templates(templates, templates_sim, good_inds, Config, save=None):\n\n fig, axes = plt.subplots(ncols=2, sharex=True, sharey=True)\n plot_AnalogSignal(templates, color='k', alpha=0.2, lw=1, ax=axes[0],\n rescale=False)\n for i, line in enumerate(axes[0].lines):\n if not good_inds[i]:\n line.set_color('r')\n plot_AnalogSignal(templates_sim, color='k', alpha=0.2, lw=1, ax=axes[1],\n rescale=False)\n\n if save is not None:\n fig.savefig('.'.join([save, Config['general']['fig_format']]))\n plt.close(fig)\n return fig, axes", "def plotTI():\n min_dl = dlam[dlam != 0].min()\n S = int(0.4/min_dl)\n fig = pl.figure(figsize = (8,6))\n ax = fig.add_subplot(1,1,1)\n ax.spines['bottom'].set_position('zero')\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n for k, spine in ax.spines.items():\n spine.set_zorder(12.2)\n\n xs, ndx, dx = [0], 0, 0.001\n colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y']\n min_y, max_y = 0, 0\n\n lines = tuple()\n ## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper\n lv_names2 = []\n for j in range(n_components):\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n lv_names2.append(r'$%s$' % P.lv_names[j].capitalize())\n\n for j in range(n_components):\n\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n\n # Get the coordinates.\n lj = lchange[:,j]\n x = lv[:,j][lj]\n y = y[lj]/P.beta_report\n\n if 'TI' in P.methods:\n # Plot the TI integration area.\n ss = 'TI'\n for i in range(len(x)-1):\n min_y = min(y.min(), min_y)\n max_y = max(y.max(), max_y)\n #pl.plot(x,y)\n if i%2==0:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5)\n xlegend = [-100*wnum for wnum in range(len(lv_names2))]\n pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper\n\n if 'TI-CUBIC' in P.methods and not cubspl[j]==0:\n # Plot the TI-CUBIC interpolation curve.\n ss += ' and TI-CUBIC'\n xnew = numpy.arange(0, 1+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n 
pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0)\n\n else:\n # Plot the TI-CUBIC integration area.\n ss = 'TI-CUBIC'\n for i in range(len(x)-1):\n xnew = numpy.arange(x[i], x[i+1]+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n ynew[0], ynew[-1] = y[i], y[i+1]\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n if i%2==0:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5)\n\n # Store the abscissa values and update the subplot index.\n xs += (x+ndx).tolist()[1:]\n ndx += 1\n\n # Make sure the tick labels are not overcrowded.\n xs = numpy.array(xs)\n dl_mat = numpy.array([xs-i for i in xs])\n ri = range(len(xs))\n\n def getInd(r=ri, z=[0]):\n primo = r[0]\n min_dl=ndx*0.02*2**(primo>10)\n if dl_mat[primo].max()<min_dl:\n return z\n for i in r:\n for j in range(len(xs)):\n if dl_mat[i,j]>min_dl:\n z.append(j)\n return getInd(ri[j:], z)\n\n xt = [i if (i in getInd()) else '' for i in range(K)]\n pl.xticks(xs[1:], xt[1:], fontsize=10)\n pl.yticks(fontsize=10)\n #ax = pl.gca()\n #for label in ax.get_xticklabels():\n # label.set_bbox(dict(fc='w', ec='None', alpha=0.5))\n\n # Remove the abscissa ticks and set up the axes limits.\n for tick in ax.get_xticklines():\n tick.set_visible(False)\n pl.xlim(0, ndx)\n min_y *= 1.01\n max_y *= 1.01\n pl.ylim(min_y, max_y)\n\n for i,j in zip(xs[1:], xt[1:]):\n pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', ha='center', color='#151B54')\n if ndx>1:\n lenticks = len(ax.get_ymajorticklabels()) - 1\n if min_y<0: lenticks -= 1\n if lenticks < 5:\n from matplotlib.ticker import AutoMinorLocator as AML\n ax.yaxis.set_minor_locator(AML())\n pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12)\n pl.ylabel(r'$\\mathrm{\\langle{\\frac{ \\partial U } { \\partial \\lambda }}\\rangle_{\\lambda}\\/%s}$' % P.units, fontsize=20, color='#151B54')\n pl.annotate('$\\mathit{\\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54')\n if not P.software.title()=='Sire':\n lege = ax.legend(prop=FP(size=14), frameon=False, loc=1)\n for l in lege.legendHandles:\n l.set_linewidth(10)\n pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf'))\n pl.close(fig)\n return", "def plot_tsne(data, epoch, args, num_class, front='TemporalDis'):\n print(\"==> (generate pseudo label) t-sne visualization\")\n from utils.visualization.t_SNE_Visualization import tsne_visualize\n front = front + '_' + str(epoch)\n file_name = \"{}/{}.png\".format(args.tsne_folder, front)\n tsne_visualize(data, file_name, num_class)\n return True", "def plot(self, style, skymap):\n \n # plot the source locations\n write_label = True\n for lon, lat in np.nditer([self.coord.galactic.l.deg, self.coord.galactic.b.deg]):\n if write_label:\n skymap.tissot(lon, lat, 5., 30, \n facecolor = Solarized().base1, \n alpha = style.alpha_level, label = self.label)\n write_label = False\n else:\n skymap.tissot(lon, lat, 5., 30, \n facecolor = Solarized().base1, alpha = style.alpha_level)", "def _display_tsne(self):\n self._tsne_window.clear()\n self._tsne_window.plot(self._Y_tsne[:,0], self._Y_tsne[:,1], 'b.')", "def plot_sources_interp(df_nn, rm_nn, zm_nn, rstd_nn, zstd_nn, nn_dist1_weight_avg, nn_tur_weight_avg, nn_dist1_weight_std, nn_tur_weight_std,\n df_oban1, rm_oban1, zm_oban1, rstd_oban1, 
zstd_oban1, oban1_dist1_weight_avg, oban1_tur_weight_avg, oban1_dist1_weight_std, oban1_tur_weight_std,\n df_oban2, rm_oban2, zm_oban2, rstd_oban2, zstd_oban2, oban2_dist1_weight_avg, oban2_tur_weight_avg, oban2_dist1_weight_std, oban2_tur_weight_std):\n xlim = (min(df_nn.dist_1/1000) - 3, max(df_nn.dist_1/1000) + 3)\n ylim = (min(df_nn.turbulence) - 0.1, max(df_nn.turbulence) + 0.1)\n s1 = 100 # - markersize\n\n fig = plt.figure(figsize=(16, 8), dpi=300)\n ###############################################################################################\n # 1\n ax1 = fig.add_subplot(131)\n cs = plt.scatter(x = df_nn.dist_1/1000., y = df_nn.turbulence, marker = 'D', s = s1, cmap = 'Reds_r',\n vmin = 0, vmax = 100, c = df_nn.dist_itp.values, alpha = 0.8, edgecolors = 'black',\n label = 'LH[0]')\n\n # - Closest source\n r1_nn = df_nn.dist_1[df_nn.dist_itp.values == min(df_nn.dist_itp.values)].values[0]/1000.\n z1_nn = df_nn.turbulence[df_nn.dist_itp.values == min(df_nn.dist_itp.values)].values[0]\n css = plt.scatter(r1_nn, z1_nn, color = 'gray', edgecolor = 'black', marker = 'X', s = 160, label = 'LH[1]')\n # - Mean and std dev\n error = plt.errorbar(x = rm_nn, y = zm_nn, xerr = rstd_nn, yerr = zstd_nn, ecolor = 'blue', \n elinewidth = 2, color = 'blue', label = 'LH[2]', marker = 'o')\n\n # - Mean and std dev weighted\n error2 = plt.errorbar(x = nn_dist1_weight_avg, y = nn_tur_weight_avg, \n xerr = nn_dist1_weight_std, yerr = nn_tur_weight_std, \n ecolor = 'green', elinewidth = 2, color = 'green', label = 'LH[2]', marker = 'o')\n\n #\n plt.ylim(ylim)\n plt.xlim(xlim )\n plt.title('Nearest Neighbor', fontsize = 14)\n\n ###############################################################################################\n # 2\n ax2 = fig.add_subplot(132)\n cs = plt.scatter(x = df_oban1.dist_1/1000., y = df_oban1.turbulence, marker = 'D', s = s1, cmap = 'Reds_r',\n vmin = 0, vmax = 100, c = df_oban1.dist_itp.values, alpha = 0.8, edgecolors = 'black',\n label = 'LH[0]')\n # - Closest source\n r1_oban1 = df_oban1.dist_1[df_oban1.dist_itp.values == min(df_oban1.dist_itp.values)].values[0]/1000.\n z1_oban1 = df_oban1.turbulence[df_oban1.dist_itp.values == min(df_oban1.dist_itp.values)].values[0]\n css = plt.scatter(r1_oban1, z1_oban1, color = 'gray', edgecolor = 'black', marker = 'X', s = 160, label = 'LH[1]')\n # - Mean and std dev\n error = plt.errorbar(x = rm_oban1, y = zm_oban1, xerr = rstd_oban1, yerr = zstd_oban1, ecolor = 'blue', \n elinewidth = 2, color = 'blue', label = 'LH[2]', marker = 'o')\n # - Mean and std dev weighted\n error2 = plt.errorbar(x = oban1_dist1_weight_avg, y = oban1_tur_weight_avg, \n xerr = oban1_dist1_weight_std, yerr = oban1_tur_weight_std, \n ecolor = 'green', elinewidth = 2, color = 'green', label = 'LH[2]', marker = 'o')\n\n #\n plt.ylim(ylim)\n plt.xlim(xlim)\n plt.title('Barnes Intepolation Rcutoff 1 ', fontsize = 14)\n\n ###############################################################################################\n # 3\n ax3 = fig.add_subplot(133)\n cs = plt.scatter(x = df_oban2.dist_1/1000., y = df_oban2.turbulence, marker = 'D', s = s1, cmap = 'Reds_r',\n vmin = 0, vmax = 100, c = df_oban2.dist_itp.values, alpha = 0.8, edgecolors = 'black',\n label = 'LH[0]')\n # - Closest source\n r1_oban2 = df_oban2.dist_1[df_oban2.dist_itp.values == min(df_oban2.dist_itp.values)].values[0]/1000.\n z1_oban2 = df_oban2.turbulence[df_oban2.dist_itp.values == min(df_oban2.dist_itp.values)].values[0]\n css = plt.scatter(r1_oban2, z1_oban2, color = 'gray', edgecolor = 'black', marker = 
'X', s = 160,\n label = 'LH[1]')\n # - Mean and std dev\n error = plt.errorbar(x = rm_oban2, y = zm_oban2, xerr = rstd_oban2, yerr = zstd_oban2, ecolor = 'blue', \n elinewidth = 2, color = 'blue', label = 'LH[2]', marker = 'o')\n # - Mean and std dev weighted\n error2 = plt.errorbar(x = oban2_dist1_weight_avg, y = oban2_tur_weight_avg, \n xerr = oban2_dist1_weight_std, yerr = oban2_tur_weight_std, \n ecolor = 'green', elinewidth = 2, color = 'green', label = 'LH[2]', marker = 'o')\n\n #\n plt.ylim(ylim)\n plt.xlim(xlim)\n plt.title('Barnes Intepolation Rcutoff 2', fontsize = 14)\n ############################################################################################\n # - Colorbar\n cs_ax = fig.add_axes([0.92, 0.13, 0.02, 0.75])\n cbar = fig.colorbar(cs, cax=cs_ax)\n cbar.set_label('Orthogonal Distance (km)', size=13)\n\n # - Axis\n fig.text(0.5, 0.07, 'Distance from the flash initiation point to the interception point on the RHI scan (km)', \n va='center', ha='center', fontsize = 15)\n fig.text(0.08, 0.5, '$EDR^{0.33}$', va='center', ha='center', rotation='vertical', fontsize = 15)\n # -- Tilte\n #fig.text(0.5, 0.96, radar_time.strftime(\"%d %B %Y %H:%M:%S\") + ' flash id = ' + str(flashes_id[k]) +\n # ' Closest source distance = {:.2f} m'.format(min(abs(Y_min))),\n # va='center', ha='center', fontsize = 16)\n\n # - Create the legend\n leg = fig.legend(('Close LMA sources','Closest LMA source','Mean','Interception distance weighted mean'),\n loc='lower center', ncol = 4, fontsize = 12)\n LH = leg.legendHandles\n LH[0].set_color('red')\n LH[0].set_edgecolor('black')", "def construct_plot(self, amprtb):\n self.fig, [[self.ax1, self.ax2], [self.ax3, self.ax4]] = \\\n plt.subplots(2, 2, figsize=(10, 10),\n subplot_kw={'projection': self.projection})\n ind1, ind2 = amprtb._get_scan_indices(\n self.scanrange, self.timerange, False)\n\n # 10 GHz plot\n stuff = amprtb.plot_ampr_track(\n var='10'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax1, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange, return_flag=True)\n self.ax1.set_title(self.make_title('10', amprtb, ind1, ind2))\n\n # 19 GHz plot\n amprtb.plot_ampr_track(\n var='19'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax2, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax2.set_title(self.make_title('19', amprtb, ind1, ind2))\n\n # 37 GHz plot\n amprtb.plot_ampr_track(\n var='37'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax3, 
fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax3.set_title(self.make_title('37', amprtb, ind1, ind2))\n\n # 85 GHz plot\n amprtb.plot_ampr_track(\n var='85'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax4, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax4.set_title(self.make_title('85', amprtb, ind1, ind2))\n\n # plt.tight_layout()\n return True", "def logplotter(folder, sectorcounter, name1,name2, direction1, station_no, tolerance, uarray, zarray, alphas):\n import matplotlib.pyplot as plt\n import matplotlib.lines \n plt.hold('on')\n titlefont = {'fontname': 'Arial', 'size': '15', 'weight':'normal'}\n axisfont = {'fontname': 'Arial', 'size': '14'}\n fig, ax = plt.subplots(1)\n linestyle = matplotlib.lines.lineStyles.keys()[3:7]* np.shape(uarray)[1]\n degrees = 0\n for j in range(np.shape(uarray)[1]):\n ax.plot(uarray[:,j], zarray[:,j], label = (r\"$\\alpha = $ %.2f\") % alphas[j],ls = linestyle[j])\n ax.annotate(j+1, (uarray[int(round(np.shape(uarray)[1]*0.9)), j], \n\t zarray[int(round(np.shape(uarray)[1]*0.9)), j]), \n\t xytext = (24*np.cos(degrees*(np.pi/180))-12*np.sin(degrees*(np.pi/180)), 24*np.sin(degrees*(np.pi/180))+12*np.cos(degrees*(np.pi/180))), \n\t textcoords = 'offset points',\n arrowprops = dict(arrowstyle = '-', \n connectionstyle = 'arc3,rad=0'),\n rotation = 0)\n degrees += (-30)\n\ttextlist = [[str(name1 + \"-\" + name2)]] \n\tax.set_title(r\"Vertical Logarithmic profile (dir: %.1f$^\\circ$,+/-%.2f) Sector %d\" % (direction1, tolerance,sectorcounter),**titlefont)\n\tax.legend(loc='best',fontsize = 10)\n\tax.set_xlabel(\"Speed (m/s)\", **axisfont)\n\tax.set_ylabel(\"Height (m)\", **axisfont)\n\tplt.xlim(0,8)\n\tfig.savefig(folder + \"/\" + \"Sector_%d-%s-%s.png\" % (sectorcounter, station_no, direction1)) \n\tplt.hold('off')\n\treturn \" \"", "def summaryPlot1(time_raw, raw, time_det, detrend, model, ave_image, meta):\n ticid=meta['id']\n snr=meta['snr']\n per=meta['period']\n dur=meta['dur']\n depth=meta['depth']\n pn = meta['pn']\n imloc = meta['imloc'] # (col,row)\n radius = meta['radius']\n sector = meta['sector']\n cam = meta['cam']\n ccd= meta['ccd']\n \n plt.clf()\n #plt.subplot(3,2,(1,2))\n plotData(time_raw, raw, time_det, detrend, meta, nPanel=1)\n titlewords=\"TIC=%s pn:%i Sector:%i, Cam: %1u, CCD: %1u\\nP=%.4f days, Dur=%.2f hr, Depth=%.1f ppm, SNR=%.2f \" \\\n % (ticid, pn, sector, cam, ccd, per, dur*24, depth * 1e6,snr)\n plt.annotate(titlewords,(.1,.92),xycoords=\"figure fraction\", fontsize=14)\n #plt.suptitle(\"TIC %i Sector %i\" %(meta['id'], meta['sector']))\n \n plt.subplot(3,2,(3,4))\n plotFolded(time_det, detrend, model, meta, modelOn=True)\n \n plt.xlim((0,per))\n\n\n plt.subplot(325)\n plotFolded(time_det, detrend, model, meta, modelOn=True)\n if dur*1.5 < per:\n plt.xlim(per*.25-dur*1.9,per*.25+dur*1.9)\n else:\n plt.xlim(per*.15, per*.35)\n \n plt.subplot(326)\n plt.imshow(ave_image, vmax=np.percentile(ave_image, 96), cmap='cividis',origin='lower')\n plt.plot(imloc[0], imloc[1],'o',mfc=\"None\", mec='red',mew=2, alpha=.3,ms=11)\n #circ=plt.Circle(imloc, radius, fill=False, lw=2)\n plt.title('Radius: %.2f px' % radius)", "def 
plot_tsne(X, tsne, samples_n=100):\n plt.figure(figsize=(8, 8))\n plt.xlim(-20, 20); plt.ylim(-20, 20)\n cmap = cm.binary_r; cmap.set_over('k', alpha=0)\n\n sz = 2.5\n idxs = torch.randperm(len(tsne))[:samples_n]\n\n for img, (x, y) in zip(X[idxs], tsne[idxs]):\n location = (x-sz/2, x+sz/2, y-sz/2, y+sz/2)\n plt.imshow(img.squeeze(), extent=location, cmap=cmap, clim=[0, 0.1], interpolation='nearest')", "def generate_plots(infile=None):\n\n if infile is None:\n infile = HOUSE_KEEPING_FILE\n\n sources = ascii.read(infile)\n\n # Make plots using sources with snr > SNR_LIM_PLT\n flt = sources['SNR'] > SNR_LIM_PLT\n sources = sources[flt]\n\n for label in ('all', 'ACIS-I', 'ACIS-S', 'HRC-I', 'HRC-S'):\n\n flt = sources['INSTRUME'] == label\n srcs_flt = sources[flt]\n\n psfs = []\n\n if len(srcs_flt) > 0:\n psf = PSF(label)\n psf.psf_time_anal(srcs_flt, label)\n psfs.append(psf)\n # src_mon_plots(srcs_flt, title=label)\n\n # plots by dist\n # rbins = [(0, 30), (30, 60), (60, 120), (120, 180), (180, 240),\n # (240, 300), (300, 420), (420, 600)]\n\n # for rbin in rbins:\n # flt1 = srcs_flt['DIST'] >= rbin[0]\n # flt2 = srcs_flt['DIST'] < rbin[1]\n # r_sources = srcs_flt[flt1 & flt2]\n # if len(r_sources) > 0:\n # title = '{}_r{}-{}'.format(label, rbin[0], rbin[1])\n # src_mon_plots(r_sources, title=title)\n\n\n # Move the plots and remove the fits files\n cmd_str = 'mv -f *.html *.gif ' + HTML_DIR_PATH + '/. 2>/dev/null'\n os.system(cmd)\n\n os.system('rm -f *.fits')\n\n # Make webpages based on psfs", "def plot_resonator_net(data_,resonator_index=0,fig = None,axs=None,anchor_range=(30,100),min_load_temp=4, max_load_temp=8,max_uK=70):\n if fig is None:\n fig,axs = plt.subplots(ncols=4,nrows=2,figsize=(20,10))\n fig.subplots_adjust(wspace=.3)\n data_ = data_[data_.resonator_index == resonator_index]\n anchors = data_[(data_.timestream_duration>anchor_range[0]) & \n (data_.timestream_duration<anchor_range[1])]\n plot_s21(anchors, ax=axs[1,0], min_load_temp=min_load_temp, max_load_temp=max_load_temp)\n plot_load_vs_freq_shift(data_, axs=[axs[0,1],axs[1,1],axs[0,2],axs[1,2]], min_load_temp=min_load_temp, max_load_temp=max_load_temp, \n anchor_range=anchor_range)\n plot_noise(anchors, axs=[axs[0,3],axs[1,3]], min_load_temp=min_load_temp, max_load_temp=max_load_temp, max_uK=max_uK)\n axs[0,0].set_visible(False)\n info = data_.chip_name.iat[0].replace(', ','\\n')\n info += ('\\nResonator: %d @ %.6f MHz' % (resonator_index,data_.f_0_max.iat[0]))\n files = np.unique(data_.sweep_filename)\n files.sort()\n files = files.tolist()\n median_temp = np.median(data_.sweep_primary_package_temperature)\n temp_rms = np.std(data_.sweep_primary_package_temperature)\n info += ('\\nfirst file: %s' % files[0][:30])\n info += ('\\nlast file: %s' % files[-1][:30])\n info += ('\\nPackage Temperature: %.1f$\\pm$%.1f mK' % (median_temp*1000,temp_rms*1000))\n info += ('\\nPower ~ %.1f dBm\\n (%.1f dB atten, %.1f dB cold)' % (data_.power_dbm.iat[0],data_.atten.iat[0],data_.dac_chain_gain.iat[0]))\n \n fig.text(0.1,0.9,info,ha='left',va='top',size='x-small',bbox=dict(facecolor='w',pad=8))\n return fig", "def make_source_location_histogram_plots_uvis(data, file_name, ff, im, coordfile, \\\n filt, path_to_cleans=''):\n\tpylab.ion()\n\tif ff == 0:\n\t\tfig = pylab.figure()\n\t\tfig.subplots_adjust(wspace=0.4)\n\telse:\n\t\tpylab.clf()\n\t\t\n\txc,yc = np.loadtxt(coordfile, unpack=True, usecols = (0,1)) \n\t# plot #1 - object 
position\n\tsz=50.0\n\tx0=np.round(xc)-sz/2.\n\tx1=np.round(xc)+sz/2.\n\ty0=np.round(yc)-sz/2.\n\ty1=np.round(yc)+sz/2.\n\tax1 = pylab.subplot(1,2,1)\n\tax1.imshow(np.log10(im[y0:y1,x0:x1]),interpolation='nearest')\n\tax1.autoscale(axis='both',enable=False)\n\tax1.scatter([xc-x0-1.0], [yc-y0-1.0], marker='x', s=200., color='w')\n\tpylab.title('X = '+str(xc)+' Y = '+str(yc))\n\n\t# plot #2 - background histogram\n\ttmp_image=glob.glob(path_to_cleans + '*back.fits')[0]\n\tbackim = pyfits.getdata(tmp_image)\n\t#--measure back statistics (mean and mode via IRAF)\n\tinitback = iraf.imstatistics(tmp_image+'[0]', fields='mode,stddev', \\\n\t lower = -100, upper = 10000, nclip=7, \\\n\t lsigma=3.0, usigma=3.0, cache='yes', \\\n\t format='no',Stdout=1)\n\t#print 'initback:'\n\t#print initback\n\tif 'INDEF' not in initback[0]:\n\t\tllim = float(initback[0].split(' ')[0]) - 10.0*\\\n\t\t\t\tfloat(initback[0].split(' ')[1])\n\t\tulim = float(initback[0].split(' ')[0]) + 10.0*\\\n\t float(initback[0].split(' ')[1])\n\t\tbackstats=iraf.imstatistics(tmp_image+'[0]', fields='mean,mode', \\\n\t lower=llim, upper=ulim, nclip=7,lsigma=3.0, \\\n\t usigma=3.0, cache='yes', format='no',Stdout=1)\n\t\tbackmean=float(backstats[0].split(' ')[0])\n\t\tbackmode=float(backstats[0].split(' ')[1])\n\t\tfbackim= np.ndarray.flatten(backim)\n\t\tgd=np.where((fbackim > llim) & (fbackim < ulim))[0]\n\t\tbackmedian=meanclip(fbackim[gd],maxiter=7,return_median=1)[0]\n\n\t\tax2 = pylab.subplot(1,2,2)\n\t\tpylab.hist(fbackim[gd],log=True)\n\t\tpylab.ylim(0.5,600000)\n\t\tpylab.xlim(-20,20)\n\t\tpylab.plot([backmode,backmode],[0.5,600000],ls='-',color='red',\\\n\t label='mode')\n\t\tpylab.plot([backmedian,backmedian],[0.5,600000],ls='--',color='aqua',\\\n \t label='median')\n\t\tpylab.plot([backmean,backmean],[0.5,600000],ls=':',color='black',\\\n \t label='mean')\n\t\tpylab.legend(loc=2, handletextpad=0.0, borderpad=0.0, frameon=False, \\\n \t handlelength=1.)\n\t\tpylab.title('Histogram of Background Pixels')\n\t\tpylab.xlabel('Background [e-]')\n\t\tpylab.ylabel('Number of Objects')\n\t\tpylab.annotate('chip '+str(data[ff]['chip']), [0.77,0.95], \\\n \t xycoords='axes fraction')\n\t\tpylab.annotate(filt,[0.77,0.80],xycoords='axes fraction')\n\n\t\t\n\tpylab.savefig(file_name.split('.fits')[0]+'_srcloc.png')\n\tpylab.ioff()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads the entire stack with strings
def cargaAutoStr(pila):
    while not pila_llena(pila):
        largo = random.randint(1, 15)
        apilar(pila, randString(largo))
[ "def extraccion_textos():\n global NOMBRE_ASIGNATURA1, NOMBRE_ASIGNATURA2\n global DESCRIPCION_ASIGNATURA1, DESCRIPCION_ASIGNATURA2\n global CAPITULOS_ASIGNATURA1, CAPITULOS_ASIGNATURA2\n\n NOMBRE_ASIGNATURA1 = obtener_nombre(ID_ASIGNATURA1)\n NOMBRE_ASIGNATURA2 = obtener_nombre(ID_ASIGNATURA2)\n\n DESCRIPCION_ASIGNATURA1 = obtener_descripcion(ID_ASIGNATURA1)\n DESCRIPCION_ASIGNATURA2 = obtener_descripcion(ID_ASIGNATURA2)\n\n CAPITULOS_ASIGNATURA1 = obtener_capitulos(ID_ASIGNATURA1)\n CAPITULOS_ASIGNATURA2 = obtener_capitulos(ID_ASIGNATURA2)", "def saluda2(sujeto):\n print 'Hola %s !!' % sujeto", "def boton_de_descarga(url_descarga, nombre_de_descarga, texto_boton):\n with open(url_descarga, 'rb') as lector_archivo:\n objeto_a_descargar = lector_archivo.read()\n\n try:\n # Conversiones de strings <-> bytes son necesarias\n objeto_b64 = base64.b64encode(objeto_a_descargar.encode()).decode()\n\n except AttributeError as e:\n objeto_b64 = base64.b64encode(objeto_a_descargar).decode()\n\n boton_uuid = str(uuid.uuid4()).replace('-', '')\n boton_id = re.sub('\\d+', '', boton_uuid)\n\n custom_css = f\"\"\" \n <style>\n #{boton_id} {{\n background-color: rgb(255, 255, 255);\n color: rgb(38, 39, 48);\n padding: 0.25em 0.38em;\n position: absolute;\n top: 50%;\n left: 50%;\n transform: translate(-50%,-50%);\n text-decoration: none;\n border-radius: 4px;\n border-width: 1px;\n border-style: solid;\n border-color: rgb(230, 234, 241);\n border-image: initial;\n }} \n #{boton_id}:hover {{\n border-color: rgb(246, 51, 102);\n color: rgb(246, 51, 102);\n }}\n #{boton_id}:active {{\n box-shadow: none;\n background-color: rgb(246, 51, 102);\n color: white;\n }}\n </style> \"\"\"\n\n boton_html = custom_css + f'<a download=\"{nombre_de_descarga}\" \\\n id=\"{boton_id}\" href=\"data:file/txt;base64,{objeto_b64}\">{texto_boton}</a><br></br>'\n\n return boton_html", "def arroba_letras(cadena, long_palabra, costo_palabra_corta, costo_palabra_larga):\n palabras = cadena.split(\" \")\n frase_final = \"\"\n costo_total = 0\n for i in range(len(palabras)):\n if len(palabras[i]) > long_palabra:#verificio si la longitud de esa palabra cortada es menor a lo previamente establecido\n frase_final += palabras[i][0:long_palabra] + \"@ \" # corto la palabra en la posicion max y agrego un @\n costo_total += costo_palabra_corta\n if palabras[i][-1] == \".\": # veo si en la palabra corta cortada hay un punto y si lo lo borro y reemplazo por un STOP\n frase_final = frase_final.strip() + palabras[i].replace(palabras[i], \" STOP \")\n elif palabras[i][-1] == \".\": # veo si en la palabra larga cortada hay un punto y si lo hay lo borro y lo reemplazo por un STOP\n frase_final = frase_final.strip(\".\") + palabras[i].replace(palabras[i][-1], \" STOP \") \n else:\n frase_final += palabras[i] + \" \"\n costo_total += costo_palabra_larga\n frase_final += \"STOPSTOP\" \n \n return f\"\"\"El telegrama final es: \n{frase_final} \nutilizando {long_palabra} letras maximas por palabra a un costo de ${costo_total} \"\"\"", "def archivos_de_texto():\n palabra = \"\" \n palabras_candidatas = [] #lista donde se guardara las palabras candidatas de cada linea\n palabra_cantidad = {} #diccionario con la palabra candidata de clave y las veces que esta repetida en cada texto de valor\n with open(\"Cuentos.txt\",\"r\") as Cuentos: \n for linea_Cuentos in Cuentos: #cada ciclo del for es una linea del texto\n for caracter in linea_Cuentos: #cada ciclo del for es una caracter de la linea \n if caracter.isalpha():\n caracter = 
quitar_tilde_y_may(caracter) #se transformas caracteres mayusculas y tildes\n palabra += caracter #cada caracter ira formando la palabra\n if not caracter.isalpha():\n if len(palabra) >= 5: #se analiza que la palabra tenga 5 o mas caracteres\n palabras_candidatas.append(palabra) \n palabra = \"\" #se vacia la palabra ya analizada\n for palabra_en_lista in palabras_candidatas: #se introduce las palabras candidatas a un diccionario\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [1,0,0]\n else:\n palabra_cantidad[palabra_en_lista] = [int(palabra_cantidad[palabra_en_lista][0]) + 1 , 0, 0]\n palabras_candidatas = []\n with open(\"La araña negra - tomo 1.txt\",\"r\") as La_arana_negra:#se repite el mismo proceso con los otros dos textos\n for linea_Cuentos in La_arana_negra:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,1,0]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] , int(palabra_cantidad[palabra_en_lista][1]) + 1, 0]\n palabras_candidatas = [] \n with open(\"Las 1000 Noches y 1 Noche.txt\",\"r\") as muchas_noches: \n for linea_Cuentos in muchas_noches:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,0,1]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] ,palabra_cantidad[palabra_en_lista][1], int(palabra_cantidad[palabra_en_lista][2]) + 1]\n palabras_candidatas = [] \n palabra_cantidad = dict(sorted(palabra_cantidad.items())) #se ordena el diccionario alfabeticamente\n with open(\"palabras.csv\",\"w\") as palabras_csv: # se agrga el diccionario a un arcivo .csv\n for palabra in palabra_cantidad:\n palabras_csv.write(palabra)\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][0]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][1]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][2]))\n palabras_csv.write(\"\\n\")\n return palabra_cantidad", "def adaptadorCampo(campo):\n res=\"\"\n if campo==\"\" or \"null\" in campo.lower(): # vacio o nulo (null)\n res += \"NULL\"\n elif campo.isnumeric() or isFloat(campo): # integer, real\n res += lst[i]\n elif campo[0:2]==\"x'\" or campo[0:2]==\"X'\": # blob \n res += lst[i] \n else: \n res += \"'\" + campo + \"'\" # cualquier otro... 
ej: text\n return res", "def trieur_font():\n downloads_end_path_msq = \"\\Downloads\\TRI_Fichiers_Polices_Ecriture\"\n x = path_default +nom_user+ downloads_end_path_msq\n path_final_msq = \"\"\n for elements in x:\n if elements != \" \":\n path_final_msq+= elements\n commande_otf = 'move *.otf \"' + path_final_msq + '\"'\n #on vérifie si un fichier existe déjà, sinon on le créer\n if os.path.exists(path_final_msq):\n print(\"dossier déjà existant\")\n else:\n os.mkdir(path_final_msq)\n print(\"Dossier non existant, création du dossier en cours\")\n os.system(commande_otf)", "def desplegar_salida_comando(salida):\n salida = salida.decode('utf-8') #decodificamos el binario que nos envio\n print(salida)", "def adiciona_pontos(texto):\n # texto = list(texto)\n # texto = \".\".join(texto)\n # return texto\n return \".\".join(list(texto))", "def saluda1(sujeto):\n print 'Hola '+sujeto+' !!'", "def acquisizioneParametri(self):\n\n messaggio =''\n\n try: \n self.__rete = slugify(self.ui.nomeRete.text())\n # controllo se la lunghezza del nome inserito sia > di 5 caratteri\n if(len(self.__rete) < 5 or len(self.__rete) > 30):\n\n messaggio = 'err: inserimento Nome'\n raise NameError\n \n # controllo che il nome scelto sia univoco\n isPresent = self.__NNNameCheck()\n if(isPresent):\n messaggio = 'err: nome già utilizzato'\n raise NameError\n\n # controlli su numero layer e numero nodi che siano >= 1\n # e che siano rispettivamente <= 20 e <= 50\n self.__layer = int(self.ui.nLayer.text())\n if(self.__layer < 1):\n messaggio = 'err: numero layer < 1'\n raise ValueError\n elif(self.__layer >= 20):\n messaggio = 'err: numero layer > 20'\n raise ValueError\n\n self.__nodi = int(self.ui.nNodi.text())\n if(self.__nodi < 1):\n messaggio = 'err: numero nodi < 1'\n raise ValueError\n if(self.__nodi >= 50):\n messaggio = 'err: numero nodi > 50'\n raise ValueError\n\n # salvataggio della funzione scelta\n self.__funzione = self.ui.funzione.currentText()\n \n # controllo che la percentuale di Vs sia < 25%\n # e che la percentuale di Ts sia > 75%\n if(self.__percentuale < 25):\n messaggio = 'err: suddivisione'\n raise ValueError\n if (self.__percentuale > 75):\n messaggio = 'err: suddivisione'\n raise ValueError\n\n # controllo che sia stato scelto effettivamente un dataset\n if(len(self.__dataSet) == 0):\n messaggio = 'err: dataSet errato'\n raise NameError\n\n # setto il tasto caricamento di una rete non cliccabile\n self.ui.but_caricaRete.setEnabled(False)\n\n # cambio nome del tasto convalida\n self.ui.but_convalida.setText('confermato')\n self.ui.comunicazione.setText('')\n #abilito salvataggio\n self.ui.but_salva.setEnabled(True)\n\n # settandola a True permetto che il training venga effettuato\n # dato che i dati inseriti sono validi\n self.__convalida = True\n return True\n except:\n # in caso di eccezzioni faccio comparire il messaggio\n self.ui.comunicazione.setText(messaggio)\n return False", "def __str__(self):\n transicoes = [] # declara o vetor string de transições\n for x, y in self.__transicoes.items(): # enquanto não percorrer todo o vetor de transições, encapsulado na classe, faça:\n transicao = x + \":\" + str(y) # a variável local transicao recebe uma transição formatada\n transicoes.append(transicao) # a variável local é armazenada no vetor de string\n trancictions = \"\\n\".join(transicao) # o vetor transictions recebe o conteúdo da variável local e o formata em um padrão específico\n return trancictions # retorna o vetor com os campos formatados", "def load_str(filename='panelcode.txt', 
path=''):\n if path == '':\n path = sketchPath() + '/data/input/'\n strings = loadStrings(path + filename)\n return strings", "def agregar_bolsa(self, letra, cantidad):", "def image(lote,fabricante, fecha_vencimiento):\n \n \n img = Image.new('RGB', (200, 150), \"white\")\n #crea una plantilla en blanco llamada img\n im = Image.open('fabrica/'+fabricante+'.jpg')\n #trae la imagen asociada al fabricante\n img.paste(im,(0,0))\n #inserta la imganen en la plantilla\n fnt = ImageFont.truetype('fuente/Arial.ttf', 12)\n #define la fuente del texto\n d=ImageDraw.Draw(img)\n #nombra el metodo para escribir como d\n d.text((2, 100),'Fecha de vencimiento: '+str(fecha_vencimiento), font=fnt, fill=(0, 0, 0))\n #escribe la fecha de vencimiento\n d.text((2, 125),'No.lote: ' +str(lote), font=fnt, fill=(0, 0, 0))\n #escribe el numero de lote\n img.save('imagenes/'+str(lote)+'.jpg')\n #guarda la imagen creada en la carpeta imagenes\n ruta='imagenes/'+str(lote)+'.jpg'\n #nombra la ruta donde se guardo la imagen creada \n return ruta", "def cabecalho(dic_cabecalho,dat_reuniao,imagem):\n\n tmp=''\n tmp+='\\t\\t\\t\\t<image x=\"4.1cm\" y=\"26.9cm\" width=\"74\" height=\"60\" file=\"' + imagem + '\"/>\\n'\n tmp+='\\t\\t\\t\\t<lines>3.3cm 26.3cm 19.5cm 26.3cm</lines>\\n'\n tmp+='\\t\\t\\t\\t<setFont name=\"Helvetica-Bold\" size=\"15\"/>\\n'\n tmp+='\\t\\t\\t\\t<drawString x=\"6.7cm\" y=\"28.1cm\">' + dic_cabecalho['nom_casa'] + '</drawString>\\n'\n tmp+='\\t\\t\\t\\t<setFont name=\"Helvetica\" size=\"11\"/>\\n'\n tmp+='\\t\\t\\t\\t<drawString x=\"6.7cm\" y=\"27.6cm\">' + 'Estado de ' + dic_cabecalho['nom_estado'] + '</drawString>\\n'\n return tmp", "def _helper_exporta_parametros(self, parametros, grava_arquivo=True, filename='parametros.txt', separador=' '):\n STRING_lista = [f\"{i['nome']}{separador}{i['valor']}{separador}\\\"{i['desc']}\\\"\" for i in parametros]\n\n # Cria a string concatenando cada linha uma abaixo da outra\n STRING = '\\n'.join(STRING_lista)\n\n if grava_arquivo:\n with open(filename, 'w') as file:\n file.write(STRING)\n print(f'Arquivos de parâmetros salvos em {filaname}')\n\n return STRING", "def ler_decisao(nome_arquivo):\r\n conteudo = ''\r\n\r\n # ...\r\n return conteudo", "def prendre_commande(self):\r\n #self.commandes est une liste de string, un string représentant une commande\r\n for commande in self.commandes:\r\n print(f'[Serveur] Je prends commande de {commande}') \r\n self.pic.embrocher(commande)\r\n print(\"[Serveur] Il n'y a plus de commandes à prendre\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the element at the top of the stack
def cima(pila):
    return pila.datos[pila.tope]
[ "def elemento(self):\n try:\n nodo = self._lista._inicio\n\n count = 0\n\n while count != self._actual:\n nodo = nodo.getSiguiente()\n count += 1\n\n return nodo.getDato()\n\n except AttributeError:\n return self._lista._datos[self._actual]\n\n raise AttributeError", "def elemento(self, pos):\n if not self._datos or pos < 0 or pos > self._tamanio:\n raise IndexError\n\n return self._datos[pos]", "def marcarPunto(self):\n # Es primera vez que marco\n if self.tempSelected == None:\n # Capturo el ultimo elemento se se selecciono\n self.tempSelected = self.telaMAPA.find_withtag(self.elementoSeleccionado)\n # Lo Pinto\n self.telaMAPA.itemconfigure(self.elementoSeleccionado, fill=\"purple\")\n else:\n # Desmarco el anterior\n self.telaMAPA.itemconfigure(self.tempSelected, fill=\"white\")\n # Marco el nuevo\n self.tempSelected = self.telaMAPA.find_withtag(self.elementoSeleccionado)\n # Lo Pinto\n self.telaMAPA.itemconfigure(self.elementoSeleccionado, fill=\"purple\")", "def copiar_pieza(self):\n pieza_nueva = PiezaGo(self.obt_id(),self.obt_fila(),self.obt_columna(),self.obt_agrupacion())\n pieza_nueva.tipo_pieza = self.tipo_pieza \n return pieza_nueva", "def scrape_carteleraVIEJA(data, comp_nom):\t\n\t\n\tfunciones = []\n\tsoup = BeautifulSoup(data, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)\n\tshow_exp = re.compile(r'sid=(\\d+)')\n\t\n\tcomplejo_org = Complejo.objects.get(nombre=comp_nom)\n\t\n\t#Busca complejo platino... en caso de existir:\n\tcomplejo_platino = complejo_org\n\t\n\t\n\tpeliculas = soup.find('table', cellspacing='0', cellpadding='0', border='0').contents[3:-1:2]\n\t\n\tfor peli in peliculas:\n\t\ttres_D = False\n\t\tidioma = None\n\t\t\n\t\t#Checar tiene logo de 3d\n\t\tif peli.find('div', 'icono_platino').find('img', src=re.compile(r'3d.png$')): tres_D = True\n\t\t\n\t\t#Encabezado contiene titulo e idioma\n\t\tencabezado = peli.find('li', 'texto_3', style='margin: 2px 0px 0px; float: left; width: 155px;')\n\t\ttitulo = ''.join(encabezado.findAll(text=True)).replace('\\n', '').strip()\n\t\t\n\t\t\n\t\t#Determina Idioma\n\t\tif encabezado.find('img', alt='idioma').get('src', '').find('ing') > 0:\n\t\t\tidioma = 'ingles'\n\t\telse:\n\t\t\tidioma = 'espanol'\n\t\t\n\t\ttit = '|'+ titulo + '|'\n\t\t#Buscar pelicula por titulo segun idioma y 3d.. 
subtitulada o no.\n\t\tpeli_query = Pelicula.objects.filter(alt_tit__icontains=tit, tres_D=tres_D)\n\t\tif len(peli_query) > 1:\n\t\t\t#Si idioma == ingles, selecciona la pelicula subtitulada\n\t\t\tpelicula = peli_query.filter(subtitulada= (idioma == 'ingles') )\n\t\telif len(peli_query) == 1:\n\t\t\tpelicula = peli_query[0]\n\t\telse:\n\t\t\tlogger.debug( \"No se encontro pelicula %s\" % titulo\t\t)\n\t\t\t\n\t\thoras_html = peli.find('div', id='horax')\n\t\tplatino_b= False\t\t\n\t\tfor tag in horas_html.contents:\n\t\t\t#Me salto todo lo que no es html\n\t\t\tif type(tag) != NavigableString:\t\t\n\t\t\t\t#En caso de que sea funciones de platino\n\t\t\t\tif tag.name == 'center':\n\t\t\t\t\tplatino_b = True\n\t\t\t\t\tfuncion_name = ''.join(tag.findAll(text=True)).strip()\n\t\t\t\t\tif funcion_name.find('Platino') > -1:\n\t\t\t\t\t\t#Ajustar el complejo para platino\n\t\t\t\t\t\tcomplejo = complejo_platino\n\t\t\t\t\t\t\n\t\t\t\telif tag.get('style','').find('border-bottom: 1px solid rgb(238, 207, 0);') > -1:\n\t\t\t\t\t#Ajustar de regreso el complejo normal\n\t\t\t\t\tcomplejo = complejo_org\n\t\t\t\t\tplatino_b = False\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t#Si es renglon de hora y no algo mas como <br/>\t\t\t\n\t\t\t\tif tag.name== 'div' and tag.get('id','') == 'general':\n\t\t\t\t\tfecha = parseDate(tag.find('div', id=fecha).string)\n\t\t\t\t\tfunciones.extend(\n\t\t\t\t\t\t[{\n\t\t\t\t\t\t\t'pelicula': pelicula,\n\t\t\t\t\t\t\t'complejo': complejo,\n\t\t\t\t\t\t\t'hora': datetime.datetime(fecha.year, fecha.month, fecha.day, *time.strptime( hora_html.string , '%H:%M')[3:5]),\n\t\t\t\t\t\t\t'pol_idShowTime': show_exp.search(hora_html['href']).group(1),\n\t\t\t\t\t\t\t} for hora_html in tag.find('div', id='funciones').find('a', 'texto_1')]\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t)\n\t\t\t\t\t#logger.debug( funciones)\n\treturn funciones", "def buscaElemento(self, numero):\n if not self.esta_vazia():\n elementoAtual = self._cabeca\n while True:\n if elementoAtual._inteiro == numero:\n return numero\n else:\n if elementoAtual._proximo == self._cabeca:\n return None\n else:\n elementoAtual = elementoAtual._proximo\n\n return None", "def obter_valor(c):\n return c[0]", "def ver_tope(self):\r\n\t\tif self.esta_vacia():\r\n\t\t\treturn None\r\n\t\telse:\r\n\t\t\treturn self.elementos[-1]", "def getCarteAt(self, pos):\r\n #A compléter\r", "def scraper_voto(self):\n\n #per trovare il link a fantacalcio.it devo prima trovare il link della squadra e trovare il suo nome\n soup_rosa = BeautifulSoup(\n requests.get(f\"{self.LINK_FANTACALCIO_IT}/{self.team}#rosa\").text,\n \"html.parser\",\n )\n print(self.name)\n\n displayed_name = self.name\n if displayed_name == \"Coulibaly\": # caso estremo, il sito si confonde\n displayed_name = \"Coulibaly M.\"\n\n # trovo il link personale del giocatore e glielo assegno\n link = soup_rosa.find(\"a\", text=displayed_name.upper())[\"href\"]\n self.scheda_giocatore = link\n\n # leggo voto e media voto\n soup = BeautifulSoup(requests.get(link).text, \"html.parser\")\n\n self.media_voto = float(soup.find_all(class_=\"nbig2\")[0].text.replace(\",\", \".\"))\n self.media_fantavoto = float(\n soup.find_all(class_=\"nbig2\")[1].text.replace(\",\", \".\")\n )\n\n # leggo anche il ruolodalla schedina delle info\n infos = soup.find_all(class_=\"col-lg-6 col-md-6 col-sm-12 col-xs-12\")[-2]\n self.ruolo = str(infos.find(\"span\").text)\n\n # compilo i dati: partite, gol e assist\n dati_partite = soup.find_all(class_=\"nbig\")\n\n partite = \"🥅 \" + dati_partite[0].text\n # i portieri hanno 
statistiche diverse!\n if self.ruolo == \"P\":\n goal = \"❌ \" + dati_partite[1].text\n self.dati = \"<br>\".join([partite, goal])\n else:\n goal = \"⚽ \" + dati_partite[1].text\n assist = \"👟 \" + dati_partite[2].text\n self.dati = \"<br>\".join([partite, goal, assist])\n\n # aggiungo stellina al nome se hanno una bella media voto\n if self.media_fantavoto > 7:\n self.name += \" ⭐\"", "def Piano(self):\r\n P_plan = Piano(self.Vertici[0], self.Centro, self.Vertici[1])\r\n I = []\r\n for n in range(2, self.Num_lati):\r\n I.append(Piano_e_Punto(self.Vertici[n], P_plan))\r\n if len(I) == sum(I):\r\n return (P_plan)\r\n else:\r\n print('Il piano non e\\' unico.')\r\n return (None)", "def joga(self, tipo, casa): \n _col, _lin = casa\n _casa = self.tabuleiro[_col][_lin]\n _casa.joga(tipo)", "def first(self):\n if self.is_empty():\n raise Empty(\"Deque está vacío\")\n return self._header._next._element # un artículo real justo después de la cabecera", "def __init__(self):\n self.enfila= 0\n self.fila = []", "def retirer_objet(self, nom_membre):\n membre = self.get_membre(nom_membre)\n objet = membre.tenu\n membre.tenu = None", "def elemento_actual(self):\n\t\tif not self.actual:\n\t\t\treturn None\n\t\treturn self.actual.dato", "def jouerAleatoire(self, plateau):\n mouvements = plateau.obtenirMouvementsValides(self.cote)\n self.choix = random.choice(mouvements)\n return self.choix", "def busca(self, palavra):\n pos = self.buscaPos(palavra)\n if pos == -1:\n return None\n else:\n return self.lista[pos]", "def __getitem__(self,i):\n return self.elements[i]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the reversed stack
def invertir(pila1):
    pila2 = Pila()
    while not pila_vacia(pila1):
        apilar(pila2, desapilar(pila1))
    return pila2
[ "def scale_invert(self):", "def modular_inverse(self):\n i = gmpy2.invert(self.c2, self.n)\n mx = pow(self.c1, self.a, self.n)\n my = pow(i, int(-self.b), self.n)\n self.m= mx * my % self.n", "def __invert__(self):\n return BitBoard(~self.num)", "def a_inv(self):\n return Word(int.__sub__((1 << 16), self) % (1 << 16))", "def __invert__(self):\n return self * Multivector3D(pscalar=1)", "def invert(self):\n self._x = -(self._x) # magnitude won't change, so don't use the property setter\n self._y = -(self._y) #\n self._z = -(self._z) #", "def flip_vert():\r\n pass", "def f_reversible_inverse(self, program):\n raise NotImplementedError", "def _inverse(G):\n # TODO: implement this.\n pass", "def __invert__(self):\n return self.__neg__()", "def __invert__(self):\n return Factorization([(p,-e) for p,e in reversed(self)],\n cr=self._cr(), unit=self.unit()**(-1))", "def invertir_camino(D, P):\n\n assert D.directed\n for i in range(len(P) - 1):\n e = (P[i], P[i + 1])\n D.remove_edge(e)\n e_inv = inv(e)\n D.add_edge(e_inv)\n return D", "def invert(self):\n self.value = ~self", "def invert(self, src=None):\n if src is None:\n dst = TOOLS._invert_matrix(self)\n else:\n dst = TOOLS._invert_matrix(src)\n if dst[0] == 1:\n return 1\n self.a, self.b, self.c, self.d, self.e, self.f = dst[1]\n return 0", "def comp_inverse_un(switch,nbTermes,maxInt,precision):\n P = poly_random(switch,nbTermes,maxInt)\n P = P - P(0) + P.parent().one()\n return inverse_un_series(P,precision)", "def lr_flip(self):\n for g in self.grid:\n g.reverse()", "def invertir(self):\n if(not self.dirigido):\n return self\n\n g = Graph(True)\n\n for v in self.vertices:\n g.aniadir_vertice(v,0)\n for x in self.obtener_conocidos(v):\n g.aniadir_vertice(x,0)\n g.unir_vertices(x,v,0)\n return g", "def _invert(G):\n return Surreal.from_value(1 / G._n)", "def inverse(self):\n return Vector4(-self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finalize the grades and print. Only for assessors.
def finalize(request, pk, version=0):
    ts = get_timeslot()
    if not hasattr(ts, 'resultoptions'):
        raise PermissionDenied("Results menu is not yet visible.")
    else:
        if not get_timeslot().resultoptions.Visible:
            raise PermissionDenied("Results menu is not yet visible.")
    dstr = get_object_or_404(Distribution, pk=pk)
    if not hasattr(dstr, 'presentationtimeslot'):
        raise PermissionDenied('This student does not have a presentation planned. Please plan it first.')
    if not request.user.is_superuser and \
            request.user not in dstr.presentationtimeslot.Presentations.Assessors.all() and \
            request.user != dstr.Proposal.Track.Head:
        raise PermissionDenied("You are not the correct owner of this distribution. "
                               " Grades can only be finalized by assessors or track heads. "
                               " To get a preview of the print view, use the 'Print Preview' button.")
    version = int(version)
    # check if grade is valid
    error_list = ''
    for cat in GradeCategory.objects.filter(TimeSlot=get_timeslot()):
        try:
            cat_res = cat.results.get(Distribution=dstr)
            if not cat_res.is_valid():
                error_list += ('<li>Category {} is not completed.</li>'.format(cat))
        except CategoryResult.DoesNotExist:
            error_list += ('<li>Category {} is missing</li>'.format(cat))
    if error_list:
        return render(request, "base.html", context={
            'Message': '<h1>The results of this student are not yet finished</h1><p>The following error(s) occurred:</p><ul>{}</ul>'.format(error_list),
            "return": "results:gradeformstaff",
            "returnget": str(pk),
        })
    if version == 0:
        # The normal page summarizing the grades of the student
        return render(request, "results/finalize_grades.html", {
            "dstr": dstr,
            "catresults": dstr.results.all(),
            "final": all(f.Final is True for f in dstr.results.all()),
            "finalgrade": dstr.TotalGradeRounded(),
            "preview": False,
        })
    else:
        # type 1 and 2, finalize grades.
        if get_timephase_number() != 7:
            raise PermissionDenied("Finalize grades is only possible in the time phase 'Presentation of results'")
        for cat in dstr.results.all():
            # set final to True, disable editing from here onward.
            cat.Final = True
            cat.save()
        if version == 1:
            # printable page with grades
            return render(request, "results/print_grades_pdf.html", {
                "dstr": dstr,
                "catresults": dstr.results.all(),
                "finalgrade": dstr.TotalGradeRounded(),
            })
        elif version == 2:
            # pdf with grades
            html = get_template('results/print_grades_pdf.html').render({
                "dstr": dstr,
                "catresults": dstr.results.all(),
                "finalgrade": dstr.TotalGradeRounded(),
            })
            buffer = BytesIO()
            pisa_status = pisa.CreatePDF(html.encode('utf-8'), dest=buffer, encoding='utf-8')
            if pisa_status.err:
                raise Exception("Pisa Failed PDF creation in print final grade for distribution {}.".format(dstr))
            buffer.seek(0)
            response = HttpResponse(buffer, 'application/pdf')
            response['Content-Disposition'] = 'attachment; filename="bepresult_{}.pdf"'.format(dstr.Student.usermeta.get_nice_name())
            return response
    raise PermissionDenied('Invalid type.')
[ "def print_grades(self):\n for i in self._grades:\n print(f'Course name: \"{i}\", Letter grade: \"{self._grades[i]}\"')\n pass", "def main():\n\n students = [\"Chris\", \"Jesse\", \"Sally\"]\n grades = [90, 80, 70]\n print_gradebook(students, grades)", "def generate_gradelog(self):\n\n grade = self._get_grade()\n gradelog_path = self.get_gradelog_path()\n\n with open(gradelog_path, 'w') as f:\n # this used to be student's name, but that might be FERPA\n gradelog_path_byte_encoded \\\n = str(self.get_gradelog_path()).encode('utf-8')\n f.write(\"%s\\n\\n%s\\nstudent_name: \\\"%s\\\"\" % (\n self.ZUCCHINI_BEGIN_GRADELOG,\n self._assignment.name,\n hashlib.sha224(gradelog_path_byte_encoded).hexdigest()[0:31]))\n\n if self._submission.seconds_late is not None:\n m, s = divmod(self._submission.seconds_late, 60)\n h, m = divmod(m, 60)\n\n f.write(\", late by (h:m:s): %d:%02d:%02d\" % (h, m, s))\n\n f.write(\"\\n\\n\")\n\n assignment_pass = True\n\n for component in grade.components:\n if component.error is not None:\n assignment_pass = False\n f.write(\"(%s / %s) [FAIL] TOTAL for %s: %s%s\\n\\n\" % (\n self._left_pad(component.points_got),\n self._left_pad(component.points_possible),\n component.name, component.error,\n ('\\n' + component.error_verbose)\n if component.error_verbose else ''))\n else:\n component_pass = True\n\n for part in component.parts:\n if part.grade >= 1:\n f.write(\"(%s / %s) [PASS] %s: %s\\n\" % (\n self._left_pad(part.points_got),\n self._left_pad(part.points_possible),\n component.name, part.name))\n else:\n component_pass = False\n assignment_pass = False\n f.write(\"(%s / %s) [FAIL] %s: %s - %s\\n\" % (\n self._left_pad(part.points_got),\n self._left_pad(part.points_possible),\n component.name, part.name, part.log))\n\n # print totals for assignment component\n f.write(\"(%s / %s) [%s] TOTAL for %s\\n\\n\" % (\n self._left_pad(component.points_got),\n self._left_pad(component.points_possible),\n \"PASS\" if component_pass else \"FAIL\",\n component.name))\n\n # print totals for complete assignment\n f.write(\"(%s / %s) [%s] TOTAL for %s (without penalties)\\n\" % (\n self._left_pad(grade.raw_grade),\n self._left_pad(Fraction(1)),\n \"PASS\" if assignment_pass else \"FAIL\",\n self._assignment.name))\n\n # print penalties (like being late)\n for penalty in grade.penalties:\n if penalty.points_delta != 0:\n f.write(\"(%s) Penalty: %s\\n\" % (\n self._left_pad(penalty.points_delta),\n penalty.name\n ))\n\n f.write(\"\\n-----------------------\\n| %s%% FINAL SCORE \"\n \"|\\n-----------------------\\n\\n\" %\n (self._left_pad(grade.grade)))\n\n # write filenames and hashes\n file_hashes, submission_hash = self.generate_submission_hash()\n f.write(\"---- File Hashes ----\\n\")\n # x is file_path and y is file_path_str, not used\n for x, y, file_path_str_abbrev, file_hash in file_hashes:\n f.write(\"(sha1: %s) %s\\n\" % (file_hash, file_path_str_abbrev))\n f.write(\"\\n---- Submission Hash ----\\n(sha1: %s)\\n\\n\"\n % submission_hash)\n f.write('%s\\n' % self.ZUCCHINI_END_GRADELOG)\n\n # write gradelog hash\n with open(gradelog_path, 'r+') as f:\n file_data = f.read()\n begin_idx = file_data.index(self.ZUCCHINI_BEGIN_GRADELOG)\n end_idx = file_data.index(self.ZUCCHINI_END_GRADELOG)\n gradelog_data = file_data[begin_idx:end_idx]\n gradelog_hash = hashlib.sha1(gradelog_data.encode()).hexdigest()\n f.write(\"%s\\n\" % (gradelog_hash))", "def write_grade(self):\n # Need to put the components in the form used in the submission\n # meta.json\n 
self._submission.write_grade(self.serialized_component_grades())", "def finalize(self):\n print(\"Please wait while finalizing the operation.. Thank you\")\n self.save_checkpoint()\n self.summary_writer.export_scalars_to_json(\"{}all_scalars.json\".format(self.config.summary_dir))\n self.summary_writer.close()\n self.data_loader.finalize()", "def set_garbage(self):\n self.grade = 0", "def test_final_grade(self):\n self.assertEqual(hw4.final_grade(self.cs122), {'Zoe': 91,\n 'Alex': 94, 'Dan': 80,\n 'Anna': 101})", "def finalize(self):\n logger.debug(\"Generation Complete\")\n self.events.generation_complete()", "def take_test(self):\n\n self.score = self.exam.administer()\n print \"Final score: {}\".format(self.score)", "def show_student_grades(self):\n assignments_with_grades = [assignment for assignment in self.student.assignments\n if assignment.grade is not None]\n if not self.student.assignments:\n StudentView.print_user_have_no_grades()\n assignments_as_strings_list = []\n for assignment in assignments_with_grades:\n grade_with_assignment_name = 'Grade: {}, Assignment title: {}'.format(assignment.grade, assignment.title)\n assignments_as_strings_list.append(grade_with_assignment_name)\n StudentView.display_user_grades(assignments_as_strings_list)", "def _export_grades(course=None, lecture=None):\n\n # build csv file\n headers = ['Student', 'Total Correct', 'Lectures Attended', 'Questions Answered']\n score_dict = {} # dictionary with student id's as keys, dictionaries as values (which in turn have question ids as keys and scores as values)\n export_course = False\n\n if course:\n lectures = course.lectures\n lectures.sort(key=lambda x: x.created_at)\n course_title = course.title.replace(' ', '_')\n file_id = course_title\n export_course = True\n elif lecture:\n course = lecture.course\n course_title = course.title.replace(' ', '_')\n lectures = [lecture]\n lecture_title = lecture.title.replace(' ', '_')\n file_id = f'{lecture_title}_of_{course_title}'\n else:\n raise Exception('must pass either course or lecture')\n\n # fill score_dict with empty dicts for enrolled students\n for student in course.students:\n score_dict[student.id] = {'Total Correct': 0, 'Lectures Attended': 0, 'Questions Answered': 0}\n \n if export_course:\n for lecture in lectures:\n lecture_header = f'Total for {lecture.title}'\n headers.append(lecture_header)\n for student in course.students:\n score_dict[student.id][lecture_header] = 0\n\n num_lectures = 0\n num_questions = 0\n\n # process all questions\n for lecture in lectures:\n num_lectures += 1\n questions = lecture.questions\n attending_students = set()\n for question in questions:\n num_questions += 1\n col_header = f'{lecture.title}: {question.question_title}'\n headers.append(col_header)\n answers = question.answers\n\n # process each answer to this question\n for answer in answers:\n student_id = answer.student_id\n attending_students.add(student_id)\n score = 1 if question.is_correct(answer.answer) else 0\n\n # add score to the student's list (if enrolled)\n if student_id in score_dict:\n score_dict[student_id]['Total Correct'] += score\n score_dict[student_id]['Questions Answered'] += 1\n score_dict[student_id][col_header] = score\n if export_course:\n score_dict[student_id][lecture_header] += score\n \n for student_id in attending_students:\n score_dict[student_id]['Lectures Attended'] += 1\n\n # note that heroku discards dynamically generated files on dyno restart!\n today = date.today().strftime(\"%b-%d-%Y\")\n filename = 
f'grades_for_{file_id}_on_{today}.csv'\n dirname = os.path.dirname(__file__)\n with open(os.path.join(dirname, 'dynamic_content/grades/', filename), 'w') as f:\n csv.register_dialect('quote all', quoting=csv.QUOTE_ALL)\n writer = csv.DictWriter(f, headers, restval=0, dialect='quote all')\n writer.writeheader()\n for student_id, inner_dict in score_dict.items():\n netId = StudentModel.get_student_by_uuid(student_id).netId\n inner_dict['Student'] = netId\n writer.writerow(inner_dict)\n \n f.write('\\n\\n')\n summary_writer = csv.DictWriter(f, ['Summary Statistics', 'Total Lectures', 'Total Questions'], restval=0, dialect='quote all')\n summary_writer.writeheader()\n summary_writer.writerow({\n 'Summary Statistics': '',\n 'Total Lectures': num_lectures,\n 'Total Questions': num_questions\n })\n\n with open(os.path.join(dirname, 'dynamic_content/grades/', filename), 'r') as f:\n file_data = f.read()\n\n path = os.path.join(dirname, \"dynamic_content/grades/\", filename)\n if os.path.exists(path):\n try:\n return custom_response({\n 'fileData': file_data,\n 'fileName': filename\n }, 200)\n finally:\n os.remove(path)\n else:\n return custom_response({'error': 'something is wrong'}, 500)", "def execute(self, grades):\n raise_not_implemented_error()", "def print_grades(grades, grader_name):\n grades = sorted(grades,\n key=lambda grade: grade.student_name())\n # Length of longest name\n max_name_len = max(len(grade.student_name()) for grade in grades)\n\n grade_report = '\\n'.join(\n '{:<{max_name_len}}\\t{}\\t{}'.format(\n grade.student_name(),\n grade.score() if grade.graded() else '(ungraded)',\n grade.breakdown(grader_name) if grade.graded() else '',\n max_name_len=max_name_len)\n for grade in grades)\n click.echo_via_pager('grade report:\\n\\n' + grade_report)", "def print_allocations(self, ):\n pass", "def test_adjusted_grade_empty_exam(self):\n self.assertEqual(hw4.adjusted_grade(self.iclicker, {}),\n {'Ryan': 1, 'Andrea': 1, 'Bryan': 0,\n 'Zoe': 0, 'Anna': 1, 'Alex': 1})\n self.assertEqual(self.iclicker, {'Zoe': 46, 'Alex': 121,\n 'Ryan': 100, 'Anna': 110,\n 'Bryan': 2, 'Andrea': 110})\n self.assertEqual(self.exam, {'Dan': 89, 'Ryan': 89, 'Alex': 95,\n 'Anna': 64, 'Bryan': 95, 'Andrea': 86})", "def finalize(self):\n pass", "def finalize_scores(self):\n if self.candidates_finalized:\n return\n self.candidates_finalized = True\n for cand in self.candidates:\n new_logp_blank = cand.logp_total()\n last_word = cand.text_state.last_word\n if self.lm is not None and last_word != '':\n # Merging cands with texts differing only in the final sep was not done in the reference.\n new_lm_state = kenlm.State()\n logp_lm_last_word = self.lm.BaseScore(cand.lm_state, last_word, new_lm_state) * self.log10_to_ln\n cand.lm_state = new_lm_state\n if self.oov_score is not None and last_word not in self.lm:\n logp_lm_last_word = self.oov_score\n new_logp_blank += self.alpha * logp_lm_last_word + self.beta\n cand.logp_blank = new_logp_blank\n cand.logp_non_blank = -np.inf\n cand.new_logp_blank = None\n cand.new_logp_non_blank = None", "def finalize(self):\n for learner in self.learners.values():\n learner.finalize()\n for net in self.inverse_map.values():\n net.compile()", "def print_results(self) -> None:\n print(\"=\" * 70, file=sys.stderr)\n total = 0.0\n max_points = 0.0\n for problem in self.problems:\n total += problem.run_tests()\n max_points += problem.max_grade\n print(f\"Total Grade: {total}/{max_points}\", file=sys.stderr)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List all aspects of a given grade category in the current timeslot
def list_aspects(request, pk):
    category = get_object_or_404(GradeCategory, pk=pk)
    aspects = GradeCategoryAspect.objects.filter(Category=category)
    ts = get_timeslot()
    return render(request, "results/list_aspects.html", {
        "aspects": aspects,
        'ts': ts,
        'cat': category,
    })
[ "def get_category_averages(self):\n return [strand_analysis.get_category_average() for strand_analysis in self.strand_analyses.all()]", "def attribute(category_name):", "def list_all_cat_scores(self):\n\n categories = Category.objects.all()\n score_before = self.score\n output = {}\n\n for cat in categories: # for each of the categories\n # group 1 is score, group 2 is possible\n my_regex = re.escape(cat.category) + r\",(\\d+),(\\d+),\"\n match = re.search(my_regex, self.score, re.IGNORECASE)\n\n if match:\n score = int(match.group(1))\n possible = int(match.group(2))\n try:\n percent = int(round((\n float(score) / float(possible)) * 100))\n except:\n percent = 0\n score_list = [score, possible, percent]\n output[cat.category] = score_list\n\n else: # Is possible to remove/comment this section out\n # Temporarily store the current csv that lists all the scores\n temp = self.score\n # Add the class that is not listed at the end.\n # Always end with a comma\n temp = temp + cat.category + \",0,0,\"\n self.score = temp\n output[cat.category] = [0, 0]\n\n if len(self.score) > len(score_before): # if changes have been made\n self.save() # save only at the end to minimise disc writes\n\n return output", "def possible_grade_exam(exam_type):\n final_grades = []\n\n if exam_type == \"Elementary\":\n final_grades = [1]\n if exam_type == \"Basic\":\n final_grades = [2, 3]\n if exam_type == \"Extended\":\n final_grades = [4, 5]\n\n return final_grades", "def specialty(professor, courses):\n autumn = []\n winter = []\n spring = []\n summer = []\n \n for course in courses:\n if course.expertise == professor.expertise[0] or \\\n course.expertise == professor.expertise[1] or \\\n course.expertise == professor.expertise[2]:\n n = course.quarter\n if n == 'Autumn':\n autumn.append(course)\n elif n == 'Winter':\n winter.append(course)\n elif n == 'Spring':\n spring.append(course)\n elif n == 'Summer':\n summer.append(course)\n \n \n return (autumn, winter, spring, summer)", "def printAttractionList(self):\n print \" print ATRAACTION LIST\"\n #print self.DIMENSION\n i = 0\n while i < 2 * self.DIMENSION + 1:\n print \" Attraction at \"\n print i\n print self.attraction_list[i]\n i+= 1\n # Log.i(MY_TAG, \" Attraction at \" + String.valueOf(i) + \" is \" + String.valueOf(attraction_list.get(i)));", "def time_budget(self, mode):\n\n def time_budget_analysis(cursor, plot_parameters, by_category=False):\n \"\"\"\n extract number of occurrences, total duration, mean ...\n if start_time = 0 and end_time = 0 all events are extracted\n \"\"\"\n\n categories, out = {}, []\n for subject in plot_parameters[\"selected subjects\"]:\n out_cat, categories[subject] = [], {}\n\n for behavior in plot_parameters[\"selected behaviors\"]:\n\n if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\"SELECT distinct modifiers FROM events WHERE subject = ? 
AND code = ?\",\n (subject, behavior))\n distinct_modifiers = list(cursor.fetchall())\n\n if not distinct_modifiers:\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n if POINT in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? \"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = cursor.fetchall()\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? 
\"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = list(cursor.fetchall())\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": modifier[0], \"duration\": UNPAIRED,\n \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\n \"end time\"] and plot_parameters[\"start time\"] <= rows[idx + 1][0] <= \\\n plot_parameters[\"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n # all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations),\n 3) if len(all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(\n all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(\n statistics.mean(all_event_interdurations), 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n else: # no modifiers\n\n if POINT in self.eventType(behavior).upper():\n\n # if len(selectedObservations) > 1:\n cursor.execute(\n \"SELECT occurence,observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n\n if len(selectedObservations) == 1:\n new_rows = []\n for occurence, observation in rows:\n new_occurence = max(float(plot_parameters[\"start time\"]), occurence)\n new_occurence = min(new_occurence, float(plot_parameters[\"end time\"]))\n new_rows.append([new_occurence, observation])\n rows = list(new_rows)\n\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(statistics.stdev(all_event_interdurations),\n 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n\n cursor.execute(\n \"SELECT occurence, observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]: # include behaviors without events\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": \"-\", \"duration\": 0, \"duration_mean\": 0,\n \"duration_stdev\": \"NA\", \"number\": 0, \"inter_duration_mean\": \"-\",\n \"inter_duration_stdev\": \"-\"})\n continue\n\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior, \"modifiers\": \"NA\",\n \"duration\": UNPAIRED, \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\"end time\"] and \\\n plot_parameters[\"start time\"] <= rows[idx + 1][0] <= plot_parameters[\n \"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations), 3) if len(\n all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n out += out_cat\n\n if by_category: # and flagCategories:\n\n for behav in out_cat:\n\n try:\n category = [self.pj[ETHOGRAM][x][\"category\"] for x in self.pj[ETHOGRAM] if\n \"category\" in self.pj[ETHOGRAM][x] and self.pj[ETHOGRAM][x][\"code\"] == behav[\n 'behavior']][0]\n except:\n category = \"\"\n\n if category in categories[subject]:\n if behav[\"duration\"] not in [\"-\", \"NA\"] and categories[subject][category][\n \"duration\"] != \"-\":\n categories[subject][category][\"duration\"] += behav[\"duration\"]\n else:\n categories[subject][category][\"duration\"] = \"-\"\n categories[subject][category][\"number\"] += behav[\"number\"]\n else:\n categories[subject][category] = {\"duration\": behav[\"duration\"], \"number\": behav[\"number\"]}\n\n out_sorted = []\n for subject in plot_parameters[\"selected subjects\"]:\n for behavior in plot_parameters[\"selected behaviors\"]:\n for row in out:\n if row[\"subject\"] == subject and row[\"behavior\"] == behavior:\n out_sorted.append(row)\n\n ### http://stackoverflow.com/questions/673867/python-arbitrary-order-by\n return out_sorted, categories\n\n def default_value(behav, param):\n \"\"\"\n return value for duration in case of point event\n \"\"\"\n default_value_ = 0\n if ({self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behav} == {\"Point event\"}\n and param in [\"duration\"]):\n default_value_ = \"-\"\n return default_value_\n\n def init_behav_modif():\n \"\"\"\n initialize dictionary with subject, 
behaviors and modifiers\n \"\"\"\n behaviors = {}\n for subj in plot_parameters[\"selected subjects\"]:\n behaviors[subj] = {}\n for behav_modif in distinct_behav_modif:\n behav, modif = behav_modif\n if behav not in behaviors[subj]:\n behaviors[subj][behav] = {}\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n behaviors[subj][behav][param[0]] = default_value(behav, param[0])\n\n if plot_parameters[\"include modifiers\"]:\n behaviors[subj][behav][modif] = {}\n for param in parameters:\n behaviors[subj][behav][modif][param[0]] = default_value(behav, param[0])\n\n return behaviors\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out = \"\"\n not_paired_obs_list = []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n\n if out:\n out = \"Some observations have UNPAIRED state events<br><br>\" + out\n self.results = dialog.Results_dialog()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.pbSave.setVisible(False)\n self.results.pbCancel.setVisible(True)\n\n if not self.results.exec_():\n return\n\n flagGroup = False\n if len(selectedObservations) > 1 and mode != \"synthetic\":\n flagGroup = dialog.MessageDialog(programName, \"Group observations in one time budget analysis?\",\n [YES, NO]) == YES\n\n '''\n # check if state events are paired\n out = \"\"\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId],\n self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n if out:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n '''\n\n selectedObsTotalMediaLength = Decimal(\"0.0\")\n max_obs_length = 0\n for obsId in selectedObservations:\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n logging.debug(\"media length for {0}: {1}\".format(obsId, obs_length))\n\n if obs_length in [0, -1]:\n selectedObsTotalMediaLength = -1\n break\n max_obs_length = max(max_obs_length, obs_length)\n\n selectedObsTotalMediaLength += obs_length\n\n # an observation media length is not available\n if selectedObsTotalMediaLength == -1:\n # propose to user to use max event time\n if dialog.MessageDialog(programName,\n \"A media length is not available.<br>Use last event time as media length?\",\n [YES, NO]) == YES:\n maxTime = 0 # max length for all events all subjects\n for obsId in selectedObservations:\n if self.pj[OBSERVATIONS][obsId][EVENTS]:\n maxTime += max(self.pj[OBSERVATIONS][obsId][EVENTS])[0]\n logging.debug(\"max time all events all subjects: {}\".format(maxTime))\n selectedObsTotalMediaLength = maxTime\n else:\n selectedObsTotalMediaLength = 0\n\n logging.debug(\"selectedObsTotalMediaLength: {}\".format(selectedObsTotalMediaLength))\n\n if mode in [\"by_behavior\", \"by_category\"]:\n if len(selectedObservations) > 1:\n plot_parameters = 
self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n by_category=(mode == \"by_category\"))\n else:\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=selectedObsTotalMediaLength,\n by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n flagShowExcludeBehaviorsWoEvents=False,\n by_category=False)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n # check if time_budget window must be used\n if mode in [\"by_behavior\", \"by_category\"] and (flagGroup or len(selectedObservations) == 1):\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], selectedObservations,\n plot_parameters[\"selected behaviors\"])\n\n total_observation_time = 0\n for obsId in selectedObservations:\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n logging.debug(\"distinct_modifiers: {}\".format(distinct_modifiers))\n\n for modifier in distinct_modifiers:\n\n logging.debug(\"modifier #{}#\".format(modifier[0]))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence > ?\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n total_observation_time += (max_time - min_time)\n\n cursor.execute(\"DELETE FROM events WHERE observation = ? AND (occurence < ? 
OR occurence > ?)\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n # widget for results visualization\n self.tb = timeBudgetResults(logging.getLogger().getEffectiveLevel(), self.pj)\n\n # observations list\n self.tb.label.setText(\"Selected observations\")\n for obs in selectedObservations:\n self.tb.lw.addItem(obs)\n\n # media length\n if len(selectedObservations) > 1:\n if total_observation_time:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {}\".format(seconds2time(total_observation_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {:0.3f}\".format(float(total_observation_time)))\n else:\n self.tb.lbTotalObservedTime.setText(\"Total observation length: not available\")\n else:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {} to {}\".format(seconds2time(min_time), seconds2time(max_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {:0.3f} to {:0.3f} s\".format(float(min_time), float(max_time)))\n\n if mode == \"by_behavior\":\n\n tb_fields = [\"Subject\", \"Behavior\", \"Modifiers\", \"Total number\", \"Total duration (s)\",\n \"Duration mean (s)\", \"Duration std dev\", \"inter-event intervals mean (s)\",\n \"inter-event intervals std dev\", \"% of total length\"]\n\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\", \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for row in out:\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n column = 0\n for field in fields:\n item = QTableWidgetItem(str(row[field]).replace(\" ()\", \"\"))\n # no modif allowed\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n column += 1\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n item = QTableWidgetItem(str(round(row[\"duration\"] / float(total_observation_time) * 100, 1)))\n else:\n item = QTableWidgetItem(\"NA\")\n\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n if mode == \"by_category\":\n tb_fields = [\"Subject\", \"Category\", \"Total number\", \"Total duration (s)\"]\n fields = [\"number\", \"duration\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for subject in categories:\n\n for category in categories[subject]:\n\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n\n column = 0\n item = QTableWidgetItem(subject)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n column = 1\n if category == \"\":\n item = QTableWidgetItem(\"No category\")\n else:\n item = QTableWidgetItem(category)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n for field in fields:\n column += 1\n item = QTableWidgetItem(str(categories[subject][category][field]))\n item.setFlags(Qt.ItemIsEnabled)\n item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n self.tb.twTB.resizeColumnsToContents()\n\n self.tb.show()\n\n if mode in [\"by_behavior\", \"by_category\"] and 
(\n not flagGroup and len(selectedObservations) > 1) or mode == \"synthetic\":\n\n if mode in [\"by_behavior\", \"by_category\"]:\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma separated values (*.csv)\",\n \"OpenDocument Spreadsheet (*.ods)\",\n \"OpenDocument Workbook (*.ods)\",\n \"Microsoft Excel Spreadsheet (*.xlsx)\",\n \"Microsoft Excel Workbook (*.xlsx)\",\n \"HTML (*.html)\",\n \"Legacy Microsoft Excel Spreadsheet (*.xls)\")\n\n formats = [\"tsv\", \"csv\", \"od spreadsheet\", \"od workbook\", \"xlsx spreadsheet\", \"xlsx workbook\", \"html\",\n \"xls legacy\"]\n\n item, ok = QInputDialog.getItem(self, \"Time budget analysis format\", \"Available formats\", items, 0,\n False)\n if not ok:\n return\n\n outputFormat = formats[items.index(item)]\n extension = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n flagWorkBook = False\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" in outputFormat:\n workbook = tablib.Databook()\n flagWorkBook = True\n if \"xls\" in outputFormat:\n filters = \"Microsoft Excel Workbook *.xlsx (*.xlsx);;All files (*)\"\n if \"od\" in outputFormat:\n filters = \"Open Document Workbook *.ods (*.ods);;All files (*)\"\n\n if QT_VERSION_STR[0] == \"4\":\n WBfileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget analysis\",\n \"\", filters)\n else:\n WBfileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget analysis\", \"\",\n filters)\n if not WBfileName:\n return\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" not in outputFormat: # not workbook\n exportDir = QFileDialog(self).getExistingDirectory(self,\n \"Choose a directory to save the time budget analysis\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if mode == \"synthetic\":\n\n formats_str = (\"Tab Separated Values *.txt, *.tsv (*.txt *.tsv);;\"\n \"Comma Separated Values *.txt *.csv (*.txt *.csv);;\"\n \"Open Document Spreadsheet *.ods (*.ods);;\"\n \"Microsoft Excel Spreadsheet *.xlsx (*.xlsx);;\"\n # \"Pandas dataframe (*.df);;\"\n \"Legacy Microsoft Excel Spreadsheet *.xls (*.xls);;\"\n \"HTML *.html (*.html);;\"\n \"All files (*)\")\n\n while True:\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget report\",\n \"\", formats_str)\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget report\", \"\",\n formats_str)\n\n if not fileName:\n return\n\n extension = \"\"\n availableFormats = (\n \"tsv\", \"csv\", \"ods\", \"xlsx)\", \"xls)\", \"html\") # ) is added to distinguish between xls and xlsx\n for fileExtension in availableFormats:\n if fileExtension in filter_:\n extension = fileExtension.replace(\")\", \"\")\n if not extension:\n QMessageBox.warning(self, programName, \"Choose a file format\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n else:\n break\n\n data_report = tablib.Dataset()\n data_report.title = \"Synthetic time budget\"\n\n parameters = [[\"duration\", \"Total duration\"], [\"number\", \"Number of occurrences\"]]\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"],\n selectedObservations, plot_parameters[\"selected behaviors\"])\n\n cursor.execute(\"SELECT distinct code, modifiers FROM events WHERE subject in ({})\".format(\n \",\".join(\"?\" * len(plot_parameters[\"selected subjects\"]))),\n (plot_parameters[\"selected subjects\"]))\n\n distinct_behav_modif = 
[[rows[\"code\"], rows[\"modifiers\"]] for rows in cursor.fetchall()]\n\n # add selected behaviors that are not observed\n for behav in plot_parameters[\"selected behaviors\"]:\n if [x for x in distinct_behav_modif if x[0] == behav] == []:\n distinct_behav_modif.append([behav, \"-\"])\n\n behaviors = init_behav_modif()\n\n subj_header, behav_header, modif_header, param_header = [\"\", \"\"], [\"\", \"\"], [\"\", \"\"], [\"\",\n \"Total length (s)\"]\n # subj_header, behav_header, modif_header, param_header = [\"\"], [\"\"], [\"\"], [\"\"]\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n param_header.append(param[1])\n\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n modif_header.append(modif)\n param_header.append(param[1])\n\n data_report.append(subj_header)\n data_report.append(behav_header)\n if plot_parameters[\"include modifiers\"]:\n data_report.append(modif_header)\n data_report.append(param_header)\n\n if mode == \"by_behavior\":\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\",\n \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n\n if mode == \"by_category\":\n fields = [\"subject\", \"category\", \"number\", \"duration\"]\n\n for obsId in selectedObservations:\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], [obsId],\n plot_parameters[\"selected behaviors\"])\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n # if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n for modifier in distinct_modifiers:\n\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\"\"\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? 
AND occurence > ?\"\"\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n cursor.execute(\"\"\"DELETE FROM events WHERE observation = ? AND (occurence < ? OR occurence > ?)\"\"\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n\n behaviors = init_behav_modif()\n\n for element in out:\n for param in parameters:\n if not plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][param[0]] = element[param[0]]\n except:\n pass\n if plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][element[\"modifiers\"]][param[0]] = \\\n element[param[0]]\n except:\n pass\n\n columns = []\n columns.append(obsId)\n columns.append(\"{:0.3f}\".format(max_time - min_time))\n # columns.append([obsId])\n\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n columns.append(behaviors[subj][behav][param[0]])\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n columns.append(behaviors[subj][behav][modif][param[0]])\n\n data_report.append(columns)\n\n if mode in [\"by_behavior\", \"by_category\"]:\n rows = []\n # observation id\n rows.append([\"Observation id\", obsId])\n rows.append([\"\"])\n\n labels = [\"Independent variables\"]\n values = [\"\"]\n if INDEPENDENT_VARIABLES in self.pj and self.pj[INDEPENDENT_VARIABLES]:\n for idx in self.pj[INDEPENDENT_VARIABLES]:\n labels.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n if (INDEPENDENT_VARIABLES in self.pj[OBSERVATIONS][obsId]\n and self.pj[INDEPENDENT_VARIABLES][idx][\"label\"] in self.pj[OBSERVATIONS][obsId][\n INDEPENDENT_VARIABLES]):\n values.append(self.pj[OBSERVATIONS][obsId][INDEPENDENT_VARIABLES][\n self.pj[INDEPENDENT_VARIABLES][idx][\"label\"]])\n rows.append(labels)\n rows.append(values)\n rows.append([\"\"])\n\n rows.append(\n [\"Analysis from\", \"{:0.3f}\".format(float(min_time)), \"to\", \"{:0.3f}\".format(float(max_time))])\n rows.append([\"Total length (s)\", \"{:0.3f}\".format(float(max_time - min_time))])\n rows.append([\"\"])\n rows.append([\"Time budget\"])\n\n if mode == \"by_behavior\":\n\n rows.append(fields + [\"% of total length\"])\n # data.headers = fields + [\"% of total media length\"]\n\n for row in out:\n values = []\n for field in fields:\n values.append(str(row[field]).replace(\" ()\", \"\"))\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n # if row[\"duration\"] != \"-\" and row[\"duration\"] != 0 and row[\"duration\"] != UNPAIRED and selectedObsTotalMediaLength:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n if len(selectedObservations) > 1:\n values.append(round(row[\"duration\"] / float(selectedObsTotalMediaLength) * 100, 1))\n else:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n else:\n values.append(\"-\")\n\n rows.append(values)\n\n if mode == \"by_category\":\n rows.append = fields\n # 
data.headers = fields # + [\"% of total media length\"]\n for subject in categories:\n\n for category in categories[subject]:\n values = []\n values.append(subject)\n if category == \"\":\n values.append(\"No category\")\n else:\n values.append(category)\n\n values.append(categories[subject][category][\"number\"])\n values.append(categories[subject][category][\"duration\"])\n\n rows.append(values)\n\n data = tablib.Dataset()\n data.title = obsId\n for row in rows:\n data.append(complete(row, max([len(r) for r in rows])))\n\n if \"xls\" in outputFormat:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n\n if flagWorkBook:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n if \"xls\" in outputFormat:\n if len(data.title) > 31:\n data.title = data.title[:31]\n workbook.add_sheet(data)\n\n else:\n\n fileName = exportDir + os.sep + safeFileName(obsId) + \".\" + extension\n\n if outputFormat in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data.export(outputFormat)))\n\n if outputFormat == \"od spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.ods)\n\n if outputFormat == \"xlsx spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.xlsx)\n\n if outputFormat == \"xls legacy\":\n if len(data.title) > 31:\n data.title = data.title[:31]\n QMessageBox.warning(None, programName, (\n \"The worksheet name <b>{0}</b> was shortened to <b>{1}</b> due to XLS format limitations.\\n\"\n \"The limit on worksheet name length is 31 characters\").format(obsId, data.title),\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n\n with open(fileName, \"wb\") as f:\n f.write(data.xls)\n\n if mode == \"synthetic\":\n if extension in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data_report.export(extension)))\n if extension in [\"ods\", \"xlsx\", \"xls\"]:\n with open(fileName, \"wb\") as f:\n f.write(data_report.export(extension))\n\n if mode in [\"by_behavior\", \"by_category\"] and flagWorkBook:\n if \"xls\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.xlsx)\n if \"od\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.ods)", "def __statistics_disciplines_graded(self):\n disciplines_list = self.__grade_controller.get_list_of_graded_disciplines()\n if len(disciplines_list) == 0:\n print(\"There is no graded discipline!\")\n return\n\n for discipline in disciplines_list:\n print(str(discipline) + \"\\n\")", "def get_grade_with_unique_title():\n # cc_list = ClassCategory.objects.values_list('title', flat=True).distinct()\n # return [(cc, cc) for cc in cc_list]\n return []", "def PrintCategoryScore(Cat):\r\n print()\r\n print(\"########## Individual Category Results ##########\")\r\n for i in range(len(Cat)): # prints out the results per category \r\n print()\r\n print(Cat[i])\r\n print(CategoryScore(Cat[i]))\r\n print()\r\n return print(\"----- End of Individuals Category Results -----\")", "def get_category_stats():\n # print(\"get category stats for \", session['selected_category'])\n category_stats = get_weighted_chosen_stats(session['selected_category'], 'category')\n return jsonify(category_stats)", "def extract_abilities(self):\n titleLabel = QLabel(\"Ability Scores\")\n titleLabel.setStyleSheet('font: 20pt \"Imprint MT Shadow\"; color: #ffffff;')\n grid = QGridLayout()\n self.filterVbox.addWidget(titleLabel, 
alignment=Qt.AlignCenter)\n self.filterVbox.addLayout(grid)\n\n counter = 0\n abilities = [\"Strength\", \"Dexterity\", \"Constitution\", \"Intelligence\", \"Wisdom\", \"Charisma\"]\n for [minVal, maxVal] in self.filters[\"Abilities\"].values():\n nextLabel = QLabel(f\"{abilities[counter]} - Between {str(minVal)} & {str(maxVal)}\")\n nextLabel.setStyleSheet('font: 12pt \"Times New Roman\"; color: rgb(188, 189, 177);')\n grid.addWidget(nextLabel, math.floor(counter / 2), counter % 2, alignment=Qt.AlignCenter)\n counter += 1", "def get_course_grade_items(self, coursename):\n\n params = self.config.request_params\n params.update({\n 'wsfunction': 'local_presentation_get_course_grade_items',\n 'course': coursename\n })\n return self.config.session.get(self.config.api_url, params=params).json()", "def listAvailabilities(self, milTime):\n try:\n assert (self.availabilities['date'] != None)\n print self.availabilities['date'], self.userName\n for i in self.availabilities['times']:\n print printTime(i, milTime)\n except:\n print \"Error in printing availabilities\"", "def print_grades(self):\n for i in self._grades:\n print(f'Course name: \"{i}\", Letter grade: \"{self._grades[i]}\"')\n pass", "def addCourseGrade(self, course:str, grade: str) -> None:\r\n self.summary[course] = grade\r\n self.completedCourse.append(course)\r\n self.completedCourse = sorted(self.completedCourse)# sort in alpabetical order\r", "def Subcategories():\n subcat = {\n \t\"Featured\": 0,\n \t\"All\": 1,\n \t\"Collectibles\": 2,\n \t\"Clothing\": 3,\n \t\"BodyParts\": 4,\n \t\"Gear\": 5,\n \t\"Models\": 6,\n \t\"Plugins\": 7,\n \t\"Decals\": 8,\n \t\"Hats\": 9,\n \t\"Faces\": 10,\n \t\"Packages\": 11,\n \t\"Shirts\": 12,\n \t\"Tshirts\": 13,\n \t\"Pants\": 14,\n \t\"Heads\": 15,\n \t\"Audio\": 16,\n \t\"RobloxCreated\": 17,\n \t\"Meshes\": 18,\n \t\"Accessories\": 19,\n \t\"HairAccessories\": 20,\n \t\"FaceAccessories\": 21,\n \t\"NeckAccessories\": 22,\n \t\"ShoulderAccessories\": 23,\n \t\"FrontAccessories\": 24,\n \t\"BackAccessories\": 25,\n \t\"WaistAccessories\": 26,\n \t\"AvatarAnimations\": 27,\n \t\"ClimbAnimations\": 28,\n \t\"FallAnimations\": 30,\n \t\"IdleAnimations\": 31,\n\t \"JumpAnimations\": 32,\n\t \"RunAnimations\": 33,\n \t\"SwimAnimations\": 34,\n \t\"WalkAnimations\": 35,\n \t\"AnimationPackage\": 36,\n \t\"Bundles\": 37,\n \t\"AnimationBundles\": 38,\n\t \"EmoteAnimations\": 39,\n\t \"CommunityCreations\": 40,\n\t \"Video\": 41,\n\t \"Recommended\": 51\n }\n return subcat", "def grocery_list(*args, **kwargs):\n for category, things in kwargs.items():\n print(f\"{category}:\\n{', '.join(things)}\")\n print(f'Total cost: ${sum(args)}')", "def get_knowledge_category_terms(self):\n return # osid.grading.GradeQueryInspector" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a child config
def add(self, key, child_config): self.__dict__[key] = child_config child_config.root = self
[ "def register(self, child, name=None):\n if name is None:\n name = child.name\n if isinstance(child, ConfigValue):\n if name in self:\n raise KeyError('A child with this name already exists')\n self._values[name] = child\n elif isinstance(child, ConfigSection):\n if name in self._values:\n raise KeyError('A child with this name already exists')\n self._subsections[name].append(child)\n else:\n raise TypeError('Child must be a ConfigValue or ConfigSection object')", "def CustomConfig(self, parent):\n pass", "def inherit_config(child, parent, keys):\n for key in keys:\n if key not in child.keys():\n child[key] = parent[key]\n print(\n \"{} not found in io.yaml file, falling back to main config\".format(key)\n )\n\n return child", "def addChild(self, title, path, orig_cp):\n cp = L10nConfigParser(urljoin(self.baseurl, path), **self.defaults)\n cp.loadConfigs()\n self.children.append(cp)", "def new_child(self, prefix: str, root: Any = None, values: Dict[str, Any] = None) -> 'Config':\n config = Config(prefix, self)\n if root and prefix:\n config[prefix] = root\n if values:\n config.add_all(values)\n return config", "def add_child(self, child):\n assert isinstance(child, Term)\n self.children.append(child)\n child.parent = self\n assert not child.term_is(\"Datafile.Section\")", "def addConfig(self, config):\n self.configs[config.name] = config", "def addChild(self, child):\n\t\traise Exception(\"Abstract method IDataFlow.addChild not implemented in: \" + str(self))", "def add_child(self, child):\r\n self.children.append(child)", "def addChild(self, child):\n self.children.append(child)", "def add_child(self, child):\n self.children.append(child)\n child.parent = self", "def addConfigVar(self,name,type,isrand=False):\n self.configVars.append(ConfigClass(name,type,isrand))", "def add_dependency(self, parent: str, child: str):", "def appendChild(self, child):\n self.__initChild()\n self.__child.append(child)", "def add_child(self, child: UIComponent):\n child.parent = self\n child.set_chronometer(self._chronometer)\n self.children.append(child)\n if self.props.resize_mode == ResizeMode.AUTO:\n self._reset('add_child')", "def config_changed(self, update_parent=True):\n super(Assembly, self).config_changed(update_parent)\n # driver must tell workflow that config has changed because\n # dependencies may have changed\n if self.driver is not None:\n self.driver.config_changed(update_parent=False)", "def add_additional_configuration(self, namespace):\n pass", "def _newChild(self, child):\n self._testKeySubNsAdd()\n self._getSubNsList().append(child)", "def init_config(self):\n super().init_config()\n for param in self.parameters():\n if param.name == 'source':\n continue\n self.add_config_item(param.name,\n saver=lambda p=param: getattr(p, \"value\"),\n loader=lambda x, p=param: setattr(p, \"value\", x),\n default=param.default)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format strings using CONFIG object. This method uses Python's builtin `str.format()` method. All root properties in CONFIG are passed in as kwargs. The properties are lazily evaluated and recursively expanded.
def format(self, value, key=None, **kwargs): if not isinstance(value, str): return value # always format strings using the root so the full path is available if self.root: return self.root.format(value, key, **kwargs) variables = CONFIG_VARIABLE_PATTERN.findall(value) expanded = {} for variable in variables: if variable not in kwargs: try: root_key = variable.split(".")[0] root = self.root if self.root else self expanded[root_key] = self.format(getattr(root, root_key), variable, **kwargs) except AttributeError: raise MissingConfiguration(variable, key) expanded.update(**kwargs) return value.format(**expanded)
[ "def recursively_update_config(config, string_formatting_dict):\n\n for k in _iterate_list_or_dict(config):\n v = config[k]\n if isinstance(v, dict) or isinstance(v, list):\n recursively_update_config(v, string_formatting_dict)\n else:\n if _key_in_string(v, string_formatting_dict):\n config[k] = v.format(**string_formatting_dict)", "def set_formatter_string(config: dict):\n formatter_str = \"%(levelname)s %(name)s\"\n\n if config.get(\"formatter\"):\n return config[\"formatter\"]\n\n if config.get(\"extended\"):\n formatter_str += \".%(funcName)s():\"\n\n if config.get(\"timestamp\"):\n formatter_str = \"%(asctime)s \" + formatter_str\n\n formatter_str += \" %(message)s\"\n\n return formatter_str", "def format_string(self, template):\n out_str = \"\"\n search_property_name = \"\"\n in_property = False\n for char in template:\n if (in_property):\n if (char == '%'):\n if (len(search_property_name) > 0):\n prop_value = \"\"\n try:\n prop_value = str(self.get_property(search_property_name))\n except KeyError:\n pass\n out_str += prop_value\n search_property_name = \"\"\n in_property = False\n else:\n search_property_name += char\n else:\n if (char == '%'):\n in_property = True\n else:\n out_str += char\n\n # Handle unterminated property names\n if (in_property):\n out_str += '%'\n out_str += search_property_name\n\n return out_str", "def configure_formatter(self, config):\n if '()' in config:\n factory = config['()'] # for use in exception handler\n try:\n result = self.configure_custom(config)\n except TypeError as te:\n if \"'format'\" not in str(te):\n raise\n # Name of parameter changed from fmt to format.\n # Retry with old name.\n # This is so that code can be used with older Python versions\n #(e.g. by Django)\n config['fmt'] = config.pop('format')\n config['()'] = factory\n result = self.configure_custom(config)\n else:\n fmt = config.get('format', None)\n dfmt = config.get('datefmt', None)\n result = logging.Formatter(fmt, dfmt)\n return result", "def formatConfig(self, parameters):\n\n if 'usr' not in parameters:\n parameters['usr'] = \"\"\n\n if 'pw' not in parameters:\n parameters['pw'] = \"\"\n\n return \"\"\"[Dialer %(profileName)s]\nModem Type = Analog Modem\nPhone = *99***1#\nISDN = 0\nBaud = 460800\nUsername = %(usr)s\nPassword = %(pw)s\nModem = %(modem)s\nInit1 = ATZ\nInit2 = at+cgdcont=1,\"ip\",\"%(apn)s\"\nStupid Mode = 1\n\"\"\" % parameters", "def cfg_to_prop_string(cfg, key_transform=lambda k: k, value_transform=lambda v: v, separator=\";\"):\n return separator.join([\"%s:%s\" % (key_transform(key), value_transform(value)) for key, value in iteritems(cfg)])", "def format_config(data, env):\n if isinstance(data, list):\n return [format_config(datum, env) for datum in data]\n elif isinstance(data, dict):\n return {\n format_config(key, env): format_config(value, env)\n for key, value in data.items()\n }\n elif isinstance(data, str):\n def replace(match):\n var_name = match.group(1)\n var = env.get(var_name)\n assert var, 'Variable %s undefined.' 
% var_name\n return var\n return ENV_PLACEHODLER.sub(replace, data)\n else:\n return data", "def format(*args, **kwargs):\n if args:\n print ', '.join([str(s) for s in args])\n if kwargs:\n sub_items = []\n for k, v in kwargs.items():\n sub_items.append('{}={}'.format(k, v))\n print ', '.join(sub_items)", "def object_string_formatter(s, **kwargs):\n result = s\n for key, o in kwargs.items():\n matches = re.findall(r'\\{%s\\.([^\\}]+)\\}' % key, s)\n for attr in matches:\n if hasattr(o, attr):\n try:\n from_s = '{%s.%s}' % (key, attr)\n to_s = str(getattr(o, attr))\n result = result.replace(from_s, to_s)\n except:\n logger.warn('Replace of \"%s\" failed for \"%s\".', attr, s)\n\n return result", "def recursive_format(format_this, *args, **kwargs):\n\n if isinstance(format_this, str):\n res = lazy_format(format_this, *args, **kwargs)\n elif isinstance(format_this, list):\n res = [recursive_format(item, *args, **kwargs)\n for item in format_this]\n elif isinstance(format_this, tuple):\n res = tuple(recursive_format(item, *args, **kwargs)\n for item in format_this)\n elif isinstance(format_this, dict):\n res = {}\n for key, val in format_this.items():\n res[key] = recursive_format(val, *args, **kwargs)\n else:\n res = format_this\n\n return res", "def format_recursive(template, arguments):\n if isinstance(template, str):\n return template.format(**arguments)\n elif isinstance(template, dict):\n return {\n k: format_recursive(v, arguments)\n for (k, v) in template.items()\n }\n elif isinstance(template, list):\n return [format_recursive(v, arguments) for v in template]\n else:\n return template", "def dump_config(self, obj, level=-1):\n indent = u\" \"\n if level >= 0:\n self._nginx_config += f\"{level * indent}{{\\n\"\n if isinstance(obj, dict):\n for key, val in obj.items():\n if hasattr(val, u\"__iter__\") and not isinstance(val, str):\n self._nginx_config += f\"{(level + 1) * indent}{key}\\n\"\n self.dump_config(val, level + 1)\n else:\n self._nginx_config += f\"{(level + 1) * indent}\" \\\n f\"{key} {val};\\n\"\n else:\n for val in obj:\n self._nginx_config += f\"{(level + 1) * indent}{val};\\n\"\n if level >= 0:\n self._nginx_config += f\"{level * indent}}}\\n\"", "def format_string(self, pat=None, pat_args={}):\n if pat is None:\n pat = self.parent.pat\n if pat_args == {}:\n pat_args = self.parent.pat_args\n return entry_format.output(self, pat, pat_args)", "def build_config_dict_display(lines: List[str], config_dict: Dict[str, Any], level: int = 0):\n prefix: str = \" \" * level\n for k, v in config_dict.items():\n if isinstance(v, Dict):\n lines.append(f\"{prefix}{k}:\")\n build_config_dict_display(lines, v, level + 1)\n else:\n lines.append(f\"{prefix}{k}: {v}\")", "def __str__(self):\n pretty = pprint.PrettyPrinter(indent=2)\n return pretty.pformat(self._config)", "def format_parameter_value(self, param_config, precision):\n # type: (Dict[str, Any], int) -> str\n\n fmt = '%.{}e'.format(precision)\n swp_type = param_config['type']\n if swp_type == 'single':\n return fmt % param_config['value']\n elif swp_type == 'list':\n return ' '.join((fmt % val for val in param_config['values']))\n elif swp_type == 'linstep':\n syntax = '{From/To}Linear:%s:%s:%s{From/To}' % (fmt, fmt, fmt)\n return syntax % (param_config['start'], param_config['step'], param_config['stop'])\n elif swp_type == 'decade':\n syntax = '{From/To}Decade:%s:%s:%s{From/To}' % (fmt, '%d', fmt)\n return syntax % (param_config['start'], param_config['num'], param_config['stop'])\n else:\n raise Exception('Unsupported 
param_config: %s' % param_config)", "def format(self, *args, **kwargs) -> String:\n pass", "def pprint(self):\n text = \"\"\n for (s, o, v) in self.config:\n text += \"[%s] %s = %s\\n\" % (s, o, v)\n print(text)", "def _format_bases_config(bases_config: BasesConfiguration) -> str:\n return \"_\".join([_format_run_on_base(r) for r in bases_config.run_on])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Directory where ixian is installed
def IXIAN(cls): import ixian return os.path.dirname(os.path.realpath(ixian.__file__))
[ "def toilPackageDirPath():\n result = os.path.dirname(os.path.realpath(__file__))\n assert result.endswith('/toil')\n return result", "def mitogen_lxc_path(self):", "def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/madgraph5/src\"", "def get_installdir(self):\n import mewlo\n path = os.path.dirname(os.path.realpath(mewlo.__file__))\n return path", "def pathToBaseNanoporeDir():\n import marginAlign\n i = absSymPath(__file__)\n return os.path.split(os.path.split(os.path.split(i)[0])[0])[0]", "def intermediate_dir_prefix():\n return \"%s-%s-\" % (\"scipy\", whoami())", "def ifaces_dir(self):\n return self.system_path(self._ifaces_dir)", "def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/madgraph4/src\"", "def site_packages_dir(self):", "def get_icon_dir():\n pjoin = os.path.join\n apath = os.path.abspath\n froi_dir = os.path.dirname(__file__)\n base_dir = apath(pjoin(froi_dir, os.pardir))\n #base_dir = apath(pjoin(base_dir, os.pardir))\n if not os.path.exists(pjoin(base_dir, 'data')):\n icon_dir = pjoin(froi_dir, 'icon')\n else:\n icon_dir = pjoin(base_dir, 'icon')\n return icon_dir", "def __instruments_absdir(self, institute: str) -> str:\n return self.__institutes_absdir() + institute + \"/instruments/\"", "def mitogen_lxc_info_path(self):", "def _getXRCFileLocations():\n for p in sys.path:\n yield p\n yield os.path.normpath(os.path.join(sys.prefix,\"share/XRCWidgets/data\"))", "def get_install_path():\n return install_path", "def get_pretend_installpath():\n return os.path.join(os.path.expanduser('~'), 'easybuildinstall')", "def get_qiime_scripts_dir():\r\n script_fp = which('print_qiime_config.py')\r\n\r\n if script_fp is None:\r\n raise ScriptsDirError(\"Could not find the directory containing QIIME \"\r\n \"scripts. QIIME scripts must be accessible via \"\r\n \"the PATH environment variable, and they must \"\r\n \"be executable. Please ensure that you have a \"\r\n \"valid QIIME installation (see the QIIME \"\r\n \"Installation Guide: \"\r\n \"http://qiime.org/install/install.html).\")\r\n\r\n return dirname(script_fp)", "def personaldir():\n if platform == 'windows':\n return os.path.join(os.environ['APPDATA'], 'automaton')\n else:\n return os.path.expanduser('~/.automaton/')", "def getInstDir(self):\n\n return os.path.join(self.configDir, self.instName.name.lower())", "def ht_install_dir():\n return env.cyclozzo_config.ht_install_dir or DEFAULT_HT_INSTALL_DIR" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find a zero of the function f between the points a and b, where f takes opposite signs. The optional parameter toll indicates the precision with which the value of the zero is to be computed
def bisezione(f,a,b,toll=10**-5): m = (a+b)/2 f_m = f(m) while abs(f_m) > toll: if f(a)*f_m < 0: b = m elif f(b)*f_m < 0: a = m elif f_m == 0: print("Trovata solzione esatta") return m else: print("Metodo fallito") return None m = (a+b)/2 f_m = f(m) return m
[ "def valeur_approchee(f):\n (a, b) = f\n return round(a / b, 4)", "def zero_force_func(v,t):\n return 0.0", "def f(x0: float, x1: float) -> float:\n return 8 - (x0 - 2) ** 2 - (x1 - 2) ** 2", "def _correct_p(self, f0, f1):\n return self.p * np.exp(self.dbeta * (f0 + f1) / 2)", "def find_zero(f, a, b, t=1e-9, ft=1e-6):\n # find a minimum of the absolute value\n r = find_min(f, a, b, t, m=abs)\n # and check the function value is close enough to 0\n if ft < abs(r.fv): raise ValueError(\"value not found\") # try a smaller t, or a larger ft\n r.ft = ft\n return r", "def bissecao(f, a, b, epsilon, maxIter=12):\r\n # Inicializar as variáveis Fa e Fb\r\n Fa = f(a)\r\n Fb = f(b)\r\n\r\n # Teste para saber se a função muda de sinal. Se não mudar, mostrar\r\n # mensagem de erro\r\n if ((f(a)*f(b)) < 0):\r\n print(\"Sem Erros !!!\")\r\n else: # Mostrar mensagem\r\n print(\"Erro! A função não muda de sinal.\")\r\n return (True, None)\r\n\r\n # Mostra na tela cabeçalho da tabela\r\n print(\"k\\t\\t a\\t\\t\\t fa\\t\\t\\t b\\t\\t\\t fb\\t\\t\\t x\\t\\t\\t fx\\t\\t\\t intervX\")\r\n\r\n # Inicializa tamanho do intervalo intervX usando a função abs, x e Fx\r\n intervX = abs(b-a)\r\n x = abs(b-a)\r\n Fx = f(x)\r\n\r\n # Mostra dados de inicialização\r\n print(\"-\\t%e\\t%e\\t%e\\t%e\\t%e\\t%e\\t%e\" % (a, Fa, b, Fb, x, Fx, intervX))\r\n\r\n # Teste se intervalo já é do tamanho da precisão e retorna a raiz sem erros\r\n # escreva o seu código aqui\r\n if(intervX < 0.001):\r\n return intervX\r\n\r\n\r\n # Iniciliza o k\r\n # escreva o seu código aqui\r\n k = 1\r\n fb = f(b)\r\n fa = f(a)\r\n # laço\r\n\r\n while k <= maxIter:\r\n # Testes para saber se a raiz está entre a e x ou entre x e b e atualiza\r\n # as variáveis apropriadamente\r\n # escreva o seu código aqui\r\n x = (a + b) / 2\r\n if((f(a)*f(x)) < 0):\r\n b = x\r\n fb = f(x)\r\n else:\r\n a = x\r\n fa = f(x)\r\n\r\n # Atualiza intervX, x, e Fx\r\n x = ((a+b)/2)\r\n Fx = f(x)\r\n Fa = f(a)\r\n Fb = f(b)\r\n intervX = (b-a)\r\n # Mostra valores na tela\r\n print(\"%d\\t%e\\t%e\\t%e\\t%e\\t%e\\t%e\\t%e\" %\r\n (k, a, Fa, b, Fb, x, Fx, intervX))\r\n\r\n # Teste do critério de parada (usando apenas o tamanho do intervalo)\r\n # escreva o seu código aqui\r\n if((intervX) < 0.001):\r\n break\r\n\r\n # Atualiza o k\r\n k = k + 1\r\n # Se chegar aqui é porque o número máximo de iterações foi atingido\r\n # Mostrar uma mensagem de erro e retorna que houve erro e a última raiz encontrada\r\n print(\"ERRO! número máximo de iterações atingido.\")\r\n return(True, x)", "def find_value(f, v, a, b, t=1e-9, ft=1e-6):\n r = find_zero((lambda x: f(x) - v), a, b, t, ft)\n r.fv += v\n return r", "def biseccion(func, a, b, tol=1e-4):\n p = (a + b) / 2 \n while np.fabs(func(p)) > tol:\n p = (a + b) / 2 \n if func(a) * func(p) < 0:\n b = p\n elif func(a) * func(p) > 0:\n a = p\n else:\n return p\n return p", "def RegulaFalsiMethod(f, a=0.0, b=0.75, tol=1e-10):\n\tstart = time()\n\tf_a = f(a)\n\tf_b = f(b)\n\terror = tol + 1\n\t\n\terrs = []\n\ti = 0\n\n\twhile error > tol:\n\t\tx = (a*f_b - b*f_a) / (f_b - f_a)\n\t\tf_x = f(x)\n\n\t\terrs.append(error)\n\n\t\tif f_a*f_x > 0:\n\t\t\ta = x\n\t\t\tf_a = f_x\n\t\telif f_b*f_x > 0:\n\t\t\tb = x\n\t\t\tf_b = f_x\n\t\telse:\n\t\t\tbreak\n\n\t\terror = np.abs(f_x)\n\t\ti = i+1\n\tend = time()\n\treturn x, (end-start), i", "def false_pos(f, a, b, epsilon, maxIter = 50):\r\n ## Inicializar as variáveis Fa e Fb\r\n Fa = f(a)\r\n Fb = f(b)\r\n x= ((a*Fb) - (b*Fa))/(Fb-Fa)\r\n ## Teste para saber se a função muda de sinal. 
Se não mudar, mostrar\r\n \r\n if Fa*Fb>0:\r\n print(\"Erro! A função não muda de sinal.\")\r\n return (True, None)\r\n \r\n ## Inicializa o tamanho do intervalo intervX usando a função abs\r\n intervX = abs(a-b)\r\n \r\n ## Teste se intervalo já é do tamanho da precisão e retorna a raiz sem erros\r\n if intervX <= epsilon:\r\n return x\r\n \r\n \r\n ## Testes se raiz está nos extremos dos intervalos\r\n \r\n ## Teste se a é raiz, se for, retorna o próprio a sem erros\r\n if Fa==0:\r\n return a\r\n \r\n ## Teste se b é raiz, se for, retorna o próprio b sem erros\r\n if Fb==0:\r\n return b\r\n \r\n ## Mostra na tela cabeçalho da tabela\r\n print(\"k\\t a\\t\\t Fa\\t\\t b\\t\\t Fb\\t\\t x\\t\\t Fx\\t\\tintervX\")\r\n \r\n ## Iniciliza o k, dessa vez usaremos um for\r\n k=1\r\n for k in range(1, maxIter+1):\r\n ## Calcula x, Fx\r\n x= ((a*Fb) - (b*Fa))/(Fb-Fa)\r\n Fx=f(x)\r\n \r\n ## Mostra valores na tela\r\n print(\"%d\\t%e\\t%e\\t%e\\t%e\\t%e\\t%e\\t%e\"%(k,a, Fa, b, Fb, x, Fx, intervX))\r\n \r\n ## Teste do critério de parada módulo da função\r\n if Fx<= epsilon:\r\n return x\r\n \r\n ## Testes para saber se a raiz está entre a e x ou entre x e b e atualiza\r\n ## as variáveis apropriadamente\r\n \r\n if Fa * Fx >0:\r\n a = x\r\n Fa = Fx\r\n else:\r\n b = x\r\n Fb = Fx\r\n \r\n ## Atualiza intervX e checa o outro critério de parada: tamanho do intervalo\r\n intervX = abs(a-b)\r\n if intervX <= epsilon:\r\n return x\r\n \r\n ## Mostrar uma mensagem de erro e retorna que houve erro e a última raiz encontrada\r\n print(\"ERRO! número máximo de iterações atingido.\")\r\n return (True, x)", "def F0(t):\n if t < 1e-6: # limit as t->0 of F0 is 1e0\n return 1e0\n else:\n thalf = sqrt(t)\n return sqrt(pi) / (2e0 * thalf) * erf(sqrt(t))", "def calc_f1(precision: float, recall: float) -> float:\r\n return 2 * (precision * recall) / (precision + recall)", "def classic_fp(self,tau21=0.864,tau31=0.864):\n\n\n# Compute radiances\n# -----------------\n L21 = B21(self.T21) \n L31 = B31(self.T31) \n E21 = B21(self.Tb21)\n E31 = B31(self.Tb31)\n\n if isscalar(tau21): tau21 = tau21 * ones(L21.shape)\n if isscalar(tau31): tau31 = tau31 * ones(L31.shape)\n \n# The nonlinear equation to be solved is: \n# B21(Tf) = a + b * B31(Tf)\n# or\n# T = iB21(a + b * B31(T))\n# ---------------------------------------\n r21 = (L21-E21)/(L31-E31)\n a21 = E21 - r21 * E31 \n b21 = r21 * tau31 / tau21\n\n if self.verb > 0:\n print_stats('__header__','Classic Fixed-point Dozier - Inputs')\n print_stats('DT21',self.T21-self.Tb21)\n print_stats('DT31',self.T31-self.Tb31)\n print_stats('__sep__')\n print_stats('b21',b21)\n print_stats('a21',a21)\n print_stats('__footer__')\n\n# Used fixed point algorithm to find solution\n# -------------------------------------------\n Tf = fixed_point(Tfunc21,self.T21,xtol=0.001,args=(a21,b21))\n p = 100. * (L21 - E21) / ( tau21 * B21(Tf) - E21 ) \n\n# Quality control\n# ---------------\n m = isnan(Tf) == False\n m = m & (p>0)\n\n# Add solution as attributes\n# --------------------------\n self.m = m\n self.Tf = Tf\n self.p = p\n\n# Print out results\n# -----------------\n y = 100. 
* ( Tf[m].size ) / a21.size + 0.05\n print_stats('__header__','Classic Dozier - Fixed-point Results (yield: %4.1f%%)'%y)\n print_stats('Tf (K)',Tf[m])\n print_stats('p (%)',p[m])\n print_stats('__footer__')\n\n# Plot KDE\n# --------\n plot_dozier(Tf[m],p[m],L21[m],E21[m],tau21[m],L31[m],E31[m],tau31[m],\\\n 'Fixed-point','fp',pow=self.pow[m])", "def biseccion(funcion,x0,x1,tol): \n xr=x0\n a=x0\n b=x1\n k=1\n if (evaluar(funcion,x0) * evaluar(funcion,x1) <0 ):\n while (error(a,b,k)>=tol): \n xr=(x0+x1)/2\n if (evaluar(funcion,x0)*evaluar(funcion,xr)<0):\n x1=xr\n else:\n x0=xr\n k+=1\n return (x0+x1)/2\n else:\n return \"No existe raíz\"", "def calc_f(self):\n self.f = self.g + self.h", "def df(a, x, b_inf, b_sup):\n sin_x = sin(x)\n cos_x = cos(x)\n return cbrt((x * sin_x)**2) + 2 * (cbrt(x * sin_x)**2) * sin_x *\\\n cos_x - sign(sin, b_inf, b_sup) * a / 3 * (sin_x + x * cos_x)", "def prob_b(a,b,o):\n if o == 0:\n if np.abs(b - a) < DT:\n return 1 / (2*DT); \n else:\n return 0; \n else:\n if b<=0 or b>a:\n return 0; \n else:\n return LMBDA*np.exp(-LMBDA*b)/(1-np.exp(-LMBDA*a))", "def f1(recall, precision):\n return 2 * recall * precision / (recall + precision + 1.e-16)", "def integrate(f, a, b):\n if(a > b):\n return - integrate(f, b, a)\n total = 0\n iters = 1000\n while(a <= b):\n total += (f(a) + f(a+1.0/iters)) / (2 * iters)\n a += 1.0 / iters\n return round(total, 8)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list containing only the frequencies with which the elements appear in the list/tuple l. E.g., if l = (1,2,2,5,3,4,1,1), then
def freq(l): d = {} for i in l: if i in d: d[i] += 1 else: d[i] = 1 return list(d.values())
[ "def tri_entiers(l):\n # première boucle, minimum, maximum\n m = l [0]\n M = l [0]\n for k in range(1,len(l)):\n if l [k] < m : m = l [k]\n if l [k] > M : M = l [k]\n \n # calcul du nombre d'occurrences\n p = [0 for i in range (m,M+1) ]\n for i in range (0, len (l)) :\n p [ l [i] - m ] += 1\n \n # fonction de répartition\n P = [0 for i in range (m,M+1) ]\n P [0] = p [0]\n for k in range (1, len (p)) :\n P [k] = P [k-1] + p [k]\n \n # tri\n pos = 0\n for i in range (1, len (l)) :\n while P [pos] < i : pos += 1\n l [i-1] = pos + m\n l [len (l)-1] = M", "def __getListFrequencies(self, L):\n counter = collections.Counter(L)\n total = len(L)\n frequencies = {key: counter[key] / total for key in counter.keys()}\n return frequencies", "def frequences(liste):\n liste_frequences = [0] * 10\n for valeur in liste:\n liste_frequences[valeur] += 1\n for i, _ in enumerate(liste_frequences):\n liste_frequences[i] /= len(liste)\n return liste_frequences", "def most_frequent(l):\n input_len = 0 if l is None else len(l)\n if input_len == 0:\n return None\n if input_len < 3:\n return l[0]\n\n l_dict = {}\n half_total = input_len * 0.5\n\n result = None\n max_freq = 0\n\n for i in l:\n current = l_dict.get(i)\n l_dict[i] = count = 1 if current is None\\\n else current + 1\n\n if count > max_freq:\n max_freq = count\n result = i\n\n if count > half_total:\n break\n\n return result", "def get_somme(l):\n somme = 0\n for nombre in l:\n somme += nombre", "def litemfreq(inlist):\r\n scores = pstats.unique(inlist)\r\n scores.sort()\r\n freq = []\r\n for item in scores:\r\n freq.append(inlist.count(item))\r\n return pstats.abut(scores, freq)", "def cuenta_elementos_de_lista(lista):\n return Counter(lista)", "def affichage_boucle_liste_dans_list(Liste): \n cpt_l=0 #compteur de sous-listes\n for sousliste in Liste : #pour chaque sous liste dans la liste principale :\n cpt_l+=1 #incrÈmentation du compteur de liste\n print \"Sous-liste : \",cpt_l #affichage du numÈro de la sous-liste\n cpt_e=0 #initialisation du compteur d'ÈlÈments pour la sous-liste considÈrÈe \n for elmt in sousliste : #pour chaque ÈlÈment dans la sous-liste\n cpt_e+=1 #incrÈmentation du compteur d'ÈlÈments\n print elmt #affichage de l'ÈlÈment\n #‡ la fin de l'affichage de l'ensemble de la sous-liste, le nombre total d'ÈlÈment est affichÈ\n print \"il y a en tout : \",cpt_e,\"elements dans cette sous-liste.\\n\" \n #‡ la fin de la prÈsentation de toutes les sous-listes, le nombre total de sous-listes est affichÈ\n print \"\\nLa liste principale contient en tout \"+cpt_l+\" sous-listes.\"", "def order_by_frequency(list_leaves: list):\r\n\r\n for i in range(len(list_leaves)):\r\n for j in range(i + 1, len(list_leaves)):\r\n if list_leaves[j].sum_frequency > list_leaves[i].sum_frequency :\r\n list_leaves[j], list_leaves[i] = list_leaves[i], list_leaves[j] \r\n return (list_leaves)", "def sum_unique(l):\n pass", "def calcule_quartiles(liste): \n med = mediane(liste)\n\n liste_triee = sorted(liste)\n n = len(liste_triee)\n indice_milieu = n//2\n if n%2 == 0: # si n pair\n liste_inf = liste[:indice_milieu]\n liste_sup = liste[indice_milieu:]\n else: # n impair\n liste_inf = liste[:indice_milieu+1]\n liste_sup = liste[indice_milieu:] \n Q1 = mediane(liste_inf)\n Q3 = mediane(liste_sup)\n\n return Q1, med, Q3", "def count_list_freq(l):\n freq = {}\n for items in l:\n freq[items] = l.count(items)\n return freq", "def entropy_of_list(ls):\n elements = {}\n for e in ls:\n elements[e] = elements.get(e, 0) + 1\n length = float(len(ls))\n return sum(map(lambda v: 
-v / length * math.log(v / length), elements.values()))", "def occurrences(lst):\n # ...", "def list_stats (lst: list) -> tuple:\n num = len(lst)\n if num == 0:\n return (0,0,0,0)\n return (round(sum(lst)/num, 2), min(lst), max(lst), lst[int(num/2)])", "def collectTotalInfo(list_of_lists):\n sorted_list=heapq.merge(*list_of_lists)\n output_list=[]\n for item in sorted_list:\n output_list.append((item[3],item[2],-item[0]))\n return output_list", "def find_stats(lst, k):\n ret = [0] * k\n ret[- 1] = quick_select(lst, (2 ** k) - 1, 0, len(lst) - 1)\n for i in range(k - 1, 0, -1):\n ret[i - 1] = quick_select(lst, (2 ** i)-1, 0, 2 ** (i + 1))\n\n return ret", "def countFrequenciesAccurate(rList, fList):\n freqNA = countFrequenciesAccurateHelper(rList[0], fList)\n freqME = countFrequenciesAccurateHelper(rList[1], fList)\n freqAs = countFrequenciesAccurateHelper(rList[2], fList)\n freqSA = countFrequenciesAccurateHelper(rList[3], fList)\n freqLA = countFrequenciesAccurateHelper(rList[4], fList)\n freqAf = countFrequenciesAccurateHelper(rList[5], fList)\n freqE = countFrequenciesAccurateHelper(rList[6], fList)\n freqO = countFrequenciesAccurateHelper(rList[7], fList)\n \n freqList = [freqNA, freqME, freqAs, freqSA, freqLA, freqAf, freqE, freqO]\n return freqList", "def running_sum(l):\n ret_list = [] \n ret_list = [sum(l[:i + 1]) for i in range(len(l))] \n return ret_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the Mobius function of the first n numbers. The list primes must be supplied and must contain all primes at least up to n; for speed this is not checked, it is the responsibility of whoever uses the function!!!
def mobius(n,primes): m = [0]*(n+1) for p in primes: for i in range(p, n+1, p): m[i] += 1 for p in primes: p_2 = p**2 for i in range(p_2, n+1, p_2): m[i] = 0 for i in range(n+1): if m[i] == 0: continue elif m[i]%2 == 0: m[i] = 1 else: m[i] = -1 m[1] = 1 return m
[ "def mobius(n, fn=prime_factor):\n if n < 1: return None\n r = 1\n for (p, e) in fn(n):\n if e > 1: return 0\n r = -r\n return r", "def lista_numeros_primos(m):\n for n in range(2, m):\n resultado = eh_primo(n)\n if resultado == True:\n print(n, 'é um número primo')\n else:\n print(n, 'igual', resultado, '*', n // resultado)\n\n return None", "def tempsMoyen(nb,fun,*paras):\n moyen = 0\n for k in range(nb):\n debut = time()\n fun(*paras)\n fin = time()\n moyen = moyen + fin - debut\n\n return moyen/nb", "def lista_numeros_primos2(m):\n n=0\n k=2\n while n < m: # n=1,2,3,..,m\n resultado = eh_primo(k)\n if resultado == True:\n print(k, 'é um número primo')\n n=n+1\n else:\n print(k, 'igual', resultado, '*', k // resultado)\n k=k+1\n return None", "def _calc_simul(símismo, paso, n_pasos, extrn=None):\n\n # Para cada paso de tiempo, incrementar el modelo\n for i in range(1, n_pasos):\n símismo._incrementar(paso, i=i, extrn=extrn)", "def mersenne(n:int=None, m:int=None) -> NumOrGen :\r\n if n is not None and n>=0:\r\n if m:\r\n return (pow(2,n,m)-1)%m #sucesion_de_lucas_primer_tipo(n,3,2,m)\r\n return pow(2,n)-1\r\n return sucesion_de_Lucas_Generalizada(a=0,b=1,P=3,Q=2,M=m)", "def motzkin_sum(n):\n motskin = [0]*(n)\n motskin[0] = 1\n motskin[1] = 0\n for i in range(2, n):\n motskin[i] = int((i-1)*(2*motskin[i-1] + 3*motskin[i-2])/(i+1))\n print(motskin)\n return motskin[n - 1]", "def methon1(n, m):\n result = NumberOfMAndN.__number_of_1(n ^ m)\n print(result)", "def silnia_it(n):\n wynik = 1\n\n for i in range(1,n+1):\n wynik = n * (n-1)\n #print(wynik)\n \n return wynik \n \n pass", "def somme_chiffres(n):\n if n < 10:\n return n\n\n # Cas général\n unite = n%10\n nn = n//10\n S = unite + somme_chiffres(nn)\n\n return S", "def moyenne_run(machine, algo, T, n = 100):\n mu_moyen, Na, res_temps_moyen = run(machine, algo, T) \n for i in range(1, n):\n mu, Na, res = run(machine, algo, T)\n mu_moyen = mu_moyen + mu\n res_temps_moyen = res_temps_moyen + res\n return mu_moyen/n, Na, res_temps_moyen/n", "def multiplicaciones(): #906609 tiene que darme\n ultimo_palindromo = 0\n total = 0\n for primer_numero in range(100, 1000):\n for segundo_numero in range(100, 1000):\n #total se encarga de hacer la multiplicacion entre los numeros\n total = primer_numero * segundo_numero\n # llamamos a la funcion que verifica si la multiplicacion que envia es un palindromo\n if obtener_palindromo(total):\n #luego de verificar que la multiplicacion era palindromo pasamos a evaluarla hasta llegar al ultimo palindromo\n #entre 100 y 1000\n if ultimo_palindromo < total:\n ultimo_palindromo = total\n return ultimo_palindromo", "def spisok(n):\n result = [] # объявление результирующего списка\n for x in range(n): # для каждого из чисел от 0 до 999999 делай\n result.append(x) # добавление в результирующий список очередного элемента\n return result", "def fatorial(numero):\n resultado = 1\n for i in range(1, numero+1):\n resultado = resultado * i\n return resultado", "def metrique_pheno_param(inNdvi=sp.empty,inSos=0,inEos=0,inIndMax=0):\n try:\n \n if (inSos<inEos and inSos>0 ):\n \n areabef=[] #suface sous la courbe avant la valeur max du NDVI\n areaaft=[] #surface sous la courbe après la valeur max du NDVI \n pente1=[] #pente du sos à la valeur max (vitesse de croissance)\n pente2=[] #pente du max au eos (vitesse de croissance)\n tsos_tmax=[] #duree entre le debut de la saison et le max de vegetation\n area=[] #surface du debut de la saison à la fin\n tmax_teos=[] #duree entre la floraison et la fin de la saison\n \n 
ms=inSos-1 # -1 parceque indice commence à 0\n me=inEos-1 # -1 parceque indice commence à 0\n mMaX=inIndMax-1 #\n ndviMax=inNdvi[mMaX]\n \n areabef=sp.sum(inNdvi[ms:mMaX])\n areaaft=sp.sum(inNdvi[mMaX:inEos]) \n area=sp.sum(inNdvi[ms:inEos])\n tsos_tmax=mMaX-ms \n tmax_teos=me-mMaX\n \n if mMaX>ms and mMaX<me :\n \n pente1=(ndviMax-inNdvi[ms])/tsos_tmax\n pente2=(ndviMax-inNdvi[me])/tmax_teos\n else:\n pente1=0\n pente2=0\n \n \n outListe=[area,areabef,areaaft,inIndMax,tsos_tmax,tmax_teos,pente1,pente2]\n \n else:\n outListe=[-1,-1,-1,-1,-1,-1,-1,-1]\n except:\n outListe=[-1,-1,-1,-1,-1,-1,-1,-1]\n \n return outListe", "def massimo(insieme):\n\n # n indica il numero di elementi di insieme\n n = len(insieme)\n\n max_val = insieme[0]\n\n # fa variare i da 1 a n-1\n for i in range(1, n):\n if insieme[i] > max_val:\n max_val = insieme[i]\n\n print \"l'elemento massimo e' %d\" % max_val", "def factoresPrimos(n:int) -> Iterator[int]:\r\n return descompocion_en_primos(n,repeticion=False)", "def simulationFreq(n):\n freq = [0] # Ligne à compléter pour construire une liste de onze 0\n for i in range(n):\n nbr_touche = simulation10TirV2()\n freq[nbr_touche] += 1\n for i in range(11):\n pass # ligne à modifier pour terminer le calcul des fréquences\n return freq", "def mote_max_inc(mote, n):\n\n return ((2 ** n) * mote) - ((2 ** n) - 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simpson 1/3 integration algorithm, to compute the integral of the function f between the endpoints a and b. The interval is divided into n subintervals. Increasing n increases the precision.
def simpson_13(f,a,b,n=1): step = (b-a)/n h = step/2 ret = 0 for i in range(n): a_1 = a + i*step b_1 = a_1 + step ret += h/3*(f(a_1)+4*f(a_1+h)+f(b_1)) return ret
[ "def simpson1(func, g, a, b, n):\n\n if n%2!=0: #Verifica se é divisivel por 2\n print(\"Não é possivel calcular com esse numero de subintervalos! Insira um multiplo de 2!\\n\")\n return\n\n h=(b-a)/n\n x=a\n soma=0\n\n for i in range(0, n+1):\n if i==0 or i==n: soma+=funcao(x, func, g)\n elif i%2==0: soma+=2*funcao(x, func, g)\n else: soma+=4*funcao(x, func, g)\n x+=h\n print(\"O resultado aproximado para a integral pelo método 1/3 de Simpson Repetido é {}\".format(soma*h/3))\n return soma*h/3", "def integracao_simpson(f, a, b, n):\r\n\r\n if n % 2:\r\n raise ValueError(\"n deve ser par (n=%d)\" % n)\r\n\r\n h = (b - a) / n\r\n s = f(a) + f(b)\r\n\r\n for i in range(1, n, 2):\r\n s += 4 * f(a + i * h)\r\n for i in range(2, n-1, 2):\r\n s += 2 * f(a + i * h)\r\n\r\n return s * h / 3", "def simpson(f, a, b, N):\n #############################################################\n # #\n # Implementieren Sie hier die zusammengesetzte Simpsonregel #\n # #\n #############################################################\n x, h = linspace(a, b, 2*N+1, retstep=True)\n I = h/3.0 * sum(f(x[:-2:2]) + 4.0*f(x[1:-1:2]) + f(x[2::2]))\n return I", "def simpson_integrate(f, a, b, N, points):\n total_sum = 0\n total_sum += f(points[0]) + f(points[N])\n\n first_sum = 0\n for i in range(1, N // 2):\n first_sum += f(points[2 * i])\n first_sum *= 2\n total_sum += first_sum\n\n second_sum = 0\n for j in range(1, (N // 2) + 1):\n second_sum += f(points[2 * j - 1])\n second_sum *= 4\n total_sum += second_sum\n\n interval_length = b - a\n denominator = 3 * N\n\n return (interval_length / denominator) * total_sum", "def Simpson(f, a, b, n=500):\n \n #Test the supplied n:\n assert n%2 == 0, \"Parameter n (intervals) must be an even integer.\"\n \n #Test the supplied integration limits\n assert b > a, \"Integration limit b must be greater than a.\"\n \n #Calculate h, ensure floating point number\n h = (b-a)/float(n)\n \n #calculate the partial sums (this could likely be sped up by one loop)\n sf_0 = f(a)\n sf_1 = f(b)\n sf_2 = 0\n sf_3 = 0\n \n #Compute sf_2\n for i in range(1,n//2 + 1):\n sf_2 += f(a + (2*i - 1)*h)\n #end for \n\n #Compute sf_3\n for i in range(1,n//2):\n sf_3 += f(a + 2*i*h)\n\n #Compute and return the Simpson's rule approximation\n s = (1/3.)*h*(sf_0 + sf_1 + 4*sf_2 + 2*sf_3)\n \n return s", "def integrate_simpson(f, lower, upper, N=1000):\n a = lower # Lower integration limit\n b = upper # Upper integration limit\n w = (b - a) / N # Width of each trapezoid\n\n if N % 2 != 0:\n N += 1\n print(\"Number of slices was odd so 1 was added to N.\")\n\n I = (1 / 3) * f(a) * w + (1 / 3) * f(b) * w # Area of first and last trapezoids\n\n for i in range(1, N, 2): # Odd terms\n I += f(a + i * w) * w * (4 / 3)\n\n for i in range(2, N, 2): # Even terms\n I += f(a + i * w) * w * (2 / 3)\n\n return I, N", "def integrate(self, f: callable, a: float, b: float, n: int) -> np.float32:\n\n # replace this line with your solution\n b32 = np.float32(b)\n a32 = np.float32(a)\n n32 = np.float32(n)\n\n # simpson(a, b, n, f):\n sum = np.float32(0.0)\n inc = (b32 - a32) / n32\n if (n32 % 2 != 0.0):\n n32 = n32 - 1.0\n for k in range(n + 1):\n x = a32 + (k * inc)\n summand = f(x)\n if (summand == np.float32(\"inf\") or summand == np.float32(\"-inf\")):\n summand = 0.0\n\n if (k != 0.0) and (k != n):\n summand *= (2.0 + (2.0 * (k % 2.0)))\n #print(\"x: \" + str(x) + \", summand: \" + str(summand))\n sum += summand\n result = ((b32 - a32) / (3.0 * n32)) * sum\n\n return np.float32(result)", "def integrate(f, a, b, n):\n h = 
float(b-a)/n\n s = 0\n for i in range(1, n+1):\n s += f(a + (i-0.5)*h)\n return s*h", "def simpson_rule(f,a,b):\n n = f.shape[0]\n h = (b - a)/n\n \n res = (f[a]+f[b])/6\n som = 0\n for i in range(0,n):\n som = som + f[a + i*h]/3 +2*f[a + i*h + h/2]/3\n\n return h*(res + som)", "def sp_c_n_func(self, n_a):\n self.sp_c_n = float(pow(14 / n_a, 3))", "def const_Qu(self, n, a):\n _4nn = 4*n*n\n mant = self._mantissa[:]\n mant[0] = mant[0]*0 + 1 # 1.0 of type of mant[0]\n for k in range(1, self._max_N):\n mant[k] = mant[k-1] * (_4nn -(2*k-1)*(2*k-1)) / (8*a*k)\n return PolyNum(mant, 0)", "def d_simpson(n, x_at):\n if n % 2 != 0:\n return None\n h = x_at / n\n odd_sum = 0\n even_sum = 0\n for i in range(1, n):\n if i % 2 == 1:\n odd_sum += 4 * f(i * h)\n elif i % 2 == 0:\n even_sum += 2 * f(i * h)\n return h / 3 * (f(0) + f(x_at) + odd_sum + even_sum) * exp(-x_at ** 2)", "def _integrate(function: Callable[[float, ], float],\n start: float, end: float, n: int) -> float:\n if n % 2 != 0:\n raise ValueError(\"In Simpson rule n have to be even\")\n if start >= end:\n raise ValueError(\n f\"Upper bound ({end}) is lesser or equal to lower ({start})\")\n\n step = (end - start) / n\n\n _sum = function(start) + function(end)\n for i in range(1, n):\n if i % 2 == 0:\n _sum += 2 * function(start + i * step)\n else:\n _sum += 4 * function(start + i * step)\n\n return (step / 3) * _sum", "def rational(n, d):", "def simpsons_rule(f, high, low, n, label='x', toDisplay=True):\n #n must be an even number to use Simpson's rule\n assert n%2 == 0, \"n must be an even number.\"\n\n #compute h value\n h = (high - low) / n\n\n #initialize integral value with function values at 'low' and 'high' points\n integral = evaluate(f, label, low) + evaluate(f, label, high)\n\n #summing part of the Simpson's rule\n for pair in [(1, n, 4), (2, n-1, 2)]:\n for i in range(pair[0], pair[1], 2):\n integral += pair[2] * evaluate(f, label, low + i*h)\n\n #final calculation\n integral *= h/3\n\n #print information or return value\n if toDisplay:\n print(\"Integral value using Simpson's rule: \", integral)\n else: return integral", "def simp(N, a, b):\n assert type(N) is IntType, \"First input must be an integer.\"\n\n # Setup needed vaiables\n wi = []\n xi = []\n\n # Determine delta x\n deltax = (b - a) / (2 * N)\n\n # Will need to add 2N + 1 values to above lists since we also use midpoints for\n # Simpson's rule. 
Setup wi and xi as follows:\n # 'wi' will be:\n # (delta x / 3) if i = 1 or 2N + 1\n # (4 * delta x / 3) for i = 2 to 2N when i is even\n # (2 * delta x / 3) for i = 3 to 2N - 1 when i is odd\n # 'xi' will be: a + (i - 1) * delta x\n for i in range(1, (2*N + 2)): # range() is exclusive in the second parameter\n if i == 1 or i == (2*N + 1):\n wi.append(deltax / 3)\n elif i % 2 == 0:\n wi.append((4 * deltax) / 3)\n elif i % 2 == 1:\n wi.append((2 * deltax) / 3)\n\n xi.append(a + (i - 1) * deltax)\n\n # Return these lists in a dictionary\n return {'wi': wi, 'xi': xi}", "def quad (n):\n for i in range(n):\n print (i*i),", "def PG(n,q,a1=1.0):\n Sinf=1/(1-q)\n Sn=a1*(1-q**n)/(1-q)\n return Sinf-Sn,Sinf,Sn", "def scintegral(f,s,a,b):\n\tx = sc.linspace(0,10,1000)\n\ti_trapez = integ.trapz(f(x), x)\n\ti_simpson = integ.simps(f(x), x)\n\ti_quad = integ.quad(f, 0, 10)\n\ti_quad2 = i_quad[0]\n\t\n\tprint \"integ.trapz() bei \" + str(s) + \" Stützpunkten: \" + str(i_trapez)\n\tprint \"integ.simps() bei \" + str(s) + \" Stützpunkten: \" + str(i_simpson)\n\tprint \"integ.quad() bei \" + str(s) + \" Stützpunkten: \" + str(i_quad2)\n\t\n\tintegral_pre = [i_quad2, i_trapez, i_simpson]\n\treturn integral_pre" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Kruskal's algorithm for finding the MST of a graph supplied via its adjacency matrix; it uses the ring_finder function to look for cycles in the graph and min_nonzero_idx to find the indices of the edges with minimum cost.
def kruskal(m):
    n = m.shape[0]
    m_ret = np.zeros([n,n], dtype=int)
    while np.count_nonzero(m_ret) != 2*(n-1):
        i_min, j_min = min_nonzero_idx(m)
        n_min = m[i_min, j_min]
        m[i_min, j_min], m[j_min, i_min] = 0, 0
        m_ret[i_min, j_min], m_ret[j_min, i_min] = n_min, n_min
        if ring_finder(m_ret, [i_min], []):
            m_ret[i_min, j_min], m_ret[j_min, i_min] = 0, 0
    return m_ret
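The record only names min_nonzero_idx and ring_finder; it does not define them. The sketch below supplies hypothetical implementations of both, plus a small symmetric weight matrix, purely so the function can be run for illustration; none of it is the original author's code. Note that kruskal consumes its argument, zeroing out entries as edges are examined.

import numpy as np

# Hypothetical helper sketch: indices (i, j) of the smallest strictly
# positive entry of the weight matrix.
def min_nonzero_idx(m):
    masked = np.where(m > 0, m, np.inf)
    return np.unravel_index(np.argmin(masked), m.shape)

# Hypothetical helper sketch: depth-first search for a cycle reachable from
# path[-1] in the undirected graph whose adjacency matrix is m.
def ring_finder(m, path, visited):
    node = path[-1]
    visited.append(node)
    for nxt in np.nonzero(m[node])[0]:
        if len(path) > 1 and nxt == path[-2]:
            continue  # skip the edge we just arrived on
        if nxt in visited or ring_finder(m, path + [nxt], visited):
            return True
    return False

# 4-node example: the MST keeps edges (0,1), (1,2), (0,3) with total cost 6.
w = np.array([[0, 1, 4, 3],
              [1, 0, 2, 0],
              [4, 2, 0, 5],
              [3, 0, 5, 0]])
print(kruskal(w))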
[ "def kruskal(self, matr, minimo=True):\n ncomps = len(matr)\n already = []\n ramas = []\n dists = []\n matrix = [ele[:] for ele in matr]\n dic_posis = [-(ele+1) for ele in range(ncomps)]\n comps = [[] for ele in range(ncomps)]\n nrama = 0\n mins = []\n for ii in range(ncomps):\n for jj in range(ii+1, ncomps):\n if matrix[ii][jj] >= 0:\n mins.append((ii, jj, matrix[ii][jj]))\n mins.sort(key=lambda x:x[2], reverse=not minimo)\n for edge in mins:\n i = edge[0]\n j = edge[1]\n dist = edge[2]\n if dic_posis[i] != dic_posis[j]:\n dists.append(dist)\n if i not in already and j not in already:\n ramas.append((i, j))\n already.append(i)\n already.append(j)\n comps[i].append(j)\n comps[j].append(i)\n dic_posis[i] = nrama\n dic_posis[j] = nrama\n nrama += 1\n elif i not in already:\n ramas.append((i, ramas[dic_posis[j]]))\n comps[i].append(j)\n already.append(i)\n for ele in comps[j]:\n comps[i].append(ele)\n comps[ele].append(i)\n dic_posis[ele] = nrama\n comps[j].append(i)\n dic_posis[i] = nrama\n dic_posis[j] = nrama\n nrama += 1\n elif j not in already:\n ramas.append((j, ramas[dic_posis[i]]))\n comps[j].append(i)\n already.append(j)\n for ele in comps[i]:\n comps[j].append(ele)\n comps[ele].append(j)\n dic_posis[ele] = nrama\n comps[i].append(j)\n dic_posis[i] = nrama\n dic_posis[j] = nrama\n nrama += 1\n else:\n ramas.append((ramas[dic_posis[i]], ramas[dic_posis[j]]))\n for ele in comps[j]:\n dic_posis[ele] = nrama\n if i not in comps[ele]:\n comps[ele].append(i)\n if ele not in comps[i]:\n comps[i].append(ele)\n for ele2 in comps[i]:\n dic_posis[ele2] = nrama\n if j not in comps[ele2]:\n comps[ele2].append(j)\n if ele2 not in comps[j]:\n comps[j].append(ele2)\n if ele2 not in comps[ele]:\n comps[ele].append(ele2)\n if ele not in comps[ele2]:\n comps[ele2].append(ele)\n if j not in comps[i]:\n comps[i].append(j)\n if i not in comps[j]:\n comps[j].append(i)\n dic_posis[i] = nrama\n dic_posis[j] = nrama\n nrama += 1\n orde = [int(ele) for ele in unicode(ramas[nrama-1]).replace(\" \", \"\").\\\n replace(\"(\", \"\").replace(\")\", \"\").split(\",\")]\n self.tree = ramas[nrama-1]\n self.dists = dists\n self.branches = ramas\n self.krusk_ord = orde", "def test_riemannian_mst_computation():\n n_neighbors = 2\n eps = 1e-1\n tolerance = 1e-6\n\n cloud = np.array([\n [0,0,0],\n [0,1,0],\n [0,1,1],\n [0,1,2]\n ])\n normals = np.array([\n [1,0,0],\n [0,1,0],\n [1,0,0],\n [0,1,0]\n ])\n\n true_symmetrized_emst = np.array([\n [0,1,0,0],\n [1,0,1,0],\n [0,1,0,1],\n [0,0,1,0]\n ])\n true_kneighbors_graph = np.array([\n [0,1,1,0],\n [1,0,1,0],\n [0,1,0,1],\n [0,1,1,0]\n ])\n true_symmetrized_kneighbors_graph = np.array([\n [0,1,1,0],\n [1,0,1,1],\n [1,1,0,1],\n [0,1,1,0]\n ])\n true_riemannian_graph = np.array([\n [0,1+eps,eps,0],\n [1+eps,0,1+eps,eps],\n [eps,1+eps,0,1+eps],\n [0,eps,1+eps,0]\n ])\n true_possible_asymetric_rmsts = [\n np.array([\n [0,0,eps,0],\n [0,0,0,eps],\n [0,0,0,1+eps],\n [0,0,0,0],\n ]),\n np.array([\n [0,0,eps,0],\n [0,0,1+eps,eps],\n [0,0,0,0],\n [0,0,0,0],\n ]),\n np.array([\n [0,1+eps,eps,0],\n [0,0,0,eps],\n [0,0,0,0],\n [0,0,0,0],\n ]),\n ]\n true_possible_rmsts = [rmst + rmst.T for rmst in true_possible_asymetric_rmsts]\n\n actual_rmst = compute_riemannian_mst(cloud=cloud,normals=normals,n_neighbors=n_neighbors,eps=eps)\n is_possible = False\n for possible_rmst in true_possible_rmsts:\n is_possible = is_possible or (np.abs(possible_rmst - actual_rmst) < tolerance).all()\n if is_possible:\n return True\n else:\n print('---- Possible Riemannian MSTs:')\n for rmst in 
true_possible_rmsts:\n print(rmst)\n print('---- Actual Riemannian MST:')\n print(actual_rmst.toarray())\n return False", "def Kruskal(G): # la fonction prend la liste de edges et de union find\n edges = G.edges\n unionfind_list = G.nodes\n G_k = Graph() # le graph contient le graph de kruskal\n dim = len(unionfind_list) # dimension du nombre de sommet du graph\n kruskal_cost = 0 # initilisation du cout du graphe\n\n sorted_edges = deepcopy(edges)\n sorted_edges.sort() # copy et triage des aretes par cout croissant\n # pour chaque arete on recupere les deux noeuds de leur extremite\n for edge in sorted_edges:\n unionfind_a = edge.get_startnode()\n unionfind_b = edge.get_endnode()\n # s'ils ont deux racines differentes\n if unionfind_a.find() != unionfind_b.find():\n G_k.add_node(unionfind_a)\n G_k.add_node(unionfind_b)\n # on ajoute les deux noeuds et l'arete dans l'arbre de kruskal\n G_k.add_edge(edge)\n # on met a jour le cout\n kruskal_cost += edge.get_vcost()\n unionfind_a.union(unionfind_b)\n # si le nombre d'arete de l'arbre de kruskal est\n # egal au nombre de sommet-1\n # on retourne l'arbre de kruskal et son cout\n if G_k.get_nb_edges() == dim - 1:\n return kruskal_cost, G_k\n return kruskal_cost, G_k", "def find_kx(input_params, search_domain=None, show_progress=False,\r\n grid_points=20, iterations=9, reduction_factor=9,\r\n plot_full_region=True):\r\n w = input_params['w']\r\n d_list = input_params['d_list']\r\n ex_list = input_params['ex_list']\r\n ez_list = input_params['ez_list']\r\n mu_list = input_params['mu_list']\r\n N = len(mu_list)\r\n assert N == len(d_list) == len(ex_list) == len(ez_list)\r\n # error(z) approaches 0 as kx = z approaches a true plasmon mode.\r\n # It's proportional to the determinant of the boundary-condition matrix, \r\n # which equals zero at modes.\r\n def error(kx):\r\n if kx == 0:\r\n return inf\r\n temp_params = input_params.copy()\r\n temp_params['kx'] = kx\r\n should_be_zero = np.linalg.det(bc_matrix(find_kzs(temp_params)))\r\n return should_be_zero / kx**(N+1)\r\n # \"return should_be_zero\" is also OK but has an overall slope that\r\n # makes it harder to find zeros; also, there's a false-positive at k=0.\r\n \r\n # choose the region in which to search for minima. 
My heuristic is:\r\n # The upper limit of kx should be large enough that\r\n # 2 * pi * i * kzm * d ~ 20 for the thinnest layer we have, or 3 times\r\n # the light-line, whichever is bigger.\r\n if search_domain is None:\r\n kx_re_max = max(max(abs((20 / (2 * pi * d_list[i]))\r\n * cmath.sqrt(ez_list[i] / ex_list[i])) for i in range(1,N)),\r\n 3 * w / nu.c0)\r\n kx_re_min = -kx_re_max\r\n kx_im_min = 0\r\n kx_im_max = abs(kx_re_max)\r\n else:\r\n kx_re_min = search_domain[0]\r\n kx_re_max = search_domain[1]\r\n kx_im_min = search_domain[2]\r\n kx_im_max = search_domain[3]\r\n \r\n # Main part of function: Call find_all_zeros()\r\n kx_list = find_all_zeros(kx_re_min, kx_re_max, kx_im_min, kx_im_max, error,\r\n show_progress=show_progress, grid_points=grid_points,\r\n iterations=iterations,\r\n reduction_factor=reduction_factor,\r\n plot_full_region=plot_full_region)\r\n \r\n # sort and remove \"repeats\" with opposite signs\r\n kx_list = sorted(kx_list, key=(lambda kx : abs(kx)))\r\n i=0\r\n while i < len(kx_list) - 1:\r\n if abs(kx_list[i] + kx_list[i+1]) <= 1e-6 * (abs(kx_list[i]) + abs(kx_list[i+1])):\r\n kx_list.pop(i)\r\n else:\r\n i += 1\r\n \r\n # Fix amplifying waves\r\n kx_list = [(-kx if (kx.imag < 0 or (kx.imag==0 and kx.real < 0)) else kx)\r\n for kx in kx_list]\r\n \r\n return kx_list", "def kst():", "def a0min(A,amin,J1,J2):\n\n #----------------------Initialize dictionaries-----------------------------#\n\n a_init = -1 # Initial a value\n Edict = {} # Explored nodes dict\n Udict = {} # Unexplored nodes dict\n\n for n in range(len(A)):\n Udict[n] = a_init # Initializing a as -1\n Udict[J1] = 1 # a needed to source is 1\n\n #-----------------------------Main Search----------------------------------#\n\n while len(Udict) > 0: # While there are unexplored nodes\n #Find node with max a in Udict and move to Edict\n a_max = a_init # Setting maximum a to intial one for starting value\n for n,w in Udict.items(): # Looping through the unexplored dictionary\n if w > a_max: # If the weight node is greater than current a_max\n a_max = w # Update a_max\n n_max = n # Keep track of node where this occurs\n Edict[n_max] = Udict.pop(n_max) # Mark such node as visited\n #print(\"moved node\", n_max)\n\n # Update provisional a's for unexplored neighbours of n_max\n for i in range(len(A[n_max])): # Looping through nodes and their associated weights\n n = A[n_max][i][0] # Defining node\n w = A[n_max][i][1] # Defining associated value a of the node\n if n in Udict: # While it is in the unexplored dictionary\n Udict[n] = max(min(w,a_max),Udict[n]) #update value to maximum of all values on path\n\n if Edict[J2] != 0 : #Avoid division by zero\n a0_min = amin/Edict[J2]\n path = findPath(A,a0_min,amin,J1,J2)\n output = a0_min, path\n if len(path) == 0:\n output = -1,[]\n else:\n output = -1,[]\n\n return output,Edict", "def _G_to_km_on_basis_single_level(self, w, m):\n kB = self._sym.kBoundedSubspace(self.k,t=1)\n g = kB.K_kschur()\n mon = self.km()\n if m < w.length():\n return 0\n ans = self.zero()\n for la in Partitions(m, max_part = self.k):\n ans += g.homogeneous_basis_noncommutative_variables_zero_Hecke((la)).coefficient(w)*mon(la)\n return ans", "def decide_k_min(self, H0_dist, Ha_dist, rnd_index):\r\n\r\n self.H0_dists.append(copy.deepcopy(H0_dist))\r\n self.Ha_dists.append(copy.deepcopy(Ha_dist))\r\n #print(\"Deciding kmin for round index\", rnd_index)\r\n\r\n # If you change the end bound to len(H0_dist) then that's an issue\r\n\r\n for k in range(self.round_sched[rnd_index] // 2 + 1, 
self.round_sched[rnd_index] + 1):\r\n #print(\"kmin?:\", k)\r\n LR_num = 0\r\n LR_denom = 0\r\n for i in range(k, len(H0_dist)):\r\n LR_num += Ha_dist[i]\r\n LR_denom += H0_dist[i]\r\n \r\n delta = 1\r\n\r\n # FOR METIS\r\n #if (LR_num + self.pr_Ha_sched[max(rnd_index-1, 0)])/ (LR_denom + self.pr_H0_sched[max(rnd_index-1, 0)])> 1 / self.alpha:\r\n\r\n # FOR ATHENA\r\n if LR_num / LR_denom > 1 / self.alpha and Ha_dist[k] > delta * H0_dist[k]:\r\n \r\n # The case of equality essentially only happens when both sides are 0. Then there's no harm\r\n # in calling it a kmin (since it necessarily won't contribute to the risk), in spite of the fact\r\n # that the ratio criterion cannot be satisfied because of division by zero.\r\n # GRANT COULD ALSO BE DENOM = 0 OR ALPHA NUM > DENOM short circuit\r\n\r\n\r\n\r\n # SENTINELS FOR WHEN THERE'S NO KMIN! if we get to the\r\n # end of the dist and there's no satisfaction just return SENTINEL\r\n\r\n # FOR MINERVA\r\n #if self.alpha * LR_num >= LR_denom:\r\n\r\n self.k_min_sched[rnd_index] = k\r\n\r\n cumulative_H0_sched = self.pr_H0_sched[max(rnd_index-1, 0)]\r\n cumulative_Ha_sched = self.pr_Ha_sched[max(rnd_index-1, 0)]\r\n\r\n self.pr_H0_sched[rnd_index] = LR_denom + cumulative_H0_sched\r\n self.pr_Ha_sched[rnd_index] = LR_num + cumulative_Ha_sched\r\n\r\n # FOR MINERVA\r\n self.risk_sched[rnd_index] = LR_denom / LR_num\r\n\r\n # FOR METIS\r\n #self.risk_sched[rnd_index] = self.pr_H0_sched[rnd_index] / self.pr_Ha_sched[rnd_index]\r\n return", "def ekm(x_point, w_lower, w_upper, max_flag):\n \n if max(w_upper) == 0 or max(x_point) == 0:\n return 0\n \n if max(w_lower) == 0:\n if max_flag > 0:\n return max(x_point)\n else:\n return min(x_point)\n \n if len(x_point) == 1:\n return x_point[0]\n \n \n # removing items with 0 upper value\n zero_filtered = [x if x != 0 else None for x in w_upper]\n selectors = [x is not None for x in zero_filtered]\n x_point = list(itertools.compress(x_point, selectors))\n w_lower = list(itertools.compress(w_lower, selectors))\n w_upper = list(itertools.compress(w_upper, selectors))\n \n \n # combine zero Xs\n items = [item for item in sorted(zip(x_point, w_lower, w_upper))]\n x_point = [x[0] for x in items]\n \n if 0 in x_point and (len(x_point) - x_point.index(0) - 1) > 0:\n lower_sum = sum([x[1] for x in items if x[0] == 0])\n upper_sum = sum([x[2] for x in items if x[0] == 0])\n items = [x for x in items if x[0] != 0]\n items.insert(0, (0, lower_sum, upper_sum))\n \n # Starting the KM algorithm\n x_point = [x[0] for x in items]\n w_lower = [x[1] for x in items]\n w_upper = [x[2] for x in items]\n \n ly = len(x_point)\n if max_flag < 0:\n k = int(ly // 2.4)\n temp = w_upper[:k + 1] + w_lower[k + 1:]\n else:\n k = int(ly // 1.7)\n temp = w_lower[:k + 1] + w_upper[k + 1:]\n \n a = sum(np.array(x_point) * temp)\n b = sum(temp)\n y = a / b\n # rounded_xpoints = [round(x, 9) for x in x_point]\n k_new = np.nonzero(np.array(x_point) > y)[0] - 1\n \n if k_new.size != 0:\n k_new = k_new[0]\n \n while k != k_new:\n mink = min(k, k_new)\n maxk = max(k, k_new)\n temp = np.array(w_upper[mink + 1:maxk + 1]) - w_lower[mink + 1:maxk + 1]\n b = b - np.sign(k_new - k) * np.sign(max_flag) * sum(temp)\n a = a - np.sign(k_new - k) * np.sign(max_flag) * sum(temp * x_point[mink + 1:maxk + 1])\n y = a / b\n k = k_new\n k_new = np.nonzero(np.array(x_point) > y)[0] - 1\n if k_new.size != 0:\n k_new = k_new[0]\n \n return y", "def search(x,y,prrw,prrz, Dt, hw, hz):\n #if else statement to check if prrz or prrw is 0\n u = x; v = y\n col = [-9, -6, 
-3, 0, 1, 2, 3, 4, 5]\n row = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]\n feasible=[]\n power_val = []\n loc = []\n if x == 21:\n x = 20\n if f(x, y, prrw, prrz, Dt, hw, hz) < 0.9:\n pass\n else:\n exclude = power_table[:row.index(x)+1, :col.index(y)+1] #exclusion matrix with values that automatically do not satisfy condition\n for (x,y), value in np.ndenumerate(power_table):\n if value in exclude.flatten():\n pass\n else:\n a = (np.where(power_table==value)[0])[0]\n b = (np.where(power_table==value)[1])[0]\n current_settings = f(row[a], col[b], prrw, prrz, Dt, hw, hz)\n if current_settings > 0.9:\n pass\n else:\n feasible.append(value)\n power_val.append(current_settings * value)\n loc.append((x,y))\n # return loc[power_val.index(min(power_val))], feasible[power_val.index(min(power_val))], min(power_val)\n #returns minimum power value that satisfies condition\n if not power_val:\n return search(u,v,prrw+0.1,prrz+0.05,Dt,hw,hz)\n else:\n with open('pyout.dat', 'w') as outp:\n outp.write(str(row[loc[power_val.index(min(power_val))][0]])+\" \"+str(col[loc[power_val.index(min(power_val))][1]]))", "def findminpath(tab, gxtab, gytab, pixtab):\n\n pathdist = 2 # the number of points each points on a ray can related to on the previous ray\n pathdist_penalty = 0.3 # penalty of the difference of the pathdist\n pathpix_penalty = 2 # penalty of the difference of pixel values between the point and the previous point\n nray = tab.shape[1]\n\n #tab = np.hstack((tab,tab[:, 0].reshape(tab.shape[0], 1)))\n #pixtab = np.hstack((pixtab,pixtab[:, 0].reshape(pixtab.shape[0], 1)))\n #gxtab = np.hstack((gxtab,gxtab[:, 0].reshape(gxtab.shape[0], 1)))\n #gytab = np.hstack((gytab,gytab[:, 0].reshape(gytab.shape[0], 1)))\n\n tab = np.hstack((tab,tab,tab)) # horizontally stack the tab matrix to prepare for the filtering on the result\n pixtab = np.hstack((pixtab,pixtab,pixtab))\n gxtab = np.hstack((gxtab,gxtab,gxtab))\n gytab = np.hstack((gytab,gytab,gytab))\n\n tab = (tab - tab.min()) / (tab.max() - tab.min()) # noralize the tab matrix\n pixtab = (pixtab - pixtab.min()) / (pixtab.max() - pixtab.min()) * -1 # for we want to find the white contour of the cell so we multipy -1 on the pixtab\n # tab = tab / np.median(tab)\n # pixtab = pixtab / np.median(pixtab)\n path = np.zeros(tab.shape)\n path[:, 0] = np.array(range(0, tab.shape[0]))\n score = np.zeros(tab.shape)\n score[:, 1] = tab[:, 1]\n\n for i in range(1, tab.shape[1]):\n for j in range(tab.shape[0]):\n mins = np.Inf # record the min value of the ray\n minat = 0\n for k in range(-pathdist, pathdist+1):\n if(0 <= (j+k) and (j+k) < tab.shape[0]):\n s = pixtab[j, i]\n pixdiff = abs(pixtab[j, i] - pixtab[j+k, i-1])\n s += pixdiff * pathpix_penalty # two kinds of penalty\n s += abs(k) * pathdist_penalty\n s += score[j+k, i-1]\n\n if(s < mins):\n mins = s\n minat = j + k\n path[j, i] = minat\n score[j, i]= mins\n\n start = int(np.argmin(score[:, -1]))\n path = path.astype(np.int32)\n minpath = [start]\n for i in range(tab.shape[1]-1, 0, -1):\n minpath.append(path[minpath[-1], i])\n minpath = minpath[::-1]\n # print(len(minpath))\n minpath = savgol_filter(minpath, 15, 3) # apply a Savitzky-Golay filter to the raw minpath signal\n minpath = minpath[nray:nray*2] # cut the middle part of minpath whose length is nray\n return np.array(minpath).astype(np.int32)", "def test_kruskal(self):\n x = basicdata()\n dmax = np.sqrt((x ** 2).sum())\n m = mst(x)\n g = eps_nn(x, dmax)\n k = g.kruskal()\n assert np.abs(k.weights.sum() - 
m.weights.sum() < 1.e-7)", "def grau(self, v):\n # O loop percorre cada linha e cada coluna da matriz até a posição do vértice do parâmetro.\n # Então, soma o valor/quantidade das arestas conectadas aquele vértice, e adiciona-o a variável grau para depois\n # retorná-la.\n\n pos = self.N.index(v)\n grau = 0\n for x, lista_de_arestas in enumerate(self.M):\n if x <= pos:\n for y, aresta in enumerate(lista_de_arestas):\n if x != pos:\n if y == pos:\n grau += aresta\n else:\n if aresta != '-':\n grau += aresta\n return grau", "def ACM_Kruskal(G):\n pass", "def kruskal_solve(self):\n\n\t\tmin_span_tree = Graph(self.graph.vertices, [])\n\t\tedges = sorted(self.graph.edges[:], key=lambda x: x[2])\n\t\tcount = 0\n\n\t\twhile count < len(self.graph.vertices) - 1:\n\t\t\tcur_edge = edges[0]\n\t\t\tedges = edges[1:]\n\t\t\t\n\t\t\tnode1, node2, weight = cur_edge\n\t\t\tif not min_span_tree.is_connected(node1, node2):\n\t\t\t\tmin_span_tree.edges.append(cur_edge)\n\t\t\t\tcount = count + 1\n\n\t\treturn min_span_tree", "def maze_kruskal(self, screen: Surface) -> None:\n # x1 and y1 are the main coordinates\n # x2 and y2 are their edge neighboors\n x1 = []\n y1 = []\n x2 = []\n y2 = []\n\n for x in range(0, self.horizontal):\n for y in range(0, self.vertical):\n for i in range(0, 4):\n newX = x+self.dx[i]\n newY = y+self.dy[i]\n if not (self.out_of_range(newX, newY)):\n x1.append(x)\n y1.append(y)\n x2.append(newX)\n y2.append(newY)\n\n check = [[0 for i in range(self.vertical)] for j in range(self.horizontal)]\n \n while x1:\n n = randint(0, len(x1) - 1)\n sx1 = x1[n]\n sx2 = x2[n]\n sy1 = y1[n]\n sy2 = y2[n]\n\n if (check[sx1][sy1] + check[sx2][sy2]) == 0:\n key = randint(1, 9000000)\n check[sx1][sy1] = key\n check[sx2][sy2] = key\n self.draw_neighbor(screen, sx1, sx2, sy1, sy2)\n self.pop4(x1, x2, y1, y2, n)\n continue\n\n if check[sx1][sy1] == check[sx2][sy2]:\n self.pop4(x1, x2, y1, y2, n)\n continue\n\n if check[sx1][sy1] == 0:\n check[sx1][sy1] = check[sx2][sy2]\n self.draw_neighbor(screen, sx1, sx2, sy1, sy2)\n self.pop4(x1, x2, y1, y2, n)\n continue\n\n if check[sx2][sy2] == 0:\n check[sx2][sy2] = check[sx1][sy1]\n self.draw_neighbor(screen, sx1, sx2, sy1, sy2)\n self.pop4(x1, x2, y1, y2, n)\n continue\n\n target = check[sx1][sy1]\n for x in range(0, self.horizontal):\n for y in range(0, self.vertical):\n if check[x][y] == target:\n check[x][y] = check[sx2][sy2]\n self.draw_neighbor(screen, sx1, sx2, sy1, sy2)\n self.pop4(x1, x2, y1, y2, n)", "def MinSpanningTreeKruskal(self):\n nodes = [n for n in self.nodes]\n edges = [e for e in self.edges]\n self.ResetGraph()\n for n in nodes:\n self.AddNode(n)\n n.neighbours = []\n\n \n edges.sort(key=lambda e: e.weight)\n \n for edge in edges:\n if not self.CausesCycleIfAdded(edge):\n self.ConnectByEdge(edge)\n if len(self.edges) == self.NodesCount()-1:\n break", "def get_shortest_route_floyd(network, start,destination, excludings=[]):\n\n # On récupère la liste des villes\n list_city = network[1].keys()\n \n # Si la ville de départ ou de fin n'existe pas\n if start not in list_city or destination not in list_city:\n return None\n\n # On retire les villes à exclure\n list_city = [x for x in list_city if x not in excludings]\n\n\n # Initialisation de se qu'on a besoin\n matrix = []\n distance = []\n n = len(list_city)\n\n \n # On construit la matrice adjacente où indique la distance si il existe une autoroute entre 2 villes\n for x in range(n): \n matrix.append( [] )\n distance.append( [] )\n for y in range(n):\n road_id = 
get_road_to(network,list_city[x],list_city[y])\n if road_id != None:\n matrix[x].append( get_length(network,road_id) )\n else:\n matrix[x].append( None )\n distance[x].append( [road_id] ) # Autoroute -> format: ['LA']\n\n\t \n # Algorithme de Floyd\n for k in range(n):\n for i in range(n):\n for j in range(n):\n if ( matrix[i][k] != None and matrix[k][j] != None ) and ( ( matrix[i][j] == None ) or ( matrix[i][j] > matrix[i][k] + matrix[k][j] ) ):\n matrix[i][j] = matrix[i][k] + matrix[k][j]\n\t\t \n\t\t # Hors Floyd / Ajout personnel\n if i != k and j != k: # Si i == k ou j == k, cela veut dire qu'on additionne un résultat supplémentaire à la case ij\n distance[i][j] = [] # Sinon ca signifie qu'on a trouvé un chemin plus court, du coup on supprime l'ancien chemin\n distance[i][j].extend( distance[i][k] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n distance[i][j].extend( distance[k][j] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n\n\t\t \n # On récupère simplement la liste des autoroutes parcourus\n idx_start = list_city.index( start )\n idx_destination = list_city.index( destination )\n distance_minimum = distance[ idx_start ][ idx_destination ]\n\n \n # Si on ne trouve aucune solution, on renvoie None\n if distance_minimum == [None]:\n distance_minimum = None\n \n return distance_minimum", "def find(Map, PosI, PosF):\n \n # Pour les tests, cf. Pathfinding et Pathfinding2 \n \n InitialPosI = PosI\n InitialPosF = PosF\n Chemin = []\n \n Hvalue = np.zeros((np.shape(Map))) #Distance\n Gvalue = np.zeros((np.shape(Map))) #Movement Cost\n Fvalue = np.zeros((np.shape(Map))) #G+H \n Gvalue[:] = np.nan #initialiser Gvalue à une matrice NaN\n \n OpenList = [(InitialPosI,'N')]\n CloseList = []\n \n # Initialisation de Hvalue\n for i in range(np.shape(Hvalue)[0]):\n for j in range(np.shape(Hvalue)[1]):\n if Map[i,j] !=1:\n Hvalue[i,j] = abs(i-PosF[0]) + abs(j-PosF[1])\n else:\n Hvalue[i,j] = np.nan\n\n### Round 1 (+initialisations)\n \n CloseList.append(tuple(PosI))\n \n if PosI[0]-1>=0 and Map[PosI[0]-1,PosI[1]] != 1 and ((PosI[0]-1,PosI[1]) not in OpenList) and ((PosI[0]-1,PosI[1]) not in CloseList): #Check vertical haut\n OpenList.append(((PosI[0]-1,PosI[1]),'D')) #D : fleche vers le bas..\n if PosI[0]+1<=np.shape(Map)[0]-1 and Map[PosI[0]+1,PosI[1]] != 1 and ((PosI[0]+1,PosI[1]) not in OpenList) and ((PosI[0]+1,PosI[1]) not in CloseList): #Check vertical bas\n OpenList.append(((PosI[0]+1,PosI[1]),'U')) \n if PosI[1]-1>=0 and Map[PosI[0],PosI[1]-1] != 1 and ((PosI[0],PosI[1]-1) not in OpenList) and ((PosI[0],PosI[1]-1) not in CloseList): #Check horiz gauche\n OpenList.append(((PosI[0],PosI[1]-1),'R'))\n if PosI[1]+1<=np.shape(Map)[1]-1 and Map[PosI[0],PosI[1]+1] != 1 and ((PosI[0],PosI[1]+1) not in OpenList) and ((PosI[0],PosI[1]+1) not in CloseList): #Check horiz droit\n OpenList.append(((PosI[0],PosI[1]+1),'L'))\n \n \n for OV in OpenList: #OV pour OpenValue \n Gvalue[OV[0][0],OV[0][1]] = 10\n \n Fvalue = np.copy(Gvalue + Hvalue)\n for CV in CloseList: #CV pour ClosedValue\n Fvalue[CV[0],CV[1]] = np.nan\n \n\n#### Round NEXT \n ###Vers le min de Fvalue:\n while PosF not in CloseList and PosI != PosF:\n \n if np.all(np.isnan(Fvalue)): #Check si F est égale à une matrice Full NaN\n# print('Pas de chemin')\n return(False) # soit return False, soit return la position init, donc bon..\n \n Index = np.argwhere(Fvalue == np.nanmin(Fvalue))\n PosI = Index.tolist()[0]\n \n CloseList.append(tuple(PosI))\n if PosI[0]-1>=0 and Map[PosI[0]-1,PosI[1]] != 1 and 
((PosI[0]-1,PosI[1]) not in OpenList) and ((PosI[0]-1,PosI[1]) not in CloseList): #Check vertical haut\n OpenList.append(((PosI[0]-1,PosI[1]),'D')) #DOWN (fleche vers le bas..)\n if PosI[0]+1<=np.shape(Map)[0]-1 and Map[PosI[0]+1,PosI[1]] != 1 and ((PosI[0]+1,PosI[1]) not in OpenList) and ((PosI[0]+1,PosI[1]) not in CloseList): #Check vertical bas\n OpenList.append(((PosI[0]+1,PosI[1]),'U')) #Up\n if PosI[1]-1>=0 and Map[PosI[0],PosI[1]-1] != 1 and ((PosI[0],PosI[1]-1) not in OpenList) and ((PosI[0],PosI[1]-1) not in CloseList): #Check horiz gauche\n OpenList.append(((PosI[0],PosI[1]-1),'R')) #Right\n if PosI[1]+1<=np.shape(Map)[1]-1 and Map[PosI[0],PosI[1]+1] != 1 and ((PosI[0],PosI[1]+1) not in OpenList) and ((PosI[0],PosI[1]+1) not in CloseList): #Check horiz droit\n OpenList.append(((PosI[0],PosI[1]+1),'L')) #Left\n \n for OV in OpenList:\n Gvalue[OV[0][0],OV[0][1]] = 10\n \n Fvalue = np.copy(Gvalue + Hvalue)\n for CV in CloseList:\n Fvalue[CV[0],CV[1]] = np.nan\n \n\n \n############## TRACING BACK \n PosF = InitialPosF\n\n while InitialPosI not in Chemin:\n \n for Trace in OpenList:\n if Trace[0] == PosF:\n Chemin.append(PosF)\n if Trace[1] == 'U':\n PosF = (PosF[0]-1,PosF[1]) #Go up\n elif Trace[1] == 'D':\n PosF = (PosF[0]+1,PosF[1]) #Go down\n elif Trace[1] == 'L':\n PosF = (PosF[0],PosF[1]-1) #Go left\n elif Trace[1] == 'R':\n PosF = (PosF[0],PosF[1]+1) #Go right\n# else:\n# print(Chemin)\n Chemin.reverse()\n return(Chemin)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PRIME FACTORS OF FACTORIAL Returns a list whose i-th position holds the power to which the i-th prime number is raised in the prime factorization of the factorial of n
def pff(n):
    global primes
    #primes = primi(n)
    ret = []
    for p in primes:
        a = 0
        if p > n:
            break
        for i in range(1, int(log(n,p)) + 1):
            a += int(n/p**i)
        ret.append(a)
    return ret
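The inner loop accumulates Legendre's formula: the exponent of a prime p in n! is the sum over i of floor(n / p**i). The sketch below is illustrative only; it assumes a module-level primes list (the commented-out primi(n) suggests a sieve builds it in the original code) and the math.log import the snippet relies on.

from math import log

# Usage sketch with an assumed primes list covering the primes up to n.
primes = [2, 3, 5, 7]
print(pff(10))  # 10! = 2**8 * 3**4 * 5**2 * 7  ->  [8, 4, 2, 1]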
[ "def factorization(n):\r\n pf = []\r\n for p in primeslist:\r\n if p * p > n : break\r\n count = 0\r\n while not n % p:\r\n n //= p\r\n count += 1\r\n if count > 0: pf.append((p, count))\r\n if n > 1: pf.append((n, 1))\r\n return pf", "def factorization(n):\n pf = []\n for p in primeslist:\n if p*p > n : break\n count = 0\n while not n % p:\n n //= p\n count += 1\n if count > 0: pf.append((p, count))\n if n > 1: pf.append((n, 1))\n return pf", "def factoresPrimos(n:int) -> Iterator[int]:\r\n return descompocion_en_primos(n,repeticion=False)", "def factor(n):\n def _factor(K):\n factors = []\n for i in xrange(2, 1+int(K**.5)):\n d, r = divmod(K, i)\n if (r == 0):\n factors.append(i)\n factors.append(d)\n return factors\n \n #print _factor(n)\n primes = [k for k in _factor(n) if len(_factor(k))==0]\n primes.sort()\n return primes", "def GetNFactors(n, primes, n_pfactors, _):\n sqrtn = int(n ** 0.5) + 1\n\n for p in primes:\n if p > sqrtn:\n break\n if n % p == 0:\n n //= p\n if n % p == 0:\n return n_pfactors[n]\n else:\n return n_pfactors[n] + 1\n\n # n is primes\n primes.append(n)\n return 1", "def padic_factorization(f):\n num_factors = f.degree()\n stock = None\n for p in prime.generator():\n fmodp = uniutil.polynomial(\n f.terms(),\n finitefield.FinitePrimeField.getInstance(p))\n if f.degree() > fmodp.degree():\n continue\n g = fmodp.getRing().gcd(fmodp,\n fmodp.differentiate())\n if g.degree() == 0:\n fp_factors = fmodp.factor()\n if not stock or num_factors > len(fp_factors):\n stock = (p, fp_factors)\n if len(fp_factors) == 1:\n return stock\n num_factors = len(fp_factors)\n else:\n break\n p = stock[0]\n fp_factors = []\n for (fp_factor, m) in stock[1]:\n assert m == 1 # since squarefree\n fp_factors.append(minimum_absolute_injection(fp_factor))\n return (p, fp_factors)", "def findfamily(p):\n ans = []\n for i in range(10):\n candidate = p.replace(\"*\", str(i))\n if is_prime(int(candidate)):\n ans.append(candidate)\n return sorted(ans)", "def PrimeFactorization(p):\n d, primeFactors = 2, []\n while d*d <= p:\n while (p % d) == 0:\n primeFactors.append(d)\n p //= d\n d += 1\n if p > 1:\n primeFactors.append(p)\n return primeFactors", "def pfb(n1, n2):\n global primes\n #primes = primi(n)\n n3 = n1 - n2\n factors_1, factors_2, factors_3 = pff(n1), pff(n2), pff(n3)\n for i in range(len(factors_2)):\n factors_1[i] -= factors_2[i]\n if i < len(factors_3):\n factors_1[i] -= factors_3[i]\n return factors_1", "def problem3():\n def _prime_factorization(n):\n \"\"\"Returns the list of prime factors of a number n\"\"\"\n factors = []\n f = 2\n # Use trial division to add factors\n while f**2 <= n:\n while (n % f) == 0:\n factors.append(f)\n n //= f\n f += 1\n\n if n > 1:\n factors.append(n)\n\n return factors\n\n return max(_prime_factorization(600851475143))", "def _factors(n):\n gen = ([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)\n return set(sum(gen, []))", "def fator_primo(numero):\n\n if numero // 1 == numero:\n\n divisores = int(numero)\n\n maior_fator = 0\n for i in range(1, divisores+1):\n\n cont = 0\n for j in range(1, i+1):\n\n if i % j == 0:\n cont += 1\n\n # Aqui o fator sempre será maior que 1\n if cont == 2:\n\n if divisores % i == 0:\n\n if i > maior_fator:\n maior_fator = i\n\n divisores = divisores / i\n\n # Aqui o fator sempre será 1\n elif cont == 1:\n\n if i > maior_fator:\n maior_fator = i\n\n return maior_fator", "def _gf_sqf_list(self, f):\n domain = self.domain\n\n n, factors, p = 1, [], int(domain.characteristic)\n m = int(domain.order // p)\n\n 
while not f.is_ground:\n df = [f.diff(x) for x in self.gens]\n\n if any(_ for _ in df):\n g = f\n for q in df:\n g = self.gcd(g, q)\n h, f, i = f // g, g, 1\n\n while h != 1:\n g = self.gcd(f, h)\n h //= g\n\n if not h.is_ground:\n factors.append((h, i*n))\n\n f //= g\n h = g\n i += 1\n\n n *= p\n\n g = self.zero\n for monom, coeff in f.items():\n g[tuple(_ // p for _ in monom)] = coeff**m\n f = g\n\n return factors", "def prime_divisors(n):\r\n\treturn list(set(factors(n)))", "def get_prime_factors(n):\n factors = []\n i = 2\n while i * i <= n:\n if n % i:\n i += 1\n else:\n n //= i\n factors.append(i)\n if n > 1:\n factors.append(n)\n return factors", "def landau1(n):\n\n i = 2\n sum_factors = 1\n factors = set()\n\n while i <= n: \n common = {j for j in factors if gcd(j, i) != 1}\n if len(common) == 0:\n factors = add_factor(i, n, factors)\n sum_factors = sum(factors)\n elif product(common) <= i:\n difference = factors.difference(common)\n new_factors = add_factor(i, n, difference)\n if product(new_factors) > product(factors):\n factors = new_factors\n sum_factors = sum(factors)\n i += 1\n\n print(n, product(factors), factors)\n return product(factors)", "def factor(N):\n\t#initialize\n\tprimes = [2,3]\n\tfactors = []\n\tn = N\n\t#test each known prime\n\tkeepGoing = True\n\twhile keepGoing:\n\t\tfor x in primes: #skip 1\n\t\t\tif n % x == 0:\n\t\t\t\tfactors.append(x)\n\t\t\t\tn = n / x\n\t\t\t\tbreak\n\t\tif n > primes[-1]:\n\t\t\tprimes.append(getNextPrime(primes))\n\t\telif n == 1:\n\t\t\tkeepGoing = False\n\treturn factors", "def prime_factors(n):\n\n prime_set = primes(n)\n factors = []\n for prime in prime_set:\n if n % prime == 0:\n factors.append(prime)\n return factors", "def get_probable_prime(n: int) -> [int]:\n return [6*n-1, 6*n+1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PRIME FACTORS OF BINOMIAL Returns a list whose i-th position holds the power to which the i-th prime number is raised in the prime factorization of the binomial coefficient (n1 choose n2)
def pfb(n1, n2):
    global primes
    #primes = primi(n)
    n3 = n1 - n2
    factors_1, factors_2, factors_3 = pff(n1), pff(n2), pff(n3)
    for i in range(len(factors_1)):
        if i < len(factors_2):
            factors_1[i] -= factors_2[i]
        if i < len(factors_3):
            factors_1[i] -= factors_3[i]
    return factors_1
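The exponents of C(n1, n2) are obtained by subtracting the exponents of n2! and (n1-n2)! from those of n1!. The sketch below is illustrative and reuses the assumed module-level primes list from the pff example above.

from math import log

# Usage sketch with an assumed primes list covering the primes up to n1.
primes = [2, 3, 5, 7]
print(pfb(10, 6))  # C(10, 6) = 210 = 2 * 3 * 5 * 7  ->  [1, 1, 1, 1]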
[ "def pff(n):\n global primes\n #primes = primi(n)\n ret = []\n for p in primes:\n a = 0\n if p > n:\n break\n for i in range(1, int(log(n,p)) + 1):\n a += int(n/p**i)\n ret.append(a)\n return ret", "def factorization(n):\r\n pf = []\r\n for p in primeslist:\r\n if p * p > n : break\r\n count = 0\r\n while not n % p:\r\n n //= p\r\n count += 1\r\n if count > 0: pf.append((p, count))\r\n if n > 1: pf.append((n, 1))\r\n return pf", "def pe029(max_a=100, max_b=100):\n\n \"\"\"\n # méthode peu efficace avec les listes\n puissances = list()\n for a in range(2, max_a+1):\n for b in range(2, max_b+1):\n # ajouter (sans doublon) a**b dans l'ensemble des puissances\n puiss = a**b\n if (puiss not in puissances):\n puissances.append(puiss)\n return len(puissances)\n \"\"\"\n\n # méthode efficace avec les ensembles\n puissances = set()\n for a in range(2, max_a+1):\n for b in range(2, max_b+1):\n # ajouter (sans doublon) a**b dans l'ensemble des puissances\n puiss = a**b\n puissances.add(puiss)\n return len(puissances)", "def factorization(n):\n pf = []\n for p in primeslist:\n if p*p > n : break\n count = 0\n while not n % p:\n n //= p\n count += 1\n if count > 0: pf.append((p, count))\n if n > 1: pf.append((n, 1))\n return pf", "def get_probable_prime(n: int) -> [int]:\n return [6*n-1, 6*n+1]", "def mobius(n,primes):\n m = [0]*(n+1)\n for p in primes:\n for i in range(p, n+1, p):\n m[i] += 1\n for p in primes:\n p_2 = p**2\n for i in range(p_2, n+1, p_2):\n m[i] = 0\n for i in range(n+1):\n if m[i] == 0:\n continue\n elif m[i]%2 == 0:\n m[i] = 1\n else:\n m[i] = -1\n m[1] = 1\n return m", "def Bpoly(n, x):\n n = int(n)\n out = 0\n for k in xrange(0, n+1):\n out += comb(n,k)*Bnum(n-k)*x**float(k)\n return out", "def factor(n):\n def _factor(K):\n factors = []\n for i in xrange(2, 1+int(K**.5)):\n d, r = divmod(K, i)\n if (r == 0):\n factors.append(i)\n factors.append(d)\n return factors\n \n #print _factor(n)\n primes = [k for k in _factor(n) if len(_factor(k))==0]\n primes.sort()\n return primes", "def quadratic_sieve(n, bound=1024, sieve=1000000):\n\n if n % 2 == 0:\n return 2\n if isprime(n):\n return n\n if ispower(n):\n return ispower(n)\n\n # primes where n is a quadratic residue - 0 would mean divisable ...\n p_list = [p for p in primes(bound) if pow(n, (p - 1) // 2, p) != p - 1]\n pi = len(p_list)\n\n rows = list()\n a_list = list()\n b_list = list()\n\n for a0 in range(isqrt(n) + 1, isqrt(2 * n), sieve):\n\n # sieving for b smooth numbers\n v_list = [a * a - n for a in range(a0, a0 + sieve)]\n for p in p_list:\n r = mod_sqrt(n, p)\n for i in range((r - a0) % p, sieve, p):\n while v_list[i] % p == 0:\n v_list[i] //= p\n for i in range((p - r - a0) % p, sieve, p):\n while v_list[i] % p == 0:\n v_list[i] //= p\n\n # factoring and checking\n for ai, r in enumerate(v_list):\n if r != 1 and not issquare(r):\n continue\n\n # factor all b smooth numbers\n ai += a0\n b2 = ai * ai - n\n v = 0\n factors = Counter()\n for i, p in enumerate(p_list):\n power = 0\n while b2 % p == 0:\n b2 //= p\n power += 1\n if power:\n factors[p] = power\n if power % 2:\n v |= 1 << i\n if b2 == 1:\n break\n\n # if b2 is square, use fermat's method\n if not v:\n print(\"fermat\")\n return ai - isqrt(ai * ai - n)\n\n rows.append(v)\n a_list.append(ai)\n b_list.append(factors)\n\n # look for 3+ that combine\n print(len(rows))\n cols = [\n sum(1 << i for i, x in enumerate(rows) if (1 << j) & x) for j in range(pi)\n ]\n marks = set(range(len(a_list)))\n\n # gaussian elimination\n for j, col in enumerate(cols):\n if not 
col:\n continue\n mask = 1\n i = 0\n while not mask & col:\n mask *= 2\n i += 1\n marks.remove(i)\n for k, col2 in enumerate(cols):\n if j == k:\n continue\n elif mask & col2:\n cols[k] ^= col\n\n # find matches\n rows2 = [\n sum(1 << i for i, x in enumerate(reversed(cols)) if (1 << j) & x)\n for j in range(len(a_list))\n ]\n for mark in marks:\n r_list = [mark]\n mask = 1\n row = rows2[mark]\n for _ in range(pi):\n if mask & row:\n r_list.append(rows2.index(mask))\n mask *= 2\n\n # combine with 0s?\n x = y = 1\n sq = Counter()\n for r in r_list:\n x = x * a_list[r] % n\n sq += b_list[r]\n for k, v in sq.items():\n y = y * pow(k, v // 2, n) % n\n f = gcd(y - x, n)\n\n if 1 < f < n:\n return f", "def lista_numeros_primos2(m):\n n=0\n k=2\n while n < m: # n=1,2,3,..,m\n resultado = eh_primo(k)\n if resultado == True:\n print(k, 'é um número primo')\n n=n+1\n else:\n print(k, 'igual', resultado, '*', k // resultado)\n k=k+1\n return None", "def factoresPrimos(n:int) -> Iterator[int]:\r\n return descompocion_en_primos(n,repeticion=False)", "def crear_claves():\r\n minPrim = 0\r\n maxPrim = 150\r\n primos = [i for i in range(minPrim, maxPrim) if isPrime(i)]\r\n primos_permi = primos_permitidos()\r\n tupla = choice(primos_permi)\r\n pri1 = tupla[0]\r\n pri2 = tupla[1]\r\n # ya tenemos los dos primos\r\n #print(pri1,\"---\",pri2)\r\n n = pri1 * pri2\r\n Phi_e = (pri1-1)*(pri2-1) # z\r\n # encontrar un coprimo\r\n for numero in primos:\r\n if Phi_e % numero != 0:\r\n co_pri = numero # k\r\n break\r\n clave_publica = [str(co_pri),str(n)]\r\n #print(clave_publica)\r\n for j in itertools.count(2):\r\n if (co_pri*j) % Phi_e == 1:\r\n clave_privada = [str(j),str(n)]\r\n break\r\n #print(clave_privada)\r\n clave_publica = \",\".join(clave_publica)\r\n clave_privada = \",\".join(clave_privada)\r\n return clave_publica,clave_privada", "def pt(n):\n retval = []\n for a in range(1, n):\n for b in range(a, n):\n for c in range(b, n):\n if a * a + b * b == c * c:\n retval.appen([a,b,c])\n return retval", "def Grundy(x):\n # n taille bianire max des xi; m longeur de x\n \n # Calcul de la longueur binaire utilisée\n # Complexité en O(m)\n \n n = 0\n \n for val in x :\n t = taille(val)\n if n < t :\n n = t\n \n \n \n # Ecriture de la liste x en binaire\n # Complexité en O(m*n) car binaire(x,n) en O(n)\n \n b = [binaire(val,n) for val in x ]\n \n # Calcul de la somme binaire mod 2 :\n # Complexité en O(m*n) : m valeur dans b; n tours de boucle\n \n a = []\n \n for i in range(n):\n \n s = 0\n \n for val in b :\n s+= val[i]\n \n a.append(s%2)\n \n return(decimal(a))", "def linear_sieve(max_n):\n smallest_factors = [0] * max_n\n primes = []\n\n for i in range(2, max_n):\n if smallest_factors[i] == 0:\n smallest_factors[i] = i\n primes.append(i)\n\n for p in primes:\n if p > smallest_factors[i] or i * p >= max_n:\n break\n smallest_factors[i * p] = p\n return primes, smallest_factors", "def Pollard_pm1(n, primes, max_B=1000000):\n B = 10\n g = 1\n while B < max_B and g < n:\n a = randint(2, n - 2)\n g = gcd(a, n)\n if g != 1:\n return g\n for p in primes:\n if p >= B:\n break\n pd = 1 # p^d\n while pd * p <= B:\n pd *= p\n a = powmod(a, pd, n)\n g = gcd(a - 1, n)\n if g != 1 and g != n:\n return g\n B *= 2\n return 1", "def multiplicaciones(): #906609 tiene que darme\n ultimo_palindromo = 0\n total = 0\n for primer_numero in range(100, 1000):\n for segundo_numero in range(100, 1000):\n #total se encarga de hacer la multiplicacion entre los numeros\n total = primer_numero * segundo_numero\n # llamamos a la funcion que 
verifica si la multiplicacion que envia es un palindromo\n if obtener_palindromo(total):\n #luego de verificar que la multiplicacion era palindromo pasamos a evaluarla hasta llegar al ultimo palindromo\n #entre 100 y 1000\n if ultimo_palindromo < total:\n ultimo_palindromo = total\n return ultimo_palindromo", "def __primes(n):\n \"\"\" https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\"\"\"\n sieve = [True] * n\n for i in range(3, int(n ** 0.5) + 1, 2):\n if sieve[i]:\n sieve[i * i::2 * i] = [False] * int(((n - i * i - 1) / (2 * i) + 1))\n return [2] + [i for i in range(3, n, 2) if sieve[i]]", "def primes( self, top ):\n\t \n\t if top <= max(self._smallprimes) :\n\t return [x for x in self._smallprimes if x < top]\n\t else :\n\t for i in range(max(self._smallprimes)+1, top): \n\t if (i % 2) and self.miller_rabin(i, 30) : # miller avec une sécurité élevée : pas d'erreur probable & pas de contrainte de vitesse\n\t self._smallprimes.append(i) \n\t return self._smallprimes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list containing the first n rows of Pascal's triangle; each row is itself a list
def triangolo_pascal(n):
    tri = [[1]]
    for i in range(1,n):
        tri.append([1])
        for j in range(1,i):
            tri[i].append(tri[i-1][j-1] + tri[i-1][j])
        tri[i].append(1)
    return tri
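A minimal usage sketch (not part of the original record): printing the first five rows.

for row in triangolo_pascal(5):
    print(row)
# [1]
# [1, 1]
# [1, 2, 1]
# [1, 3, 3, 1]
# [1, 4, 6, 4, 1]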
[ "def UtworzPustaPlansze (rozmiar):\r\n tablica=[0]*rozmiar # najpierw tworzymy listę z <wierszy> elementów\r\n # elementy listy mogą być dowolne\r\n for i in range (rozmiar):\r\n tablica[i]=[\" \"]*rozmiar\r\n return tablica", "def pascal_triangle(n):\n myList = []\n if n <= 0:\n return []\n elif n == 1:\n return [[1]]\n for lines in range(1, n + 1):\n myList2 = []\n c = 1\n for i in range(1, lines + 1):\n myList2.append(str(c))\n c = c * (lines - i) // i\n myList.append(myList2)\n return myList", "def primos_permitidos():\r\n parejas_permitidas = [(31,23),(47,19),(7,19),(17,41),(31,7),(29,47),(37,23),(2,79),(43,17),(7,37),(5,61),\r\n (17,31),(23,19),(23,7),(11,83),(17,7),(71,3),(37,29),(7,79),(11,59),(37,3),(3,59),(13,53),(79,11),(89,3),\r\n (2,97),(23,5),(13,41),(89,2),(5,97),(89,7),(41,7),(59,7),(19,41),(31,13),(29,19),(79,5),(83,7),\r\n (83,3),(43,7),(23,17),(23,29),(3,41),(17,47),(37,13),(37,11),(53,5),(43,3),(5,83),(7,67),(89,5),\r\n (19,53),(29,17),(53,11),(11,41),(5,47),(73,13),(13,23),(47,29),(5,89),(17,23),(5,43),(71,11),(67,5),\r\n (149,3),(7,47),(19,37),(127,7),(109,7),(7,53),(67,2),(19,41),(67,11),(7,97),(3,103),(3,131),(163,2),(11,61),\r\n (113,5),(73,5),(17,7),(61,5),(97,5),(43,13),(157,5),(2,107),(71,5),(3,151),(5,29),(2,151),(137,3),\r\n (13,29),(59,11),(137,5),(47,11),(13,47),(2,197),(53,17),(239,3),(229,2),(23,37),(53,13),(11,73)]\r\n return parejas_permitidas", "def generate_pascal_row(row):\n ##############################################################################\n # TODO: Write code to generate a row of Pascal's triangle. #\n ##############################################################################\n # Replace \"pass\" statement with your code\n list1= [] # 新增一個list\n for i in range(len(row)):\n if( i == 0 or i == len(row) ):\n list1.append(1)\n else : \n sum = row[i] + row[i-1]\n list1.append(sum)\n list1.append(1) # 給最後一個值\n return list1\n \n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################", "def primeroultimo(lista):\r\n return [lista[0],lista[-1]]", "def tri_entiers(l):\n # première boucle, minimum, maximum\n m = l [0]\n M = l [0]\n for k in range(1,len(l)):\n if l [k] < m : m = l [k]\n if l [k] > M : M = l [k]\n \n # calcul du nombre d'occurrences\n p = [0 for i in range (m,M+1) ]\n for i in range (0, len (l)) :\n p [ l [i] - m ] += 1\n \n # fonction de répartition\n P = [0 for i in range (m,M+1) ]\n P [0] = p [0]\n for k in range (1, len (p)) :\n P [k] = P [k-1] + p [k]\n \n # tri\n pos = 0\n for i in range (1, len (l)) :\n while P [pos] < i : pos += 1\n l [i-1] = pos + m\n l [len (l)-1] = M", "def generar_numeros_pares(n = 100):\n pares = []\n \n contador = 0\n numero = 0\n \n while contador < n:\n if numero % 2 == 0:\n pares.append(numero)\n contador += 1\n \n numero += 1\n \n return pares", "def pascal_triangle(n):\n\n l_list = []\n\n if n <= 0:\n return []\n for i in range(1, n + 1):\n n_list = []\n num = 1\n for j in range(1, i + 1):\n n_list.append(num)\n num = int(num * (i - j) // j)\n l_list.append(n_list)\n return l_list", "def parcours_colonne(n):\n print(\"parcours colonne\")\n j=0 #colonne\n l=[]\n for j in range(n):\n i=0 #ligne\n for i in range(n):\n l+=[(j,i)]\n i+=1\n j+=1\n return l", "def pascal_triangle(n):\n n_list = []\n if n <= 0:\n return n_list\n else:\n for i in range(n):\n sub_list = []\n sub_list.append(1)\n for j in range(1, i):\n sub_list.append(n_list[i - 1][j - 1] + n_list[i - 1][j])\n 
if i != 0:\n sub_list.append(1)\n n_list.append(sub_list)\n return n_list", "def rangoli(n):\r\n alphabet = string.ascii_lowercase\r\n pad = 4*n-3\r\n filler = '-'\r\n initial = [alphabet[n-1]]\r\n top = [alphabet[n-1].center(pad, filler)]\r\n\r\n for i in range(n-2, -1, -1):\r\n initial.append(alphabet[i])\r\n sub_list = initial[:-1]+[alphabet[i]]+list(reversed(initial[:-1]))\r\n sub_seq = filler.join(sub_list).center(pad, filler)\r\n top.append(sub_seq)\r\n\r\n bot = list(reversed(top[:-1]))\r\n result = '\\n'.join(top + bot)\r\n print(result)\r\n return", "def possible_subpeptides(self):\n ret = [\"\"]\n protein_len = len(self.protein)\n for l in range(1, protein_len):\n for i in range(protein_len):\n if i + l <= protein_len:\n ret += [self.protein[i : i+l]]\n else:\n ret += [self.protein[i:] + self.protein[:(i+l)%protein_len]]\n ret += [self.protein]\n return ret", "def spisok(n):\n result = [] # объявление результирующего списка\n for x in range(n): # для каждого из чисел от 0 до 999999 делай\n result.append(x) # добавление в результирующий список очередного элемента\n return result", "def premier_list_10000(self):\n\n if not os.path.exists('premier_list_10000.txt'):\n with open('premier_list_10000.txt', mode='w', encoding='utf-8') as file:\n file.write('[1,2')\n number = 3\n while(number < 10000):\n if self.is_premier(number):\n file.write(','+str(number))\n number+=2\n file.write(']')\n with open('premier_list_10000.txt', mode='r', encoding='utf-8') as file:\n return eval(file.read())", "def reubicar(lista, p):\r\n\r\n\tv = lista[p]\r\n\r\n# Recorrer el segmento [0:p-1] de derecha a izquierda hasta\r\n# encontrar la posición j tal que lista[j-1] <= v < lista[j].\r\n\tj = p\r\n\twhile j > 0 and v < lista[j - 1]:\r\n# Desplazar los elementos hacia la derecha, dejando lugar\r\n# para insertar el elemento v donde corresponda.\r\n\t\tlista[j] = lista[j - 1]\r\n\t\tj -= 1\r\n\tlista[j] = v", "def partitions_list(n):\r\n p = IntegerPartition([n])\r\n w = []\r\n while list(p.args[1]) not in w:\r\n w.append(list(p.args[1]))\r\n p = p.next_lex()\r\n return w", "def obtenerListaPorcentaje(paisesF, prc = 80):\r\n tot = sum([x[1] for x in paisesF])\r\n acum = 0\r\n listF = []\r\n for pais in paisesF:\r\n prcI = (pais[1]/tot)*100\r\n listF.append([pais[0], pais[1], round(prcI,2)])\r\n acum += prcI\r\n if acum > prc:\r\n break\r\n \r\n return listF", "def primerosAvistamientos(catalog, n):\n return model.primerosAvistamientos(catalog, n)", "def create_pascal_triangle(self, n):\r\n if n == 0:\r\n return list()\r\n\r\n return [self.gen_triangle_level(i)\r\n for i\r\n in range(1, n + 1, 1)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get current NFL season. After March, returns year of upcoming season.
def current_season() -> int:
    now = datetime.now()
    month, year = now.month, now.year
    if month < 4:
        year -= 1
    return year
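A usage sketch (illustrative; the snippet itself omits its import): with the cutoff at April, a call made in February 2025 yields 2024, the season that kicked off the previous fall, while a call made in September 2025 yields 2025.

from datetime import datetime

print(current_season())  # depends on today's date, per the rule above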
[ "def current_season():\n td = datetime.datetime.today()\n if td.month > 8:\n return td.year\n return td.year - 1", "def get_current_season():\n month_nr = date.today().month\n return (month_nr % 12 + 3) // 3", "def get_current_season():\n current_time = datetime.now()\n if current_time.month > 6:\n start_year = current_time.year\n else:\n start_year = current_time.year - 1\n end_year = start_year + 1\n return \"{}{}\".format(str(start_year)[2:], str(end_year)[2:])", "def latest_season_before(date):\n\tif date.month < 9:\n\t\treturn date.year - 1\n\treturn date.year", "def return_football_season(date=datetime.datetime.today()):\n date_aux = subtract_months(date, 6)\n beginning_year = str(date_aux.year)\n ending_year = date_aux.year + 1\n ending_year = str(ending_year)[-2:]\n season = ''.join([beginning_year, '-', ending_year])\n return season", "def season(self):\n if(self.start_date.month >= 1 and self.start_date.month <= 5):\n return \"Spring\"\n elif(self.start_date.month >= 8 and self.start_date.month <= 12):\n return \"Fall\"\n else:\n return \"Summer\"", "def season():\n month = datetime.now().month\n return 0 if month in {1, 2, 3, 10, 11, 12} else 1", "def season(self):\n # In lieu of a Python enum type:\n return str(((int(self.published.strftime(\"%m\")) - 1) / 3) % 4)", "def season(self):\n if self.game_id[3] == \"9\":\n return \"19\" + self.game_id[3] + self.game_id[4]\n else:\n return \"20\" + self.game_id[3] + self.game_id[4]", "def season(self):\n return self._season", "def _current_season():\n endpoint = \"seasons/current\"\n data = _api_request(endpoint)\n if data:\n season = data['seasons'][0]['seasonId']\n return season\n else:\n raise JockBotNHLException('Unable to retrieve current NHL season')", "def season_from_date(date):\r\n month = int(date[5:7])\r\n if 4 < month < 11:\r\n return \"summer\"\r\n else:\r\n return \"winter\"", "def get_current_season_name():\n month_nr = get_current_season()\n return get_season_name(month_nr)", "def get_current_player_season(self):\n return self.get_player_season(\"current\")", "def media_season(self):\n return self._season", "def media_season(self):\n media_status = self._media_status()[0]\n return media_status.season if media_status else None", "def getSeason(date):\n\n date = validate.timestamp(date)\n day = date.dayofyear\n leap_year = int(date.is_leap_year)\n\n spring = numpy.arange(80, 172) + leap_year\n summer = numpy.arange(172, 264) + leap_year\n autumn = numpy.arange(264, 355) + leap_year\n\n if day in spring:\n season = \"spring\"\n elif day in summer:\n season = \"summer\"\n elif day in autumn:\n season = \"autumn\"\n else:\n season = \"winter\"\n\n return season", "def manager_season(self):\n return self._manager_season", "def seasonNumber(self):\n return self.index" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get NFL week (ESPN scoring period) from date. The year of the given date determines the relevant NFL season. Assumes week 1 begins the week of Labor Day and ends the following Wednesday. Does not cap value, so may be below 1 or above 17.
def get_week_from_date(date) -> int:
    month, year = date.month, date.year
    if month < 4:
        year -= 1
    ld = _labor_day(year)
    wk1_wed = ld + timedelta(days=2)
    days_since = (date - wk1_wed).days
    weeks_since = days_since / 7.
    week = math.floor(weeks_since) + 1
    return int(week)
[ "def get_week(date):\n\n # TODO: the API seems broken. It returns week, year not year, week as documentef\n # why not use date.isocalendar() from the stdlib?\n\n date = date_trunc('week', date)\n\n first_monday = date_trunc('week', date_trunc('year', date))\n if first_monday.year < date.year:\n first_monday += datetime.timedelta(weeks=1)\n diff = date_trunc('day', date) - first_monday\n week = 1 + (diff.days / 7)\n return week, first_monday.year", "def W(cls, dt):\n # Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt\n jan1_weekday = dt.replace(month=1, day=1).weekday() + 1\n weekday = dt.weekday() + 1\n day_of_year = cls.z(dt)\n if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:\n if jan1_weekday == 5 or (jan1_weekday == 6 and\n calendar.isleap(dt.year - 1)):\n week_number = 53\n else:\n week_number = 52\n else:\n if calendar.isleap(dt.year):\n i = 366\n else:\n i = 365\n if (i - day_of_year) < (4 - weekday):\n week_number = 1\n else:\n j = day_of_year + (7 - weekday) + (jan1_weekday - 1)\n week_number = j // 7\n if jan1_weekday > 4:\n week_number -= 1\n return week_number", "def GetWeekNum(self, date):\n (y, m, d) = date.split('-')\n return (dt.date(int(y), int(m), int(d)) - self.START_DATE).days / 7", "def ISOWEEKNUM(date):\n return _make_datetime(date).isocalendar()[1]", "def weeks_per_year(year):\n return week_from_date(date(year, 12, 31))", "def workweeks(yr):\n\n # TODO: MOVE all of this crap into a intelDateTime.py module. Does not belong here. JSS\n\n nyd = datetime.date(yr, 1, 1).weekday() # Determine the day of the week on which the 1st of January fell this year.\n if nyd == 5: return 53 # If the 1st of January fell on a Saturday, the year has 53 weeks.\n if nyd == 4 and isleapyear(yr): return 53 # Same deal if the 1st of January fell on a Friday in a leap year.\n return 52 # All other years have 52 work weeks.", "def epi_week_for_date(date, param_config=country_config):\n # We don't support timezone info in date comparison\n # TODO: .replace(tzinfo=None) should be moved to a common meerkat dateparser\n date = date.replace(tzinfo=None)\n _epi_config = param_config[\"epi_week\"]\n _epi_week_53_strategy=param_config.get(\"epi_week_53_strategy\",\n \"leave_as_is\")\n _epi_year_start_date = epi_year_start_date(date, epi_config=_epi_config)\n _epi_year = epi_year_by_date(date, epi_config=_epi_config)\n _epi_week_number = (date - _epi_year_start_date).days // 7 + 1\n if _epi_week_number in [0, 53]:\n _epi_year, _epi_week_number = __handle_epi_week_53(epi_year=_epi_year,\n epi_week_53_strategy=_epi_week_53_strategy)\n return _epi_year, _epi_week_number", "def get_week_start_date(year, week):\n day_1 = datetime.datetime(year, 1, 1)\n if day_1.isoweekday() > 4: # iso8601, first week is one with first thursday\n week_1 = day_1 + datetime.timedelta(days=8-day_1.isoweekday())\n else:\n week_1 = day_1 - datetime.timedelta(days=day_1.isoweekday()-1)\n return week_1 + datetime.timedelta(weeks=week-1)", "def week_num() -> int:\n return date.today().isocalendar()[1]", "def week1(self,year):\n t = datetime.datetime(year,1,1)\n while t.isocalendar()[2] != self.ws:\n t += datetime.timedelta(days=1)\n if self.pw:\n t += datetime.timedelta(days=1-self.ws)\n return t", "def get_week_date_from_ordinal_date(year, day_of_year):\n year, month, day = get_calendar_date_from_ordinal_date(year, day_of_year)\n return get_week_date_from_calendar_date(year, month, day)", "def get_week(date):\n day_idx = (date.weekday() + 1) % 7 # turn sunday into 0, monday into 1, etc.\n sunday = date - 
timedelta(days=day_idx)\n date = sunday\n for n in xrange(7):\n yield date\n date += one_day", "def _get_weeks_in_year(year, _):\n cal_year, cal_ord_days = get_ordinal_date_week_date_start(year)\n cal_year_next, cal_ord_days_next = get_ordinal_date_week_date_start(\n year + 1)\n diff_days = cal_ord_days_next - cal_ord_days\n for intervening_year in range(cal_year, cal_year_next):\n diff_days += get_days_in_year(intervening_year)\n return diff_days // CALENDAR.DAYS_IN_WEEK", "def _window_for_week(date):\n monday = date - datetime.timedelta(date.weekday())\n sunday = monday - datetime.timedelta(1)\n min = datetime.datetime.combine(sunday, gtimelog.virtual_midnight)\n max = min + datetime.timedelta(7)\n window = gtimelog.window_for(min, max)\n return window", "def get_week_from_datestr(datestr: str) -> int:\n return date.fromisoformat(datestr).isocalendar()[1]", "def find_date_for_week_rollup(self):\n weekNo = int((datetime.utcnow().strftime(\"%U\")))\n # first date of the current year\n first_date_of_year = date(datetime.utcnow().year,1,1)\n # find the day of the week as an integer sun = 7/mon = 1\n if(first_date_of_year.weekday()>3):\n # find the date of weekend i.e ( date on sunday of that week). needs to add number of days if weeday > 3\n last_date_week = first_date_of_year+timedelta(7-first_date_of_year.weekday())\n else:\n # find the date of weekend i.e ( date on sunday of last week). needs to sub if weekday < 3\n last_date_week = first_date_of_year - timedelta(first_date_of_year.weekday())\n # find number of days upto lastweek for current year\n no_of_days = timedelta(days = (weekNo-1)*7)\n # for date = (2009-10-10) weekBeforeToDate will be (2009-10-04)\n weekBeforeToDate = (last_date_week + no_of_days + timedelta(days=6))\n return weekBeforeToDate", "def _get_freq_label_by_week(date_value: str) -> str:\n if bool(re.match(r\"^\\d{4}W\\d{1,2}$\", date_value)):\n return date_value\n if not bool(re.match(r\"^\\d{4}-\\d{1,2}-\\d{1,2}$\", date_value)):\n raise ValueError(\"Date needs to be in yyyy-mm-dd format when freq is W\")\n ts = pd.Timestamp(date_value)\n return \"{}W{}\".format(ts.year, ts.week)", "def get_week_of_year(date, padded_or_unpadded, start_Sunday_or_Monday):\n if start_Sunday_or_Monday == constants.str_Sunday:\n week_of_year = date.strftime('%U')\n elif start_Sunday_or_Monday == constants.str_Monday:\n week_of_year = date.strftime('%W')\n else:\n err_msg = str_possible_values('start_Sunday_or_Monday', [\n constants.str_Sunday, constants.str_Monday])\n raise ValueError(err_msg)\n\n if padded_or_unpadded == constants.str_padded:\n return week_of_year\n elif padded_or_unpadded == constants.str_unpadded:\n return str(int(week_of_year))\n else:\n err_msg = str_possible_values('padded_or_unpadded', [\n constants.str_padded, constants.str_unpadded])\n raise ValueError(err_msg)", "def get_ordinal_date_week_date_start(year):\n return _get_ordinal_date_week_date_start(year, CALENDAR.mode)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find list of edl directories in all dependencies for the passed module
def get_edl_dirs(mod, gen_cfg):
    log.info("Fetching dependencies for %s", coordinates.as_path(mod.coords))
    dependencies = mod.get_dependencies()
    edl_dirs = [mod.get_edl_path()]
    for dep, dep_coords in dependencies.items():
        dep_cfg = gen_cfg.get_mod_cfg(dep)
        log.info("Dependency: %s", coordinates.as_path(dep_coords))
        dep_edl_path = os.path.join(mod.mirror_root, coordinates.as_path(dep_coords, False)[1:], dep_coords.version, dep_cfg.edl_dir)
        edl_dirs.append(dep_edl_path)
    return edl_dirs
[ "def retrieve_module_list():\n\n current_dir = getcwd()\n mod_list = []\n\n for item in listdir(current_dir):\n\n if item.endswith('db'):\n\n mod_list.append(item)\n\n return mod_list", "def getModules() -> tuple:\n return data.getFoldersOf(data.ETC)", "def library_dirs(self):", "def modules():\n return [os.path.relpath(os.path.join(root, filename), 'groot_ansible')\n for root, _, filenames in os.walk('groot_ansible/playbooks/library') for filename in filenames if '.git' not in root.split(os.sep)\n ]", "def __dir__():\n return __all__", "def modpaths(self):\n for module_info in pkgutil.iter_modules([self.directory]):\n bits = self.relbits + [module_info[1]]\n yield Modulepath(\".\".join(bits), self.basedir)\n\n if module_info[2]: # module is a package because index 2 is True\n submodules = Dirpath(os.sep.join(bits), self.basedir)\n for submodule in submodules.modpaths():\n #subbits = [module_info[1]] + submodule.relbits\n #yield Modulepath(u\".\".join(subbits), self.basedir)\n yield submodule", "def discover_modules(repo_dir):\n for modules_dir, _, files in os.walk(repo_dir):\n if 'module.yaml' in files:\n module = Module(os.path.join(modules_dir, 'module.yaml'))\n module.fetch_dependencies(repo_dir)\n modules.append(module)", "def get_module_search_paths(module_name, script_file_path):\n for parent in traverse_parents(script_file_path):\n if os.path.basename(parent) == module_name:\n yield parent", "def dcs_modules():\n\n dcs_dirname = os.path.dirname(__file__)\n module_prefix = __package__ + '.'\n\n if getattr(sys, 'frozen', False):\n importer = pkgutil.get_importer(dcs_dirname)\n return [module for module in list(importer.toc) if module.startswith(module_prefix) and module.count('.') == 2]\n else:\n return [module_prefix + name for _, name, is_pkg in pkgutil.iter_modules([dcs_dirname]) if not is_pkg]", "def __dir__():\n keys = (*globals().keys(), *_lazy_imports_obj.keys(), *_lazy_imports_mod.keys())\n return sorted(keys)", "def dependency_dir(self) -> Path:", "def include_dirs(self):", "def debugger_list_modules():", "def find_enstools_packages():\n\n return [f'enstools.{p}' for p in (find_packages(f'{os.path.dirname(__file__)}/enstools'))]", "def get_dependencies(dir: str = \".\") -> Iterable[Tuple[str, str]]:\n # data = get_dependencies(dir)\n\n valid, config = load_config(dir)\n return tuple(config['dependencies'].items())", "def lib_dirs(self):\r\n ret = []\r\n for x in [y.type for y in self.variables] + [\r\n y.op for y in self.node_order]:\r\n try:\r\n ret += x.c_lib_dirs()\r\n except utils.MethodNotDefined:\r\n pass\r\n return utils.uniq(ret)", "def list_modules():\n for module_name in listdir(modules_directory):\n if isdir(join(modules_directory, module_name)):\n log.debug('Load module: {0}'.format(module_name))\n yield module_name", "def collect_all_loadmodules():\n locations = None\n if flag_filemode == \"target\":\n locations = \"%s/symbols/system\" % apo\n else:\n locations = \"%s/bin %s/lib64\" % (aho, aho)\n u.verbose(1, \"collecting loadmodules from %s\" % locations)\n cmd = \"find %s -type f -print\" % locations\n u.verbose(1, \"find cmd: %s\" % cmd)\n cargs = shlex.split(cmd)\n mypipe = subprocess.Popen(cargs, stdout=subprocess.PIPE)\n pout, _ = mypipe.communicate()\n if mypipe.returncode != 0:\n u.error(\"command failed (rc=%d): cmd was %s\" % (mypipe.returncode, cmd))\n encoding = locale.getdefaultlocale()[1]\n decoded = pout.decode(encoding)\n lines = decoded.strip().split(\"\\n\")\n u.verbose(1, \"found a total of %d load modules\" % len(lines))\n for line in 
lines:\n path = line.strip()\n u.verbose(2, \"adding LM %s\" % path)\n all_loadmodules[path] = 0\n bn = os.path.basename(path)\n pdict = base_to_paths[bn]\n pdict[path] = 1\n if flag_backward_slice:\n for filearg in flag_input_files:\n bn = os.path.basename(filearg)\n if bn not in all_loadmodules:\n u.warning(\"argument %s not found in all_loadmodules \"\n \"-- unable to compute slice\" % filearg)", "def execd_module_paths(execd_dir=None):\n if not execd_dir:\n execd_dir = default_execd_dir()\n\n if not os.path.exists(execd_dir):\n return\n\n for subpath in os.listdir(execd_dir):\n module = os.path.join(execd_dir, subpath)\n if os.path.isdir(module):\n yield module" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the symbol XML node
def edit_symbol_node(node, filename):
    size = int(re.findall('\d+', filename)[-1])
    log.info('New filename %s; size %s', filename, size)
    node.set('typeId', SYMBOL_ID)
    node.find('name').text = 'DLS symbol'
    # Use PV name from rule in control PV for tooltip etc.
    # Reference that PV in rule to avoid duplication.
    pv_name = node.find('.//pv').text
    pv_element = et.Element('pv_name')
    pv_element.text = pv_name
    node.append(pv_element)
    node.find('.//pv').text = '$(pv_name)'
    rule_element = node.find('.//rule')
    rule_element.set('prop_id', 'image_index')
    rule_element.set('out_exp', 'true')
    file_element = et.Element('image_file')
    file_element.text = filename
    num_element = et.Element('symbol_number')
    num_element.text = '0'
    img_size_element = et.Element('sub_image_width')
    img_size_element.text = str(size)
    node.append(file_element)
    node.append(num_element)
    node.append(img_size_element)
    node.remove(node.find('opi_file'))
[ "def update_sym(self, new_symbol):\n self.symbol = new_symbol\n self.layer_type_dict = self._get_layer_type_dict()\n self.arg_params = self._clean_params(self.symbol, self.arg_params)\n self.aux_params = self._clean_params(self.symbol, self.aux_params)", "def symbol(self, new_symbol: int):\n # TODO add checking\n self._symbol = new_symbol", "def symbol(self, symbol):\n self._symbol = symbol", "def update(xml_node, suppress_errors=False):", "def symbol(self, symbol):\n\n self._symbol = symbol", "def update_node(self, node):", "def set_symbol(self, symbol, value):\n ctypes.pointer(\n type(value).from_address(self._get_address(symbol))\n )[0] = value", "def set_symbol(self, row, col, symbol):\n self.field[row, col] = symbol", "def set_full_symbol(self,symbol: str):\n\n # Update name line widget and clear all labels\n self.full_symbol = symbol\n self.symbol_line.setText(symbol)\n self.clear_label_text()", "def setSymbolProps(self, name, symbol):\r\n self.symbolProps = autosar.base.SymbolProps( str(name), str(symbol))", "def create_simple_symbol(xml_document, symbols_element, properties, count, alpha, tags=None):\n symbol_element = xml_document.createElement(\"symbol\")\n symbol_element.setAttribute(\"alpha\", alpha)\n symbol_element.setAttribute(\"clip_to_extent\", \"1\")\n symbol_element.setAttribute(\"type\", properties['symbol_type'])\n symbol_element.setAttribute(\"name\", unicode(count))\n if tags and len(tags) > 0:\n symbol_element.setAttribute(\"tags\", tags)\n symbols_element.appendChild(symbol_element)\n\n for layer in reversed(properties['layer']):\n renderer_layer_element = xml_document.createElement(\"layer\")\n renderer_layer_element.setAttribute(\"pass\", \"0\")\n renderer_layer_element.setAttribute(\"enabled\", \"1\")\n renderer_layer_element.setAttribute(\"locked\", \"0\")\n renderer_layer_element.setAttribute(\"class\", layer['simpleSymbolClass'])\n symbol_element.appendChild(renderer_layer_element)\n\n for key, value in layer['dict_symbols'].items():\n\n symbol_properties_element = xml_document.createElement(\"prop\")\n symbol_properties_element.setAttribute(\"k\", unicode(key))\n symbol_properties_element.setAttribute(\"v\", unicode(value))\n renderer_layer_element.appendChild(symbol_properties_element)\n\n data_defined_properties_element = xml_document.createElement(\"data_defined_properties\")\n renderer_layer_element.appendChild(data_defined_properties_element)\n\n data_defined_option_element = xml_document.createElement(\"Option\")\n data_defined_option_element.setAttribute(\"type\", \"Map\")\n data_defined_properties_element.appendChild(data_defined_option_element)\n\n data_defined_option_value_element = xml_document.createElement(\"Option\")\n data_defined_option_value_element.setAttribute(\"value\", \"\")\n data_defined_option_value_element.setAttribute(\"type\", \"QString\")\n data_defined_option_value_element.setAttribute(\"name\", \"name\")\n data_defined_option_element.appendChild(data_defined_option_value_element)\n\n data_defined_option_name_element = xml_document.createElement(\"Option\")\n data_defined_option_name_element.setAttribute(\"name\", \"properties\")\n data_defined_option_element.appendChild(data_defined_option_name_element)\n\n data_defined_option_collection_element = xml_document.createElement(\"Option\")\n data_defined_option_collection_element.setAttribute(\"value\", \"collection\")\n data_defined_option_collection_element.setAttribute(\"type\", \"QString\")\n data_defined_option_collection_element.setAttribute(\"name\", \"type\")\n 
data_defined_option_element.appendChild(data_defined_option_collection_element)\n\n if 'subSymbol' in layer:\n SimpleSymbol.create_simple_symbol(xml_document, renderer_layer_element, layer['subSymbol'], \"@0@0\", '1')", "def _AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size,\n include_symbols):\n node[_NODE_LAST_PATH_ELEMENT_KEY] = True\n # Don't bother with buckets when not including symbols.\n if include_symbols:\n node = _GetOrMakeChildNode(node, _NODE_TYPE_BUCKET, symbol_type)\n node[_NODE_SYMBOL_TYPE_KEY] = symbol_type\n\n # 'node' is now the symbol-type bucket. Make the child entry.\n if include_symbols or not symbol_name:\n node_name = symbol_name or '[Anonymous]'\n elif symbol_name.startswith('*'):\n node_name = symbol_name\n else:\n node_name = symbol_type\n node = _GetOrMakeChildNode(node, _NODE_TYPE_SYMBOL, node_name)\n node[_NODE_SYMBOL_SIZE_KEY] = node.get(_NODE_SYMBOL_SIZE_KEY, 0) + symbol_size\n node[_NODE_SYMBOL_TYPE_KEY] = symbol_type", "def updateGraph(self, symbol=None):\n if symbol is None:\n return\n\n # Get all stock data back for the given symbol\n self.stock_data = self.db.queryAllData(table_name=symbol)\n\n # Create a list of prices and a list of dates\n self.prices = [x[1].strip('$') for x in self.stock_data]\n self.dates = [x[0] for x in self.stock_data]\n date_string = [x.strftime(\"%m/%d/%Y\") for x in self.dates]\n self.x = [datetime.datetime.strptime(d, '%m/%d/%Y').date()\n for d in date_string]\n\n # Create an instance of QtMpl\n self.mpl = self.central.mpl\n self.mpl.addLine(x=self.x, y=self.prices, title=symbol)", "def symbol_id(self, value: str):\n self._symbol = value", "def _set_symbol(self, symbol, blank=False):\n self._symbols.add(symbol)\n\n try:\n assert self._blank_symbol == None or not blank\n if blank:\n self._blank_symbol = symbol\n except:\n raise Exception(\n f\"Machine got blank symbol '{symbol}' which is already set to '{self._blank_symbol}'\"\n )", "def export_symbol(self, addr, name, stype=\"\"):\n self.start_element(SYMBOL)\n self.write_address_attribute(ADDRESS, addr)\n self.write_attribute(NAME, name)\n self.write_attribute(TYPE, stype)\n mangled = idc.get_name(addr, idc.GN_STRICT)\n if name != None and mangled != name:\n self.write_attribute(\"MANGLED\", mangled)\n self.close_tag()", "def place_symbol(self, coords, symbol):\n x, y = coords\n self.array[y - 1][x - 1] = symbol", "def ast_update_id(symbol, name, id):\n if not isinstance(symbol, Symbol):\n return\n new_name = \"%s_%s\" % (name, str(id))\n if name == symbol.symbol:\n symbol.symbol = new_name\n new_rank = [new_name if name == r else r for r in symbol.rank]\n symbol.rank = tuple(new_rank)", "def put_symbol(self, symbol, row, column):\n\n self.board[row][column] = symbol" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Grep on the basepath to find all files that contain an EDM symbol widget. control
def build_filelist(basepath):
    log.info("Building list of files containing EDM symbols in %s", basepath)
    symbol_files = []
    for dir_path, _, filenames in os.walk(basepath):
        for filename in filenames:
            filepath = os.path.join(dir_path, filename)
            if filename.endswith(".opi") and utils.grep(filepath, "EDM Symbol"):
                symbol_files.append(filepath)
    return symbol_files
[ "def GetSupplementalFiles():\n # Can't use the one in gyp_chromium since the directory location of the root\n # is different.\n t = glob.glob(os.path.join(checkout_root, 'supplement.gypi'))\n print t\n return t", "def find_dcds(src):\n\n dcd_paths = []\n\n for root, dirs, files in os.walk(src):\n for filename in files:\n if filename.endswith(\".dcd\"):\n dcd_paths.append(os.path.join(root, filename))\n\n return dcd_paths", "def __searchFiles(self):\n self.ui.showFindFilesDialog(self.textForFind())", "def setup_findinfiles(qtbot):\n widget = FindInFilesWidget(None)\n qtbot.addWidget(widget)\n return widget", "def find(pattern):\n files = config.index.files(path_glob=\"*%s*\" % pattern)\n print_files(files)", "def filesearch(word=\"\"):\n logger.info('Starting filesearch')\n file = []\n for f in glob.glob(\"*\"):\n if word[0] == \".\":\n if f.endswith(word):\n file.append(f)\n\n elif word in f:\n file.append(f)\n #return file\n logger.debug(file)\n return file", "def find(self):\n print(\"Looking for pacnew and pacsave files…\")\n paths = ('/bin', '/etc', '/opt', '/usr')\n for dir_path, _, files in chain.from_iterable(os.walk(path) for path in paths):\n for f in files:\n pacnew = os.path.join(dir_path, f)\n if self.re_pacfiles.search(pacnew):\n self.pacfiles.append(pacnew)\n self.pacfiles.sort()\n print(\"%d file(s) found.\" % len(self.pacfiles))", "def get_available_patterns() -> list:\n path_folder = os.path.join(config.ROOT_PATH, config.FOLDER_PATTERNS)\n return [f.replace(\".cells\", \"\") for f in os.listdir(path_folder) if os.path.isfile(os.path.join(path_folder, f)) and f.endswith(\".cells\")]", "def search(path, ftype):\r\n # Get all files in the path\r\n files = glob.glob(path + \"/*.\" + ftype)\r\n click.echo(click.style(\"Found matches:\", fg=\"red\"))\r\n # Print the files\r\n for file in files:\r\n click.echo(click.style(file, bg=\"white\", fg=\"black\"))", "def _getXRCFileLocations():\n for p in sys.path:\n yield p\n yield os.path.normpath(os.path.join(sys.prefix,\"share/XRCWidgets/data\"))", "def find_c_files(base):\n for path, dirs, files in os.walk(base):\n for filename in files:\n abspath = os.path.abspath(os.path.join(path, filename))\n ext = os.path.splitext(abspath)[1]\n if ext == \".c\":\n yield abspath", "def findrun(base,dim,boxsize):\n\n if not os.path.isdir(base):\n print base, 'is not a valid directory'\n sys.exit(1)\n\n #retreive all files that match tag and box size\n #note this will include the initialisation boxes, which\n #are independent of redshift\n searchstr='_'+str(dim)+'_'+str(boxsize)+'Mpc'\n filenames=os.listdir(base)\n\n box_files=[]\n for filename in filenames:\n if filename.find(searchstr)>=0:\n box_files.append(os.path.join(base,filename))\n\n return box_files", "def findhtml(pathused,ticker,typ):\n\n allfiles = [] # initializing the return list\n pathused += \"/\"+ticker.upper()+\"/\"+typ # since SEC_edgar has a standard way to store files as its the Ticker and inside \n # sec-edgar-filings ==> AAPL ==> 10-K \n \n for r,d,f in os.walk(pathused): # os.walk will return all the files inside the directory (with absolute path)\n # r is the absolute path\n # f is list of files in the folders\n \n if 'filing-details.html' in f: # if filing.html (SEC-edgar convention to name html files) is in this folder \n pathfol = r.replace(\"\\\\\",\"/\") # we modify it \n allfiles.append(pathfol+'/filing-details.html') # we append the absolute path\n else:\n continue\n return allfiles #and return it", "def find(self, pattern):\n if pattern.endswith(\".py\"):\n 
files = glob.glob(pattern)\n elif pattern.endswith(\"*\"):\n files = glob.glob(pattern + \".py\")\n elif pattern.endswith(os.sep):\n files = glob.glob(pattern + \"*.py\")\n else:\n files = glob.glob(pattern + os.sep + \"*.py\")\n for filename in files:\n mod = self._get_module(filename)\n self._inspect_module(mod)", "def find_files(basedir, regexp):\n regexp = re.compile(regexp)\n return sorted(fn for fn in glob.glob(os.path.join(basedir, '**'),\n recursive=True)\n if regexp.match(fn))", "def find_define_file_uses(self):\n # Executing git grep is substantially faster than using the define_re\n # directly on the contents of the file in Python.\n for define_file in self.get_checked_define_files():\n excluded_files = set([define_file])\n excluded_files.update(define_file.get_included_files(recursive=True))\n all_defines = define_file.get_declared_defines()\n args = ['git', 'grep', '-zwIF']\n for define in all_defines:\n args.extend(['-e', define])\n args.extend(['--', '*.cpp', '*.c', '*.cu', '*.h', '*.cuh'])\n define_re = r'\\b(?:' + '|'.join(all_defines)+ r')\\b'\n output = subprocess.check_output(args, cwd=self._source_root).decode()\n for line in output.splitlines():\n (filename, text) = line.split('\\0')\n fileobj = self._files.get(filename)\n if fileobj is not None and fileobj not in excluded_files:\n defines = re.findall(define_re, text)\n fileobj.add_used_defines(define_file, defines)", "def get_available_symbols():\n symbols = []\n for file in os.listdir(\"data/prices/\"):\n if file.endswith(\".csv\"):\n symbols.append(file[:-4])\n\n return symbols", "def gen_find(filepat, top, dig_recursive = None):\n for path, dirlist, filelist in os.walk(top):\n if not dig_recursive:\n dirlist = []\n for fname in fnmatch.filter(filelist, filepat):\n yield os.path.join(path, fname)", "def find_datafiles(self):\n matches = (re.match(datafile_pattern, s) for s in os.listdir(self.datadir))\n self.datafiles = [m.string for m in matches if m is not None]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process one symbol file and convert to PNG.
def process_symbol(filename, mod, mod_cfg, mirror_root, prod_root):
    working_path = os.path.join(mirror_root, prod_root[1:])
    log.debug("Finding version from %s", working_path)
    mod_version = utils.get_module_version(working_path, mod_cfg.area, mod, mod_cfg.version)
    log.info("Found version %s", mod_version)
    coords = coordinates.create(prod_root, mod_cfg.area, mod, mod_version)
    mirror_path = os.path.join(mirror_root, coordinates.as_path(coords)[1:])
    full_path = os.path.join(mirror_path, mod_cfg.edl_dir, filename[:-3] + 'edl')
    destination = os.path.dirname(os.path.join(mirror_path, mod_cfg.opi_dir, filename))
    log.info('Destination directory is {}'.format(destination))
    if os.path.exists(destination):
        for f in os.listdir(destination):
            n = os.path.split(filename)[1]
            n = '.'.join(n.split('.')[:-1])
            if f.startswith(n) and f.endswith('png'):
                log.info('Symbol png already exists: %s', f)
                return f
    else:
        log.warn('Failed to process symbol: %s does not exist', destination)
        return
    if os.path.exists(full_path):
        return files.convert_symbol(full_path, [destination])
    else:
        log.warn('Symbol %s does not exist', full_path)
[ "def __export_jpg(self, filename):\n name = os.path.splitext(os.path.basename(filename))[0]\n count = len(self.__colors)\n chars = [chr(x) for x in range(32, 127) if chr(x) != '\"']\n if count > len(chars):\n chars = []\n for x in range(32, 127):\n if chr(x) == '\"':\n continue\n for y in range(32, 127):\n if chr(y) == '\"':\n continue\n chars.append(chr(x) + chr(y))\n chars.reverse()\n if count > len(chars):\n raise ExportError(\"cannot export XPM: too many colors\")\n fh = None\n try:\n fh = open(filename, \"w\", encoding=\"ascii\")\n fh.write(\"/* XPM */\\n\")\n fh.write(\"static char *{0}[] = {{\\n\".format(name))\n fh.write(\"/* columns rows colors chars-per-pixel */\\n\")\n fh.write('\"{0.width} {0.height} {1} {2}\",\\n'.format(\n self, count, len(chars[0])))\n char_for_colour = {}\n for color in self.__colors:\n char = chars.pop()\n fh.write('\"{char} c {color}\",\\n'.format(**locals()))\n char_for_colour[color] = char\n fh.write(\"/* pixels */\\n\")\n for y in range(self.height):\n row = []\n for x in range(self.width):\n color = self.__data.get((x, y), self.__background)\n row.append(char_for_colour[color])\n fh.write('\"{0}\",\\n'.format(\"\".join(row)))\n fh.write(\"};\\n\")\n except EnvironmentError as err:\n raise ExportError(str(err))\n finally:\n if fh is not None:\n fh.close()", "def generate_image(self):\n characters = [] # character list\n for i in range(0x4E00, 0x9FA5 + 1):\n characters.append(chr(i))\n write_directory = self.output_dir\n\n print(\"Creating images for\" + self.style_name + self.add_name)\n print(\"Images saved to\" + write_directory)\n for char in characters:\n unicode = self.take_unicode(char)[2:]\n try:\n im = self.get_font_image(char)\n if self.is_empty(im):\n pass\n print(\"Character missing for font: \" + self.style_name + \"/\" + self.font_filename)\n print(\"Unicode: \", unicode)\n else:\n im.save(write_directory+'/{}.png'.format(unicode))\n except Exception as error:\n pass\n print(\"Exception: \", error)\n print(\"For unicode: \", unicode)", "def decode():\r\n # Open the file with binary instructions\r\n with open(file_name) as file:\r\n lines = file.readlines()\r\n with open(PATH + file_name, \"w\") as file_write:\r\n for line in lines:\r\n file_write.write(line + \"\\n\")\r\n\r\n # Read the instructions\r\n instructions, instruction_names = [], []\r\n parse_instr_bin_list(lines, instructions, instruction_names)\r\n\r\n # Print formatted binary instructions and their names\r\n instr_print(instructions, instruction_names)\r\n\r\n # Write to each of MPS-Files parsed hex-instructions\r\n write_mps(instructions)\r\n\r\n # Write to Mapping-PROM linked addresses\r\n write_mapping_prom(instruction_names)", "def writeImageAnimation(\n self, fileName: str, format: SymbolicConstant, canvasObjects: tuple = ()\n ):\n pass", "def drawToFile(bc, filename):\n \n fd = open(filename, 'w')\n Barcode_ps_print(bc, fd)\n fd.close()", "def convert_with_cairosvg(svg_filename, png_filename):\n '''if os.path.exists(png_filename):\n \treturn'''\n svg2png(open(svg_filename, 'rb').read(), write_to=open(png_filename, 'wb'))", "def to_image(self,fig,file_name,file_type='png'):\n self.image.save(fig,file_name,file_type)", "def main():\n options, args = PARSER.parse_args()\n\n if options.one_barcode:\n image = generator.code128_image(options.one_barcode, height=100, thickness=3, quiet_zone=True, label=True)\n image.save(options.one_barcode + \".gif\", format=\"GIF\")\n\n elif options.barcodes:\n # Multiple barcodes, generate PDF with them\n options.barcodes = \"\"\n 
sample_codes = \"123124123345|23|pointy grishko;12312412334545|32|blayer M;123124123345kk|423|florette;1234321,623,kaktasowo;ghash|1|ajfusowo;xcvabcdefgkihj|423|calkiem dlugi napisek;\"\n for i in range(0, 13):\n options.barcodes += sample_codes\n options.barcodes = options.barcodes[:-1]\n\n def barcode_generator():\n for code_data in options.barcodes.split(\";\"):\n code = code_data.split(\",\")\n # Generate barcode data in img, price, label format\n yield (generator.code128_image(code[0], height=700, thickness=13, quiet_zone=False), code[0], code[1], code[2])\n\n pdf_images(barcode_generator(), \"out.pdf\")", "def to_gif(self, filename):\n filename_gif = ''.join(filename.split('.')[:-1]) + '.gif'\n command = \"/usr/bin/ffmpeg -i {} -y \" \\\n \"-filter_complex \" \\\n \"'fps=10,scale=320:-1:flags=lanczos,split [o1] [o2];[o1] \" \\\n \"palettegen [p]; [o2] fifo [o3];[o3] [p] paletteuse' {}\".format(filename, filename_gif)\n try:\n subprocess.run(command, shell=True, check=True, stdout=subprocess.DEVNULL,\n stderr=open('ffmpeg_error.log', 'a'))\n except Exception as e:\n return {'status': 'error', 'error': e}\n else:\n return {'status': 'success', 'path': filename_gif}", "def _add_png(self, pngfile):\n with open(pngfile, 'rb') as png:\n if png.read(8) != self.magic:\n raise ValueError(\"{} is not a PNG file\".format(pngfile))\n while True:\n chead = png.read(8)\n if len(chead) == 0:\n break\n clen, ctype = struct.unpack(\">L4s\", chead)\n cdata = png.read(clen)\n ccrc = png.read(4)\n utype = ctype.decode(\"ascii\")\n self._current_chunk = (chead[:4], ctype, cdata, ccrc)\n if ctype in self.mustmatch:\n ref = self._matchref.get(ctype)\n if ref is None:\n self._matchref[ctype] = cdata\n self._copy()\n else:\n if cdata != ref:\n raise ValueError(\"Chunk {} mismatch\".format(utype))\n met = (\"_first_\" if self._first else \"_next_\") + utype\n try:\n met = getattr(self, met)\n except AttributeError:\n pass\n else:\n met(cdata)\n self._first = False", "def jig2Main(symbolPath='symboltable', pagefiles=glob.glob('page-*')):\n print(\"** symbolPath=%s\" % symbolPath, file=sys.stderr)\n print(\"** pagefiles= %d: %s\" % (len(pagefiles), pagefiles), file=sys.stderr)\n\n doc = Doc()\n pages = Obj({'Type': '/Pages'})\n doc.add_object(pages)\n catalog = Obj({'Type': '/Catalog', 'Pages': ref(pages.id)})\n doc.add_catalog(catalog)\n symd = doc.add_object(Obj({}, readFile(symbolPath)))\n\n page_objs = []\n pagefiles.sort()\n for i, pageFile in enumerate(pagefiles):\n bgdFile = pageFile + '.png'\n jpgFile = pageFile + '.jpg'\n print(\"** page %d: %s\" % (i, pageFile), file=sys.stderr)\n # assert os.path.exists(bgdFile), bgdFile\n\n if os.path.exists(bgdFile):\n bgd = cv2.imread(bgdFile)\n assert bgd is not None, bgdFile\n cv2.imwrite(jpgFile, bgd, [cv2.IMWRITE_JPEG_QUALITY, 25])\n bgdContents = readFile(jpgFile)\n h, w = bgd.shape[:2]\n print('** bgd (width, height)', [w, h], file=sys.stderr)\n else:\n bgdContents = None\n\n fgdContents = readFile(pageFile)\n\n # Big endian. 
Network byte order\n width, height, xres, yres = struct.unpack('>IIII', fgdContents[11:27])\n\n print('** fgd (width, height, xres, yres)', [width, height, xres, yres], file=sys.stderr)\n\n widthPts = float(width * 72) / xres\n heightPts = float(height * 72) / yres\n\n if bgdContents is not None:\n bgdXobj = Obj({'Type': '/XObject', 'Subtype': '/Image',\n 'Width': str(w),\n 'Height': str(h),\n 'ColorSpace': '/DeviceRGB',\n 'BitsPerComponent': '8',\n 'Filter': '/DCTDecode'},\n bgdContents)\n bgdDo = b'/Im%d Do' % bgdXobj.id\n bgdRef = b'/Im%d %s' % (bgdXobj.id, ref(bgdXobj.id))\n else:\n bgdXobj = None\n bgdDo = b''\n bgdRef = b''\n\n fgdXobj = Obj({'Type': '/XObject', 'Subtype': '/Image',\n 'Width': str(width),\n 'Height': str(height),\n 'ColorSpace': '/DeviceGray',\n 'ImageMask': 'true',\n 'BlackIs1': 'false',\n 'BitsPerComponent': '1',\n 'Filter': '/JBIG2Decode',\n 'DecodeParms': b'<< /JBIG2Globals %s >>' % symd.ref()},\n fgdContents)\n fgdDo = b'/Im%d Do' % fgdXobj.id\n fgdRef = b'/Im%d %s' % (fgdXobj.id, fgdXobj.ref())\n\n # scale image to widthPts x heightPts points\n scale = b'%f 0 0 %f 0 0 cm' % (widthPts, heightPts)\n\n cmds = Obj({}, b'q %s %s %s Q' % (scale, bgdDo, fgdDo))\n resources = Obj({'XObject': b'<<%s%s>>' % (bgdRef, fgdRef)})\n page = Obj({'Type': '/Page', 'Parent': pages.ref(),\n 'MediaBox': '[0 0 %f %f]' % (widthPts, heightPts),\n 'Contents': cmds.ref(),\n 'Resources': resources.ref()\n })\n doc.add_objects([bgdXobj, fgdXobj, cmds, resources, page])\n page_objs.append(page)\n\n pages.d.d[b'Count'] = b'%d' % len(page_objs)\n pages.d.d[b'Kids'] = b'[%s]' % b' '.join(o.ref() for o in page_objs)\n\n sys.stdout.buffer.write(bytes(doc))", "def svg_file_generator(parsed_files_path):\n #vider le dossier de destination\n for file_path in os.listdir(parsed_files_path):\n os.remove(os.path.join(parsed_files_path, file_path))\n #création de chaque fichier vide.\n dwg1 = svgwrite.Drawing(os.path.join(parsed_files_path, '18mm.svg'), profile='tiny')\n dwg1.add(dwg1.line((0, 0), (10, 0), stroke=svgwrite.rgb(10, 10, 16, '%')))\n dwg2 = svgwrite.Drawing(os.path.join(parsed_files_path, '18mmXL.svg'), profile='tiny')\n dwg3 = svgwrite.Drawing(os.path.join(parsed_files_path, '35mm.svg'), profile='tiny')\n #enregistrement des fichiers\n dwg1.save()\n dwg2.save()\n dwg3.save()\n return \"boloooooooooos\"", "def run_turtle_program(source):\n ast = parser.parse(source)\n\n t = turtle.Turtle()\n for stmt in ast.statement:\n do_statement(stmt, t)\n canvas = turtle.Screen().getcanvas()\n canvas.postscript(file='image.eps')\n img = Image.open('image.eps')\n img.save('image.png', 'png')\n turtle.Screen().bye()\n return 'image.png'", "def serializeSvgSymbol(svgPath):\n\n # Check if the SVG file contains an embedded image\n with codecs.open(svgPath, 'r', 'utf-8') as fin:\n svgContents = fin.read().replace('\\n', '')\n \n rx = re.compile(u'<image[^>]+xlink:href=\"([^\"]+)\"')\n m = rx.search(svgContents)\n\n if (m is not None):\n # We have an image, check if its a data URI or a general one\n uri = m.group(1)\n imageType = 'PIXMAP'\n symbolUUID = makeSymbolUUID('svgraster')\n\n if uri[:10] == u'data:image':\n # We have a data URI, save the image into an external file.\n # Please note that we only consider base64-encoded images here.\n #\n dataURIRx = re.compile('data:image/(\\w+);base64,(.+)')\n dm = dataURIRx.match(uri)\n\n if (dm is not None):\n imageExt = dm.group(1)\n try:\n imageData = bytearray(binascii.a2b_base64(dm.group(2)))\n except:\n raise ValueError('Cannot decode base64 URI in embedded 
image while parsing SVG.') \n \n imageName = '%s.%s' % (symbolUUID, imageExt)\n imageDir = os.path.join(os.path.dirname(svgPath), SVG_IMAGE_DIR)\n \n if not os.path.exists(imageDir):\n os.makedirs(imageDir)\n\n imagePath = os.path.join(imageDir, imageName).encode('utf-8')\n\n with open(imagePath, 'wb') as imageOut:\n imageOut.write(imageData)\n\n else:\n raise ValueError('Invalid data URI encountered while parsing SVG.')\n \n else:\n # We have a non-data URI.\n # We only want to consider relative URIs here so perform some naive sanity checks on it\n\n if uri.startswith('file://'):\n uri = uri[7:]\n if (uri.find('..') == -1) and (not uri.startswith('/')):\n imagePath = os.path.join(os.path.dirname(svgPath), uri)\n else:\n raise ValueError('Invalid URI encountered while parsing SVG.')\n else:\n raise ValueError('Invalid URI encountered while parsing SVG.')\n else:\n # We do not have an embedded image thus the SVG is all vector and can probably be \n # rendered without a hitch\n\n imageType = 'SVG' \n imagePath = svgPath\n\n symbolSetData = \"\"\"\n SYMBOLSET\n SYMBOL\n NAME \"%s\"\n TYPE %s\n IMAGE \"%s\"\n ANCHORPOINT 0.5 0.5\n END\n END\n \"\"\"\n\n # Create a temporary file and open it\n (tempHandle, tempName) = mkstemp()\n \n # Write symbol set data\n os.write(tempHandle, symbolSetData % (makeSymbolUUID('svg'), imageType, imagePath))\n os.close(tempHandle)\n\n # Load and parse the symbol set\n msSymbolSet = mapscript.symbolSetObj(tempName)\n\n # Remove the temporary file\n # os.unlink(tempName)\n\n # Fetch and return our SVG symbol\n msSymbol = msSymbolSet.getSymbol(1)\n msSymbol.inmapfile = True\n\n return msSymbol", "def main(filename):\n # Generate frames.\n images = []\n for framenum in range(TOTAL_DOTS):\n image = draw_frame(framenum)\n images.append(image)\n\n # Write gif.\n images[0].save(filename, save_all=True, append_images=images[1:],\n duration=SECONDS / TOTAL_DOTS * 1000,\n loop=0,\n transparency=1,\n disposal=2)", "def main(args):\n parser = argparse.ArgumentParser(\"Converts lots of blend files\")\n parser.add_argument('--resolution', help=\"Resolution Multiplier\", type=float)\n config = parser.parse_args(args)\n\n\n prefix = bpy.data.filepath.split('.')[0]\n outimage = prefix + '.png'\n export_png(outimage, config.resolution)", "def main():\n argvs = sys.argv\n argc = len(argvs)\n if argc == 1:\n print('usage: convert2png.py <path/to/*.ppm> ...')\n sys.exit(1)\n\n os.makedirs('result/convert2png', exist_ok=True)\n\n for i in range(1, argc):\n img = cv2.imread(argvs[i])\n\n # root, ext = os.path.splitext(argvs[i])\n # cv2.imwrite(root + '.png', img)\n\n root, ext = os.path.splitext(argvs[i])\n strImgName = root.split('/')[-1]\n cv2.imwrite('result/convert2png/' + strImgName + '.png', img)", "def export(path):\n from anytree.exporter import UniqueDotExporter\n UniqueDotExporter(root).to_picture(path)\n print(\"Se exportó una imagen png\")", "def fix_png_file(filename, folder):\n subprocess.call(\n f'pngfix --quiet --strip=color --prefix=fixed_ \"{filename}\"',\n cwd=f'{folder}',\n shell=True)\n subprocess.call(\n f'mv \"fixed_{filename}\" \"{filename}\"', cwd=f'{folder}', shell=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
calculate_angles(chunk) calculates elevation and azimuth given a JSON-formatted chunk from ODAS
def calculate_angles(self,chunk):
    import math
    import collections
    Angles = collections.namedtuple("Angles", "ev az")
    x = float(chunk['x'])
    y = float(chunk['y'])
    z = float(chunk['z'])
    ev = round(90 - math.acos(z/math.sqrt(x*x+y*y+z*z))*180/math.pi)
    az = round(math.atan2(y,x)*180/math.pi)
    return(Angles(ev, az))
[ "def _azimuth(section, soma):\n vector = morphmath.vector(section[0], soma.center)\n return np.arctan2(vector[COLS.Z], vector[COLS.X])", "def extract_angles(self):\n atom_ids = self.contents['ID']\n angle_list = []\n for key, value in self.angles.items():\n a = value[0]\n b = value[1]\n c = value[2]\n\n lst = [a, b, c]\n\n A_ = np.asarray(atom_ids).reshape(-1, 3)\n\n sorted = np.argsort(lst)\n A_sorted = A_[:, sorted]\n\n idd = np.ones(len(A_sorted)) * key\n iff = np.arange(1, len(A_sorted) + 1)\n\n concate = np.concatenate((iff[:,np.newaxis], idd[:,np.newaxis], A_sorted), axis=-1)\n df = pd.DataFrame(data=concate, columns=['Mol_ID', 'Angle_type', 'Atom_1', 'Atom_2', 'Atom_3'])\n angle_list.append(df)\n self.angle_df = pd.concat(angle_list)\n self.num_angles = len(self.angle_df)", "def extract_angles_from_metadata_file(filename: str) -> Tuple[float, float, float, float]:\n root = _get_xml_root(filename)\n sza = 0.\n saa = 0.\n vza = []\n vaa = []\n for child in root:\n for x in child.findall(\"Tile_Angles\"):\n for y in x.find(\"Mean_Sun_Angle\"):\n if y.tag == \"ZENITH_ANGLE\":\n sza = float(y.text)\n elif y.tag == \"AZIMUTH_ANGLE\":\n saa = float(y.text)\n for s in x.find(\"Mean_Viewing_Incidence_Angle_List\"):\n for r in s:\n if r.tag == \"ZENITH_ANGLE\":\n vza.append(float(r.text))\n elif r.tag == \"AZIMUTH_ANGLE\":\n vaa.append(float(r.text))\n\n return sza, saa, float(np.mean(vza)), float(np.mean(vaa))", "def get_mean_viewing_angles(self) -> (float, float, float):\n # Get MTD XML file\n root, _ = self.read_mtd()\n\n # Open zenith and azimuth angle\n try:\n az = float(root.findtext(\".//SatelliteAzimuth\"))\n off_nadir = float(root.findtext(\".//ViewAngle\"))\n incidence_angle = float(root.findtext(\".//incidenceAngle\"))\n except TypeError:\n raise InvalidProductError(\n \"SatelliteAzimuth, ViewAngle or incidenceAngle not found in metadata!\"\n )\n\n return az, off_nadir, incidence_angle", "def director2angles(data, out):\n c = data.shape[0]\n if c != 3:\n raise TypeError(\"invalid shape\")\n\n x = data[0]\n y = data[1]\n z = data[2]\n phi = np.arctan2(y,x)\n theta = np.arctan2(np.sqrt(x**2+y**2),z)\n #s = np.sqrt(x**2+y**2+z**2)\n out[0] = 0. 
#yaw = 0.\n out[1] = theta\n out[2] = phi", "def process_raw_odometry():\n x_p = 0\n y_p = 0\n z_p = 0 \n out_file = open('new.dat','w')\n data = read_raw(\"raw_Odometry.dat\") \n for (step, reading) in enumerate(data):\n odometry = [reading['odometry']['x'], reading['odometry']['y'], reading['odometry']['z']]\n\n print odometry\n x = odometry[0]\n y = odometry[1] \n z= odometry[2]\n #drawing.draw_state_for_me(step, x, y, z)\n z = tools.normalize_angle(z)\n rt_1 = math.atan2(y-y_p , x-x_p) - z_p\n t = math.sqrt((x - x_p)**2+(y - y_p)**2) \n rt_2 = z - z_p - rt_1\n x_p = x\n y_p = y\n z_p = z\n out_file.write('%s %.7f %.7f %.7f\\n' %('ODOMETRY', rt_1, t, rt_2)) \n out_file.close()", "def calculate_angles(self, x, y):\n Oimat = inv(self.Omat)\n Mat = self.pixel_size * inv(self.Dmat) * Oimat\n polar_angles = []\n azimuthal_angles = []\n for i in range(len(x)):\n peak = Oimat * (vec(x[i], y[i]) - self.Cvec)\n v = norm(Mat * peak)\n polar_angle = np.arctan(v / self.distance)\n polar_angles.append(polar_angle)\n azimuthal_angles.append(np.arctan2(-peak[1, 0], peak[2, 0]))\n return (np.array(polar_angles) * degrees,\n np.array(azimuthal_angles) * degrees)", "def test_calcAngles_angles_or_axis(self, kargs, expected_len_result, expected_truncated_results):\n kargs['vsk'] = self.cal_SM\n result = pycgmCalc.calcAngles(self.motion_data, **kargs)\n np.testing.assert_equal(len(result), expected_len_result)\n np.testing.assert_almost_equal(result[0:5], expected_truncated_results)", "def test_azimuth_of_vertices(self):\n for vertex in self.vertices:\n vertex_number = self.vertices.index(vertex)\n expected_azimuth = 360.0/(len(self.vertices)-1) * vertex_number\n actual_azimuth = (geodesic.Geodesic.WGS84.Inverse(\n self.latitude, self.longitude, vertex[0], vertex[1]))['azi1']\n if actual_azimuth < 0:\n actual_azimuth = 360.0 + actual_azimuth\n\n assert_almost_equal(expected_azimuth, actual_azimuth, places=5)", "def get_angle(tap_location_samples):\n return [math.atan2(float(log_line.split(\",\")[3]),\n float(log_line.split(\",\")[2]))\n for log_line in get_highest_lines(tap_location_samples)]\n # for log_line in max_magnitude_list:\n # [math.atan2(float(log_line.split(\",\")[3]),\n # float(log_line.split(\",\")[2]))\n # for log_line in ]\n # split_log_line = log_line.split(\",\")\n # x_value = float(split_log_line[2])\n # y_value = float(split_log_line[3])\n # return math.atan2(y_value, x_value)", "def angle_population_vector_zar(angles):\n X = np.mean(np.cos(angles))\n Y = np.mean(np.sin(angles))\n r = np.sqrt(X**2 + Y**2)\n return r", "def find_angles(self):\r\n for atom1 in self.atoms:\r\n atoms2 = connected_atom_bond(atom1.connectivity)\r\n i = 1\r\n for bond1 in atoms2:\r\n for bond2 in atoms2[i:]:\r\n atoms = [atom1, bond1[1],bond2[1]]\r\n length = np.linalg.norm(bond1[1].position - bond2[1].position)\r\n cos = (bond1[0]^2 + bond2[0]^2 -length^2)/(2*bond1[0]*bond2[0])\r\n angel = math.degrees(math.acos(cos))\r\n atom1.angles.append(Angel(angel,atoms))\r\n \r\n i += 1\r\n all_angels = []\r\n for atom1 in self.atoms:\r\n for angel in atom1.angels:\r\n all_angels.append(angel)\r\n self.angels = list(set(all_angels))\r\n return None", "def angle_and_azimuth(self, satellite_ecef):\n from numpy import arcsin, arctan2, dot\n from numpy.linalg import norm\n\n r_ss = satellite_ecef - self.position\n r_ss_norm = r_ss / norm(r_ss)\n\n r_sse = dot(r_ss_norm, self._e)\n r_ssn = dot(r_ss_norm, self._n)\n r_ssu = dot(r_ss_norm, self._u)\n\n angle = arcsin(r_ssu)\n azimuth = arctan2(r_sse, r_ssn)\n return angle, 
azimuth", "def angle(z):", "def test_azimuth_angle(self):\n p1 = self.frame[\"SPEFitSingle_HV_ipdf\"]\n p2 = self.frame[\"SPEFitSingle_HV_rpdf\"]\n self.assertLess(abs(p1.dir.azimuth - p2.dir.azimuth), 1e-6)", "def get_mean_sun_angles(self) -> (float, float):\n # Get MTD XML file\n root, _ = self.read_mtd()\n\n # Open zenith and azimuth angle\n zenith_angle = float(root.findtext(\".//SolarZenith\"))\n azimuth_angle = float(root.findtext(\".//SolarAzimuth\"))\n\n return azimuth_angle, zenith_angle", "def analyze_sequence(ellipsoids, validate=True):\n\n if not all(isinstance(ell, Ellipsoid) for ell in ellipsoids):\n raise ValueError(\"The entries of `ellipsoids` must be of the `Ellipsoid` type\")\n\n prev_ell = None\n stats = defaultdict(list)\n for ell in ellipsoids:\n if prev_ell is None:\n prev_ell = ell\n stats['rotation'].append(None)\n continue\n\n R = geometry.find_relative_axes_rotation(prev_ell.axes, ell.axes)\n angles = geometry.rotation_matrix_to_angles(R)\n\n # Convert angles to a proper range\n angles = [a if abs(a) < np.pi / 2.0 else a - np.sign(a) * np.pi for a in angles]\n\n # Append\n stats['rotation'].append(angles)\n\n # Update\n prev_ell = ell\n\n return stats", "def calculate_angles():\n time = request.args.get('time')\n\n result = Helpers.validate_and_parse_input(time)\n if result:\n hour, minute = result\n\n hour_angle = 0.5 * (hour * 60 + minute)\n minute_angle = 6 * minute\n\n angle = abs(hour_angle - minute_angle)\n angle = min(360 - angle, angle)\n DatastoreClient(kind='clock_angle_logs').log_to_datastore(time, angle)\n\n return Helpers.success(angle)\n else:\n DatastoreClient(kind='clock_angle_logs').log_to_datastore(time, 'bad_request')\n return Helpers.bad_request(r\"query parameter time should follow regex ^\\d{1,2}:\\d{1,2}$ and value should be \"\n r\"between 00:00 and 23:59\")", "def calculate_average_angles(tube_steps,angular_file,pixel_step,tube_sep,extra_dummy=[]):\n no_of_overlaps = int(round((len(tube_steps)+len(extra_dummy))/pixel_step))-1\n correction_array = Array(read_horizontal_corrections(angular_file))\n no_of_tubes = len(correction_array)\n counter = array.zeros(no_of_tubes+no_of_overlaps,int)\n final_values = array.zeros(no_of_tubes+no_of_overlaps,float)\n for stepno in range(no_of_overlaps+1):\n counter[stepno:stepno+no_of_tubes]+=array.ones(no_of_tubes,int)\n final_values[stepno:stepno+no_of_tubes]+=correction_array\n ave_angles = final_values/counter\n print 'Check: average angles ' + `ave_angles`\n print 'Check: counter' + `counter`\n print 'Check: no of overlaps, tubes: %d %d ' % (no_of_overlaps,no_of_tubes)\n # Now apply these average corrections to the actual angles\n real_step = pixel_step\n if len(tube_steps)<pixel_step:\n real_step = len(tube_steps) #for when we have no overlap and missing steps\n final_values = array.zeros((no_of_tubes+no_of_overlaps)*real_step)\n print 'Final values has len %d' % len(final_values)\n for stepno in range(no_of_tubes+no_of_overlaps):\n final_values[stepno*real_step:(stepno+1)*real_step] = tube_steps + tube_sep*stepno + ave_angles[stepno]\n return final_values" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an "absolute" value for a timedelta, always representing a time distance.
def abs_timedelta(delta):
    if delta.days < 0:
        now = datetime.datetime.now()
        return now - (now + delta)
    return delta
[ "def abs_timedelta(delta):\r\n if delta.days < 0:\r\n now = _now()\r\n return now - (now + delta)\r\n return delta", "def __abs__(self):\n return Duration(self._frame, abs(self._seconds))", "def delta(self, abs_value=False):\n return self.current - self.last if not abs_value else np.abs(self.current - self.last)", "def dst(self, _):\n return timedelta(0)", "def get_time_delta(delta):\n return datetime.timedelta(seconds=delta)", "def day_absolute_to_relative(absolute):\n today = datetime.datetime.today()\n date = datetime.datetime.strptime(absolute, \"%Y-%m-%d\")\n return abs((today - date).days)", "def absolute_value(self) -> float:\n return pulumi.get(self, \"absolute_value\")", "def abs(self, a):\n return abs(a)", "def abs_food_dist(self):\n d = self.rel_food_dist()\n return abs(d[0]) + abs(d[1])", "def apparent_to_absolute_magnitude(apparent_magnitude, distance):\n distance_in_parsecs = distance / (648000. * astronomical_unit / np.pi)\n absolute_magnitude = apparent_magnitude - 5*np.log10(distance_in_parsecs) + 5\n return absolute_magnitude", "def _dst_offset_diff(dattim: dt.datetime) -> dt.timedelta:\n delta = dt.timedelta(hours=24)\n return (dattim + delta).utcoffset() - (dattim - delta).utcoffset() # type: ignore[operator]", "def return_timedelta_full_hours(timedelta_time):\n\n return timedelta_time.seconds // 3600", "def abs_(a):", "def constrain(self, duration: timedelta) -> timedelta:\n if self._minimum is not None:\n duration = max(duration, self._minimum)\n if self._maximum is not None:\n duration = min(duration, self._maximum)\n return duration", "def as_delta(time: dt.time):\n return dt.datetime.combine(dt.datetime.min, time) - dt.datetime.min", "def __abs__(self):\n return Quantity(abs(self._value), self.unit)", "def create_timedelta():\n # timedelta(days, seconds, microseconds, milliseconds, minutes, hours, weeks)\n td = datetime.timedelta(microseconds=-1)\n # Why is this (-1, 86399, 999999)?\n # Because -1 days + (86,400 - 1) seconds = -1 second, and -1,000,000 microseconds + 999,999 microseconds = -1 microsecond\n print(td.days, td.seconds, td.microseconds) # (-1, 86399, 999999)", "def timedelta(self):\n return self._timedelta", "def change_value_absolute(self) -> float:\n return pulumi.get(self, \"change_value_absolute\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Turn a value into a date and a timedelta which represents how long ago it was. If that's not possible, return (None, value).
def date_and_delta(value):
    now = datetime.datetime.now()
    if isinstance(value, datetime.datetime):
        date = value
        delta = now - value
    elif isinstance(value, datetime.timedelta):
        date = now - value
        delta = value
    else:
        try:
            value = int(value)
            delta = datetime.timedelta(seconds=value)
            date = now - delta
        except (ValueError, TypeError):
            return None, value
    return date, abs_timedelta(delta)
[ "def date_and_delta(value):\r\n now = _now()\r\n if isinstance(value, datetime):\r\n date = value\r\n delta = now - value\r\n elif isinstance(value, timedelta):\r\n date = now - value\r\n delta = value\r\n else:\r\n try:\r\n value = int(value)\r\n delta = timedelta(seconds=value)\r\n date = now - delta\r\n except (ValueError, TypeError):\r\n return (None, value)\r\n return date, abs_timedelta(delta)", "def customnaturaltime(value):\n try:\n value = datetime.datetime(value.year, value.month, value.day, value.hour, value.minute, value.second)\n except AttributeError:\n return value\n except ValueError:\n return value\n\n if getattr(value, 'tzinfo', None):\n now = datetime.datetime.now(LocalTimezone(value))\n else:\n now = datetime.datetime.now()\n now = now - datetime.timedelta(0, 0, now.microsecond)\n\n time = ''\n\n if value < now:\n delta = now - value\n #if delta.days != 0:\n # return \"%s ago\" % defaultfilters.timesince(value)\n years = delta.days / 365\n weeks = delta.days / 7\n months = weeks / 4\n if years != 0:\n return '%d years ago' % years if years > 1 else 'a year ago'\n elif months != 0:\n return '%d months ago' % months if months > 1 else 'a month ago'\n elif delta.days != 0:\n return '%s days ago' % delta.days if delta.days > 1 else 'a day ago'\n elif delta.seconds == 0:\n return 'now'\n elif delta.seconds < 60:\n return '%s seconds ago' % delta.seconds if delta.seconds > 1 else 'a second ago'\n elif delta.seconds / 60 < 60:\n count = delta.seconds / 60\n return '%s minutes ago' % count if count > 1 else 'a minute ago'\n else:\n count = delta.seconds / 60 / 60\n return '%s hours ago' % count if count > 1 else 'an hour ago'\n else:\n delta = value - now\n #if delta.days != 0:\n # return '%s from now' % defaultfilters.timeuntil(value)\n years = delta.days / 365\n weeks = delta.days / 7\n months = weeks / 4\n if years != 0:\n return '%d years ago' % years if years > 1 else 'a year from now'\n elif months != 0:\n return '%d months ago' % months if months > 1 else 'a month from now'\n elif delta.days != 0:\n return '%s days ago' % delta.days if delta.days > 1 else 'a day from now'\n elif delta.seconds == 0:\n return 'now'\n elif delta.seconds < 60:\n return '%s seconds from now' % delta.seconds if delta.seconds > 1 else 'a second from now'\n elif delta.seconds / 60 < 60:\n count = delta.seconds / 60\n return '%(count)s minutes from now' % count if count > 1 else 'a minute from now'\n else:\n count = delta.seconds / 60 / 60\n return '%(count)s hours from now' % count if count > 1 else 'a hour from now'", "def naturaltimediff(value):\n \n from datetime import datetime\n \n if isinstance(value, datetime):\n delta = now() - value\n if delta.days > 6:\n return value.strftime(\"%b %d\") # May 15\n if delta.days > 1:\n return value.strftime(\"%A\") # Wednesday\n elif delta.days == 1:\n return 'yesterday' # yesterday\n elif delta.seconds > 3600:\n return str(delta.seconds / 3600 ) + ' hours ago' # 3 hours ago\n elif delta.seconds > MOMENT:\n return str(delta.seconds/60) + ' minutes ago' # 29 minutes ago\n else:\n return 'a moment ago' # a moment ago\n return defaultfilters.date(value)\n else:\n return str(value)", "def getTimeDifferenceValue(td):\n SECOND = 1\n MINUTE = 60 * SECOND\n HOUR = 60 * MINUTE\n DAY = 24 * HOUR\n WEEK = 7 * DAY\n MONTH = 30 * DAY\n \n timenow = datetime.now();\n difference = timenow - td;\n\n delta = difference.days * DAY + difference.seconds \n \n minutes = delta / MINUTE\n hours = delta / HOUR\n days = delta / DAY\n weeks = delta / WEEK\n months = delta / 
MONTH\n \n if delta < 0:\n return \"Please give time after current time\"\n if delta < 10 * SECOND:\n return \"just now\" \n if delta < 1 * MINUTE: \n return str(delta) + \" seconds ago\"\n if delta < 60 * MINUTE: \n return str(minutes) + \" minutes ago\"\n if delta < 24 * HOUR:\n return str(hours) + \" hours ago\"\n if delta < 1 * WEEK:\n return \"one week ago\"\n if delta < 4 * WEEK:\n return str(weeks) + \" weeks ago\"\n if delta < 1 * DAY: \n return \"one day ago\"\n if delta < 30 * DAY: \n return str(days) + \" days ago\"\n if delta < 1 * MONTH: \n return \"one month ago\"\n else:\n return str(months) + \" months ago\"", "def _get_delta(self, now, then):\n if now.__class__ is not then.__class__:\n now = datetime.date(now.year, now.month, now.day)\n then = datetime.date(then.year, then.month, then.day)\n if now < then:\n raise ValueError(\"Cannot determine moderation rules because date field is set to a value in the future\")\n return now - then", "def find_delta(record, prev_rec):\n if prev_rec is None:\n return None, None\n interval = record.timestamp - prev_rec.timestamp\n if interval == 0:\n return None, None\n return interval, 60.0 * (record.value - prev_rec.value) / interval", "def naturaltime(value, future=False, months=True):\r\n now = _now()\r\n date, delta = date_and_delta(value)\r\n if date is None:\r\n return value\r\n # determine tense by value only if datetime/timedelta were passed\r\n if isinstance(value, (datetime, timedelta)):\r\n future = date > now\r\n\r\n ago = _('%s from now') if future else _('%s ago')\r\n delta = naturaldelta(delta)\r\n\r\n if delta == _(\"a moment\"):\r\n return _(\"now\")\r\n\r\n return ago % delta", "def timesince_human(date): # TODO: let user specify format strings\n delta = timezone.now() - date\n\n num_years = delta.days / 365\n if (num_years > 0):\n return ungettext(u\"%d year ago\", u\"%d years ago\", num_years) % (\n num_years,)\n\n num_months = delta.days / 30\n if (num_months > 0):\n return ungettext(u\"%d month ago\", u\"%d months ago\",\n num_months) % num_months\n\n num_weeks = delta.days / 7\n if (num_weeks > 0): # TODO: \"last week\" if num_weeks == 1\n return ungettext(u\"%d week ago\", u\"%d weeks ago\",\n num_weeks) % num_weeks\n\n if (delta.days > 0): # TODO: \"yesterday\" if days == 1\n return ungettext(u\"%d day ago\", u\"%d days ago\",\n delta.days) % delta.days\n\n num_hours = delta.seconds / 3600\n if (num_hours > 0): # TODO: \"an hour ago\" if num_hours == 1\n return ungettext(u\"%d hour ago\", u\"%d hours ago\",\n num_hours) % num_hours\n\n num_minutes = delta.seconds / 60\n if (num_minutes > 0): # TODO: \"a minute ago\" if num_minutes == 1\n return ungettext(u\"%d minute ago\", u\"%d minutes ago\",\n num_minutes) % num_minutes\n\n return ugettext(u\"just now\")", "def coerce_delta(value: t.Optional[TypeDelta] = None) -> t.Optional[datetime.timedelta]:\n if value is None or isinstance(value, datetime.timedelta):\n return value\n value = coerce_seconds(value=value)\n return datetime.timedelta(seconds=value) if isinstance(value, float) else None", "def relativeTime(date):\n diff = datetime.utcnow() - date\n\n if diff.days > 7 or diff.days < 0:\n return date.ctime()\n elif diff.days == 1:\n return '1 day ago'\n elif diff.days > 1:\n return '%d days ago' % diff.days\n elif diff.seconds <= 1:\n return 'just now'\n elif diff.seconds < 60:\n return '%d seconds ago' % diff.seconds\n elif diff.seconds < (60 * 2):\n return '1 minute ago'\n elif diff.seconds < (60 * 60):\n return '%d minutes ago' % (diff.seconds / 60)\n elif 
diff.seconds < (60 * 60 * 2):\n return '1 hour ago'\n else:\n return '%d hours ago' % (diff.seconds / (60 * 60))", "def abs_timedelta(delta):\n if delta.days < 0:\n now = datetime.datetime.now()\n return now - (now + delta)\n return delta", "def abs_timedelta(delta):\r\n if delta.days < 0:\r\n now = _now()\r\n return now - (now + delta)\r\n return delta", "def get_time_delta(delta):\n return datetime.timedelta(seconds=delta)", "def date_diff(time_point_1=None, time_point_2=None):\n if time_point_2 < time_point_1:\n return (time_point_1 - time_point_2, \"-\")\n else:\n return (time_point_2 - time_point_1, \"\")", "def get_delta(self, value: ValueType, ts: int) -> Optional[ValueType]:\n dv = value - self.state.lv\n if dv >= 0:\n self.state.lt = ts\n self.state.lv = value\n return dv\n # Counter wrapped, either due to wrap or due to stepback\n bound = self.get_bound(self.state.lv)\n # Wrap distance\n d_wrap = value + (bound - self.state.lv)\n if -dv < d_wrap:\n # Possible counter stepback, skip value\n return None\n # Counter wrap\n self.state.lt = ts\n self.state.lv = value\n return d_wrap", "def humanizeTimeDiff(timestamp = None):\n import datetime\n \n timeDiff = datetime.datetime.now() - timestamp\n days = timeDiff.days\n hours = timeDiff.seconds/3600\n minutes = timeDiff.seconds%3600/60\n seconds = timeDiff.seconds%3600%60\n \n str = \"\"\n tStr = \"\"\n if days > 0:\n if days == 1: tStr = \"day ago\"\n else: tStr = \"days ago\"\n str = str + \"%s %s\" %(days, tStr)\n return str\n elif hours > 0:\n if hours == 1: tStr = \"hour ago\"\n else: tStr = \"hours ago\" \n str = str + \"%s %s\" %(hours, tStr)\n return str\n elif minutes > 0:\n if minutes == 1:tStr = \"min ago\"\n else: tStr = \"mins ago\" \n str = str + \"%s %s\" %(minutes, tStr)\n return str\n elif seconds > 0:\n if seconds == 1:tStr = \"sec ago\"\n else: tStr = \"secs ago\"\n str = str + \"%s %s\" %(seconds, tStr)\n return str\n else:\n return None", "def get_pretty_date(time=False):\n now = datetime.datetime.now()\n if type(time) is int:\n diff = now - datetime.datetime.fromtimestamp(time)\n elif type(time) is float:\n diff = now - datetime.datetime.fromtimestamp(int(time))\n elif isinstance(time,datetime.datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str( second_diff / 60 ) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str( second_diff / 3600 ) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff/7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff/30) + \" months ago\"\n return str(day_diff/365) + \" years ago\"", "def as_delta(time: dt.time):\n return dt.datetime.combine(dt.datetime.min, time) - dt.datetime.min", "def timedelta_filter(date_value, **kwargs):\n\n current_date = parse_datetime(date_value)\n return (current_date - timedelta(**kwargs))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the Hamming distance between equal-length sequences
def __hamming_distance(s1, s2): if len(s1) != len(s2): raise ValueError("Undefined for sequences of unequal length") return sum(el1 != el2 for el1, el2 in zip(s1, s2))
[ "def hamming_distance(seq1, seq2):\n dist = sum([char1 != char2 for char1, char2 in zip(seq1, seq2)])\n return dist", "def modified_hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length\")\n score = 0\n for el1, el2 in zip(s1, s2):\n if el2 != 0:\n \n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def hammingDistance(s1 = \"\", s2 = \"\"):\n # if len(s1) != len(s2):\n # raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(bool(ord(ch1) - ord(ch2)) for ch1, ch2 in zip(s1, s2))", "def hamming_dist(sequence1, sequence2):\n assert len(sequence1) == len(sequence2), 'Unequal sequence length. ' \\\n + '{} compared to {}. '.format(len(sequence1), len(sequence2))\n \n dist = 0\n for sym1, sym2 in zip(sequence1, sequence2):\n if sym1 != sym2:\n dist += 1\n\n # for pos in range(len(sequence1)):\n # if sequence1[pos] != sequence2[pos]:\n # dist += 1\n \n return dist", "def hamming_distance(s1, s2):\n assert(len(s1) == len(s2))\n return np.sum([1 if c1 != c2 else 0 for c1, c2 in zip(s1, s2)])", "def hamming_distance(cs):\n d = 0.0\n end = len(cs) - 1\n for idx in range(end):\n s1 = cs[idx]\n s2 = cs[idx + 1]\n assert len(s1) == len(s2)\n s1_bits = ''.join('{:b}'.format(c).zfill(8) for c in s1)\n s2_bits = ''.join('{:b}'.format(c).zfill(8) for c in s2)\n d += sum(c1 != c2 for c1, c2 in zip(s1_bits, s2_bits))\n return d / end", "def compute_hamming_distance(str1, str2):\n\n mismatches = 0\n len_strs = len(str1)\n for i in range(len_strs):\n if str1[i] != str2[i]:\n mismatches = mismatches + 1\n return mismatches", "def hamming_distance(h1, h2):\n b1 = bitarray.bitarray()\n b1.frombytes(h1)\n b2 = bitarray.bitarray()\n b2.frombytes(h2)\n return bitarray.bitdiff(b1, b2)", "def hamming_distance(hash_1, hash_2):\n return libpHash.ph_hamming_distance(c_ulonglong(hash_1), c_ulonglong(hash_2))", "def __hamming_distance_with_hash(dhash1, dhash2):\n difference = (int(dhash1, 16)) ^ (int(dhash2, 16))\n return bin(difference).count(\"1\")", "def calculated_hamming_distance_kmer(kmer_1, kmer_2):\n return sum(a == b for a, b in zip(kmer_1, kmer_2) if a != \"-\" and b != \"-\")", "def hamming_distance(self, x, y):\n # Assign larger number to x\n if y > x:\n x,y = y,x\n # Convert integers to binary\n x = self.get_binary(x)\n y = self.get_binary(y)\n # split x so `compare` piece is same length as y\n x_compare = x[-len(y):]\n x_remainder = x[:-len(y)]\n # Count the differences\n counter = x_remainder.count(\"1\")\n for i in range(0, len(y)):\n if y[i] != x_compare[i]:\n counter += 1\n\n return counter", "def hamming_distance_with_hex_strings(h1, h2):\n\n bin_h1 = get_binary_representation_512_bits_of_hex(h1)\n bin_h2 = get_binary_representation_512_bits_of_hex(h2)\n\n distance = 0\n for x1, x2 in zip(bin_h1, bin_h2):\n if x1 != x2:\n distance += 1\n return distance", "def hamming_distance(string_a: str, string_b: str) -> int:\n if len(string_a) != len(string_b):\n raise ValueError(\n \"Strings are of unequal length can not compute hamming distance. 
Hamming distance is undefined.\"\n )\n return sum(char_1 != char_2 for char_1, char_2 in zip(string_a, string_b))", "def hamming_distance(str1, str2):\n \n # TODO: Write your solution here\n\n # basic variables\n distance = 0\n\n #check length of two strings is equal or no\n if len(str1) != len(str2):\n return None\n \n for char in range(len(str1)):\n if str1[char] != str2[char]:\n distance += 1\n\n return distance", "def hamming_distance(array1, array2):\n if (array1.shape != array2.shape):\n raise ValueError(\"Input arrays must have same shape!\")\n distance = 0\n for i in range(array1.shape[0]):\n if (array1[i] != array2[i]):\n distance += 1\n return distance", "def HammingDistance(p, q):\n if len(p) != len(q):\n return -1\n\n dist = 0\n #zip(AB,CD) gives (('A','C'),('B','D'))\n for first, second in zip(p, q):\n if first != second:\n dist = dist + 1\n\n return dist", "def HammingDist(str1, str2):\n\tHdist = 0\n\tfor i, base in enumerate(str1):\n\t\tif base != str2[i]:\n\t\t\tHdist += 1\n\n\treturn Hdist", "def HammingDistance(p, q):\r\n if len(p) != len(q):\r\n return -1\r\n dist = 0\r\n #zip(AB,CD) gives (('A','C'),('B','D'))\r\n for first, second in zip(p, q):\r\n if first != second:\r\n dist = dist + 1\r\n return dist" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return checkpoints for recomputing
def get_checkpoints(self): # recompute checkpoints return self._checkpoints
[ "def checkpoint():", "def get_all_overall_checkpoint(cls):\n return cls.create_all_overall_checkpoint()", "def checkpoint(self):\r\n return self._checkpoint", "def finish_checkpoint(self):\n return self.this_evaluation.checkpoint", "def create_all_overall_checkpoint(cls):\n return DB.read_all_overall_checkpoint()", "def checkpoint(self):\n return self.__checkpoint", "def get_checkpoint_snapshot(self):\n try:\n __method_name = inspect.currentframe().f_code.co_name\n checkpoint = self.state.get()\n if checkpoint:\n checkpoint = json.loads(checkpoint)\n checkpoint = checkpoint.get(\"snapshot\")\n self.applogger.info(\n \"{}(method={}) : {} : Checkpoint list fetched successfully.\".format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name\n )\n )\n else:\n checkpoint = []\n self.state.post(json.dumps({\"snapshot\": checkpoint}))\n self.applogger.info(\n \"{}(method={}) : {} : Checkpoint list not found. Created new checkpoint list.\".format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name\n )\n )\n return checkpoint\n except Exception as ex:\n self.applogger.error(\n '{}(method={}) : {} : Unexpected error while getting checkpoint list: err=\"{}\"'.format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name, str(ex)\n )\n )\n raise Exception(ex)", "def _get_checkpoints_with_results(self):\n if not tf.io.gfile.exists(self._score_file):\n return set()\n with tf.io.gfile.GFile(self._score_file) as f:\n reader = csv.DictReader(f)\n return {r[\"checkpoint_path\"] for r in reader}\n return set()", "def get_checkpoint_data(self) -> Dict[str, Any]:\n # get ckpt file path from config.trainer.params.resume_from_checkpoint\n path = self.config.trainer.params.get(\"resume_from_checkpoint\", None)\n if path is not None:\n is_zoo = self.is_zoo_path(path)\n ckpt_filepath = path\n if is_zoo:\n folder = download_pretrained_model(path)\n ckpt_filepath = get_ckpt_path_from_folder(folder)\n ckpt = get_ckpt_from_path(ckpt_filepath)\n config = get_config_from_folder_or_ckpt(folder, ckpt)\n else:\n ckpt = get_ckpt_from_path(ckpt_filepath)\n config = None\n\n return {\n \"ckpt\": ckpt,\n \"checkpoint_path\": ckpt_filepath,\n \"is_zoo\": is_zoo,\n \"config\": config,\n }\n\n is_zoo = False\n config = None\n ckpt = None\n # get ckpt file path from config.checkpoint\n ckpt_config = self.config.checkpoint\n suffix = \"best.ckpt\" if ckpt_config.resume_best else \"current.ckpt\"\n path = os.path.join(get_mmf_env(key=\"save_dir\"), suffix)\n ckpt_filepath = None\n resume_from_specified_path = (\n ckpt_config.resume_file is not None or ckpt_config.resume_zoo is not None\n ) and (not ckpt_config.resume or not PathManager.exists(path))\n if resume_from_specified_path:\n if ckpt_config.resume_file and PathManager.exists(ckpt_config.resume_file):\n ckpt_filepath = ckpt_config.resume_file\n elif ckpt_config.resume_zoo is not None:\n is_zoo = True\n folder = download_pretrained_model(ckpt_config.resume_zoo)\n ckpt_filepath = get_ckpt_path_from_folder(folder)\n ckpt = get_ckpt_from_path(ckpt_filepath)\n config = get_config_from_folder_or_ckpt(folder, ckpt)\n else:\n raise RuntimeError(f\"{ckpt_config.resume_file} doesn't exist\")\n\n if ckpt_config.resume and PathManager.exists(path):\n ckpt_filepath = path\n\n if ckpt_filepath is not None:\n ckpt = get_ckpt_from_path(ckpt_filepath)\n\n return {\n \"ckpt\": ckpt,\n \"checkpoint_path\": ckpt_filepath,\n \"is_zoo\": is_zoo,\n \"config\": config,\n }", "def get_checkpoint_extra_state(self):\n return {'cur_epoch': self.cur_epoch}", "def 
create_checkpoint_list(cls):\n checkpoint_data = DB.read_checkpoint_record_list()\n return [Checkpoint(*checkpoint) for checkpoint in checkpoint_data]", "def find_checkpoint(self, checkpoints, current_time):\n checkpoint_line_len = N.zeros(len(checkpoints), dtype=float)\n checkpoint_distances = N.zeros(len(checkpoints), dtype=float)\n checkpoint_chosen = False\n\n for i in range(len(checkpoints)):\n checkpoint_line_len[i] = checkpoints[i].get_line_length()\n checkpoint_distances[i] = self._calc_distance(checkpoints[i].location)\n \n min_length = N.min(checkpoint_line_len)\n min_dist = N.min(checkpoint_distances)\n # If the min_length of all lines is > 0, divide all lengths by the min_length\n if (min_length > 0):\n checkpoint_line_len = checkpoint_line_len / min_length\n # Same idea for the distances\n if (min_dist > 0):\n checkpoint_ratios = checkpoint_distances / min_dist\n else:\n checkpoint_ratios = checkpoint_distances\n \n # Add these values together, and choose the smallest value\n checkpoint_rankings = checkpoint_ratios + checkpoint_line_len\n min_index = N.argmin(checkpoint_rankings)\n # found the target checkpoint, set that as the target_checkpoint\n checkpoint_candidate = checkpoints[min_index]\n if self.checkpoint_target is None or self.checkpoint_target is not checkpoint_candidate:\n if self.checkpoint_target is not None:\n print(\"Attendee:\", self.attendee_id, \"has changed checkpoint target from:\",\\\n self.checkpoint_target.get_location(), \"to checkpoint at:\",\\\n checkpoint_candidate.get_location())\n self.checkpoint_target = checkpoint_candidate\n self._calc_checkpoint_arrival(checkpoint_distances[min_index], current_time)\n self._set_checkpoint_vector(self.checkpoint_target.get_location())\n \n return self.checkpoint_target", "def variable_progression():\n\t# files = glob.glob('parameter_checkpoints/epoch-*[!.meta]')\n\tfiles = glob.glob('parameter_checkpoints/epoch-*')\n\n\t# reorder epochs by 'human order' otherwise it would order it as 1,110,12,...\n\t# http://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside\n\tdef atoi(text):\n\t return int(text) if text.isdigit() else text\n\n\tdef natural_keys(text):\n\t '''\n\t alist.sort(key=natural_keys) sorts in human order\n\t http://nedbatchelder.com/blog/200712/human_sorting.html\n\t (See Toothy's implementation in the comments)\n\t '''\n\t return [ atoi(c) for c in re.split('(\\d+)', text) ]\n\n\tfiles.sort(key=natural_keys)\n\n\tx, W, bh, bv = rbm.get_variables()\n\ttrainable_vars = [W, bh, bv]\n\n\tsaver = tf.train.Saver(trainable_vars)\t# restore the weights and biases of the trained model\n\n\tweights = []\n\tbhs = []\n\tbvs = []\n\twith tf.Session() as sess:\n\t\tinit = tf.initialize_all_variables()\t\n\t\tsess.run(init)\n\t\t# iterate through each saved epoch checkpoint, and add the W, bh, and bv matrices to their\n\t\t# respective lists\n\t\tfor f in files:\n\t\t\tsaver.restore(sess, f)\t\t# load the saved weights and biases from a given epoch checkpoint file\n\t\t\tweights.append(W.eval())\t\n\t\t\tbhs.append(bh.eval())\n\t\t\tbvs.append(bv.eval())\n\n\treturn weights, bhs, bvs", "def previous_saves(self):\n if os.path.exists(self.results_dir):\n return sorted([x for x in Path(self.results_dir).glob(f'{self.model_name}checkpoint_*.pk')], key=lambda s: int(s.name.replace(f'{self.model_name}checkpoint_', '').replace('.pk', '')))\n else:\n return []", "def find_checkpoint(self, args: Namespace):\n\n current_time = datetime.now()\n\n possible_checkpoints = []\n for rootdir, _, 
files in os.walk(self.checkpoint_dir):\n rootdir = Path(rootdir)\n if files:\n # skip checkpoints that are empty\n try:\n checkpoint_file = [rootdir / f for f in files if f.endswith(\".ckpt\")][0]\n except:\n continue\n\n creation_time = datetime.fromtimestamp(os.path.getctime(checkpoint_file))\n if current_time - creation_time < self.max_hours:\n ck = Checkpoint(\n creation_time=creation_time,\n args=rootdir / \"args.json\",\n checkpoint=checkpoint_file,\n )\n possible_checkpoints.append(ck)\n\n if possible_checkpoints:\n # sort by most recent\n possible_checkpoints = sorted(\n possible_checkpoints, key=lambda ck: ck.creation_time, reverse=True\n )\n\n for checkpoint in possible_checkpoints:\n checkpoint_args = Namespace(**json.load(open(checkpoint.args)))\n if all(\n getattr(checkpoint_args, param) == getattr(args, param)\n for param in AutoResumer.SHOULD_MATCH\n ):\n return checkpoint.checkpoint\n\n return None", "def __ray_checkpoint__(self):\n worker = ray.worker.global_worker\n checkpoint_index = worker.actor_task_counter\n # Get the state to save.\n checkpoint = self.__ray_save_checkpoint__()\n # Get the current task frontier, per actor handle.\n # NOTE(swang): This only includes actor handles that the local\n # scheduler has seen. Handle IDs for which no task has yet reached\n # the local scheduler will not be included, and may not be runnable\n # on checkpoint resumption.\n actor_id = ray.local_scheduler.ObjectID(worker.actor_id)\n frontier = worker.local_scheduler_client.get_actor_frontier(\n actor_id)\n # Save the checkpoint in Redis. TODO(rkn): Checkpoints\n # should not be stored in Redis. Fix this.\n set_actor_checkpoint(worker, worker.actor_id, checkpoint_index,\n checkpoint, frontier)", "def find_best_checkpoint(self, all_flows):\n best_checkpoint = None\n best_loss = np.infty\n for checkpoint, data in tqdm(all_flows.items()):\n flow = data['flow']\n p0 = data['p0']\n v = tf.tensordot(p0, tf.linalg.matvec(self.model.Gamma(self.model.X, self.model.X), p0),\n axes=1)\n deformation_loss = self.model.model_params.mu / 2 * v\n loss = self.model.regression_loss(flow[-1].reshape(-1), self.model.Y, self.model.K) + deformation_loss\n loss = loss.numpy()\n if loss < best_loss:\n best_loss = loss\n best_checkpoint = checkpoint\n return best_checkpoint, best_loss", "def unevaluated_checkpoints(self,\n timeout: int = 3600 * 8,\n num_batched_steps: int = 1,\n eval_every_steps: Optional[int] = None,\n ) -> Iterable[str]:\n logging.info(\"Looking for checkpoints in %s\", self.ckpt.base_directory)\n evaluated_checkpoints = self._get_checkpoints_with_results()\n last_eval = time.time()\n while True:\n # Check if directory exists. 
The train job may only create the directory\n # some time after the test job starts.\n if not tf.io.gfile.exists(self.ckpt.base_directory):\n logging.info(\"Directory %s does not exist!\", self.ckpt.base_directory)\n else:\n logging.info(\"what is in %s: are %s\", self.ckpt.base_directory,\n tf.io.gfile.listdir(self.ckpt.base_directory))\n unevaluated_checkpoints = []\n checkpoints = self.ckpt.get_all_checkpoints_to_restore_from()\n logging.info(\"checkpoints: %s\", checkpoints)\n unevaluated_checkpoints = checkpoints - evaluated_checkpoints\n step_and_ckpt = sorted(\n (int(x.split(\"-\")[-1]), x) for x in unevaluated_checkpoints)\n\n unevaluated_checkpoints = []\n for step, ckpt in step_and_ckpt:\n if eval_every_steps:\n if step > num_batched_steps and (\n step % eval_every_steps < num_batched_steps):\n unevaluated_checkpoints.append(ckpt)\n else:\n unevaluated_checkpoints.append(ckpt)\n\n logging.info(\n \"Found checkpoints: %s\\nEvaluated checkpoints: %s\\n\"\n \"Unevaluated checkpoints: %s\", checkpoints, evaluated_checkpoints,\n unevaluated_checkpoints)\n for checkpoint_path in unevaluated_checkpoints:\n yield checkpoint_path\n\n if unevaluated_checkpoints:\n evaluated_checkpoints |= set(unevaluated_checkpoints)\n last_eval = time.time()\n continue\n if time.time() - last_eval > timeout or self.is_training_done():\n break\n time.sleep(5)", "def predict_at_checkpoints(self, data_store, checkpoints):\n import tempfile\n\n output_dir = tempfile.gettempdir()\n\n self._forest.predict_and_output(data_store._data_store, checkpoints, output_dir)\n for p in checkpoints:\n yield p, [float(line) for line in open(output_dir + '/forest.{}.score'.format(p))\n if line.strip()]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
append name with postfix
def append_name(name, postfix): if name is None: ret = None elif name == '': ret = postfix else: ret = '%s_%s' % (name, postfix) return ret
[ "def add_name_index(self, index):\n self.name += \".%d\" % index", "def NewName(self) -> str:", "def print_name(name):\r\n\r\n\r\n return name + \"-apple\"", "def add_name(self, node):\n if 'name' in self.options:\n name = nodes.fully_normalize_name(self.options.pop('name'))\n if 'name' in node:\n del(node['name'])\n node['names'].append(name)\n self.state.document.note_explicit_target(node, node)", "def add_suffix(name: str, suffix: str):\n return f'{name}_{suffix}'", "def addName(kml, name):\n\n nameStart = kml.find(NAME_START) + len(NAME_START)\n nameEnd = kml.find(NAME_END)\n kml = kml[0:nameStart] + name + kml[nameEnd:]\n\n return kml", "def append_to_path(path, name):\n if path[-1] == '/' or path[-1] == ':':\n return path + name\n else:\n return str(path) + str('/') + str(name)", "def with_prefix(prefix, name):\n return \"/\".join((prefix, name))", "def add_name(self, name):\n self.set_name(name, len(self) - 1)", "def _addAfterName(self,name='',afterName=''):\n if not name.endswith('.csv'):\n return name\n name=name[0:len(name)-4]+afterName+'.csv'\n return name", "def create_policy_name(self, role_name, postfix):\n return '{}-{}-{}'.format(role_name, 'policy', postfix)", "def add_prefix(value, arg):\n return arg + str(value)", "def __add__(self, new_name: Tuple[str, str]) -> None:\n self.formal_names.update({new_name[0]: new_name[1]})", "def add_name(self,q):\n d = self.lnames\n for s in q:\n try:\n d = d[s]\n except KeyError:\n d[s] = {}\n d = d[s]\n d[\"$\"] = {} #'$' means the end of a word", "def addName(self, *args):\n return _yarp.Contact_addName(self, *args)", "def iter_name(self, prepend=''):\n out_str = self.lowupjoin(prepend)\n out_str += '_%d.root'\n return out_str", "def add_proper_name (w,lx):\n if ('A' <= w[0] and w[0] <= 'Z'):\n lx.add(w,'P')\n return ''\n else:\n return (w + \" isn't a proper name\")", "def apply_identity_suffix(self, suffix):\n self.owner += suffix\n self.group += suffix", "def add_proper_name(w,lx):\n if ('A' <= w[0] and w[0] <= 'Z'):\n lx.add(w,'P')\n return ''\n else:\n return (w + \" isn't a proper name\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lands the rover and makes it part of the grid. Throws an exception if: a rover with that name already exists, the rover being landed has a bad direction, the rover's coordinates are off the grid, or a rover already exists on the grid at the rover's coordinates
def land_rover(self, rover): if self.rovers.get(rover.name): raise RoverException(ExceptionMessages.ROVER_ALREADY_LANDED) if not Rover.valid_direction(rover.direction): raise RoverException(ExceptionMessages.BAD_DIRECTION) if not self._is_coordinate_in_the_grid(rover.coordinate): raise RoverException(ExceptionMessages.OFF_GRID) if self._is_coordinate_occupied(rover.coordinate): raise RoverException(ExceptionMessages.ROVER_COLLISION) self.rovers[rover.name] = rover
[ "def move(self, rover_id, grid_point):", "def make_move(self, playername, coordinates, direction):\n curr_player = self.get_player_from_name(playername)\n curr_player_marble = curr_player.get_marble_color()\n\n # if Game has already been won - return False\n if self._winner is not None:\n print(\"Game has already been won.\")\n return False\n\n # if marble at coordinates isn't player's marble color, return false\n if self._board[coordinates[0]][coordinates[1]] != curr_player_marble:\n print(\"That marble doesn't belong to you!\")\n return False\n\n # if you knock your own marble off the table, return false\n if self.check_knock_own_marble(curr_player_marble, direction, coordinates) is True:\n print(\"You knocked your own marble off the table. Invalid move.\")\n return False\n\n # if Ko rule is not followed, return false\n if coordinates == self._last_slot_moved:\n if direction == 'L' and self._prev_direction == 'R':\n print(\"Ko rule not followed. Invalid move.\")\n return False\n if direction == 'R' and self._prev_direction == 'L':\n print(\"Ko rule not followed. Invalid move.\")\n return False\n if direction == 'F' and self._prev_direction == 'B':\n print(\"Ko rule not followed. Invalid move.\")\n return False\n if direction == 'B' and self._prev_direction == 'F':\n print(\"Ko rule not followed. Invalid move.\")\n return False\n\n # branch depending on direction\n if direction == 'L':\n # if not at right edge of board and slot to right isn't a blank, marble isn't accessible\n if coordinates[1] != 6 and self.get_marble((coordinates[0], coordinates[1] + 1)) != 'X':\n # print(\"The marble to the left of this one is not accessible\")\n return False\n\n # if starting game, set current turn to current player, if current turn != current player,\n # it's not current player's turn\n if self._current_turn is None:\n self._current_turn = playername\n elif self._current_turn != playername:\n # print(\"It is not this player's turn.\")\n return False\n\n # use in while loop\n prev_slot = coordinates\n curr_is_empty = False\n\n # logic - move towards marbles in direction you are pushing until you encounter a space or\n # until you get to the left edge of the board. 
For each slot, copy the marble that is currently\n # in the previous slot into the current slot.\n while not curr_is_empty and prev_slot[1] != 0:\n curr_slot = (prev_slot[0], prev_slot[1] - 1)\n curr_is_empty = self.get_marble(curr_slot) == 'X'\n if prev_slot == coordinates:\n old_prev_marble = self.get_marble(prev_slot)\n else:\n old_prev_marble = old_curr_marble\n old_curr_marble = self.get_marble(curr_slot)\n self.set_marble(curr_slot, old_prev_marble)\n\n if prev_slot == coordinates:\n self.set_marble(prev_slot, 'X')\n\n prev_slot = curr_slot\n\n # if at edge of board and the marble that was there was red, it was pushed off\n if prev_slot[1] == 0 and old_curr_marble == 'R':\n curr_player.inc_red_count()\n elif direction == 'R':\n if coordinates[1] != 0 and self.get_marble((coordinates[0], coordinates[1] - 1)) != 'X':\n print(\"The marble to the right of this one is not accessible\")\n return False\n\n if self._current_turn is None:\n self._current_turn = playername\n elif self._current_turn != playername:\n print(\"It is not this player's turn.\")\n return False\n\n prev_slot = coordinates\n curr_is_empty = False\n\n while not curr_is_empty and prev_slot[1] != 6:\n curr_slot = (prev_slot[0], prev_slot[1] + 1)\n curr_is_empty = self.get_marble(curr_slot) == 'X'\n if prev_slot == coordinates:\n old_prev_marble = self.get_marble(prev_slot)\n else:\n old_prev_marble = old_curr_marble\n old_curr_marble = self.get_marble(curr_slot)\n self.set_marble(curr_slot, old_prev_marble)\n\n if prev_slot == coordinates:\n self.set_marble(prev_slot, 'X')\n\n prev_slot = curr_slot\n\n if prev_slot[1] == 6 and old_curr_marble == 'R':\n curr_player.inc_red_count()\n elif direction == 'F':\n if coordinates[0] != 6 and self.get_marble((coordinates[0] + 1, coordinates[1])) != 'X':\n print(\"The marble below this one is not accessible\")\n return False\n\n if self._current_turn is None:\n self._current_turn = playername\n elif self._current_turn != playername:\n print(\"It is not this player's turn.\")\n return False\n\n prev_slot = coordinates\n curr_is_empty = False\n\n while not curr_is_empty and prev_slot[0] != 0:\n curr_slot = (prev_slot[0] - 1, prev_slot[1])\n curr_is_empty = self.get_marble(curr_slot) == 'X'\n if prev_slot == coordinates:\n old_prev_marble = self.get_marble(prev_slot)\n else:\n old_prev_marble = old_curr_marble\n old_curr_marble = self.get_marble(curr_slot)\n self.set_marble(curr_slot, old_prev_marble)\n\n if prev_slot == coordinates:\n self.set_marble(prev_slot, 'X')\n\n prev_slot = curr_slot\n\n if prev_slot[0] == 0 and old_curr_marble == 'R':\n curr_player.inc_red_count()\n elif direction == 'B':\n if coordinates[0] != 0 and self.get_marble((coordinates[0] - 1, coordinates[1])) != 'X':\n print(\"The marble above this one is not accessible\")\n return False\n\n if self._current_turn is None:\n self._current_turn = playername\n elif self._current_turn != playername:\n print(\"It is not this player's turn.\")\n return False\n\n prev_slot = coordinates\n curr_is_empty = False\n\n while not curr_is_empty and prev_slot[0] != 6:\n curr_slot = (prev_slot[0] + 1, prev_slot[1])\n curr_is_empty = self.get_marble(curr_slot) == 'X'\n if prev_slot == coordinates:\n old_prev_marble = self.get_marble(prev_slot)\n else:\n old_prev_marble = old_curr_marble\n old_curr_marble = self.get_marble(curr_slot)\n self.set_marble(curr_slot, old_prev_marble)\n\n if prev_slot == coordinates:\n self.set_marble(prev_slot, 'X')\n\n prev_slot = curr_slot\n\n if prev_slot[0] == 6 and old_curr_marble == 'R':\n 
curr_player.inc_red_count()\n\n self._last_slot_moved = prev_slot\n self._prev_direction = direction\n\n # check for winner via 7 red marbles\n for player in self._players:\n if player.get_red_count() == 7:\n self._winner = player.get_playername()\n\n # check for winner via knocking all of other player's marbles off\n if self.get_marble_count()[0] == 0:\n for player in self._players:\n if player.get_marble_color() == 'B':\n self._winner = player.get_playername()\n elif self.get_marble_count()[1] == 0:\n for player in self._players:\n if player.get_marble_color() == 'W':\n self._winner = player.get_playername()\n\n # at end of move, set current turn to be name of other player\n if curr_player == self._players[0]:\n self._current_turn = self._players[1].get_playername()\n else:\n self._current_turn = self._players[0].get_playername()\n\n return True", "def add_rover(self, rover_id, rover):\n self.check_grid_position(rover.position)\n if not rover_id in self.rovers.keys():\n self.rovers[rover_id] = rover", "def check_grid_position(self, position):\n if not self.is_empty(position):\n raise Exception('Another rover is already present at location')\n\n if not self.inside_grid(position):\n raise Exception('New rover position is not with in the grid.')", "def doTurn(self, loc_info):\n r = random.random() \n if r < 0.05 and not loc_info.left_wall:\n self.turnLeft()\n elif r < 0.10 and not loc_info.right_wall:\n self.turnRight()\n else:\n if loc_info.front_wall:\n r =random.random()\n if r < 0.5 and not loc_info.left_wall:\n self.turnLeft()\n else:\n self.turnRight()", "def tree_on_road(self):\n\t\tfor i in range(4):\n\t\t\tself.raceMapGrid[15,15+i] = 4\n\t\t\tscreen.blit(self.tree,((15+i)*32,15*32))\n\t\tpygame.display.update()\n\t\tpass", "def move_to_next_railroad(self, player_name):\n # set variables for better readability\n current_pos = self.players[player_name][1]\n\n destination = current_pos\n for dist in range(1, TILE_LIMIT): # walk forward until reaching a Railroad tile.\n destination = (current_pos + dist) % TILE_LIMIT # set destination to an index of tile_list\n if type(self.tile_list[destination]) is Property:\n if self.tile_list[destination].get_group() == \"Railroad\":\n break\n\n # move the player to the destination tile\n self.player_direct_move(player_name, destination)\n # set gui special event value to twice normal rent\n self.gui.set_special_event(2 * self.get_rent_value(self.tile_list[destination].get_name()))\n # set special_event for next stage of event progression\n self.set_special_event(5)", "def place_pillar_e(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__pillar_e = x, y\r\n if self.pillar_e_room() == self.pillar_a_room() or \\\r\n self.pillar_e_room() == self.pillar_i_room() or \\\r\n self.pillar_e_room() == self.pillar_p_room() or \\\r\n self.pillar_e_room() == self.entrance_room() or \\\r\n self.pillar_e_room() == self.exit_room():\r\n return self.place_pillar_e()\r\n self.__maze[x][y].set_pillar_e(True)", "def place_road(self, road):\n\n # Check if space is empty\n if not self.environment.grid.is_cell_empty(road.pos):\n return False\n\n # Place Road\n self.environment.grid.place_agent(agent=road, pos=road.pos)\n\n # Add road to environment's road list\n self.environment.agents['roads'].append(road)\n\n # Update the list of cells where other things can be built\n self.update_available_cells(road)", "def turn(self, rover_id, new_direction):", "def predator_on_road(self):\n\t\tself.raceMapGrid[8,13] = 
4\n\t\tself.raceMapGrid[8,14] = 4\n\t\tscreen.blit(self.predator,(13*32,8*32))\n\t\tscreen.blit(self.predator,(14*32,8*32))\n\t\tpygame.display.update()", "def place_entrance(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__current_room = x, y # places adventurer in dungeon at start of game\r\n self.__entrance_room = x, y\r\n self.__maze[x][y].set_entrance(True)", "def test_rover_position(self):\n rover = Rover(self.plateau_dimensions, self.rover_initial_position, Rover.DIRECTIONS.get('E'))\n rover.execute_instructions(\"LMLM\")\n self.assertEqual(rover._position.x, 1)\n self.assertEqual(rover._position.y, 2)\n self.assertEqual(rover.get_heading, 'W')", "def move(self, direction):\r\n #create moving direction vectors if the N S E W or a combination is entered\r\n if direction == \"N\":\r\n new = (0,-1)\r\n elif direction == \"S\":\r\n new = (0,1)\r\n elif direction == \"E\":\r\n new = (1,0)\r\n elif direction == \"W\":\r\n new = (-1,0)\r\n elif direction == \"NW\":\r\n new = (-1,-1)\r\n elif direction == \"NE\":\r\n new = (1,-1)\r\n elif direction == \"SE\":\r\n new = (1,1)\r\n elif direction == \"SW\":\r\n new = (-1,1)\r\n else:\r\n print(\"Invalid Command\")\r\n \r\n #create a copy of the map so as not to disturb the original \r\n g = copy.copy(self.map)\r\n #find the boat's index\r\n B = (self.boat.grid_x,self.boat.grid_y)\r\n #if the boat (x,y) + the new direction(x,y) is in the bounds of the map\r\n if B[0]+new[0] >= 0 and B[0]+new[0] < self.height and B[1]+new[1] >= 0 and B[1]+new[1] < self.width:\r\n #if the new position of the boat is navigable\r\n if g[B[0]+new[0]][B[1]+new[1]].navigable == True or g[B[0]+new[0]][B[1]+new[1]] == self.treasure:\r\n #if the new position is the target position then you win\r\n if g[B[0]+new[0]][B[1]+new[1]] == self.treasure:\r\n self.state = \"WON\"\r\n print(self.state)\r\n #move change the map's position to an dot on the old one and B on the new\r\n self.boat = g[B[0]+new[0]][B[1]+new[1]]\r\n #join the list back together the list we have been working on\r\n #set the map equal to the new one set boat equal to new position\r\n self.map = g\r\n else:\r\n print(\"Cannot Move here\")\r\n else:\r\n print(\"Cannot Move here\")", "def placeOnFloor(self):\n assert self.notify.debugStateCall(self)\n self.oneTimeCollide()\n self.avatarNodePath.setZ(self.avatarNodePath.getZ()-self.lifter.getAirborneHeight())", "def make_move(self, board):", "def go_to_prison(self):\n self.coordinate_x = 56\n self.coordinate_y = 819 - 20 * self.player_number\n self.cell = 10", "def move_object(self, object_name, new_location):\n\t\t#If predator\n\t\tif(object_name == 'predator'):\n\t\t\t#Get old location\n\t\t\told_location = self.predator_location\n\t\t\t#Update predator location\n\t\t\tself.predator_location = new_location\n\t\t\t#Empty old location in grid\n\t\t\tself.grid[old_location[0]][old_location[1]] = ' '\n\t\t\t#Set new location in grid to 'X'\n\t\t\tself.grid[new_location[0]][new_location[1]] = 'X'\n\t\t#If prey\n\t\telif(object_name == 'prey'):\n\t\t\t#Get old location\n\t\t\told_location = self.prey_location\n\t\t\t#Update prey location\n\t\t\tself.prey_location = new_location\n\t\t\t#Empty old location in grid\n\t\t\tself.grid[old_location[0]][old_location[1]] = ' '\n\t\t\t#Set new location in grid to 'O'\n\t\t\tself.grid[new_location[0]][new_location[1]] = 'O'", "def create_neighborhood(self):\n if len(self.available_building_cells) == 0:\n return False\n # Pick cell\n 
shuffle(self.available_building_cells)\n\n neighborhood_origin = self.available_building_cells[0]\n if not self.creates_valid_building(neighborhood_origin):\n # If not a valid placement, remove location from list\n self.available_building_cells.remove(neighborhood_origin)\n # Retry!\n self.create_neighborhood()\n return True # Exit after neighborhood is created\n\n final_cells = [neighborhood_origin]\n self.available_building_cells.remove(neighborhood_origin)\n\n # Place building on origin\n self.place_building(Building(self.environment, self.environment.next_building_id, neighborhood_origin, attractiveness=random()))\n neighborhood_cells = self.environment.grid.get_neighborhood(neighborhood_origin, moore=True, include_center=True)\n\n # Create a random number of residence buildings in this neighborhood\n number_of_residences = randrange(2,6)\n for i in range(number_of_residences):\n while len(neighborhood_cells) > 0:\n shuffle(neighborhood_cells)\n # Only place building if space is empty\n if self.environment.grid.is_cell_empty(neighborhood_cells[0]):\n self.place_building(Building(self.environment, self.environment.next_building_id, neighborhood_cells[0], attractiveness=random()))\n final_cells.append(neighborhood_cells[0])\n try:\n # If this space was available before, remove it from list\n self.available_building_cells.remove(neighborhood_cells[0])\n except:\n pass\n\n continue\n\n # Remove cell from list\n neighborhood_cells.remove(neighborhood_cells[0])\n\n # Fill surrounding space around buildings with roads!\n for building_location in final_cells:\n for surrounding_cell in self.environment.grid.get_neighborhood(building_location, moore=True):\n if self.environment.grid.is_cell_empty(surrounding_cell):\n self.place_road(Road(surrounding_cell))\n\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tries to navigate and reposition the rover on the grid. Throws an exception if: it cannot find that rover on the grid, a bad instruction is passed, or executing the instruction string will cause a collision with another rover on the grid
def navigate_rover(self, name, instruction_str): rover = self.rovers.get(name) if not rover: raise RoverException(ExceptionMessages.BAD_NAME) coordinate = copy.deepcopy(rover.coordinate) direction = rover.direction for instruction in instruction_str: if instruction == 'L' or instruction == 'R': direction = self._direction_after_turning(direction, instruction) elif instruction == 'M': coordinate = self._coordinate_after_moving(direction, coordinate) else: raise RoverException(ExceptionMessages.INVALID_INSTRUCTION) # This means we have processed all the instructions without exception # assign new direction and coordinates to rover rover.direction = direction rover.coordinate = coordinate
[ "def move(self, rover_id, grid_point):", "def move(self, direction):\r\n #create moving direction vectors if the N S E W or a combination is entered\r\n if direction == \"N\":\r\n new = (0,-1)\r\n elif direction == \"S\":\r\n new = (0,1)\r\n elif direction == \"E\":\r\n new = (1,0)\r\n elif direction == \"W\":\r\n new = (-1,0)\r\n elif direction == \"NW\":\r\n new = (-1,-1)\r\n elif direction == \"NE\":\r\n new = (1,-1)\r\n elif direction == \"SE\":\r\n new = (1,1)\r\n elif direction == \"SW\":\r\n new = (-1,1)\r\n else:\r\n print(\"Invalid Command\")\r\n \r\n #create a copy of the map so as not to disturb the original \r\n g = copy.copy(self.map)\r\n #find the boat's index\r\n B = (self.boat.grid_x,self.boat.grid_y)\r\n #if the boat (x,y) + the new direction(x,y) is in the bounds of the map\r\n if B[0]+new[0] >= 0 and B[0]+new[0] < self.height and B[1]+new[1] >= 0 and B[1]+new[1] < self.width:\r\n #if the new position of the boat is navigable\r\n if g[B[0]+new[0]][B[1]+new[1]].navigable == True or g[B[0]+new[0]][B[1]+new[1]] == self.treasure:\r\n #if the new position is the target position then you win\r\n if g[B[0]+new[0]][B[1]+new[1]] == self.treasure:\r\n self.state = \"WON\"\r\n print(self.state)\r\n #move change the map's position to an dot on the old one and B on the new\r\n self.boat = g[B[0]+new[0]][B[1]+new[1]]\r\n #join the list back together the list we have been working on\r\n #set the map equal to the new one set boat equal to new position\r\n self.map = g\r\n else:\r\n print(\"Cannot Move here\")\r\n else:\r\n print(\"Cannot Move here\")", "def move(self):\n # neighbor offsets\n offset = [(-1,1),(0,1),(1,1),(-1,0),(1,0),(-1,-1),(0,-1),(1,-1)] \n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x,y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move_right():\n return __maze.move_right()", "def test_rover_position(self):\n rover = Rover(self.plateau_dimensions, self.rover_initial_position, Rover.DIRECTIONS.get('E'))\n rover.execute_instructions(\"LMLM\")\n self.assertEqual(rover._position.x, 1)\n self.assertEqual(rover._position.y, 2)\n self.assertEqual(rover.get_heading, 'W')", "def move_invalid():\n check50.run(run_command).stdin(\"EAST\").stdout(\"Invalid command.\")", "def solve_row1_tile(self, target_col):\n # replace with your code\n assert self.row1_invariant(target_col), \"row1 invariant does not hold before solve row1\"\n \n move_string = \"\"\n \n correcttile_pos = self.current_position(1, target_col)\n \n #print correcttile_pos\n distance_ver = 1 - correcttile_pos[0] \n distance_hor = target_col - correcttile_pos[1]\n \n \n \n if distance_hor < 0:\n if correcttile_pos[0] == 0:\n move_string += \"u\"*(distance_ver-1)\n move_string += \"r\"*(-distance_hor) +\"uld\"\n #start recurring horizontal cyclic movement\n move_string += 
\"rulld\"*(-distance_hor)\n\n else:\n move_string += \"u\"*distance_ver\n move_string += \"r\"*(-distance_hor) +\"ulld\"\n #start recurring horizontal cyclic movement\n move_string += \"rulld\"*(-distance_hor-1)\n \n elif distance_hor == 0:\n move_string += \"u\"*distance_ver\n move_string += \"ld\"\n \n else: \n \n move_string += \"l\"*(distance_hor)\n \n if distance_ver >0:\n move_string += \"u\"*distance_ver+\"rdl\"\n #start recurring horizontal cyclic movement\n move_string += \"urrdl\"*(distance_hor-1)\n \n \n \n # 3. move tile down vertically to target row\n #if correcttile_pos[0] == 0 and distance_hor <0:\n move_string += \"druld\"*(distance_ver-1)\n #else:\n #move_string += \"druld\"*distance_ver\n move_string += \"ur\"\n \n # update puzzle\n self.update_puzzle(move_string)\n \n assert self.row0_invariant(target_col), \"row0 does not hold after solve row1 tile\"\n\n return move_string", "def move_right(self):\n if self.grid_pos_x == self.grid_column_len - 1:\n self.grid_pos_x = 0\n self.x_pos = 1\n\n self.grid[self.grid_pos_y][-1] = self.tile_symbol\n self.grid[self.grid_pos_y][self.grid_pos_x] = self.pos_symbol\n else:\n self.grid[self.grid_pos_y][self.grid_pos_x] = self.tile_symbol\n self.grid[self.grid_pos_y][self.grid_pos_x + 1] = self.pos_symbol\n\n self.grid_pos_x += 1\n self.x_pos += 1", "def move_robot(self):\n global gl\n global mb\n global imported_class\n self.run_info = self.ids.run_info\n if self.pressed_algo:\n self.run_info.text='[color=#0010FF]Route completed.[/color]'\n try:\n if self.user_class:\n if imported_class._distance < gl.best_distance:\n follow_route = imported_class.route\n print(\"Executing: {} route\".format(self.user_class.upper()))\n else:\n follow_route = gl.best_distance.routes\n print(\"Executing: {} route\".format(gl.best_distance.name))\n else:\n print(\"Executing: {} route\".format(gl.best_distance.name))\n follow_route = gl.best_distance.routes\n except Exception as e:\n pass\n finally:\n mb = MoveBase(follow_route)\n else:\n self.run_info.text='[color=#FF0000]Run algorithms first.[/color]'", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.minimax(self, sample_space, affinity, depth_limit, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def test_move_roomXinv(self):\n self.assertIn(self.bauble, self.initial)\n self.G._move(self.bauble, \n self.initial, self.G.inventory)\n self.assertIn(self.bauble, self.G.inventory)", "def go_to_prison(self):\n self.coordinate_x = 56\n self.coordinate_y = 819 - 20 * self.player_number\n self.cell = 10", "def go_to_specific_pos(self,pos,pos_type=None):\n goal_pos = pos #The position the agent is going for\n distances = []\n all_steps = self.model.grid.get_neighborhood(self.pos,moore=True,include_center=False) #getting neighbours\n possible_steps = []\n\n for position in all_steps: #Checking if cell is empty, and adding to list of possible empty steps.\n if self.model.grid.is_cell_empty(position) or (pos_type == \"exit\" and position in self.model.entre_pos):\n possible_steps.append(position)\n\n if len(possible_steps) == 0: #If possible empty steps is empty,\n if pos_type == \"scene\": #If going to scene\n back_up_list = []\n 
all_neighbors = self.model.grid.get_neighbors(self.pos,moore=True,include_center=False)\n for n in all_neighbors:\n if isinstance(n,guest):\n back_up_list.append(n.pos)\n try:\n pos = random.choice(back_up_list)\n self.model.grid.move_agent(self,pos)\n return\n except:\n return\n return\n\n else:\n for pos in possible_steps:\n distances.append((distance(goal_pos,pos),pos)) #find distance from the possible empty steps to goal position\n\n x_,y_ = min(distances,key=lambda x:x[0])[1] #choose position with shortest distance to goal\n if (x_,y_) == goal_pos: #if the chosen position is goal\n if pos_type == \"exit\": #and the type is exit\n self.has_left = True #Set agent to \"has left\"\n\n self.model.grid.move_agent(self,(x_,y_)) #move agent.", "def tryMove(self, newPos):\r\n reward = 0\r\n done = False\r\n\r\n # Boundary check\r\n if newPos[0] < 0 or newPos[0] >= self.gridSize \\\r\n or newPos[1] < 0 or newPos[1] >= self.gridSize:\r\n return done, reward\r\n\r\n targetCell = self.grid.get(newPos[0], newPos[1])\r\n if targetCell == None:\r\n self.previous_cell = targetCell\r\n self.previous_pos = self.agentPos\r\n self.agentPos = newPos\r\n elif targetCell.type == 'goal' and isinstance(self.carrying, Flag):\r\n self.previous_cell = targetCell\r\n self.previous_pos = self.agentPos\r\n self.agentPos = newPos\r\n done = True\r\n reward = 1; #100 - self.stepCount\r\n elif targetCell.type == 'lava':\r\n done = True\r\n reward = -1000 - self.stepCount\r\n elif targetCell.canOverlap():\r\n self.previous_cell = targetCell\r\n self.previous_pos = self.agentPos\r\n self.agentPos = newPos\r\n if targetCell.canPickup() and self.carrying is None:\r\n self.carrying = targetCell\r\n #self.grid.set(*newPos, None)\r\n return done, reward", "def solve_row0_tile(self, target_col):\n # replace with your code\n assert self.row0_invariant(target_col), \"row0_invariant does not hold before solve row0 tile\"\n \n move_string = \"\"\n \n correcttile_pos = self.current_position(0,target_col)\n \n #distance_ver = 0 - correcttile_pos[0] \n distance_hor = target_col - correcttile_pos[1] \n \n move_string += \"ld\"\n if correcttile_pos == (0,target_col-1):\n pass\n \n else:\n if correcttile_pos == (1,target_col-1) or correcttile_pos[0] == 0:\n move_string +=\"l\"*(distance_hor-1)\n move_string += \"u\" +\"rdlur\"*(distance_hor-1)\n move_string += \"ld\"\n \n# elif correcttile_pos[0]>1:\n# move_string += \"u\"*(distance_ver-1)\n#\n# if correcttile_pos[0] == 0:\n# move_string += \"r\"*(-distance_hor-1) +\"dllu\"\n# #start recurring horizontal cyclic movement\n# move_string += \"rdllu\"*(-distance_hor-2)\n#\n# else:\n# move_string += \"r\"*(-distance_hor-1) +\"ulld\"\n# #start recurring horizontal cyclic movement\n# move_string += \"rulld\"*(-distance_hor-2) \n# \n# move_string += \"druld\"*(distance_ver -1)\n else:\n move_string +=\"l\"*(distance_hor-1)\n move_string +=\"urrdl\"*(distance_hor-2)\n \n\n # adding the fixed move string \n move_string += \"urdlurrdluldrruld\"\n #move 0 to end of row\n #move_string += \"r\"*(self.get_width()-2)\n \n # update puzzle\n self.update_puzzle(move_string)\n \n assert self.row1_invariant(target_col-1), \"row1 invariant does not hold after solve row0 tile\"\n \n return move_string", "def move(self, board, player_mark='o'):\n # First things first, let's check if the board is full first before we\n # make a move\n full = 1\n for location in board.keys():\n if board[location] == '-':\n full = 0\n\n if not full:\n # Storm Spirit is a dumb yet aggressive AI, so he does not need to\n # check 
whether the opponent has created a line.\n\n # Initialize a move variable that determines the location that the\n # AI will mark.\n move = ''\n\n # Let's see if there are any potential lines that we can form,\n # then mark the location that would finish that line.\n print('Searching for potential lines...')\n move = self.find_line_attempt(board, 'x')\n\n if(move == ''):\n print('No potential lines found. Marking random location.')\n # Initialize a boolean variable that tracks whether we have\n # marked a location or not.\n marked = 0\n while not marked:\n location = random.randint(1,9)\n\n # The location will have to be empty\n if(location == 1 and board['topleft'] == '-'):\n marked = 1\n print('Marking topleft location\\n')\n elif(location == 2 and board['topcenter'] == '-'):\n marked = 1\n print('Marking topcenter location\\n')\n elif(location == 3 and board['topright'] == '-'):\n marked = 1\n print('Marking topright location\\n')\n elif(location == 4 and board['middleleft'] == '-'):\n marked = 1\n print('Marking middleleft location\\n')\n elif(location == 5 and board['middlecenter'] == '-'):\n marked = 1\n print('Marking middlecenter location\\n')\n elif(location == 6 and board['middleright'] == '-'):\n marked = 1\n print('Marking middleright location\\n')\n elif(location == 7 and board['bottomleft'] == '-'):\n marked = 1\n print('Marking bottomleft location\\n')\n elif(location == 8 and board['bottomcenter'] == '-'):\n marked = 1\n print('Marking bottomcenter location\\n')\n elif(location == 9 and board['bottomright'] == '-'):\n marked = 1\n print('Marking bottomright location\\n')\n else:\n # There are no more locations to mark, but set marked to\n # true anyway\n print('No empty spaces found! Re-rolling')\n # Mark the location chosen\n if(location == 1):\n board['topleft'] = self.mark\n elif(location == 2):\n board['topcenter'] = self.mark\n elif(location == 3):\n board['topright'] = self.mark\n elif(location == 4):\n board['middleleft'] = self.mark\n elif(location == 5):\n board['middlecenter'] = self.mark\n elif(location == 6):\n board['middleright'] = self.mark\n elif(location == 7):\n board['bottomleft'] = self.mark\n elif(location == 8):\n board['bottomcenter'] = self.mark\n elif(location == 9):\n board['bottomright'] = self.mark\n else:\n # We found a line attempt, let's mark the finishing location\n board[move] = self.mark\n print('Marked location at ' + move)", "def move(argument, player):\n current_tile = world.tile_exists(player.location_x, player.location_y)\n if argument == \"north\":\n if world.tile_exists(player.location_x, player.location_y-1):\n new_tile = world.tile_exists(player.location_x, player.location_y-1)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x, player.location_y-1)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"south\":\n if world.tile_exists(player.location_x, player.location_y+1):\n new_tile = world.tile_exists(player.location_x, player.location_y+1)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x, player.location_y+1)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do 
that.\")\n elif argument == \"east\":\n if world.tile_exists(player.location_x+1, player.location_y):\n new_tile = world.tile_exists(player.location_x + 1, player.location_y)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x+1, player.location_y)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"west\":\n if world.tile_exists(player.location_x-1, player.location_y):\n new_tile = world.tile_exists(player.location_x-1, player.location_y)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x-1, player.location_y)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"Movement not recognized. Specify a cardinal direction.\")\n return", "def move(x, y, direction, board):\n\n piece_at_xy = starter.get_piece(x, y, board); # Getting necessary pieces\n\n assert piece_at_xy != '*', \"Error in swipe logic\"; # Logical debug case\n valid_direction = (direction == \"left\" or\n direction == \"right\" or\n direction == \"up\" or\n direction == \"down\");\n assert valid_direction, \"Invalid direction passed in\"; # Logical debug case\n\n # The new x and y for the current piece (adjacent's current position) are stored alongside adjacent (fewer ifs + redundant code)\n if direction == \"left\":\n adjacent = (starter.get_piece(x - 1, y, board), x - 1, y);\n elif direction == \"right\":\n adjacent = (starter.get_piece(x + 1, y, board), x + 1, y);\n elif direction == \"up\":\n adjacent = (starter.get_piece(x, y - 1, board), x, y - 1);\n elif direction == \"down\":\n adjacent = (starter.get_piece(x, y + 1, board), x, y + 1);\n\n if adjacent[0] == None: # Edge of the board case (no action taken)\n return False;\n\n elif piece_at_xy != adjacent[0] and adjacent[0] != '*': # Can't combine two numbers case (no action taken)\n return False;\n\n elif adjacent[0] == '*': # Empty spot adjacent case (recursive movement in direction)\n starter.place_piece('*', x, y, board);\n starter.place_piece(piece_at_xy, adjacent[1], adjacent[2], board);\n move(adjacent[1], adjacent[2], direction, board);\n return True;\n\n elif piece_at_xy == adjacent[0]: # Adjacent same numbers case (combine them)\n starter.place_piece('*', x, y, board);\n starter.place_piece(str(int(adjacent[0]) * 2), adjacent[1], adjacent[2], board);\n move(adjacent[1], adjacent[2], direction, board);\n return True;\n\n else:\n # Logical debug case\n assert False, \"No way you should be in here. Error in move logic\";\n\n return False;" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Basically a state machine. Given an instruction ('R' or 'L') and a direction ('N', 'S', 'E', or 'W'), returns the new direction. Throws an exception in case of a bad instruction
def _direction_after_turning(self, direction, instruction): next_left_states = {'N':'W', 'W': 'S', 'S': 'E', 'E': 'N'} next_right_states = {'N': 'E', 'E': 'S', 'S': 'W', 'W': 'N'} if instruction == 'R': return next_right_states[direction] elif instruction == 'L': return next_left_states[direction] else: raise RoverException(ExceptionMessages.INVALID_INSTRUCTION)
[ "def go_one_step(old_state, direction):\n assert direction in ['R', 'L', 'U', 'D']\n\n x, y = old_state\n if direction == 'R':\n return (x+1, y)\n if direction == 'L':\n return (x-1, y)\n if direction == 'U':\n return (x, y+1)\n if direction == 'D':\n return (x, y-1)", "def move(direction):\n if direction == 'r':\n return 1\n if direction == 'l':\n return -1", "def _turn(cls, direction, turn):\n directions = ('N', 'E', 'S', 'W')\n i = directions.index(direction)\n if turn == 'L':\n i -= 1\n elif turn == 'R':\n i += 1\n else:\n raise ValueError('invalid instructions: invalid turn')\n if i < 0:\n i += 4\n elif i > 3:\n i -= 4\n return directions[i]", "def _get_direction(self, action, direction):\n left = [2,3,1,0]\n right = [3,2,0,1]\n if direction == 0:\n new_direction = action\n elif direction == -1:\n new_direction = left[action]\n elif direction == 1:\n new_direction = right[action]\n else:\n raise Exception(\"getDir received an unspecified case\")\n return new_direction", "def _get_state(self, state, direction):\n row_change = [-1,1,0,0]\n col_change = [0,0,-1,1]\n row_col = seq_to_col_row(state, self.num_cols)\n row_col[0,0] += row_change[direction]\n row_col[0,1] += col_change[direction]\n\n # check for invalid states\n if self.obs_states is not None:\n if (np.any(row_col < 0) or\n np.any(row_col[:,0] > self.num_rows-1) or\n np.any(row_col[:,1] > self.num_cols-1) or\n np.any(np.sum(abs(self.obs_states - row_col), 1)==0)):\n next_state = state\n else:\n next_state = row_col_to_seq(row_col, self.num_cols)[0]\n else:\n if (np.any(row_col < 0) or\n np.any(row_col[:,0] > self.num_rows-1) or\n np.any(row_col[:,1] > self.num_cols-1)):\n next_state = state\n else:\n next_state = row_col_to_seq(row_col, self.num_cols)[0]\n\n return next_state", "def get_move():\n\n # Repeatedly ask the user for a direction until they enter a valid one\n while True:\n\n # Get input from user\n direction = input(\"\\nEnter a direction: \").lower()\n\n # Break if input is valid\n if direction in [\"n\", \"s\", \"w\", \"e\"]:\n break\n\n # Print an error message and continue if it is not correct\n else:\n print(\"You must move a valid direction (n, s, w, e)\")\n\n return direction", "def get_direction(position, next_position):\n x, y = position\n next_x, next_y = next_position\n if x == next_x:\n if y < next_y:\n return constants.Action.Right\n else:\n return constants.Action.Left\n elif y == next_y:\n if x < next_x:\n return constants.Action.Down\n else:\n return constants.Action.Up\n raise constants.InvalidAction(\"We did not receive a valid position transition.\")", "def adjacent_location(location, direction):\r\n (row, column) = location\r\n # 1 - we know the incoming location is 0, 0\r\n # 2 - we know the direction is the string 'right'\r\n # 3 - we know that the result should be 0, 1\r\n if direction == \"right\":\r\n location = (row, column + 1)\r\n if direction == \"left\":\r\n location = (row, column - 1)\r\n if direction == \"up\":\r\n location = (row - 1, column)\r\n if direction == \"down\":\r\n location = (row + 1, column)\r\n return location", "def direction_increments(direction):\n if direction == 'U':\n return (0, 1)\n if direction == 'D':\n return (0, -1)\n if direction == 'L':\n return (-1, 0)\n if direction == 'R':\n return (1, 0)\n raise ValueError('What direction is this?!?', direction)", "def move(self, direction):\n # 'direction' is given by user keyboard\n # 'next_position' is the hypothetical next position if available\n if direction == \"u\":\n next_position = self.position.up()\n elif 
direction == \"d\":\n next_position = self.position.down()\n elif direction == \"l\":\n next_position = self.position.left()\n elif direction == \"r\":\n next_position = self.position.right()\n\n # Checks if there is an element to pick up on the next position\n if self.labyrinth.is_a_syringe_element(next_position):\n element = self.labyrinth.is_a_syringe_element(next_position)\n self.pick_up_syringe_element(element)\n self.step(next_position)\n\n # Checks if the guard is on the next position\n elif self.labyrinth.is_near_the_guard(next_position):\n self.step(next_position)\n success = self.fight_guard()\n return success\n\n # Checks if the next position is available (== space)\n elif self.labyrinth.is_available((next_position.x, next_position.y)):\n self.step(next_position)\n\n else:\n print(\"\\nMacGyver can't move in this direction !\")", "def navigate_rover(self, name, instruction_str):\n\n rover = self.rovers.get(name)\n if not rover:\n raise RoverException(ExceptionMessages.BAD_NAME)\n\n coordinate = copy.deepcopy(rover.coordinate)\n direction = rover.direction\n\n for instruction in instruction_str:\n\n if instruction == 'L' or instruction == 'R':\n direction = self._direction_after_turning(direction, instruction)\n elif instruction == 'M':\n coordinate = self._coordinate_after_moving(direction, coordinate)\n else:\n raise RoverException(ExceptionMessages.INVALID_INSTRUCTION)\n\n # This means we have processed all the instructions without exception\n # assign new direction and coordinates to rover\n rover.direction = direction\n rover.coordinate = coordinate", "def _calculate_move_location(self, direction):\n current_row = self._current_loc.get_row()\n current_column = self._current_loc.get_column()\n\n # Calculate the new location for a left move\n if (direction == \"l\"):\n return Location(current_row, current_column - 1)\n # Calculate the new location for an up move\n elif (direction == \"u\"):\n return Location(current_row - 1, current_column)\n # Calculate the new location for a right move\n elif (direction == \"r\"):\n return Location(current_row, current_column + 1)\n # Calculate the new location for a down move\n elif (direction == \"d\"):\n return Location(current_row + 1, current_column)\n return Location()", "def get_new_direction(self):\n\n if not self.direction:\n return self.board.down\n up, right, down, left = self.board.up, self.board.right, self.board.down, self.board.left\n (y, x) = self.head_cell.get_row_col()\n if self.direction is down and y == board.GAME_GRID_DIMENSIONS - 1:\n return left\n elif self.direction is left and x == 0:\n return up\n elif self.direction is right:\n if x == board.GAME_GRID_DIMENSIONS - 1:\n return down\n elif x == board.GAME_GRID_DIMENSIONS - 2 and y > 0:\n return up\n elif self.direction is up:\n return right if x == 0 else left\n\n return self.direction", "def reverse_direction(self) -> None:\n if self._direction == 'N':\n self._direction = 'S'\n elif self._direction == 'S':\n self._direction = 'N'\n elif self._direction == 'E':\n self._direction = 'W'\n else:\n self._direction = 'E'", "def from_str(cls: OrdinalDirection, d: str) -> OrdinalDirection:\n if d == 'N':\n return cls.NORTH\n elif d == 'S':\n return cls.SOUTH\n elif d == 'W':\n return cls.WEST\n elif d == 'E':\n return cls.EAST\n else:\n raise OrdinalDirectionError(value=d, message='Wrong cardinal direction.')", "def test_direction_to_command(self):\r\n generator = BlackBlockCommands(1, 2, 3, True)\r\n dx = (1, 0, -1, 0)\r\n dy = (0, 1, 0, -1)\r\n # 確認事項1.のテスト\r\n for 
pre_index in range(4):\r\n next_index = (pre_index + 1) % 4\r\n pre_dir = (dx[pre_index], dy[pre_index])\r\n next_dir = (dx[next_index], dy[next_index])\r\n self.assertEqual('r', generator.detect_direction(pre_dir, next_dir))\r\n self.assertEqual('l', generator.detect_direction(next_dir, pre_dir))\r\n # 確認事項2.のテスト\r\n for pre_index in range(4):\r\n next_index = (pre_index + 2) % 4\r\n pre_dir = (dx[pre_index], dy[pre_index])\r\n next_dir = (dx[next_index], dy[next_index])\r\n self.assertEqual('', generator.detect_direction(pre_dir, next_dir))\r\n self.assertEqual('', generator.detect_direction(next_dir, pre_dir))\r\n # 確認事項3.のテスト\r\n for i in range(4):\r\n dirction = (dx[i], dy[i])\r\n self.assertEqual('', generator.detect_direction(dirction, dirction))\r\n # 確認事項4.のテスト\r\n self.assertEqual('', generator.detect_direction((1, 2), (3, 4)))", "def _go(cls, point, direction, blocks):\n x, y = point\n if direction == 'N':\n y += blocks\n elif direction == 'S':\n y -= blocks\n elif direction == 'E':\n x += blocks\n elif direction == 'W':\n x -= blocks\n else:\n raise ValueError('invalid instructions: invalid direction')\n return x, y", "def direction(self, direction = None):\r\n if direction in [N, S, E, W]:\r\n self._direction = direction\r\n return self._direction", "def motorsDirection(self, direction):\n\n print (direction)\n if direction == 'r' or direction == 'R':\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)\n print (\"Direction reverse\")\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n self.motorDirection(self.motor2DirectionPin, self.motorForward)\n print (\"Direction forward\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a new coordinate after moving the rover. Based on the direction, it applies a movement of one grid cell and calculates the new coordinate. It throws an exception if the new coordinate is off the grid or if the new coordinate results in a collision with another rover
def _coordinate_after_moving(self, direction, coordinate): if direction == 'N': new_coordinate = Coordinate(coordinate.x, coordinate.y + 1) elif direction == 'S': new_coordinate = Coordinate(coordinate.x, coordinate.y - 1) elif direction == 'W': new_coordinate = Coordinate(coordinate.x - 1, coordinate.y) else: new_coordinate = Coordinate(coordinate.x + 1, coordinate.y) if not self._is_coordinate_in_the_grid(new_coordinate): raise RoverException(ExceptionMessages.OFF_GRID) if self._is_coordinate_occupied(new_coordinate): raise RoverException(ExceptionMessages.ROVER_COLLISION) return new_coordinate
[ "def move(self, rover_id, grid_point):", "def _calculate_move_location(self, direction):\n current_row = self._current_loc.get_row()\n current_column = self._current_loc.get_column()\n\n # Calculate the new location for a left move\n if (direction == \"l\"):\n return Location(current_row, current_column - 1)\n # Calculate the new location for an up move\n elif (direction == \"u\"):\n return Location(current_row - 1, current_column)\n # Calculate the new location for a right move\n elif (direction == \"r\"):\n return Location(current_row, current_column + 1)\n # Calculate the new location for a down move\n elif (direction == \"d\"):\n return Location(current_row + 1, current_column)\n return Location()", "def move(self, direction):\r\n #create moving direction vectors if the N S E W or a combination is entered\r\n if direction == \"N\":\r\n new = (0,-1)\r\n elif direction == \"S\":\r\n new = (0,1)\r\n elif direction == \"E\":\r\n new = (1,0)\r\n elif direction == \"W\":\r\n new = (-1,0)\r\n elif direction == \"NW\":\r\n new = (-1,-1)\r\n elif direction == \"NE\":\r\n new = (1,-1)\r\n elif direction == \"SE\":\r\n new = (1,1)\r\n elif direction == \"SW\":\r\n new = (-1,1)\r\n else:\r\n print(\"Invalid Command\")\r\n \r\n #create a copy of the map so as not to disturb the original \r\n g = copy.copy(self.map)\r\n #find the boat's index\r\n B = (self.boat.grid_x,self.boat.grid_y)\r\n #if the boat (x,y) + the new direction(x,y) is in the bounds of the map\r\n if B[0]+new[0] >= 0 and B[0]+new[0] < self.height and B[1]+new[1] >= 0 and B[1]+new[1] < self.width:\r\n #if the new position of the boat is navigable\r\n if g[B[0]+new[0]][B[1]+new[1]].navigable == True or g[B[0]+new[0]][B[1]+new[1]] == self.treasure:\r\n #if the new position is the target position then you win\r\n if g[B[0]+new[0]][B[1]+new[1]] == self.treasure:\r\n self.state = \"WON\"\r\n print(self.state)\r\n #move change the map's position to an dot on the old one and B on the new\r\n self.boat = g[B[0]+new[0]][B[1]+new[1]]\r\n #join the list back together the list we have been working on\r\n #set the map equal to the new one set boat equal to new position\r\n self.map = g\r\n else:\r\n print(\"Cannot Move here\")\r\n else:\r\n print(\"Cannot Move here\")", "def move(self, board, move_dir):\n if move_dir == \"right\":\n # failsafe: do not move through other cars on board\n if board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] == 'r':\n print(\"No movement!\")\n return board\n \n # give board correct new positions (characters)\n else:\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\"\n\n # change car objects positions\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) + 1)\n return board\n elif move_dir == \"left\": \n if board.positions[self.get_rows()[0]][self.get_cols()[0] - 1].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] == 'r':\n print(\"No movement!\")\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 2)] = \"x\"\n\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) - 1)\n return board\n elif move_dir == \"up\":\n 
#print(board.positions[self.get_rows()[0] - 1][self.get_cols()[0]])\n if board.positions[self.get_rows()[0] - 1][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n else:\n board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] = self.name[0]\n board.positions[self.get_rows()[1] + (self.size - 2)][self.get_cols()[0]] = \"x\"\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) - 1) + \".\" + str(self.get_cols()[0])\n\n #print(board)\n return board\n elif move_dir == \"down\": \n try: \n if board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n except IndexError:\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\" \n board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] = self.name[0]\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) + 1) + \".\" + str(self.get_cols()[0]) \n \n #print(self.position)\n #print(board)\n \n return board\n else:\n #print(\"NO MOVEMENT!\")\n return board", "def advance(self,distance = 1):\n colOffset = 0\n rowOffset = 0\n if self.currentOrientation == GridOrientation.left:\n colOffset = -1 * distance\n if self.currentOrientation == GridOrientation.right:\n colOffset = distance\n if self.currentOrientation == GridOrientation.down:\n rowOffset = -1 * distance\n if self.currentOrientation == GridOrientation.up:\n rowOffset = distance\n self.currentCol += colOffset\n self.currentRow += rowOffset\n \n #See if we've expanded the grid\n if self.currentCol > self.maxCol:\n self.maxCol = self.currentCol\n if self.currentCol < self.minCol:\n self.minCol = self.currentCol\n if self.currentRow > self.maxRow:\n self.maxRow = self.currentRow\n if self.currentRow < self.minRow:\n self.minRow = self.currentRow\n \n return self.getCoordinate()", "def apply_move(cell, x, y):\r\n x2 = (co_ords[cell])[0] + x\r\n y2 = (co_ords[cell])[1] + y\r\n return (x2, y2)", "def calc_new_pos_vec(\n self, current_position: Tuple[int, int, str], command: str\n ) -> Tuple[int, int, str]:\n if self.is_landing_position_valid():\n x_coord, y_coord, orientation = current_position\n else:\n raise Exception('Rover is off planet.')\n\n if not self.is_valid_grid():\n raise Exception('No planet for the rover to navigate on.')\n \n if command in self.ALLOWED_COMMANDS:\n if (command == 'F' and orientation == 'E') or (command == 'B' and orientation == 'W'):\n x_coord += 1\n elif (command == 'F' and orientation == 'W') or (command == 'B' and orientation == 'E'):\n x_coord -= 1\n elif (command == 'F' and orientation == 'N') or (command == 'B' and orientation == 'S'):\n y_coord += 1\n elif (command == 'F' and orientation == 'S') or (command == 'B' and orientation == 'N'):\n y_coord -= 1\n elif (command == 'L' and orientation == 'E') or (command == 'R' and orientation == 'W'):\n orientation = 'N'\n elif (command == 'L' and orientation == 'W') or (command == 'R' and orientation == 'E'):\n orientation = 'S'\n elif (command == 'L' and orientation == 'N') or (command == 'R' and orientation == 'S'):\n orientation = 'W'\n elif (command == 'L' and orientation == 'S') or (command == 'R' and orientation == 'N'):\n orientation = 'E'\n return self.wrap_around((x_coord, y_coord)) + (orientation,)\n else:\n raise Exception('Unrecognised command.')", "def 
new_move(self, grid_x, grid_y, player):\n #duplication /!\\\n if player == self.X:\n self.draw_X(grid_x, grid_y)\n self.board[grid_y][grid_x] = self.X\n\n elif player == self.O:\n self.draw_O(grid_x, grid_y)\n self.board[grid_y][grid_x] = self.O", "def tryMove(self, newPos):\r\n reward = 0\r\n done = False\r\n\r\n # Boundary check\r\n if newPos[0] < 0 or newPos[0] >= self.gridSize \\\r\n or newPos[1] < 0 or newPos[1] >= self.gridSize:\r\n return done, reward\r\n\r\n targetCell = self.grid.get(newPos[0], newPos[1])\r\n if targetCell == None:\r\n self.previous_cell = targetCell\r\n self.previous_pos = self.agentPos\r\n self.agentPos = newPos\r\n elif targetCell.type == 'goal' and isinstance(self.carrying, Flag):\r\n self.previous_cell = targetCell\r\n self.previous_pos = self.agentPos\r\n self.agentPos = newPos\r\n done = True\r\n reward = 1; #100 - self.stepCount\r\n elif targetCell.type == 'lava':\r\n done = True\r\n reward = -1000 - self.stepCount\r\n elif targetCell.canOverlap():\r\n self.previous_cell = targetCell\r\n self.previous_pos = self.agentPos\r\n self.agentPos = newPos\r\n if targetCell.canPickup() and self.carrying is None:\r\n self.carrying = targetCell\r\n #self.grid.set(*newPos, None)\r\n return done, reward", "def make_move(grid, n_columns, n_rows):\r\n # Generate the game grid to be manipulated\r\n new_grid = [[0] * (n_columns + 1) for i in range(n_rows + 1)]\r\n\r\n\r\n for i in range(n_rows):\r\n for j in range(n_columns):\r\n upper_left = grid[i-1][j-1] # neighbor to upper left of cell of interest\r\n upper = grid[i-1][j] # neighbor above cell of interest\r\n upper_right = grid[i-1][j+1] # neighbor to upper right of cell of interest\r\n left = grid[i][j-1] # neighbor to left of cell of interest\r\n right = grid[i][j+1] # neighbor to right of cell of interest\r\n bot_left = grid[i+1][j-1] # neighbor to bottom left cell of interest\r\n bot = grid[i+1][j] # neighbor below cell of interest\r\n bot_right = grid[i+1][j+1] # neighbor to bottom right of cell of interest\r\n\r\n # sum of the state of all neighbors\r\n on_neighbors = upper_left + upper + upper_right + left + right + bot_left + bot + bot_right\r\n\r\n # Any ON cell with fewer than two ON neighbors turns OFF\r\n if grid[i][j] == 1 and on_neighbors < 2:\r\n new_grid[i][j] = 0\r\n\r\n # Any ON cell with two or three ON neighbours stays ON\r\n elif grid[i][j] == 1 and (on_neighbors == 2 or on_neighbors == 3):\r\n new_grid[i][j] = 1\r\n\r\n # Any ON cell with more than three ON neighbors turns OFF\r\n elif grid[i][j] == 1 and on_neighbors > 3:\r\n new_grid[i][j] = 0\r\n\r\n # Any OFF cell with three ON neighbors turns ON\r\n elif grid[i][j] == 0 and on_neighbors == 3:\r\n new_grid[i][j] = 1\r\n\r\n return new_grid #manipulated game grid\r", "def move(direction, coordinates):\n x,y = coordinates[0],coordinates[1]\n #if elif elif else for the four directions\n if direction == 'L':\n x -= 1\n #add the other conditions!\n pass", "def get_move(self, curr_player):\n column_range = 'abcdefgh'\n row_range = '12345678'\n column_range_msg = \"Please enter a column from 'a' to 'h'.\"\n row_range_msg = \"Please enter a row from '1' to '8'.\"\n while True:\n raw_input = input(\"Enter player's move. 
Ex: 'd2 d4' to move a piece from d2 to d4.\\n\")\n cs = ''.join(raw_input.split()).lower()\n is_valid_format = (len(cs) == 4 and cs[0].isalpha() and cs[1].isdigit() \n and cs[2].isalpha() and cs[3].isdigit()) \n if cs[0] == '0' and cs[1] == '-' and cs[2] == '0' and len(cs) == 3: # Castling\n if curr_player == self.white:\n return (Coordinate(7,4), Coordinate(7,7), True)\n else:\n return (Coordinate(0,4), Coordinate(0,7), True)\n elif cs[0] == '0' and cs[1] == '-' and cs[2] == '0' and cs[3] == '-' and cs[4] == '0' and len(cs) == 5:\n if curr_player == self.white:\n return (Coordinate(7,4), Coordinate(7,0), True)\n else:\n return (Coordinate(0,4), Coordinate(0,0), True)\n elif not is_valid_format:\n print(\"Invalid format for move. Please enter a move in format 'd2 d4'.\\n\")\n continue\n elif cs[0] not in column_range or cs[2] not in column_range:\n print(\"Column not in range. Please enter column in range 'a' to 'h'.\")\n continue\n elif cs[1] not in row_range or cs[3] not in row_range:\n print(\"Row not in range. Please enter row in range '1' to '8'.\")\n continue\n elif cs[0] == cs[2] and cs[1] == cs[3]:\n print(\"Stationary move is not valid. Please pick another move.\")\n continue\n start_row = 8 - int(cs[1])\n start_col = ord(cs[0]) - ord('a')\n end_row = 8 - int(cs[3])\n end_col = ord(cs[2]) - ord('a')\n return (Coordinate(start_row, start_col), Coordinate(end_row, end_col), False)", "def move(self, row: int, col: int, player: int) -> int:", "def update_robot_pos(robot_pos, robot_dir, num_tiles):\n \n if robot_dir % 4 == NORTH:\n robot_pos[1] -= num_tiles\n elif robot_dir % 4 == EAST:\n robot_pos[0] += num_tiles\n elif robot_dir % 4 == SOUTH:\n robot_pos[1] += num_tiles\n elif robot_dir % 4 == WEST:\n robot_pos[0] -= num_tiles\n return robot_pos", "def move(self, direction):\n # We will add a new tile only if something has moved\n moved = False\n \n # We may extract a row or a column.\n loop_length = self._height + self._width \\\n - len(self._initial_tiles[direction])\n \n # Offsets for grid traversal\n row_off, col_off = OFFSETS[direction]\n \n for row, col in self._initial_tiles[direction]:\n # Computing positions of tiles to extract\n pos_list = [(row + index * row_off, \n col + index * col_off) \n for index in xrange(loop_length)]\n \n # Getting values from the grid and merging\n extracted_list = [self.get_tile(*pos) for pos in pos_list]\n merge_list = merge(extracted_list)\n \n # We modify the grid only if it has changed\n for pos, val_1, val_2 in zip(pos_list, extracted_list, merge_list):\n if val_1 - val_2:\n self.set_tile(*pos, value = val_2)\n moved = True\n \n # Any changes?\n if moved:\n self.new_tile()", "def move(coord, direction):\n vInc, hInc = dirToIncrement(direction)\n return (coord[0]+vInc, coord[1]+hInc)", "def reverse_opponents_move(self, move_coordinates, move_direction, opponent_move):\n\n board = self._game_board.get_grid()\n\n if opponent_move is None: # Checks if the opponent has not made a move yet\n return True\n\n if move_direction == \"R\" and opponent_move[1] == \"L\": # Checks if we're moving in the opposite direction\n if move_coordinates[0] == opponent_move[0][0]: #Check if in the same row\n index = move_coordinates[1]\n\n while index <= 6:\n\n # Checks for an empty space at the same point in the grid as the opponent just moved from\n if board[move_coordinates[0]][index] == \"X\" and index != opponent_move[0][1]:\n return True\n elif index == opponent_move[0][1]: # checks for the same grid pt at the end of the row\n return False\n\n index += 1\n 
return True\n\n elif move_direction == \"L\" and opponent_move[1] == \"R\": # Checks if we're moving in the opposite direction\n\n if move_coordinates[0] == opponent_move[0][0]: # Checks if we're in the same row\n index = move_coordinates[1]\n\n while index >= 0:\n\n # Checks for an empty space at the same point in the grid as the opponent just moved from\n if board[move_coordinates[0]][index] == \"X\" and index != opponent_move[0][1]:\n return True\n elif index == opponent_move[0][1]: # checks for the same grid pt at the beginning of the row\n return False\n\n index -= 1\n return True\n\n elif move_direction == \"B\" and opponent_move[1] == \"F\": # Checks if we're moving in the opposite direction\n if move_coordinates[1] == opponent_move[0][1]: # Checks if we're in the same column\n index = move_coordinates[0]\n\n while index <= 6:\n\n # Checks for an empty space at the same point in the grid as the opponent just moved from\n if board[index][move_coordinates[1]] == \"X\" and index != opponent_move[0][0]:\n return True\n elif index == opponent_move[0][0]: # checks for the same grid pt at the end of the column\n return False\n\n index += 1\n return True\n\n elif move_direction == \"F\" and opponent_move[1] == \"B\": # Checks if we're moving in the opposite direction\n if move_coordinates[1] == opponent_move[0][1]: # Checks if we're in the same column\n index = move_coordinates[0]\n\n while index >= 0:\n\n # Checks for an empty space at the same point in the grid as the opponent just moved from\n if board[index][move_coordinates[1]] == \"X\" and index != opponent_move[0][0]:\n return True\n elif index == opponent_move[0][0]: # Checks for the same grid pt at the beginning of the column\n return False\n\n index -= 1\n return True\n\n return True", "def result(self, move):\n row, col = self.blankLocation\n if(move == 'up'):\n newrow = row - 1\n newcol = col\n elif(move == 'down'):\n newrow = row + 1\n newcol = col\n elif(move == 'left'):\n newrow = row\n newcol = col - 1\n elif(move == 'right'):\n newrow = row\n newcol = col + 1\n else:\n raise \"Illegal Move\"\n\n # Create a copy of the current eightPuzzle\n newPuzzle = EightPuzzleState([0, 0, 0, 0, 0, 0, 0, 0, 0])\n newPuzzle.cells = [values[:] for values in self.cells]\n # And update it to reflect the move\n newPuzzle.cells[row][col] = self.cells[newrow][newcol]\n newPuzzle.cells[newrow][newcol] = self.cells[row][col]\n newPuzzle.blankLocation = newrow, newcol\n newPuzzle.parent = self\n newPuzzle.move = move\n return newPuzzle", "def get_move(self, board):\n # First, check if we can win in the next move\n winning_move = self.get_winning_move(board, self.letter)\n if winning_move is not None:\n return winning_move\n # Check if the player could win on their next move, and block them.\n blocking_move = self.get_winning_move(board, self.opponent_letter)\n if blocking_move is not None:\n return blocking_move\n # Try to take one of the corners, if they are free.\n corner_move = self.move_in_a_corner(board)\n if corner_move is not None:\n return corner_move\n # Try to take the center, if it is free.\n if board.size % 2 == 1:\n if board.is_position_availible(board.letters[board.size // 2]\n + board.numbers[board.size // 2]):\n return board.letters[board.size // 2] + board.numbers[board.size // 2]\n # Move on one of the sides.\n return self.choose_random_move_from_list(board, list(board.positions.keys()))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return any binary tree that matches the given preorder and postorder traversals. Values in the traversals pre and post are distinct positive integers.
def constructFromPrePost(self, pre, post): if not pre and not post: return None root = TreeNode(pre[0]) if len(pre) == 1 and len(post) == 1: return root if pre[1] == post[-2]: lpre, lpost = pre[1:], post[:len(post)-1] ltree = self.constructFromPrePost(lpre, lpost) root.left = ltree else: lpre = pre[1:pre.index(post[-2])] lpost = post[:post.index(pre[1]) + 1] rpre = pre[pre.index(post[-2]):] rpost = post[post.index(pre[1])+1:-1] ltree = self.constructFromPrePost(lpre, lpost) rtree = self.constructFromPrePost(rpre, rpost) root.left, root.right = ltree, rtree return root
[ "def constructFromPrePost(pre, post):\n \"\"\"\n # 17%\n def helper(l1, r1, l2, r2):\n cur = TreeNode(pre[l1])\n if l1 == r1: return cur\n a1, a2 = pre[l1 + 1], post[r2 - 1]\n nx = d_pre[a2]\n nt = d_post[a1]\n if a1 != a2:\n left = helper(l1 + 1, nx - 1, l2, nt)\n right = helper(nx, r1, nt + 1, r2 - 1)\n cur.left = left\n cur.right = right\n else:\n left = helper(l1 + 1, r1, l2, nt)\n cur.left = left\n return cur\n n = len(pre)\n d_pre = {v: k for k, v in enumerate(pre)}\n d_post = {v: k for k, v in enumerate(post)}\n return helper(0, n - 1, 0, n - 1)\n \"\"\"\n # 100%\n if not pre:\n return None\n cur = TreeNode(pre[0])\n if len(pre) == 1:\n return cur\n i = post.index(pre[1]) + 1\n cur.left = constructFromPrePost(pre[1:i+1], post[:i])\n cur.right = constructFromPrePost(pre[i+1:], post[i:-1])\n return cur", "def build_tree(preorder, inorder):\n\n # Base case\n if preorder == [] or inorder == []:\n return\n\n root = preorder[0]\n\n # Breaks the lists by root, left side, and right side\n in_index = inorder.index(root)\n in_left = inorder[:in_index]\n in_right = inorder[in_index + 1:]\n pre_left = preorder[1 : len(in_left) + 1]\n pre_right = preorder[len(in_left) + 1 :]\n\n # Recursively creates smaller binary trees to make a big binary tree\n tree = BinaryTree(root)\n tree.set_left(build_tree(pre_left, in_left))\n tree.set_right(build_tree(pre_right, in_right))\n\n return tree", "def bstFromPreorder(self, preorder: List[int]) -> TreeNode:\n lenList = len(preorder)\n stack = []\n\n if lenList != 0:\n stack.append(TreeNode(preorder[0]))\n root = stack[0]\n\n i = 1\n while i < lenList:\n node = TreeNode(preorder[i])\n if preorder[i] < stack[-1].val:\n stack[-1].left = node\n stack.append(node)\n else:\n tempNode = stack.pop()\n while stack and preorder[i] > stack[-1].val:\n tempNode = stack.pop()\n\n tempNode.right = node\n stack.append(node)\n\n i += 1\n\n return root", "def bstFromPreorder(preorder):\n n = len(preorder)\n if n == 0: return None\n base = preorder[0]\n root = TreeNode(base)\n i = 1\n while i < n and preorder[i] < base:\n i += 1\n left = bstFromPreorder(preorder[1:i])\n right = bstFromPreorder(preorder[i:])\n if left: root.left = left\n if right: root.right = right\n return root", "def postorder_traversal(root):\n if root is None:\n return []\n else:\n return postorder_traversal(rightchild(root)) +postorder_traversal(leftchild(root)) + [getvalue(root)]", "def test_postorder_single_node_tree():\n b_tree = BinarySearchTree()\n b_tree.insert(17)\n postorder_list = []\n for node in b_tree.in_order():\n postorder_list.append(node.value)\n assert postorder_list == [17]", "def postorder(tree):\n return _postorder(tree)", "def post_order_traversal(root):\n if root is None:\n return []\n left = root.get_left()\n right = root.get_right()\n val = root.get_val()\n returned = []\n returned.extend(post_order_traversal(left))\n returned.extend(post_order_traversal(right))\n\n returned.extend([val])\n return returned", "def test_postorder_no_nodes():\n b_tree = BinarySearchTree()\n postorder_list = []\n for node in b_tree.in_order():\n postorder_list.append(node.value)\n assert postorder_list == []", "def postorder_traversal(tree):\n post = '' # Handles the spaces between the postorder traversal\n # in the string\n\n # To make sure the function doesn't move on if it doesn't have\n # a left child, so it doesn't add to string if it is None\n if tree.get_left() != None:\n post += postorder_traversal(tree.get_left()) + ' '\n\n # To make sure the function doesn't move on if it doesn't have\n # a 
right child, so it doesn't add to string if it is None\n if tree.get_right() != None:\n post += postorder_traversal(tree.get_right()) + ' '\n\n # Prints the current value (this is all recursed in postorder)\n post += str(tree.get_val())\n\n return post", "def build_tree_from_preorder(values): \r\n \r\n if len(values) == 0 or values[0] == None:\r\n return None\r\n root = TreeNode(values[0])\r\n if len(values) == 1:\r\n return root\r\n root.left = build_tree_from_preorder(values[1:((len(values)-1) // 2 + 1)])\r\n root.right = build_tree_from_preorder(values[((len(values)-1) // 2 + 1):]) \r\n if root.left != None:\r\n root.left.parent = root\r\n if root.right != None:\r\n root.right.parent = root\r\n \r\n return root", "def test_postorder_weird_tree(weird_tree):\n postorder_list = []\n for node in weird_tree.post_order():\n postorder_list.append(node.value)\n assert postorder_list == [2, 49, 48, 44, 102, 103, 100, 90, 83, 80, 79, 50]", "def postorderTraversal(root):\n \"\"\"\n # 递归\n def helper(p):\n if p:\n helper(p.p)\n helper(p.right)\n res.append(p.val)\n res = []\n helper(root)\n return res\n \"\"\"\n \"\"\"\n # 迭代 倒排\n res = []\n if root:\n stk = [root]\n while stk:\n root = stk.pop()\n if root.left:\n stk.append(root.left)\n if root.right:\n stk.append(root.right)\n res.append(root.val)\n return res[::-1]\n \"\"\"\n # 迭代 记录访问位置\n res = []\n if root:\n stk = []\n p, q = root, None\n while stk or p:\n if p:\n stk.append(p)\n p = p.left\n else:\n p = stk[-1]\n if not p.right or p.right == q:\n res.append(p.val)\n q = p\n stk.pop()\n p = None\n else:\n p = p.right\n return res", "def preorder(tree):\n return _preorder(tree)", "def pre_order_traversal(root):\n if root is None:\n return []\n left = root.get_left()\n right = root.get_right()\n val = root.get_val()\n \n returned = [val]\n returned.extend(pre_order_traversal(left))\n returned.extend(pre_order_traversal(right))\n\n return returned", "def test_preorder_single_node_tree():\n b_tree = BinarySearchTree()\n b_tree.insert(17)\n preorder_list = []\n for node in b_tree.in_order():\n preorder_list.append(node.value)\n assert preorder_list == [17]", "def preorder_traversal(root):\n if root is None:\n return []\n else:\n return preorder_traversal(leftchild(root)) + [getvalue(root)] + preorder_traversal(rightchild(root))", "def pre_order_traversal(treeNode):\n\t\tif treeNode != None:\n\t\t\tvisit(node)\n\t\t\tpre_order_traversal(node.left)\n\t\t\tpre_order_traversal(node.right)", "def test_preorder_no_nodes():\n b_tree = BinarySearchTree()\n preorder_list = []\n for node in b_tree.in_order():\n preorder_list.append(node.value)\n assert preorder_list == []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create and return an instance of the Isort plugin.
def setup_isort_tool_plugin(custom_rsc_path=None): arg_parser = argparse.ArgumentParser() if custom_rsc_path is not None: resources = Resources([custom_rsc_path]) else: resources = Resources( [os.path.join(os.path.dirname(statick_tool.__file__), "plugins")] ) config = Config(resources.get_file("config.yaml")) plugin_context = PluginContext(arg_parser.parse_args([]), resources, config) plugin_context.args.output_directory = os.path.dirname(__file__) itp = IsortToolPlugin() itp.set_plugin_context(plugin_context) return itp
[ "def sorter(Plugin):\n return Plugin.order", "def create_plugin(self, **kwargs):\n return self.plugin_class(**kwargs)", "def addSortMethod(*args, **kwargs):\n return mundane_xbmcplugin.addSortMethod(*args, **kwargs)", "def __init__(self, plugin):\n self.plugin = plugin", "def with_sort(self, ami_attribute, descending=False):\n self.sorter = AMISorter(ami_attribute, descending)\n \n return self", "def new(self, sort, properties=None):\n if sort is None:\n sort = UNKNOWNSORT\n # find next available vid\n vid, index = self.vid, self.index\n while vid in index:\n vid += 1\n varstring = '{}{}'.format(sort, vid)\n index[vid] = varstring\n if properties is None:\n properties = []\n self.store[varstring] = properties\n self.vid = vid + 1\n return (varstring, properties)", "def create_r2plugin(self, **kwargs):\n return self.create_tool(cls=R2Plugin, **kwargs)", "def getInstance(config):\n return Plugin(config)", "def _create_plugin(self, page=None, language=None, **plugin_params):\n if page is None:\n page = self.page\n if language is None:\n language = 'en'\n\n placeholder = page.placeholders.all()[0]\n plugin = api.add_plugin(\n placeholder, self.plugin_to_test, language, **plugin_params)\n page.publish(language)\n return plugin", "def __new__(mcls, name, bases, namespace): # @NoSelf - 'mcls' is SortinfoMeta, 'cls' is the new class\n # Check that the namespace is compliant\n if '__slots__' not in namespace:\n raise PydmrsError('Subclasses of Sortinfo must define __slots__')\n if 'features' in namespace:\n raise PydmrsError(\"Subclasses of Sortinfo must not define a 'features' attribute\")\n \n # Force all feature names to be lowercase\n namespace['__slots__'] = tuple(feat.lower() for feat in namespace['__slots__'])\n \n # Create the class, and add the 'features' attribute\n cls = super().__new__(mcls, name, bases, namespace)\n cls.features = tuple(chain.from_iterable(getattr(parent, '__slots__', ())\n for parent in reversed(cls.__mro__)))\n \n # Sortinfo defines a from_normalised_dict method which calls either EventSortinfo or InstanceSortinfo\n # Subclasses need to override this method\n if 'from_normalised_dict' not in namespace:\n cls.from_normalised_dict = cls._from_normalised_dict\n \n return cls", "def test_isort_tool_plugin_found():\n if sys.version_info.major == 3 and sys.version_info.minor < 6:\n pytest.skip(\"isort is only available for Python 3.6+, unable to test\")\n manager = PluginManager()\n # Get the path to statick_tool/__init__.py, get the directory part, and\n # add 'plugins' to that to get the standard plugins dir\n manager.setPluginPlaces(\n [os.path.join(os.path.dirname(statick_tool.__file__), \"plugins\")]\n )\n manager.setCategoriesFilter(\n {\n \"Tool\": ToolPlugin,\n }\n )\n manager.collectPlugins()\n # Verify that a plugin's get_name() function returns \"isort\"\n assert any(\n plugin_info.plugin_object.get_name() == \"isort\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )\n # While we're at it, verify that a plugin is named Isort Tool Plugin\n assert any(\n plugin_info.name == \"Isort Tool Plugin\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )", "def enableSort(self):", "def new(self, plugin, *args, **kwargs):\n if plugin in self.modules.keys():\n return self.modules[plugin](*args, **kwargs)", "def create_plugin(self, **kwargs):\n plugin_args = self.get_plugin_params()\n if kwargs:\n plugin_args.update(kwargs)\n return self._create_plugin(**plugin_args)", "def __generatePluginObject( self, plugin ):\n try:\n plugModule = __import__( 
self.pluginLocation, globals(), locals(), ['TransformationPlugin'] )\n except ImportError, e:\n gLogger.exception( \"__generatePluginObject: Failed to import 'TransformationPlugin' %s: %s\" % ( plugin, e ) )\n return S_ERROR()\n try:\n plugin_o = getattr( plugModule, 'TransformationPlugin' )( '%s' % plugin,\n transClient = self.transDB,\n replicaManager = self.rm )\n return S_OK( plugin_o )\n except AttributeError, e:\n gLogger.exception( \"__generatePluginObject: Failed to create %s(): %s.\" % ( plugin, e ) )\n return S_ERROR()", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .eco_valuator import EcoValuatorPlugin\n return EcoValuatorPlugin()", "def _buildSortDict(cls):\r\n cls._SORT_DICT = {}\r\n for elt in cls._SORT_OPTIONS:\r\n cls._SORT_DICT[elt[0]] = elt[2]", "def _make_sorter(self, ax):\n np_array = ax.get_values()\n # return np_array.argsort()\n # ax = ax.take(indexer)\n sorter = RocRadixSortDriver()\n sorted_array, indices = sorter.sort_with_indices(np_array)\n return sorted_array, indices", "def cls(self, full_path, GET):\n request = Mock()\n request.GET = GET\n request.get_full_path.return_value = full_path\n\n from moztrap.view.lists.sort import Sort\n return Sort(request)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the plugin manager can find the Isort plugin.
def test_isort_tool_plugin_found(): if sys.version_info.major == 3 and sys.version_info.minor < 6: pytest.skip("isort is only available for Python 3.6+, unable to test") manager = PluginManager() # Get the path to statick_tool/__init__.py, get the directory part, and # add 'plugins' to that to get the standard plugins dir manager.setPluginPlaces( [os.path.join(os.path.dirname(statick_tool.__file__), "plugins")] ) manager.setCategoriesFilter( { "Tool": ToolPlugin, } ) manager.collectPlugins() # Verify that a plugin's get_name() function returns "isort" assert any( plugin_info.plugin_object.get_name() == "isort" for plugin_info in manager.getPluginsOfCategory("Tool") ) # While we're at it, verify that a plugin is named Isort Tool Plugin assert any( plugin_info.name == "Isort Tool Plugin" for plugin_info in manager.getPluginsOfCategory("Tool") )
[ "def test_plugins(self):\n from omtk import plugin_manager\n pm = plugin_manager.plugin_manager\n\n loaded_plugin_names = [plugin.cls.__name__ for plugin in pm.get_loaded_plugins_by_type('modules')]\n\n builtin_plugin_names = (\n 'Arm',\n 'FK',\n 'AdditiveFK',\n 'AvarGrpOnSurface',\n 'FaceBrow',\n 'FaceEyeLids',\n 'FaceEyes',\n 'FaceJaw',\n 'FaceLips',\n 'FaceNose',\n 'FaceSquint',\n 'Hand',\n 'Head',\n 'IK',\n 'InteractiveFK',\n 'Leg',\n 'LegQuad',\n 'Limb',\n 'Neck',\n 'Ribbon',\n 'SplineIK',\n 'Twistbone',\n )\n\n for plugin_name in builtin_plugin_names:\n self.assertIn(plugin_name, loaded_plugin_names)", "def test_plugin_initialize(self):\n p = PluginCustom()\n self.assertEqual('youpie', p.toto)", "def test_discoverable(self):\r\n plugins = getPlugins(IProcessor)\r\n lmath = [p for p in plugins if p.name == \"mlore\"]\r\n self.assertEqual(len(lmath), 1, \"Did not find math lore plugin: %r\" % (lmath,))", "def test_make_tool_plugin_found():\n manager = PluginManager()\n # Get the path to statick_tool/__init__.py, get the directory part, and\n # add 'plugins' to that to get the standard plugins dir\n manager.setPluginPlaces([os.path.join(os.path.dirname(statick_tool.__file__),\n 'plugins')])\n manager.setCategoriesFilter({\n \"Tool\": ToolPlugin,\n })\n manager.collectPlugins()\n # Verify that a plugin's get_name() function returns \"make\"\n assert any(plugin_info.plugin_object.get_name() == 'make' for\n plugin_info in manager.getPluginsOfCategory(\"Tool\"))\n # While we're at it, verify that a plugin is named Yamllint Tool Plugin\n assert any(plugin_info.name == 'Make Tool Plugin' for\n plugin_info in manager.getPluginsOfCategory(\"Tool\"))", "def test_pytest_cases_plugin_installed(request):\n assert request.session._fixturemanager.getfixtureclosure.func.__module__ == 'pytest_cases.plugin'", "def test_register_dynamic_plugin(self):\n pass", "def test_rstlint_tool_plugin_found():\n manager = PluginManager()\n # Get the path to statick_tool/__init__.py, get the directory part, and\n # add 'plugins' to that to get the standard plugins dir\n manager.setPluginPlaces(\n [os.path.join(os.path.dirname(statick_tool.__file__), \"plugins\")]\n )\n manager.setCategoriesFilter(\n {\n \"Tool\": ToolPlugin,\n }\n )\n manager.collectPlugins()\n # Verify that a plugin's get_name() function returns \"rstlint\"\n assert any(\n plugin_info.plugin_object.get_name() == \"rstlint\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )\n # While we're at it, verify that a plugin is named rstlint Tool Plugin\n assert any(\n plugin_info.name == \"rstlint Tool Plugin\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )", "def test_register_dynamic_plugin_manager(self):\n pass", "def test_available_plugins_connector():\n plugins = PluginConnector.available_plugins()\n expected_plugins = [e.name for e in LightningCustomPlugins]\n assert plugins == expected_plugins", "def _is_plugin_installed(self):", "def verifyPlugin(pluginName, requiredBy):\n\ttry: ampy.indexOf(pluginName, __allPlugins__)\n\texcept:\n\t\ttry:\n\t\t\tcmds.loadPlugin('%s.py'%pluginName)\n\t\t\tcmds.pluginInfo('%s.py'%pluginName, e=True, autoload=True)\n\t\texcept:\n\t\t\tsys.stderr.write('Error: Could not locate the %s plug-in. 
It is required for the %s module.\\nPlease ensure that %s.py is located somewhere in your plug-in path.\\n' \n\t\t\t\t%(pluginName, os.path.basename(requiredBy), pluginName))", "def test_discovery() -> None:\n assert AxSweeper.__name__ in [\n x.__name__ for x in Plugins.instance().discover(Sweeper)\n ]", "def test_available_plugins_trainer():\n plugins = Trainer.available_plugins()\n expected_plugins = [e.name for e in LightningCustomPlugins]\n assert plugins == expected_plugins", "def test_isort_tool_plugin_parse_valid():\n itp = setup_isort_tool_plugin()\n total_output = []\n output = \"/tmp/x.py\"\n total_output.append(output)\n output = \"/tmp/y.py\"\n total_output.append(output)\n issues = itp.parse_output(total_output)\n assert len(issues) == 2\n assert issues[0].filename == \"/tmp/x.py\"\n assert issues[0].line_number == \"0\"\n assert issues[0].tool == \"isort\"\n assert issues[0].issue_type == \"formatting\"\n assert issues[0].severity == \"3\"\n assert issues[0].message == \"Imports are incorrectly sorted and/or formatted.\"\n assert issues[1].filename == \"/tmp/y.py\"", "def test_plugin_retrieval(self):\n plugin = ProjectAppPluginPoint.get_plugin(PLUGIN_NAME)\n self.assertIsNotNone(plugin)\n self.assertEqual(plugin.get_model().name, PLUGIN_NAME)\n self.assertEqual(plugin.name, PLUGIN_NAME)\n self.assertEqual(plugin.get_model().title, PLUGIN_TITLE)\n self.assertEqual(plugin.title, PLUGIN_TITLE)\n self.assertEqual(plugin.entry_point_url_id, PLUGIN_URL_ID)", "def test_register_dynamic_plugin_manager1(self):\n pass", "def test_register_dynamic_plugin1(self):\n pass", "def is_plugin():\n return \"__plugins__\" in __name__", "def test_remote_plugin(self):\n plugin_name = 'Slack'\n Plugin.download_plugin(plugin_name)\n target = os.path.join(expanduser('~'), '.drupdates', 'plugins', plugin_name)\n plugins = Plugin.get_plugins()\n assert plugins[plugin_name]['info'][1] == os.path.join(target, '__init__.py')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that we can parse the normal output of isort.
def test_isort_tool_plugin_parse_valid(): itp = setup_isort_tool_plugin() total_output = [] output = "/tmp/x.py" total_output.append(output) output = "/tmp/y.py" total_output.append(output) issues = itp.parse_output(total_output) assert len(issues) == 2 assert issues[0].filename == "/tmp/x.py" assert issues[0].line_number == "0" assert issues[0].tool == "isort" assert issues[0].issue_type == "formatting" assert issues[0].severity == "3" assert issues[0].message == "Imports are incorrectly sorted and/or formatted." assert issues[1].filename == "/tmp/y.py"
[ "def isort(ctx):\n ctx.run(tools.render_isort_check(ctx))", "def isort(context):\n exec_cmd = \"isort . --check --diff\"\n run_cmd(context, exec_cmd)", "def test_shell(self):\n integers = shell_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def isort(c):\n c.run(\"isort -rc .\")", "def isort(command, checkonly=False):\n print(\n \"\"\"\nRunning isort the Python code import sorter\n===========================================\n\"\"\"\n )\n cmd = \"isort --check-only --diff .\" if checkonly else \"isort .\"\n command.run(cmd, echo=True, pty=POSIX)", "def test_version_sorting(self):\n assert natsort(['1', '5', '10', '50']) == ['1', '5', '10', '50']", "def test_errors_sorted(self):\n lines_and_cols = [self.extract_line_col(e) for e in lint(self.text)]\n assert sorted(lines_and_cols) == lines_and_cols", "def test_sort_strings(self):\n chdir(REPO_ROOT)\n cmd = [\"python\", \"scripts/sort_strings.py\", \"--check\"]\n print(\"running:\", \" \".join(str(part) for part in cmd))\n proc = run(cmd, capture_output=True)\n assert proc.returncode == 0, (\n f\"sort strings issues:\\n{proc.stdout.decode('utf-8')}\\n\\n\"\n \"Please run ./scripts/sort_string.py to resolve this issue.\"\n )", "def test_rust_code_analysis_tokei_Rust() -> None:\n\n ret_value = compare(\n \"rust-code-analysis\",\n \"tokei\",\n [\"-g\", \"-f\"],\n [\"SLOC\", \"PLOC\", \"CLOC\", \"BLANK\"],\n \"Rust\",\n \"bubble_sort.rs\",\n )\n\n assert ret_value == 0", "def testUnsorted(self):\n expected = [1, 8, 33, 43, 94]\n actual = insertion_sort(self.unsorted)\n self.assertEqual(expected, actual)", "def test_isort_tool_plugin_found():\n if sys.version_info.major == 3 and sys.version_info.minor < 6:\n pytest.skip(\"isort is only available for Python 3.6+, unable to test\")\n manager = PluginManager()\n # Get the path to statick_tool/__init__.py, get the directory part, and\n # add 'plugins' to that to get the standard plugins dir\n manager.setPluginPlaces(\n [os.path.join(os.path.dirname(statick_tool.__file__), \"plugins\")]\n )\n manager.setCategoriesFilter(\n {\n \"Tool\": ToolPlugin,\n }\n )\n manager.collectPlugins()\n # Verify that a plugin's get_name() function returns \"isort\"\n assert any(\n plugin_info.plugin_object.get_name() == \"isort\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )\n # While we're at it, verify that a plugin is named Isort Tool Plugin\n assert any(\n plugin_info.name == \"Isort Tool Plugin\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )", "def main():\n\ta = sys.argv[1:]\n\tsort(a)\n\tassert is_sorted(a)\n\t_show(a)", "def test_compare(self):\n parser = parse_args(['-g', '10', '-s', 'bubble', '-c'])\n self.assertTrue(parser.compare)\n self.assertEqual(True, parser.compare)\n\n parser = parse_args(['-g', '10', '-s', 'bubble'])\n self.assertEqual(False, parser.compare)", "def test_natsort(self):\r\n # string with alpha and numerics sort correctly\r\n s = 'sample1 sample2 sample11 sample12'.split()\r\n self.assertEqual(natsort(s),\r\n 'sample1 sample2 sample11 sample12'.split())\r\n s.reverse()\r\n self.assertEqual(natsort(s),\r\n 'sample1 sample2 sample11 sample12'.split())\r\n self.assertEqual(natsort(list('cba321')), list('123abc'))\r\n\r\n # strings with alpha only sort correctly\r\n self.assertEqual(natsort(list('cdba')), list('abcd'))\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort(['11', '2', '1', '0']),\r\n ['0', '1', '2', '11'])\r\n\r\n # strings of floats sort correctly\r\n self.assertEqual(natsort(['1.11', '1.12', '1.00', 
'0.009']),\r\n ['0.009', '1.00', '1.11', '1.12'])\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(\r\n natsort([('11', 'A'), ('2', 'B'), ('1', 'C'), ('0', 'D')]),\r\n [('0', 'D'), ('1', 'C'), ('2', 'B'), ('11', 'A')])", "def test_sort_feeds_data_valid(self):\n expected_result = [self.unsorted_data[2],\n self.unsorted_data[0],\n self.unsorted_data[1]]\n result = sort_feeds_data(self.unsorted_data, self.timestamp)\n self.assertEqual(result, expected_result)", "def test_parse_valid_output(self):\n test_output_path = os.path.join(TEST_FILES_PATH,\n 'example_crash_fuzzer_output.txt')\n test_summary_path = os.path.join(TEST_FILES_PATH, 'bug_summary_example.txt')\n with tempfile.TemporaryDirectory() as tmp_dir:\n with open(test_output_path, 'r') as test_fuzz_output:\n cifuzz.parse_fuzzer_output(test_fuzz_output.read(), tmp_dir)\n result_files = ['bug_summary.txt']\n self.assertCountEqual(os.listdir(tmp_dir), result_files)\n\n # Compare the bug summaries.\n with open(os.path.join(tmp_dir, 'bug_summary.txt'), 'r') as bug_summary:\n detected_summary = bug_summary.read()\n with open(os.path.join(test_summary_path), 'r') as bug_summary:\n real_summary = bug_summary.read()\n self.assertEqual(detected_summary, real_summary)", "def test_invalid_sort():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n LINE bigfoot X date SORT OOPS Y classification COUNT\n \"\"\"\n\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def test_sort(self):\n self.assertEqual(trouble_sort([8, 9, 7]), [7, 9, 8])\n self.assertEqual(trouble_sort([5, 6, 6, 4, 3]), [3, 4, 5, 6, 6])", "def _is_lexsorted(self) -> bool:\n return self._lexsort_depth == self.nlevels" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test what happens when a CalledProcessError is raised (usually means isort hit an error).
def test_isort_tool_plugin_scan_calledprocesserror(mock_subprocess_check_output): mock_subprocess_check_output.side_effect = subprocess.CalledProcessError( 0, "", output="mocked error" ) itp = setup_isort_tool_plugin() package = Package( "valid_package", os.path.join(os.path.dirname(__file__), "valid_package") ) package["python_src"] = [ os.path.join(os.path.dirname(__file__), "valid_package", "sample.py") ] issues = itp.scan(package, "level") assert len(issues) == 1
[ "def check_returncode(self):\n if self.returncode != 0:\n raise CalledProcessError(self.returncode, self.args, self.stdout)", "def check_returncode(self):\n if self.returncode:\n raise CalledProcessError(self.returncode, self.args, self.stdout,\n self.stderr)", "def check_returncode(self):\n if self.returncode:\n raise CalledProcessError(self.returncode, self.args, self.stdout,\n self.stderr)", "def test_process_exception(self):\n process = Process('test', [\n get_mock_step(reads_input=False),\n get_mock_step(raises_exception=True)\n ])\n\n with self.assertRaises(ProcessException):\n process.run(self._ctx)", "def test_run_called_process_error(mock_subprocess_check_output):\n mock_subprocess_check_output.side_effect = subprocess.CalledProcessError(\n 1, \"\", output=\"mocked error\"\n )\n args = Args(\"Statick tool\")\n args.parser.add_argument(\"--path\", help=\"Path of package to scan\")\n\n statick = Statick(args.get_user_paths())\n statick.gather_args(args.parser)\n sys.argv = [\n \"--output-directory\",\n os.path.dirname(__file__),\n \"--path\",\n os.path.dirname(__file__),\n ]\n parsed_args = args.get_args(sys.argv)\n path = parsed_args.path\n statick.get_config(parsed_args)\n statick.get_exceptions(parsed_args)\n issues, _ = statick.run(path, parsed_args)\n for tool in issues:\n assert not issues[tool]\n try:\n shutil.rmtree(os.path.join(os.path.dirname(__file__), \"statick-default\"))\n except OSError as ex:\n print(f\"Error: {ex}\")", "def test_make_tool_plugin_scan_calledprocesserror(mock_subprocess_check_output):\n mock_subprocess_check_output.side_effect = subprocess.CalledProcessError(1, '', output=\"mocked error\")\n mtp = setup_make_tool_plugin()\n package = Package('valid_package', os.path.join(os.path.dirname(__file__),\n 'valid_package'))\n package['make_targets'] = 'make_targets'\n issues = mtp.scan(package, 'level')\n assert not issues", "def test_004_Process_raiseexception(self):\n\n ## Raise the exception that we expect CommandProcessor.Process\n ## to raise in a bit, so that we know what the exception message\n ## looks like and don't have to duplicate it here in the test\n ## and make the test fragile.\n try:\n RaiseException().DoIt(None)\n except vim.fault.InvalidState, e:\n exceptionMsg = e.msg\n\n CommandProcessor.Process(host=self.host, args=['raiseexception'])\n\n if hasattr(sys.stderr, 'getvalue'):\n self.assertEqual(sys.stderr.getvalue(),\n 'vim.fault.InvalidState: %s\\n' % exceptionMsg)", "def test_uncaught_exception_bubbles(self):\n\n try:\n pobj = SCons.Action._subproc(\n Environment(),\n None,\n stdin='devnull',\n stderr='devnull',\n stdout=subprocess.PIPE,\n )\n pobj.wait()\n except EnvironmentError:\n pass\n except Exception:\n # pass the test\n return\n\n raise Exception(\"expected a non-EnvironmentError exception\")", "def test_execute_command_error(loqus_exe_app, monkeypatch):\n\n # GIVEN a mocked subprocess that gives error\n def mocksubprocess(*args, **kwargs):\n raise subprocess.CalledProcessError(None, None)\n\n monkeypatch.setattr(subprocess, \"check_output\", mocksubprocess)\n\n with loqus_exe_app.app_context():\n # THEN Executing a command will catch the exception and return an empty dict\n assert {} == loqusdb.get_variant({\"_id\": \"a variant\"})", "def test_pypgtap_subprocess_error(self):\n with self.assertRaises(PyPGTAPSubprocessError) as info:\n raise PyPGTAPSubprocessError(\n 'Error encountered', rc=1, cmd='testcommand -e')\n exception = info.exception\n self.assertEqual(exception.msg, 'Error encountered')\n 
self.assertEqual(exception.rc, 1)\n self.assertEqual(exception.cmd, 'testcommand -e')", "def test_cdk_deploy_raise_called_process_error(\n self,\n mocker: MockerFixture,\n runway_context: RunwayContext,\n tmp_path: Path,\n ) -> None:\n mocker.patch.object(CloudDevelopmentKit, \"gen_cmd\")\n mocker.patch(\n f\"{MODULE}.run_module_command\", side_effect=CalledProcessError(1, \"\")\n )\n with pytest.raises(CalledProcessError):\n CloudDevelopmentKit(runway_context, module_root=tmp_path).cdk_deploy()", "def test_cdk_diff_catch_called_process_error_sys_exit(\n self,\n mocker: MockerFixture,\n return_code: int,\n runway_context: RunwayContext,\n tmp_path: Path,\n ) -> None:\n mocker.patch.object(CloudDevelopmentKit, \"gen_cmd\")\n mocker.patch(\n f\"{MODULE}.run_module_command\",\n side_effect=CalledProcessError(return_code, \"\"),\n )\n with pytest.raises(SystemExit) as excinfo:\n CloudDevelopmentKit(runway_context, module_root=tmp_path).cdk_diff()\n assert excinfo.value.args == (return_code,)", "def called_process_error2exit_decorator(func):\n @functools.wraps(func)\n def func_wrapper(*args, **kwargs):\n try:\n func(*args, **kwargs)\n except subprocess.CalledProcessError as e:\n print(\"{err}:\\n{msg}\".format(err=str(e), msg=e.output))\n sys.exit(1)\n return func_wrapper", "def test_start_args(self, mocked_check, mocked_proc):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n info_source = self.supervisor.supvisors.info_source\n info_source.update_extra_args.side_effect = KeyError\n info_source.supervisor_rpc_interface.startProcess.side_effect = [\n RPCError(Faults.NO_FILE, 'no file'),\n RPCError(Faults.NOT_EXECUTABLE),\n RPCError(Faults.ABNORMAL_TERMINATION),\n 'done']\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with extra arguments and a process that is not compliant\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', 'dummy arguments')\n self.assertEqual(Faults.BAD_EXTRA_ARGUMENTS, exc.exception.code)\n self.assertEqual(\"BAD_EXTRA_ARGUMENTS: rules for namespec appli:proc\"\n \" are not compatible with extra arguments in command line\",\n exc.exception.text)\n self.assertEqual(0, mocked_check.call_count)\n self.assertEqual(0, info_source.update_extra_args.call_count)\n self.assertEqual(0, info_source.supervisor_rpc_interface.startProcess.call_count)\n # test RPC call with extra arguments and a process that is compliant\n # but unknown in Supervisor\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', 'dummy arguments')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual(\"BAD_NAME: namespec appli:proc unknown in this Supervisor instance\",\n exc.exception.text)\n self.assertEqual([call('appli:proc', 'dummy arguments')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual(0, info_source.supervisor_rpc_interface.startProcess.call_count)\n info_source.update_extra_args.reset_mock()\n info_source.update_extra_args.side_effect = None\n # test RPC call with start exceptions\n mocked_proc.side_effect = None\n mocked_proc.return_value = None, None\n # NO_FILE exception triggers an update of the process state\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc')\n self.assertEqual(Faults.NO_FILE, exc.exception.code)\n self.assertEqual(\"NO_FILE: no file\", exc.exception.text)\n self.assertEqual([call('appli:proc', '')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual([call('appli:proc', True)],\n 
info_source.supervisor_rpc_interface.startProcess.call_args_list)\n self.assertEqual([call('appli:proc', 'NO_FILE: no file')],\n info_source.force_process_fatal.call_args_list)\n # reset patches\n info_source.update_extra_args.reset_mock()\n info_source.force_process_fatal.reset_mock()\n info_source.supervisor_rpc_interface.startProcess.reset_mock()\n # NOT_EXECUTABLE exception triggers an update of the process state\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', wait=False)\n self.assertEqual(Faults.NOT_EXECUTABLE, exc.exception.code)\n self.assertEqual(\"NOT_EXECUTABLE\", exc.exception.text)\n self.assertEqual([call('appli:proc', '')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual([call('appli:proc', False)],\n info_source.supervisor_rpc_interface.startProcess.call_args_list)\n self.assertEqual([call('appli:proc', 'NOT_EXECUTABLE')],\n info_source.force_process_fatal.call_args_list)\n # reset patches\n info_source.update_extra_args.reset_mock()\n info_source.force_process_fatal.reset_mock()\n info_source.supervisor_rpc_interface.startProcess.reset_mock()\n # other exception doesn't trigger an update of the process state\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', wait=False)\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual(\"ABNORMAL_TERMINATION\", exc.exception.text)\n self.assertEqual([call('appli:proc', '')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual([call('appli:proc', False)],\n info_source.supervisor_rpc_interface.startProcess.call_args_list)\n self.assertEqual(0, info_source.force_process_fatal.call_count)\n # reset patches\n info_source.update_extra_args.reset_mock()\n info_source.supervisor_rpc_interface.startProcess.reset_mock()\n # finally, normal behaviour\n self.assertEqual('done', rpc.start_args('appli:proc'))", "def test_startProcessUnknownKeyError(self):\r\n self.assertRaises(KeyError, self.pm.startProcess, \"foo\")", "def test_not_running(): # pragma: windows\n comm_kwargs = dict(comm='IPCComm', direction='send', reverse_names=True)\n nt.assert_raises(RuntimeError, new_comm, 'test', **comm_kwargs)", "def test_cdk_list_raise_called_process_error(\n self,\n fake_process: FakeProcess,\n mocker: MockerFixture,\n runway_context: RunwayContext,\n tmp_path: Path,\n ) -> None:\n mock_gen_cmd = mocker.patch.object(\n CloudDevelopmentKit, \"gen_cmd\", return_value=[\"list\"]\n )\n fake_process.register_subprocess(\n mock_gen_cmd.return_value,\n returncode=1,\n )\n with pytest.raises(CalledProcessError):\n CloudDevelopmentKit(runway_context, module_root=tmp_path).cdk_list()\n assert fake_process.call_count(mock_gen_cmd.return_value) == 1", "def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)", "def print_unable_to_run(exc: \"CalledProcessError\"):\n _print(str(exc), level=MessageLevel.QUIET)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the release history from pypi. Use the json API to get the release history from pypi. The returned json structure includes a 'releases' dictionary whose keys are release numbers and whose values are arrays of uploaded files. While we don't have a 'release time' per se (only the upload time on each of the files), we'll consider the timestamp on the first source file found (which will typically be a .zip or tar.gz) to be the 'release time'. This is inexact, but should be close enough for our purposes.
def get_releases_for_package(name, since): f = urlreq.urlopen("http://pypi.org/project/%s/json" % name) jsondata = f.read() data = json.loads(jsondata) releases = [] for relname, rellist in data['releases'].iteritems(): for rel in rellist: if rel['python_version'] == 'source': when = _parse_pypi_released(rel['upload_time']) # for speed, only care about when > since if when < since: continue releases.append( Release( name, relname, rel['filename'], when)) break return releases
[ "def list_releases():\n response = requests.get(PYPI_URL.format(package=PYPI_PACKAGE_NAME))\n if response:\n data = response.json()\n\n releases_dict = data.get('releases', {})\n\n if releases_dict:\n for version, release in releases_dict.items():\n release_formats = []\n published_on_date = None\n for fmt in release:\n release_formats.append(fmt.get('packagetype'))\n published_on_date = fmt.get('upload_time')\n\n release_formats = ' | '.join(release_formats)\n print('{:<10}{:>15}{:>25}'.format(version, published_on_date, release_formats))\n else:\n print('No releases found for {}'.format(PYPI_PACKAGE_NAME))\n else:\n print('Package \"{}\" not found on Pypi.org'.format(PYPI_PACKAGE_NAME))", "def get_git_releases_json(url, params={}):\n response = requests.get(url=url, params=params)\n data = json.loads(response.text)\n # pprint.pprint(data)\n return data", "def versionHistory(self):\n url = self.metaData().getLink(\"version-history\")\n assert url is not None\n\n header = self._baseHeader.copy()\n response = self._adapter.getRequest(url, header)\n\n return json.loads(response['Body'])", "def getBuildHistory(self, project, package, target):\n (repo, arch) = target.split('/')\n u = core.makeurl(self.apiurl, ['build', project, repo, arch, package, '_history'])\n f = core.http_GET(u)\n root = ElementTree.parse(f).getroot()\n\n r = []\n for node in root.findall('entry'):\n rev = int(node.get('rev'))\n srcmd5 = node.get('srcmd5')\n versrel = node.get('versrel')\n bcnt = int(node.get('bcnt'))\n t = time.localtime(int(node.get('time')))\n t = time.strftime('%Y-%m-%d %H:%M:%S', t)\n\n r.append((t, srcmd5, rev, versrel, bcnt))\n return r", "def get_releases(self):\n r = self.request.get(\n '{api_url}/releases/'.format(api_url=self.api_url))\n\n if r.status_code not in (200, ):\n r.raise_for_status()\n return r.json()", "def get_current_release_downloads():\n downloads = (\n get_downloads_metadata()\n ['releases']\n [get_current_release()]\n ['downloads'])\n\n def up_to_date(dir, urls):\n try:\n df = pandas.read_csv(join(dir, \"DOWNLOAD_INFO.csv\"))\n return list(df.url) == list(urls)\n except IOError:\n return None\n\n return OrderedDict(\n (download[\"name\"], {\n 'downloaded': exists(join(get_downloads_dir(), download[\"name\"])),\n 'up_to_date': up_to_date(\n join(get_downloads_dir(), download[\"name\"]),\n [download['url']] if 'url' in download else download['part_urls']),\n 'metadata': download,\n }) for download in downloads\n )", "def get_chapter_releases(key, release_history=60):\n q = '/project/{}/versions/'.format(key)\n releases = exec_jira_query(q)\n trs = []\n earliest_release_date = arrow.utcnow().shift(days=-release_history)\n for r in releases:\n # limit ourselves to releases no more than N days old\n recent = not r['archived']\n if r.get('releaseDate', None):\n rel_date = arrow.get(r.get('releaseDate'))\n if rel_date < earliest_release_date:\n recent = False\n if recent:\n trs.append(_build_release(r, key))\n\n return trs", "def parse_release_json(data):\n info = {}\n rows = []\n for release in data:\n print(release)\n\n info = {}\n for field in ['tag_name', 'published_at']:\n info[field] = release[field]\n\n download_count = 0\n for asset in release['assets']:\n # sum over mac, win, linux for release\n download_count += asset['download_count']\n info['download_count'] = download_count\n\n rows.append(info)\n\n # dataframe which is sorted by publishing dates\n df = pd.DataFrame(rows)\n df = df.sort_values(['published_at'])\n\n # calculate cumsum of downloads\n df['download_cum'] = 
df['download_count'].cumsum()\n\n # sort back\n df = df.sort_values(['published_at'], ascending=False)\n\n # parse times\n df['published_at'] = pd.to_datetime(df.published_at)\n\n return df", "def Releases():\n return releases", "def get_release_list() -> list[MinecraftRelease]:\n releases_res = requests.get(\n \"https://launchermeta.mojang.com/mc/game/version_manifest.json\",\n ).json()\n\n return [ver for ver in releases_res[\"versions\"] if ver[\"type\"] == \"release\"]", "def get_history(config, archiver):\n root = pathlib.Path(config.cache_path) / archiver\n revisions = []\n for i in root.iterdir():\n if i.name.endswith(\".json\"):\n with i.open(\"r\") as rev_f:\n revision_data = json.load(rev_f)\n revisions.append(revision_data)\n return revisions", "def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev", "def get_release_info():\n major_info = fetch_json(\n \"https://product-details.mozilla.org/1.0/\" \"firefox_history_major_releases.json\"\n )\n if major_info is None:\n raise Exception(\"Failed to fetch major version info\")\n minor_info = fetch_json(\n \"https://product-details.mozilla.org/1.0/\"\n \"firefox_history_stability_releases.json\"\n )\n if minor_info is None:\n raise Exception(\"Failed to fetch minor version info\")\n\n return {\"major\": major_info, \"minor\": minor_info}", "def releases(self):\n return self._releases.values()", "def get_release_info(self, version):\r\n try:\r\n return self._detail[\"releases\"][version]\r\n except KeyError as key_error:\r\n log.warning(key_error)\r\n return []", "def getPackageRevisions(self, packageName):\n revisionsList = list()\n url = self.apiUrl + \"/source/\" + self.name\n url += \"/\" + packageName + \"/_history\"\n printdebug(\"Calling %s\" % url)\n try:\n xmlResult = core.http_request(\"GET\", url).read()\n except HTTPError as e:\n if e.code == 404:\n printdebug(\"History not available for %s, trying to get undated last revision\"\n % packageName)\n return self.getPackageLastRevisions(packageName)[0]\n else:\n raise\n\n xmlRevisionLists = ElementTree.fromstring(xmlResult)\n for xmlRevisionList in xmlRevisionLists.iter(\"revisionlist\"):\n for xmlRevision in xmlRevisionList.iter(\"revision\"):\n revision = PackageRevision(packageName,\n xmlRevision.get(\"rev\", 1),\n xmlRevision.get(\"vrev\", 1))\n for field in xmlRevision.iter():\n if field.text is not None:\n revision.__dict__[field.tag] = field.text\n revisionsList.append(revision)\n\n revisionsList.sort(reverse=True)\n\n if len(revisionsList) > 0:\n return revisionsList[0]\n else:\n return PackageRevision(packageName, \"\", \"\")", "def latest_release_get():\n try:\n return json_response.success({'version': version.latest_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def get_chapter_releases_api(key):\n trs = get_chapter_releases(key)\n rs = ReleaseSchema(many=True, include_data=['issue_digests'])\n rv = rs.dump(trs).data\n return jsonify(rv)", "def recent_scheduled_release_records(self) -> 
pulumi.Output[Sequence['outputs.RepositoryReleaseConfigRecentScheduledReleaseRecord']]:\n return pulumi.get(self, \"recent_scheduled_release_records\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
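As a side note on the record above: the pattern it describes can be sketched with only the standard library. Everything below is illustrative and not part of the dataset record; the function name release_times is made up, and the https://pypi.org/pypi/&lt;name&gt;/json endpoint, the 'releases' layout, the 'packagetype'/'upload_time' fields, and the timestamp format are assumptions based on the public PyPI JSON API.

import json
from datetime import datetime
from urllib.request import urlopen

def release_times(name):
    # Fetch the package's JSON metadata from PyPI (endpoint assumed).
    with urlopen("https://pypi.org/pypi/%s/json" % name) as f:
        data = json.load(f)
    times = {}
    for version, files in data.get("releases", {}).items():
        for rel in files:
            # Take the first source upload ('sdist') as the "release time".
            if rel.get("packagetype") == "sdist":
                times[version] = datetime.strptime(
                    rel["upload_time"], "%Y-%m-%dT%H:%M:%S")
                break
    return times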
Calculates X values for a given list of Y values in the range defined by the a and b parameters. The X values are simply calculated by dividing the given X range by the number of nodes, so they are evenly distributed across the range.
def prepare_initial_nodes(x_start, x_end, nodes_y): nodes_x = [float(x_start + ((x_end - x_start) / (len(nodes_y) - 1)) * i) for i in range(0, len(nodes_y))] nodes_y = [float(y) for y in nodes_y] print(nodes_x) print(nodes_y) nodes = list(zip(nodes_x, nodes_y)) return nodes
[ "def arange(a, b, dx, logx=False):\n\tif a > b:\n\t\treturn []\n\telif logx:\n\t\treturn [math.exp(math.log(a) + dx*i) for i in range(int((math.log(b)-math.log(a))/dx))] + [b]\n\telse:\n\t\treturn [a + dx*i for i in range(int((b-a)/dx))] + [b]", "def linear_input(self, a, b):\r\n y = np.linspace(a, b, b - a + 1)\r\n x = np.linspace(a, b, b - a + 1)\r\n x, y = np.meshgrid(x, y)\r\n return (x, y)", "def _x_lafferty(xmin, xmax, function):\n from scipy.optimize import brentq\n from scipy import integrate\n\n indices = np.arange(len(xmin))\n\n x_points = []\n for index in indices:\n deltax = xmax[index] - xmin[index]\n I = integrate.quad(function, xmin[index], xmax[index], args=())\n F = (I[0] / deltax)\n\n def g(x):\n return function(x) - F\n\n x_point = brentq(g, xmin[index], xmax[index])\n x_points.append(x_point)\n return x_points", "def gen_gaussian_list(a, b, n=1000):\n dx = (b-a)/(n-1) # spacing between points\n x = [a + k*dx for k in range(n)] # domain list\n \n # Local implementation of a Gaussian function\n def gauss(x):\n return (1/math.sqrt(2*math.pi))*math.exp(-x**2/2)\n \n g = [gauss(xk) for xk in x] # range list\n return (x, g)", "def get_xrange(self, lower, upper):\n # todo: ensure this functions work as well for y_values\n lower, upper = self.get_xrange_indices()\n return self.x[lower, upper + 1]", "def map_values(data, x_start1, x_end1, x_start2, x_end2):\n return x_start2 + (data-x_start1)*(x_end2-x_start2)/(x_end1-x_start1)", "def build_range(X, y, mesh_size=.02, margin=.25):\r\n x_min = X[:, 0].min() - margin\r\n x_max = X[:, 0].max() + margin\r\n y_min = X[:, 1].min() - margin\r\n y_max = X[:, 1].max() + margin\r\n\r\n xrange = np.arange(x_min, x_max, mesh_size)\r\n yrange = np.arange(y_min, y_max, mesh_size)\r\n return xrange, yrange", "def plot_linear(x_range, w, b):\n\tplt.plot(x_range, x_range * w + b)", "def lagrange_interpolation(x, nodes, function_values):\n # we want to make sure our algorithm meets this condition\n assert len(nodes) == len(function_values)\n \n return functools.reduce(lambda a, b: a + (b * lagrange(x, \n nodes,\n function_values.index(b))\n ), function_values)\n\n # more verbose way of writing this algorithm without a reduce function\n # sum = 0\n # for k, value in enumerate(function_values):\n # sum += (value * lagrange(x, nodes, k))\n \n # return sum", "def value_servay(X_name,Y_name,Y_range,X_range,df):#指定した範囲のオッズ比やリピータ率、購買数を持つvalueを抜き出す関数\n min_Y=Y_range[0]\n max_Y=Y_range[1]\n min_X=X_range[0]\n max_X=X_range[1]\n\n target_df=df[(min_X<df[X_name])&(df[X_name]<=max_X)]#X\n target_df=target_df[(min_Y<target_df[Y_name])&(target_df[Y_name]<=max_Y)]#Y\n\n return target_df", "def calculate_range(self, a_pos, b_pos):\n a_x = a_pos[0]\n a_y = a_pos[1]\n b_x = b_pos[0]\n b_y = b_pos[1]\n\n delta_x = b_x - a_x\n delta_y = b_y - a_y\n\n return np.sqrt(delta_x**2 + delta_y**2)", "def fit_to_range(val: float, a: float, b: float, a1: float, b1: float) -> float:\n new_value = ((val - a) / (b - a)) * (b1 - a1) + a1\n return new_value", "def plot_function(function: Callable, list_range: List[float], step: float = 0.01 ) -> None:\n\n x_values = range(list_range[0], list_range[1], step)\n\n y_values = list(function(x) for x in x_values)\n\n return y_values", "def interpolate2(self, f: callable, a: float, b: float, n: int) -> callable:\n\n # create the array of points\n pointsArr = []\n deltaOfXBetweenPoints = (float)(b - a) / float(n)\n xCounter = a\n\n xs = []\n ys = []\n for i in range(n):\n if (xCounter > b):\n xCounter = b\n yy = float(f(xCounter))\n xx = 
float(xCounter)\n\n pointsArr.append((xx, yy))\n xs.append(xx)\n ys.append(yy)\n xCounter = xCounter + deltaOfXBetweenPoints\n\n return lagrange(xs,ys)", "def rangeX(iterations):\n if not isinstance(iterations, (tuple)):\n raise AttributeError\n return itertools.product(*map(range, iterations))", "def slice_data(xdata, ydata, x_range):\n\tdata = zip(xdata, ydata)\n\tsliced_data = [d for d in data if d[0] >= x_range[0] and d[0] <= x_range[1]]\n\treturn array(zip(*sliced_data))", "def getRange(function, x_range, number_of_samples=100):\n\n x_lo, x_hi = x_range\n y_lo = None\n y_hi = None\n for x in hep.num.range(x_lo, x_hi, (x_hi - x_lo) / number_of_samples):\n y = function(x)\n if y is None:\n continue\n if y_lo is None or y < y_lo:\n y_lo = y\n if y_hi is None or y > y_hi:\n y_hi = y\n return y_lo, y_hi", "def mul_interval(x, y):\n p1 = lower_bound(x) * lower_bound(y)\n p2 = lower_bound(x) * upper_bound(y)\n p3 = upper_bound(x) * lower_bound(y)\n p4 = upper_bound(x) * upper_bound(y)\n return interval(min(p1, p2, p3, p4), max(p1, p2, p3, p4))", "def generate_values_in_range():\n\treturn [x * 0.5 for x in range(4,12)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
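A quick illustration of the even-spacing rule described in the record above, assuming nothing beyond plain Python; the helper name evenly_spaced is invented for this sketch.

def evenly_spaced(x_start, x_end, n):
    # Same spacing rule as prepare_initial_nodes above: split the range
    # into n - 1 equal steps so the n nodes cover [x_start, x_end] evenly.
    step = (x_end - x_start) / (n - 1)
    return [x_start + step * i for i in range(n)]

print(evenly_spaced(0.0, 2.0, 5))  # [0.0, 0.5, 1.0, 1.5, 2.0]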
Takes a list of divided-difference nodes and calculates a new divided-difference node from each pair in nodes_to_compute. In other words, it computes the next level of the so-called tree of Newton's second interpolation form.
def calculate_divided_differences_row(nodes_to_compute): divided_differences = [] if len(nodes_to_compute) == 1: return None for i in range(0, len(nodes_to_compute) - 1): child = DividedDifferenceNode.create_child_node(nodes_to_compute[i], nodes_to_compute[i + 1]) child.calculate_value() divided_differences.append(child) for node in divided_differences: print(node, end='') print('\n') return divided_differences
[ "def calculate_divided_differences(nodes):\n nodes_to_compute = []\n divided_differences = []\n for node in nodes:\n nodes_to_compute.append(DividedDifferenceNode(x=node[0], divided_difference=node[1]))\n\n divided_differences.append(tuple(nodes_to_compute))\n\n while len(nodes_to_compute) > 1:\n next_node_row = calculate_divided_differences_row(nodes_to_compute)\n divided_differences.append(tuple(next_node_row))\n nodes_to_compute = next_node_row\n\n return divided_differences", "def calculate_newton_interpolation(divided_differences):\n polynomial = []\n for i, divided_differences_row in enumerate(divided_differences):\n polynomial_part = '({0})'.format(divided_differences_row[0].divided_difference)\n\n for j in range(0, i):\n polynomial_part += '*(x-{0})'.format(divided_differences[0][j].x)\n\n polynomial_part += '+'\n polynomial.append(polynomial_part)\n polynomial_str = ''.join(polynomial)[:-1]\n\n print('Calculated polynomial: {0}'.format(polynomial_str))\n # Heuristic simplification of calculated polynomial\n simplified_polynomial = sy.simplify(polynomial_str)\n print(\"Simplified polynomial: {0}\".format(simplified_polynomial))\n return simplified_polynomial", "def update(self):\n for d in range(1, 3):\n # d is 1 or 2\n if self.test:\n print('---------------------------------degree {}--------------------------'.format(d))\n nodes = self.get_node_by_degree(d=d) # a list of node id\n for node in nodes:\n if node in list(self.g.nodes):\n neighbors = self.get_neighbors(node) # a list of node id\n if self.test:\n print()\n print('## Current node is: {}'.format(node))\n print(' >>> Neighbors of this node are : {}'.format(','.join([str(i) for i in neighbors])))\n for neighbor in neighbors:\n # neighbor may be deleted on this process, so need to check if it exists\n if d == 1: # degree = 1, only leaves\n if self.test:\n print(' >>> Start to check if {} and {} can be merged...'.format(neighbor, node))\n if (neighbor in list(self.g.nodes)) and self.check_if_merge(neighbor, node):\n if self.test:\n print(' >>> Start to merge {} to {}...'.format(node, neighbor))\n self.merge_two_nodes(left_id=neighbor, right_id=node)\n if d == 2: # degree = 2, only merge with the neighbor which degree is 2\n if self.get_degree_by_node(neighbor) == 2:\n if self.test:\n print(' >the degree of neighbor {} is {}'.format(\n neighbor, self.get_degree_by_node(neighbor)))\n print(' >>> Start to check if {} and {} can be merged...'.format(neighbor, node))\n if (neighbor in list(self.g.nodes)) and self.check_if_merge(neighbor, node):\n if self.test:\n print(' >>> Start to merge {} to {}...'.format(neighbor, node))\n self.merge_two_nodes(left_id=node, right_id=neighbor)\n\n n2n = {n: list(self.g.neighbors(n)) for n in list(self.g.nodes())} # node 2 neighbors, {id: [], ... 
}\n id2smiles = nx.get_node_attributes(self.g, 'smiles')\n id2mol_inx = nx.get_node_attributes(self.g, 'mol_inx')\n return {'n2n': n2n, 'id2smiles': id2smiles, 'f2f': self.f2f, 'id2mol_inx': id2mol_inx}", "def compute_relations(nodes: List[Node]) -> None:\n # Calculate parents\n for node in nodes:\n node.parents = []\n for node in nodes:\n for child in node.children():\n child.parents.append(node)\n\n def compute_dominators(\n entry: Node,\n parents: Callable[[Node], List[Node]],\n dominators: Callable[[Node], Set[Node]],\n immediately_dominates: Callable[[Node], List[Node]],\n set_immediate_dominator: Callable[[Node, Optional[Node]], None],\n ) -> None:\n # See https://en.wikipedia.org/wiki/Dominator_(graph_theory)#Algorithms\n # Note: if `n` is unreachable from `entry`, then *every* node will\n # vacuously belong to `n`'s dominator set.\n for n in nodes:\n dominators(n).clear()\n if n == entry:\n dominators(n).add(n)\n else:\n dominators(n).update(nodes)\n\n changes = True\n while changes:\n changes = False\n for node in nodes:\n if node == entry:\n continue\n nset = dominators(node)\n for parent in parents(node):\n nset = nset.intersection(dominators(parent))\n nset.add(node)\n if len(nset) < len(dominators(node)):\n assert nset.issubset(dominators(node))\n dominators(node).intersection_update(nset)\n changes = True\n\n # Compute immediate dominator, and the inverse relation\n for node in nodes:\n immediately_dominates(node).clear()\n for node in nodes:\n doms = dominators(node).difference({node})\n # If `node == entry` or the flow graph is not reducible, `doms` may be empty.\n # TODO: Infinite loops could be made reducible by introducing\n # branches like `if (false) { return; }` without breaking semantics\n if doms:\n # There should be a unique max `len(dominators(d))` if the flowgraph\n # is reducible. 
Fall back to largest index for irreducible graphs.\n imdom = max(doms, key=lambda d: (len(dominators(d)), d.block.index))\n immediately_dominates(imdom).append(node)\n set_immediate_dominator(node, imdom)\n else:\n set_immediate_dominator(node, None)\n for node in nodes:\n immediately_dominates(node).sort(key=lambda x: x.block.index)\n\n def _set_immediate_dominator(node: Node, imdom: Optional[Node]) -> None:\n node.immediate_dominator = imdom\n\n def _set_immediate_postdominator(node: Node, impdom: Optional[Node]) -> None:\n node.immediate_postdominator = impdom\n\n entry = nodes[0]\n terminal = nodes[-1]\n assert isinstance(terminal, TerminalNode)\n\n # Compute dominators & immediate dominators\n compute_dominators(\n entry=entry,\n parents=lambda n: n.parents,\n dominators=lambda n: n.dominators,\n immediately_dominates=lambda n: n.immediately_dominates,\n set_immediate_dominator=_set_immediate_dominator,\n )\n\n # Compute postdominators & immediate postdominators\n # This uses the same algorithm as above, but with edges reversed\n compute_dominators(\n entry=terminal,\n parents=lambda n: n.children(),\n dominators=lambda n: n.postdominators,\n immediately_dominates=lambda n: n.immediately_postdominates,\n set_immediate_dominator=_set_immediate_postdominator,\n )\n\n # Iterate over all edges n -> c and check for backedges, which define natural loops\n for node in nodes:\n for child in node.children():\n if child not in node.dominators:\n continue\n # Found a backedge node -> child where child dominates node; child is the \"head\" of the loop\n if child.loop is None:\n child.loop = NaturalLoop(child)\n child.loop.nodes |= {child, node}\n child.loop.backedges.add(node)\n for parent in nodes:\n if reachable_without(parent, node, child):\n child.loop.nodes.add(parent)", "def gradients(output_node, node_list):\r\n\r\n # a map from node to a list of gradient contributions from each output node\r\n node_to_output_grads_list = {}\r\n # Special note on initializing gradient of output_node as oneslike_op(output_node):\r\n # We are really taking a derivative of the scalar reduce_sum(output_node)\r\n # instead of the vector output_node. 
But this is the common case for loss function.\r\n node_to_output_grads_list[output_node] = [oneslike_op(output_node)]\r\n # a map from node to the gradient of that node\r\n node_to_output_grad = {}\r\n # Traverse graph in reverse topological order given the output_node that we are taking gradient wrt.\r\n reverse_topo_order = list(reversed(find_topo_sort([output_node])))\r\n #node_to_output_grad[output_node] = oneslike_op(output_node)\r\n \r\n \r\n \"\"\"TODO: Your code here\"\"\"\r\n \r\n for node in reverse_topo_order:\r\n #print(node)\r\n #print(node_to_output_grad)\r\n if not(node in node_to_output_grad):\r\n #node_to_output_grad[node] = node.op.gradient(node, sum_node_list ([node_to_output_grad[node1] for node1 in node_to_output_grads_list[node] ]))\r\n sum_node = sum_node_list (node_to_output_grads_list[node]) \r\n grad = node.op.gradient(node, sum_node)\r\n node_to_output_grad[node] = sum_node\r\n #print(grad)\r\n #print(len(node.inputs))\r\n for i in range(len(node.inputs)):\r\n #print(i)\r\n if (not(node.inputs[i] in node_to_output_grads_list)):\r\n node_to_output_grads_list[node.inputs[i]]=[]\r\n node_to_output_grads_list[node.inputs[i]].append(grad[i])\r\n \r\n #input_grad = \r\n \r\n \r\n '''for node1 in node_to_output_grads_list[node]:\r\n print(node1)\r\n if (node in node_to_output_grad):\r\n node_to_output_grad[node] = node_to_output_grad[node] + node_to_output_grad[node1]\r\n else:\r\n node_to_output_grad[node] = node_to_output_grad[node1]\r\n '''\r\n #print(\"node to output \")\r\n #print(node_to_output_grad)\r\n\r\n del reverse_topo_order\r\n # Collect results for gradients requested.\r\n grad_node_list = [node_to_output_grad[node] for node in node_list]\r\n return grad_node_list", "def contract_nodes(self, nodes, optimize='auto-hq', check=False):\r\n if len(nodes) == 1:\r\n return next(iter(nodes))\r\n\r\n if len(nodes) == 2:\r\n return self.contract_nodes_pair(*nodes, check=check)\r\n\r\n # create the bottom and top nodes\r\n grandparent = union_it(nodes)\r\n self._add_node(grandparent, check=check)\r\n for node in nodes:\r\n self._add_node(node, check=check)\r\n\r\n # if more than two nodes need to find the path to fill in between\r\n # \\\r\n # GN <- 'grandparent'\r\n # / \\\r\n # ?????????\r\n # ????????????? 
<- to be filled with 'temp nodes'\r\n # / \\ / / \\\r\n # N0 N1 N2 N3 N4 <- ``nodes``, or, subgraphs\r\n # / \\ / / \\\r\n path_inputs = [oset(self.get_legs(x)) for x in nodes]\r\n path_output = oset(self.get_legs(grandparent))\r\n\r\n if isinstance(optimize, str):\r\n path_fn = get_path_fn(optimize)\r\n else:\r\n path_fn = optimize\r\n\r\n path = path_fn(path_inputs, path_output, self.size_dict)\r\n\r\n # now we have path create the nodes in between\r\n temp_nodes = list(nodes)\r\n for p in path:\r\n to_contract = [\r\n temp_nodes.pop(i) for i in sorted(p, reverse=True)\r\n ]\r\n temp_nodes.append(\r\n self.contract_nodes(\r\n to_contract, optimize=optimize, check=check\r\n )\r\n )\r\n\r\n parent, = temp_nodes\r\n\r\n if check:\r\n # final remaining temp input should be the 'grandparent'\r\n assert parent == grandparent\r\n\r\n return parent", "def insert_nodes(self):\n neighbour_max_distance = 5\n new_nodes = []\n for node in self.nodes:\n left_distance = node.get_distance(node.neighbour1)\n right_distance = node.get_distance(node.neighbour2)\n if left_distance > neighbour_max_distance:\n # halfway\n half_point = (\n node.x + (node.neighbour1.x - node.x) / 2,\n node.y + (node.neighbour1.y - node.y) / 2\n )\n new_node = Node(half_point)\n node.neighbour1.connect(node.neighbour1.neighbour1, new_node)\n new_node.connect(node.neighbour1, node)\n node.connect(new_node, node.neighbour2)\n new_nodes.append(new_node)\n new_nodes.append(node)\n\n if right_distance > neighbour_max_distance:\n # halfway\n half_point = (\n node.x + (node.neighbour2.x - node.x) / 2,\n node.y + (node.neighbour2.y - node.y) / 2\n )\n new_node = Node(half_point)\n node.neighbour2.connect(new_node, node.neighbour2.neighbour2)\n new_node.connect(node, node.neighbour2)\n node.connect(node.neighbour1, new_node)\n new_nodes.append(new_node)\n\n return new_nodes", "def job_tree(self):\n\n # 1. Enforce depth of 1 for steps\n def depth_one(steps):\n depth_one = []\n for step in steps:\n if type(step) is list:\n if type(step[0]) is list:\n depth_one.append(step[0])\n else:\n depth_one.append(step)\n else:\n depth_one.append([step])\n return depth_one\n\n # 2. Convert steps to list of node objects (0,1,2,3...)\n def assign_nodes(steps):\n nodes = [i for i in range(len(steps))]\n objects = list(\n set([elem for sublist in steps for elem in sublist]))\n\n # checks for multiple src and dst objects -- added when looking for\n # mutiples\n split_objects = []\n for obj in objects:\n if len(obj) > 1:\n new_objs = obj.split(\", \")\n split_objects.extend(new_objs)\n else:\n split_objects.append(obj)\n objects = split_objects\n del(split_objects)\n\n # populate with leafless trees (Node objects, no edges)\n for node in nodes:\n nodes[node] = Node(str(node))\n\n # search for leafy trees\n for obj in objects:\n\n # accounts for multiple drc/dst objects\n leaves = []\n for i, sublist in enumerate(steps):\n for string in sublist:\n if string.count(',') > 0:\n if obj in string:\n leaves.append(i)\n else:\n if obj in sublist:\n leaves.append(i)\n leaves = sorted(list(set(leaves)))\n\n if len(leaves) > 1:\n viable_edges = []\n\n # compute cross-product\n for leaf1 in leaves:\n for leaf2 in leaves:\n if str(leaf1) != str(leaf2) and sorted((leaf1, leaf2)) not in viable_edges:\n viable_edges.append(sorted((leaf1, leaf2)))\n\n # form edge networks\n for edge in viable_edges:\n n1, n2 = nodes[edge[0]], nodes[edge[1]]\n n1.add_edge(n2)\n n2.add_edge(n1)\n nodes[int(n1.name)], nodes[int(n2.name)] = n1, n2\n return nodes\n\n # 3. 
Determine number of trees and regroup by connected nodes\n def connected_nodes(nodes):\n proto_trees = []\n nodes = set(nodes)\n\n while nodes:\n n = nodes.pop()\n group = {n}\n queue = [n]\n while queue:\n n = queue.pop(0)\n neighbors = n.edges\n neighbors.difference_update(group)\n nodes.difference_update(neighbors)\n group.update(neighbors)\n queue.extend(neighbors)\n proto_trees.append(group)\n return proto_trees\n\n # 4. Convert nodes to nested dictionary of parent-children relations\n # i.e. adding depth -- also deals with tree-node sorting and path\n # optimization\n def build_tree_dict(trees, steps):\n # node sorting in trees\n sorted_trees = []\n for tree in trees:\n sorted_trees.append(\n sorted(tree, key=lambda x: int(x.name)))\n\n # retrieve values of the nodes (the protocol's containers)\n # for each tree ... may want to use dictionary eventually\n all_values = []\n for tree in sorted_trees:\n values = [steps[int(node.name)] for node in tree]\n all_values.append(values)\n\n # create relational tuples:\n all_digs = []\n singles = []\n dst_potentials = []\n for tree_idx in range(len(sorted_trees)):\n edge_flag = False\n tree_digs = []\n for node_idx in range(len(sorted_trees[tree_idx])):\n\n # digs: directed graph vectors\n digs = []\n dst_nodes = []\n node_values = all_values[tree_idx][node_idx]\n src_node = str(sorted_trees[tree_idx][node_idx].name)\n\n # ACTION ON MULTIPLE OBJECTS (E.G. TRANSFER FROM SRC -> DST\n # WELLS)\n # Outcome space: {1-1, 1-many, many-1, many-many}\n if len(node_values) == 2:\n # single destination (x-1)\n if node_values[1].count(\",\") == 0:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[1] == sublist[0]]\n # multiple destinations (x-many)\n elif node_values[1].count(\",\") > 0:\n dst_nodes = []\n for dst in node_values[1].replace(\", \", \"\"):\n for i, sublist in enumerate(steps):\n if i not in dst_nodes and dst == sublist[0]:\n dst_nodes.append(i)\n\n # ACTION ON A SINGLE OBJECT\n elif len(node_values) == 1:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[0] == sublist[0]]\n\n # Constructing tuples in (child, parent) format\n for dst_node in dst_nodes:\n dig = (int(dst_node), int(src_node))\n digs.append(dig)\n\n # else: an edge-case for dictionaries constructed with no edges\n # initiates tree separation via flag\n if digs != []:\n edge_flag = False\n tree_digs.append(digs)\n else:\n edge_flag = True\n digs = [(int(src_node), int(src_node))]\n tree_digs.append(digs)\n\n # digraph cycle detection: avoids cycles by overlooking set\n # repeats\n true_tree_digs = []\n for digs in tree_digs:\n for dig in digs:\n if tuple(sorted(dig, reverse=True)) not in true_tree_digs:\n true_tree_digs.append(\n tuple(sorted(dig, reverse=True)))\n\n # edge-case for dictionaries constructed with no edges\n if true_tree_digs != [] and edge_flag == False:\n all_digs.append(true_tree_digs)\n elif edge_flag == True:\n all_digs.extend(tree_digs)\n\n # Enforces forest ordering\n all_digs = sorted(all_digs, key=lambda x: x[0])\n\n # job tree traversal to find all paths:\n forest = []\n for digs_set in all_digs:\n\n # pass 1: initialize nodes dictionary\n nodes = OrderedDict()\n for tup in digs_set:\n id, parent_id = tup\n # ensure all nodes accounted for\n nodes[id] = OrderedDict({'id': id})\n nodes[parent_id] = OrderedDict({'id': parent_id})\n\n # pass 2: create trees and parent-child relations\n for tup in digs_set:\n id, parent_id = tup\n node = nodes[id]\n # links node to its parent\n if id != parent_id:\n # add new_node as 
child to parent\n parent = nodes[parent_id]\n if not 'children' in parent:\n # ensure parent has a 'children' field\n parent['children'] = []\n children = parent['children']\n children.append(node)\n\n desired_tree_idx = sorted(list(nodes.keys()))[0]\n forest.append(nodes[desired_tree_idx])\n return forest\n\n # 5. Convert dictionary-stored nodes to unflattened, nested list of\n # parent-children relations\n def dict_to_list(forest):\n forest_list = []\n for tree in forest:\n tString = str(json.dumps(tree))\n tString = tString.replace('\"id\": ', \"\").replace('\"children\": ', \"\").replace(\n '[{', \"[\").replace('}]', \"]\").replace('{', \"[\").replace('}', \"]\")\n\n # find largest repeated branch (if applicable)\n # maybe think about using prefix trees or SIMD extensions for better\n # efficiency\n x, y, length, match = 0, 0, 0, ''\n for y in range(len(tString)):\n for x in range(len(tString)):\n substring = tString[y:x]\n if len(list(re.finditer(re.escape(substring), tString))) > 1 and len(substring) > length:\n match = substring\n length = len(substring)\n\n # checking for legitimate branch repeat\n if \"[\" in match and \"]\" in match:\n hits = []\n index = 0\n if len(tString) > 3:\n while index < len(tString):\n index = tString.find(str(match), index)\n if index == -1:\n break\n hits.append(index)\n index += len(match)\n\n # find all locations of repeated branch and remove\n if len(hits) > 1:\n for start_loc in hits[1:]:\n tString = tString[:start_loc] + \\\n tString[start_loc:].replace(match, \"]\", 1)\n\n # increment all numbers in string to match the protocol\n newString = \"\"\n numString = \"\"\n for el in tString:\n if el.isdigit(): # build number\n numString += el\n else:\n if numString != \"\": # convert it to int and reinstantaite numString\n numString = str(int(numString) + 1)\n newString += numString\n newString += el\n numString = \"\"\n tString = newString\n del newString\n\n forest_list.append(ast.literal_eval(tString))\n return forest_list\n\n # 6. Print job tree(s)\n def print_tree(lst, level=0):\n print(' ' * (level - 1) + '+---' * (level > 0) + str(lst[0]))\n for l in lst[1:]:\n if type(l) is list:\n print_tree(l, level + 1)\n else:\n print(' ' * level + '+---' + l)\n\n # 1\n steps = depth_one(self.object_list)\n # 2\n nodes = assign_nodes(steps)\n # 3\n proto_forest = connected_nodes(nodes)\n # 4\n forest = build_tree_dict(proto_forest, steps)\n # 5\n self.forest_list = dict_to_list(forest)\n # 6\n print(\"\\n\" + \"A suggested Job Tree based on container dependency: \\n\")\n for tree_list in self.forest_list:\n print_tree(tree_list)", "def get_locations(nodes, tl, br):\n \n # Base cases:\n if len(nodes) == 1: # for singleton, only choice is to place in the single spot in 1x1 square\n return {nodes[0]: tl}\n if len(nodes) == 2: # for two nodes, arbitrarily chose to place the first node in top left\n return {nodes[0]: tl, nodes[1]: br}\n\n # Recursive case, need to create and solve subproblems:\n ret = {}\n\n num_edges = count_num_edges(nodes)\n if num_edges == 0: # for empty graphs, no need to run METIS, just assign arbitrarily\n i = 0\n for x in range(tl.x, br.x+1): \n for y in range(tl.y, br.y+1):\n if i < len(nodes):\n ret.update({nodes[i]: Point(x,y)})\n i += 1\n return ret\n\n filename = splitext(basename(sys.argv[1]))[0] + '.p.' + sys.argv[2] + '.yx.' + sys.argv[3] + '.drop.' + sys.argv[4] + '.' +\\\n '_'.join(['delete', str(tl.x), str(tl.y), str(br.x), str(br.y)]) \n\n # special case for the very first call of get_locations. 
For example, suppose that there are\n # 97 nodes on a 10x10 grid. Instead of dividing the 97 nodes into 2 equal partitions, we should\n # divide them into a partition of 90 nodes and a partition of 7 nodes. The former should be\n # placed on a 10x9 grid and te latter should be placed on a 1x7 grid.\n if len(nodes) < (br.x - tl.x + 1) * (br.y - tl.y + 1):\n assert tl == Point(0, 0)\n size_tl_nodes = (br.x + 1) * int(len(nodes) / (br.x + 1))\n if size_tl_nodes == len(nodes):\n ret.update(get_locations(nodes, tl=Point(0, 0), br=Point(br.x, len(nodes) / (br.x + 1) - 1)))\n return ret\n\n nodes_tl, nodes_br = partition(nodes, size_tl_nodes, filename)\n # complicated indexing here. As an example, for the 97 into 10x10 case, we want to send 90 nodes\n # to a rectangle spanned by tl=Point(0, 0) and br=Point(9, 8) and we want to send 7 nodes to a \n # rectangle spanned by tl=Point(0, 9) and br=Point(6, 9)\n ret.update(get_locations(nodes_tl, tl=Point(0, 0), br=Point(br.x, len(nodes) / (br.x + 1) - 1)))\n ret.update(get_locations(nodes_br, tl=Point(0, len(nodes) / (br.x + 1)), br=Point(len(nodes) % (br.x + 1) - 1, len(nodes) / (br.x + 1))))\n return ret\n\n if br.x - tl.x > br.y - tl.y: # if rectangle is wider than tall, split on y axis\n half = tl.x + (br.x - tl.x - 1) / 2\n size_tl_nodes = (half - tl.x + 1) * (br.y - tl.y + 1)\n else: # split on x axis\n half = tl.y + (br.y - tl.y - 1) / 2\n size_tl_nodes = (br.x - tl.x + 1) * (half - tl.y + 1)\n\n nodes_tl, nodes_br = partition(nodes, size_tl_nodes, filename)\n\n if br.x - tl.x > br.y - tl.y: # if rectangle is wider than tall, split on y axis\n ret.update(get_locations(nodes_tl, tl=tl, br=Point(half, br.y)))\n ret.update(get_locations(nodes_br, tl=Point(half + 1,tl.y), br=br))\n else: # split on x axis\n ret.update(get_locations(nodes_tl, tl=tl, br=Point(br.x, half)))\n ret.update(get_locations(nodes_br, tl=Point(tl.x, half + 1), br=br))\n\n return ret", "def make_sidewalk_nodes(street, prev_node, curr_node, next_node):\n if prev_node is None:\n v = - curr_node.vector_to(next_node, normalize=False)\n vec_prev = curr_node.vector() + v\n prev_node = Node(None, vec_prev[0], vec_prev[1])\n elif next_node is None:\n v = - curr_node.vector_to(prev_node, normalize=False)\n vec_next = curr_node.vector() + v\n next_node = Node(None, vec_next[0], vec_next[1])\n\n curr_latlng = np.array(curr_node.location())\n\n v_cp_n = curr_node.vector_to(prev_node, normalize=True)\n v_cn_n = curr_node.vector_to(next_node, normalize=True)\n v_sidewalk = v_cp_n + v_cn_n\n\n if np.linalg.norm(v_sidewalk) < 1e-10:\n v_sidewalk_n = np.array([v_cn_n[1], - v_cn_n[0]])\n else:\n v_sidewalk_n = v_sidewalk / np.linalg.norm(v_sidewalk)\n\n p1 = curr_latlng + street.distance_to_sidewalk * v_sidewalk_n\n p2 = curr_latlng - street.distance_to_sidewalk * v_sidewalk_n\n\n p_sidewalk_1 = Node(None, p1[0], p1[1])\n p_sidewalk_2 = Node(None, p2[0], p2[1])\n\n curr_node.append_sidewalk_node(street.id, p_sidewalk_1)\n curr_node.append_sidewalk_node(street.id, p_sidewalk_2)\n\n # Figure out on which side you want to put each sidewalk node\n v_c1 = curr_node.vector_to(p_sidewalk_1)\n if np.cross(v_cn_n, v_c1) > 0:\n return p_sidewalk_1, p_sidewalk_2\n else:\n return p_sidewalk_2, p_sidewalk_1", "def _create_new_nodes(self, level, n):\n if (level + 1) == len(self._node_list):\n self._node_list.append([])\n\n split_val = self._node_list[level][n].get_split()\n idx = self._node_list[level][n].get_col()\n\n # Split data\n lower_x_data, lower_y_data, upper_x_data, upper_y_data = 
self._split_data(level, n, idx, split_val)\n\n # Now check if all the same in lower/upper\n # Do not change y_data to average over all values\n if (lower_x_data.shape[0] > 1) and ((lower_x_data - lower_x_data[0, :]) == 0).all():\n lower_x_data = lower_x_data[[0], :]\n if (upper_x_data.shape[0] > 1) and ((upper_x_data - upper_x_data[0, :]) == 0).all():\n upper_x_data = upper_x_data[[0], :]\n # Make lower node if one can\n if lower_x_data.shape[0] > 0:\n lower_curr_index = len(self._node_list[level + 1])\n self._node_list[level + 1].append(self._create_node(lower_x_data, lower_y_data))\n self._node_list[level][n].set_lower_split_index(lower_curr_index)\n else:\n lower_curr_index = None\n # Make upper node\n if upper_x_data.shape[0] > 0:\n upper_curr_index = len(self._node_list[level + 1])\n self._node_list[level + 1].append(self._create_node(upper_x_data, upper_y_data))\n self._node_list[level][n].set_upper_split_index(upper_curr_index)\n else:\n upper_curr_index = None\n\n return [level + 1, lower_curr_index], [level + 1, upper_curr_index]", "def _generate_nodes(self, diff_list: list[g.Bunch], revs_list: list[str]) -> None:\n # self._trace_diff_list(diff_list)\n for b in diff_list:\n # self._trace_kind(b, revs_list)\n p = self.root.insertAsLastChild()\n gnx0_s = b.gnx0 or ''\n gnx1_s = b.gnx1 or ''\n gnxs_s = gnx0_s if b.gnx0 == b.gnx1 else f\"{gnx0_s} {gnx1_s}\"\n p.h = f\"{b.i:>4} {b.kind}\" # {gnx0_s} {gnx1_s}\"\n if b.kind == 'diff':\n diff = list(difflib.unified_diff(b.body0 or [], b.body1 or [], b.rev0, b.rev1))\n p.b = f\"diff {gnxs_s}\\n\\n{''.join(diff)}\"\n child1 = p.insertAsLastChild()\n child1.h = 'old'\n child1.b = ''.join(b.body0 or [])\n child2 = p.insertAsLastChild()\n child2.h = 'new'\n child2.b = ''.join(b.body1 or [])\n elif b.kind == 'add':\n p.b = f\"add {gnxs_s}\\n\\n{''.join(b.body1 or [])}\"\n elif b.kind == 'delete':\n p.b = f\"delete {gnxs_s}\\n\\n{''.join(b.body0 or [])}\"\n else:\n g.trace(f\"Bad b.kind: {b.kind!r}\")", "def _process_nodes(self, x, y, sample_weight, node, parent, branch_index):\n if not node.is_leaf():\n old_weight = node.last_split_reevaluation_at\n new_weight = node.total_weight\n stop_flag = False\n\n if (new_weight - old_weight) >= self.min_samples_reevaluate:\n # Reevaluate the best split\n stop_flag = self._reevaluate_best_split(node, parent, branch_index)\n\n if not stop_flag:\n # Move in depth\n child_index = node.instance_child_index(x)\n if child_index >= 0:\n child = node.get_child(child_index)\n if child is not None:\n self._process_nodes(x, y, sample_weight, child, node, child_index)\n elif self._growth_allowed and node.is_active():\n if node.depth >= self.max_depth: # Max depth reached\n node.deactivate()\n self._n_inactive_leaves += 1\n self._n_active_leaves -= 1\n else:\n weight_seen = node.total_weight\n weight_diff = weight_seen - node.last_split_attempt_at\n if weight_diff >= self.grace_period:\n # Attempt to split\n self._attempt_to_split(node, parent, branch_index)\n node.last_split_attempt_at = weight_seen", "def evalChildren(nodes, goal, d1, d2):\n\tfor node in nodes:\n\t\tnode.g, node.h, node.f = calcScore(node, goal, d1, d2)\n\treturn nodes", "def getNextNodeUsingCellDiff(kGoalState):\n \n global fringe\n global solutions\n\n \n\n\n\n minNode = None\n minCost = 99999999999\n minNodeIndex = -1\n\n \n pnode = None\n pcost = None\n\n if len(solutions)>0 and solutions[0] != None:\n pnode = solutions[0];\n pcost = getHValueForNode(pnode,kGoalState)\n #print pnode, pcost\n # raw_input()\n \n\n\n\n for idx,node in 
enumerate(fringe):\n #get the heu. function values\n g_value = getHValueForNode(node,kGoalState)\n \n\n if g_value < minCost:\n minNode = node\n minNodeIndex = idx\n minCost = g_value\n\n\n fringe.pop(minNodeIndex)\n c = getHValueForNode(minNode,kGoalState)\n if pnode != None:\n if c > pcost:\n minNode = None\n \n return minNode", "def node_repulsion(self):\r\n\r\n for node in self.nodes:\r\n node.vx, node.vy = 0, 0 # reset velocity back to zero\r\n\r\n for node2 in node.cluster:\r\n if node == node2: continue\r\n\r\n dx = node.x - node2.x\r\n dy = node.y - node2.y\r\n\r\n magnitude = math.sqrt(dx * dx + dy * dy)\r\n\r\n\r\n if magnitude:\r\n force = self.force_constant * self.force_constant / magnitude\r\n node.vx += dx / magnitude * force\r\n node.vy += dy / magnitude * force", "def calcInternalNodes(self):\n self.internalNodes = []\n self.calcInternalNodes_(self.head)", "def _dfs(self, node, feed_dict):\n if node.parent is None:\n # We are in a leaf\n if not node in feed_dict:\n raise RuntimeError(\"Some source op don't have provided values !\")\n \n lambda_list = list()\n for element in feed_dict[node]:\n lambda_list.append(InstanceNode(node.funct, pickle.dumps(element).hex(), self.curr_idx, None))\n self.curr_idx += 1\n self.treated[node] = lambda_list\n return \n\n for parent in node.parent:\n if not parent in self.treated: \n self._dfs(parent, feed_dict)\n \n dependencies = list()\n for idx, parent in enumerate(node.parent):\n dependencies.append(node.dispenser[idx](len(self.treated[parent])))\n\n if len(set(map(lambda x: len(x), dependencies)))>1:\n raise RuntimeError(\"Node get multiple parent with differents dim !\")\n\n lambda_list = list()\n # If we have only one parent, we will have a 1dim data field\n if(len(dependencies)==0):\n raise RuntimeError(\"Intern Eror\")\n elif(len(dependencies)==1):\n dependencies = dependencies[0]\n for dep in dependencies:\n curr_parents = list()\n for element in dep:\n curr_parents.append(self.treated[node.parent[0]][element])\n lambda_list.append(InstanceNode(node.funct, None, self.curr_idx, curr_parents))\n self.curr_idx+=1\n\n # If we have mult-parent, we shall get a 2 dim data field\n else:\n for dep in zip(*dependencies):\n curr_parents = list()\n for i in range(len(dep)):\n sub_curr_parents= list()\n for sub in dep[i]:\n sub_curr_parents.append(self.treated[node.parent[i]][sub])\n curr_parents.append(sub_curr_parents)\n\n lambda_list.append(InstanceNode(node.funct, None, self.curr_idx, curr_parents))\n self.curr_idx+=1\n self.treated[node] = lambda_list", "def space(input_nodes, nodes_of_interest, force_iter=10):\n\n # get the locations of the nodes of interest in 2-D\n # makes a subdictionary for the particular layer\n nodes = dict((k, input_nodes[k])\n for k in nodes_of_interest if k in input_nodes)\n\n # a matrix to store distances\n dist_mat = np.zeros(shape=(len(nodes.keys()), len(nodes.keys())))\n\n # store the distances in the matrix\n for node1, node2 in combinations(nodes.keys(), 2):\n delta = [x2 - x1 for x1,\n x2 in zip(nodes[node1][\"location\"], nodes[node2][\"location\"])]\n distance = sqrt(sum(d ** 2 for d in delta))\n dist_mat[nodes_of_interest.index(\n node1), nodes_of_interest.index(node2)] = distance\n dist_mat[nodes_of_interest.index(\n node2), nodes_of_interest.index(node1)] = distance\n\n # find the two furthest nodes\n [max_loc, temp] = np.where(dist_mat == dist_mat.max())\n [forward_node, reverse_node] = max_loc\n\n # create edges starting from the terminal nodes\n temp_edges = []\n forward_mat = dist_mat.copy()\n 
reverse_mat = dist_mat.copy()\n\n # walk through the network and makes temporary connections\n for i in range(0, len(nodes.keys())):\n next_node = forward_mat[forward_node, :].argsort()[1]\n forward_mat[forward_node, :] = dist_mat.max() + 1\n forward_mat[:, forward_node] = dist_mat.max() + 1\n temp_edges.append({\"source\": nodes_of_interest[\n forward_node], \"target\": nodes_of_interest[next_node]})\n forward_node = next_node\n\n next_node = reverse_mat[reverse_node, :].argsort()[1]\n reverse_mat[reverse_node, :] = dist_mat.max() + 1\n reverse_mat[:, reverse_node] = dist_mat.max() + 1\n temp_edges.append({\"source\": nodes_of_interest[\n reverse_node], \"target\": nodes_of_interest[next_node]})\n reverse_node = next_node\n\n nodes = run_forcing(temp_edges, nodes, iterations=force_iter)\n\n # update the x,y values for the forced nodes\n for node in nodes.keys():\n input_nodes[node][\"location\"][0:2] = nodes[node][\"location\"][0:2]\n\n return input_nodes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
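To make the divided-difference recurrence behind the node classes above concrete, here is a minimal float-only sketch; the helper name divided_difference_table is invented for this example and is not part of the record.

def divided_difference_table(xs, ys):
    # Level 0 holds the raw y values; level k holds
    # f[x_i,...,x_{i+k}] = (f[x_{i+1},...,x_{i+k}] - f[x_i,...,x_{i+k-1}]) / (x_{i+k} - x_i)
    levels = [list(ys)]
    k = 1
    while len(levels[-1]) > 1:
        prev = levels[-1]
        levels.append([(prev[i + 1] - prev[i]) / (xs[i + k] - xs[i])
                       for i in range(len(prev) - 1)])
        k += 1
    return levels

print(divided_difference_table([0.0, 1.0, 2.0], [1.0, 2.0, 5.0]))
# [[1.0, 2.0, 5.0], [1.0, 3.0], [1.0]]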
Calculates divided differences for the given interpolation nodes. It is assumed that at least two interpolation nodes are provided. Each tuple of the returned list represents one level of the divided-differences tree.
def calculate_divided_differences(nodes): nodes_to_compute = [] divided_differences = [] for node in nodes: nodes_to_compute.append(DividedDifferenceNode(x=node[0], divided_difference=node[1])) divided_differences.append(tuple(nodes_to_compute)) while len(nodes_to_compute) > 1: next_node_row = calculate_divided_differences_row(nodes_to_compute) divided_differences.append(tuple(next_node_row)) nodes_to_compute = next_node_row return divided_differences
[ "def calculate_divided_differences_row(nodes_to_compute):\n divided_differences = []\n\n if len(nodes_to_compute) == 1:\n return None\n\n for i in range(0, len(nodes_to_compute) - 1):\n child = DividedDifferenceNode.create_child_node(nodes_to_compute[i], nodes_to_compute[i + 1])\n child.calculate_value()\n divided_differences.append(child)\n\n for node in divided_differences:\n print(node, end='')\n\n print('\\n')\n return divided_differences", "def divided_diff(order, ys, dts):\n assert(len(ys) == order+1)\n assert(len(dts) == order)\n\n if order > 1:\n return ((divided_diff(order-1, ys[:-1], dts[:-1])\n - divided_diff(order-1, ys[1:], dts[1:]))\n /\n sum(dts))\n else:\n return (ys[0] - ys[-1])/(dts[0])", "def diffdiffEdges(nodes_x,nodes_y,edges_x,edges_y):\n de_x = edges_x.copy()\n de_y = edges_y.copy()\n\n ### step 1: dry_diff_edges_1\n # process edges df to remove weight_x, count_x, average_x (and *_y); rename \"avg_subt\"\n de_x = de_x[['node_i','node_j','attribute','avg_subt']]\n de_x.rename(columns={'avg_subt':'average'}, inplace=True)\n\n ### step 2: dry_diff_edges_0\n # process edges df to remove weight_x, count_x, average_x (and *_y); rename \"avg_subt\"\n de_y = de_y[['node_i','node_j','attribute','avg_subt']]\n de_y.rename(columns={'avg_subt':'average'}, inplace=True)\n\n ### step 3: dry_diff_edges_1 - dry_diff_edges_0\n # the \"nodes in 1st, not in 2nd\" results make less sense here since diff of diff\n diff_nodes_10, diff_edges_10, mutstart_10, offset_10 = diffEdges(nodes_x,nodes_y,de_x,de_y)\n\n return diff_nodes_10, diff_edges_10, mutstart_10, offset_10", "def interpolate_tuples(first, second, ndiv):\n def interp1d(one,two,ndiv):\n return [one+1.0*(two-one)*i/(ndiv-1) for i in range(ndiv)]\n return list(zip(*map(lambda x: interp1d(x[0],x[1],ndiv), zip(first,second))))", "def diff(self):\n return [node.diff for node in self]", "def test_frac_diffNonGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\")\n s9 = self.RNA(\"A-GG-A-C\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_diff_non_gaps(y), z)\n\n test(s1, s2, 0.75)\n test(s1, s3, 1)\n test(s2, s3, 0.25)\n test(s1, s4, 0.5)\n test(s4, s5, 1)\n test(s4, s6, 0.4)\n test(s4, s7, 0)\n test(s4, s8, 0.5)\n test(s4, s9, 1 / 3.0)\n test(e, s4, 0)", "def split_diff(\n diff: Dict[str, Dict[str, Optional[Union[List, Dict]]]], model_handle: str\n) -> List[Tuple[str, Union[Entity, dict]]]:\n diff_segments = []\n diff_order = [\n REMOVE_NODE,\n ADD_NODE,\n REMOVE_PROPERTY,\n ADD_PROPERTY,\n REMOVE_RELATIONSHIP,\n ADD_RELATIONSHIP,\n ]\n\n def add_node(entity, diff_segments):\n diff_segments.append((ADD_NODE, entity))\n\n def remove_node(entity, diff_segments):\n diff_segments.append((REMOVE_NODE, entity))\n\n def add_relationship(src, rel, dst, diff_segments):\n diff_segments.append((ADD_RELATIONSHIP, {\"rel\": rel, \"src\": src, \"dst\": dst}))\n\n def remove_relationship(src, rel, dst, diff_segments):\n diff_segments.append(\n (REMOVE_RELATIONSHIP, {\"rel\": rel, \"src\": src, \"dst\": dst})\n )\n\n def add_property(entity, prop_handle, prop_value, diff_segments):\n diff_segments.append(\n (\n ADD_PROPERTY,\n {\n \"entity\": entity,\n \"prop_handle\": prop_handle,\n \"prop_value\": prop_value,\n },\n )\n )\n\n def remove_property(entity, prop_handle, prop_value, diff_segments):\n diff_segments.append(\n (\n 
REMOVE_PROPERTY,\n {\n \"entity\": entity,\n \"prop_handle\": prop_handle,\n \"prop_value\": prop_value,\n },\n )\n )\n\n node_diff = diff.get(\"nodes\")\n edge_diff = diff.get(\"edges\")\n prop_diff = diff.get(\"props\")\n\n if node_diff:\n if node_diff.get(\"a\"):\n for node_hdl in node_diff.get(\"a\"):\n remove_node(\n Node({\"handle\": node_hdl, \"model\": model_handle}), diff_segments\n )\n if node_diff.get(\"b\"):\n for node_hdl in node_diff.get(\"b\"):\n add_node(\n Node({\"handle\": node_hdl, \"model\": model_handle}), diff_segments\n )\n for node_hdl, change in node_diff.items():\n if node_hdl in {\"a\", \"b\"}:\n continue\n node_props = change[\"props\"]\n node = Node({\"handle\": node_hdl, \"model\": model_handle})\n if node_props.get(\"a\"):\n for prop_hdl in node_props.get(\"a\"):\n remove_relationship(\n node,\n \"has_property\",\n Property({\"handle\": prop_hdl, \"model\": model_handle}),\n diff_segments,\n )\n if node_props.get(\"b\"):\n for prop_hdl in node_props.get(\"b\"):\n add_relationship(\n node,\n \"has_property\",\n Property({\"handle\": prop_hdl, \"model\": model_handle}),\n diff_segments,\n )\n if edge_diff:\n if edge_diff.get(\"a\"):\n for edge_hdl, src_hdl, dst_hdl in edge_diff.get(\"a\"):\n edge = Edge(\n {\n \"handle\": edge_hdl,\n \"src\": Node({\"handle\": src_hdl, \"model\": model_handle}),\n \"dst\": Node({\"handle\": dst_hdl, \"model\": model_handle}),\n }\n )\n remove_node(edge, diff_segments)\n if edge_diff.get(\"b\"):\n for edge_hdl, src_hdl, dst_hdl in edge_diff.get(\"b\"):\n edge = Edge(\n {\n \"handle\": edge_hdl,\n \"src\": Node({\"handle\": src_hdl, \"model\": model_handle}),\n \"dst\": Node({\"handle\": dst_hdl, \"model\": model_handle}),\n }\n )\n add_node(edge, diff_segments)\n add_relationship(edge, \"has_src\", edge.src, diff_segments)\n add_relationship(edge, \"has_dst\", edge.dst, diff_segments)\n for edge_tup, change_dict in edge_diff.items():\n if edge_tup in {\"a\", \"b\"}:\n continue\n edge = Edge(\n {\n \"handle\": edge_tup[0],\n \"src\": Node({\"handle\": edge_tup[1], \"model\": model_handle}),\n \"dst\": Node({\"handle\": edge_tup[2], \"model\": model_handle}),\n }\n )\n for edge_attr, change in change_dict.items():\n if edge_attr == \"props\" and change.get(\"a\"):\n for prop_hdl in change.get(\"a\"):\n remove_relationship(\n edge,\n \"has_property\",\n Property({\"handle\": prop_hdl, \"model\": model_handle}),\n diff_segments,\n )\n if edge_attr == \"props\" and change.get(\"b\"):\n for prop_hdl in change.get(\"b\"):\n add_relationship(\n edge,\n \"has_property\",\n Property({\"handle\": prop_hdl, \"model\": model_handle}),\n diff_segments,\n )\n else:\n if change.get(\"a\"):\n remove_property(\n edge,\n edge_attr,\n change.get(\"a\"),\n diff_segments,\n )\n if change.get(\"b\"):\n add_property(\n edge,\n edge_attr,\n change.get(\"b\"),\n diff_segments,\n )\n if prop_diff:\n if prop_diff.get(\"a\"):\n for prop_tuple in prop_diff.get(\"a\"):\n # parent_hdls = prop_tuple[:-1]\n prop_hdl = prop_tuple[-1]\n prop = Property({\"handle\": prop_hdl, \"model\": model_handle})\n remove_node(prop, diff_segments)\n if prop_diff.get(\"b\"):\n for prop_tuple in prop_diff.get(\"b\"):\n # parent_hdls = prop_tuple[:-1]\n prop_hdl = prop_tuple[-1]\n prop = Property({\"handle\": prop_hdl, \"model\": model_handle})\n add_node(prop, diff_segments)\n for prop_tup, change_dict in prop_diff.items():\n if prop_tup in {\"a\", \"b\"}:\n continue\n prop = Property({\"handle\": prop_tup[1]})\n for prop_attr, change in change_dict.items():\n if prop_attr == 
\"value_set\" and change.get(\"a\"):\n value_set = change.get(\"a\")\n remove_relationship(\n prop,\n \"has_value_set\",\n value_set,\n diff_segments,\n )\n if prop_attr == \"value_set\" and change.get(\"b\"):\n value_set = change.get(\"b\")\n add_node(value_set, diff_segments)\n add_relationship(\n prop,\n \"has_value_set\",\n value_set,\n diff_segments,\n )\n for term in value_set.terms:\n term_ent = Term({\"value\": term})\n add_node(term_ent, diff_segments)\n add_relationship(value_set, \"has_term\", term_ent, diff_segments)\n else:\n if change.get(\"a\"):\n remove_property(\n prop,\n prop_attr,\n change.get(\"a\"),\n diff_segments,\n )\n if change.get(\"b\"):\n add_property(\n prop,\n prop_attr,\n change.get(\"b\"),\n diff_segments,\n )\n return sorted(diff_segments, key=lambda x: diff_order.index(x[0]))", "def _divide(product):\n return product.replace(' ', '').split('->')", "def getVolumeFractions(self):\n children = self.getChildren()\n numerator = [c.getVolume() for c in children]\n denom = sum(numerator)\n if denom == 0.0:\n numerator = [c.getArea() for c in children]\n denom = sum(numerator)\n\n fracs = [(ci, nu / denom) for ci, nu in zip(children, numerator)]\n return fracs", "def _compute_derivatives(self):\n derivatives = []\n for i, (timestamp, value) in enumerate(self.time_series_items):\n if i > 0:\n pre_item = self.time_series_items[i - 1]\n pre_timestamp = pre_item[0]\n pre_value = pre_item[1]\n td = timestamp - pre_timestamp\n derivative = (value - pre_value) / td if td != 0 else value - pre_value\n derivative = abs(derivative)\n derivatives.append(derivative)\n # First timestamp is assigned the same derivative as the second timestamp.\n if derivatives:\n derivatives.insert(0, derivatives[0])\n self.derivatives = derivatives", "def pressure_diff(lop1, lop2, tol):\n# lopet = []\n \n# for i in range(len(lop1)):\n# if abs(lop1[i] - lop2[i]) > tol:\n# lopet = lopet + [(lop1[i], lop2[i])]\n# \n# return lopet\n \n# for t in zip(lop1, lop2):\n# if abs(t[0] - t[1]) > tol:\n# lopet = lopet + [t]\n# return lopet\n\n# return [t for t in zip(lop1, lop2) if abs(t[0] - t[1]) > tol]\n \n return [(p1, p2) for (p1, p2) in zip(lop1, lop2) if abs(p1 - p2) > tol]", "def divideelement(self, eleref):\n numnod = self.p.shape[0]\n tnew = np.zeros((1, 3), np.int32)\n pnew = np.zeros((1, 2))\n for j in range(len(eleref)):\n elei = eleref[j]\n index = self.t[elei, :]\n p1 = (self.p[index[0], :] + self.p[index[1], :]) / 2\n p2 = (self.p[index[0], :] + self.p[index[2], :]) / 2\n p3 = (self.p[index[1], :] + self.p[index[2], :]) / 2\n pi = np.array(([[p1[0], p1[1]], [p2[0], p2[1]], [p3[0], p3[1]]]))\n newele = np.array(\n (\n [\n [index[0], numnod + j * 3, numnod + j * 3 + 1],\n [index[1], numnod + j * 3, numnod + j * 3 + 2],\n [index[2], numnod + j * 3 + 1, numnod + j * 3 + 2],\n [numnod + j * 3, numnod + j * 3 + 1, numnod + j * 3 + 2],\n ]\n )\n )\n tnew = np.append(tnew, newele, axis=0)\n pnew = np.append(pnew, pi, axis=0)\n tnew = np.delete(tnew, 0, axis=0)\n pnew = np.delete(pnew, 0, axis=0)\n self.t = np.delete(self.t, eleref, axis=0)\n self.t = np.append(self.t, tnew, axis=0)\n self.p = np.append(self.p, pnew, axis=0)\n self.removeduplicatenode()\n poi, local = self.nodeedge()\n layer = 0\n while len(local) != 0:\n layer = 1\n self.removehangingnode(poi, local, layer)\n self.removeduplicatenode()\n poi, local = self.nodeedge()\n return self.p, self.t, len(eleref)", "def divide(list1, list2):\n assert len(list1) == len(list2)\n return [e1 / e2 for (e1, e2) in zip(list1, list2)]", "def 
test_frac_diffGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"GGGG\")\n s3 = self.RNA(\"----\")\n s4 = self.RNA(\"A-A-\")\n s5 = self.RNA(\"-G-G\")\n s6 = self.RNA(\"UU--\")\n s7 = self.RNA(\"-\")\n s8 = self.RNA(\"GGG\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff_gaps(s1), 0)\n self.assertEqual(s1.frac_diff_gaps(s2), 0)\n self.assertEqual(s1.frac_diff_gaps(s3), 1)\n self.assertEqual(s1.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s5), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s6), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s7), 1)\n self.assertEqual(s1.frac_diff_gaps(e), 0)\n self.assertEqual(s3.frac_diff_gaps(s3), 0)\n self.assertEqual(s3.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s3.frac_diff_gaps(s7), 0.0)\n self.assertEqual(e.frac_diff_gaps(e), 0.0)\n self.assertEqual(s4.frac_diff_gaps(s5), 1.0)\n self.assertEqual(s4.frac_diff_gaps(s6), 0.5)\n self.assertFloatEqual(s6.frac_diff_gaps(s8), 1 / 3.0)", "def node_waiting_time_pairs(tree):\n treecalc.add_depth_to_nodes(tree)\n depths = [(n, n.depth) for n in tree.internal_nodes()] \n depths.sort(lambda x, y: int(x[1] - y[1])) \n intervals = []\n intervals.append(depths[0])\n for i, d in enumerate(depths[1:]):\n intervals.append( (d[0], d[1] - depths[i][1]) )\n return intervals", "def compute_subgraph_divergence(self, nodes):\n # We initialize the subgraph divergence with number of combinations for set nodes of size 2\n subgraph_divergence = Ensemble.compute_combinations(len(nodes), 2)\n induced_subgraphs_dict = self.find_subgraphs_induced_by_nodes(nodes)\n probability_of_subgraphs = [float(len(timestamps)) / self.get_num_of_timestamps() for timestamps in\n induced_subgraphs_dict.values()]\n for p in probability_of_subgraphs:\n if p > 0.0:\n subgraph_divergence += p * math.log2(p)\n return subgraph_divergence", "def edge_diffs(self):\n iterator = _tskit.TreeDiffIterator(self._ll_tree_sequence)\n for interval, edge_tuples_out, edge_tuples_in in iterator:\n edges_out = [Edge(*e) for e in edge_tuples_out]\n edges_in = [Edge(*e) for e in edge_tuples_in]\n yield interval, edges_out, edges_in", "def detect_divide_before_multiply(contract):\n\n # Create our result set.\n # List of tuple (function -> list(list(nodes)))\n # Each list(nodes) of the list is one bug instances\n # Each node in the list(nodes) is involved in the bug\n results = []\n\n # Loop for each function and modifier.\n for function in contract.functions_declared:\n if not function.entry_point:\n continue\n\n # List of list(nodes)\n # Each list(nodes) is one bug instances\n f_results = []\n\n # lvalue -> node\n # track all the division results (and the assignment of the division results)\n divisions = defaultdict(list)\n\n _explore({function.entry_point}, f_results, divisions)\n\n for f_result in f_results:\n results.append((function, f_result))\n\n # Return the resulting set of nodes with divisions before multiplications\n return results", "def rdiv(data, # a list of class Nums\r\n all, # all the data combined into one num\r\n div, # function: find the best split\r\n big, # function: rejects small splits\r\n same, # function: rejects similar splits\r\n epsilon): # small enough to split two parts\r\n def recurse(parts,all,rank=0):\r\n \"Split, then recurse on each part.\"\r\n cut,left,right = maybeIgnore(div(parts,all,big,epsilon),\r\n same,parts)\r\n if cut:\r\n # if cut, rank \"right\" higher than \"left\"\r\n rank = recurse(parts[:cut],left,rank) + 1\r\n rank = recurse(parts[cut:],right,rank)\r\n\r\n else:\r\n # if no cut, then all get same 
rank\r\n for part in parts:\r\n part.rank = rank\r\n return rank\r\n recurse(sorted(data),all)\r\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a polynomial from the given list of divided differences. The polynomial string is created according to the equation provided in the project docs.
def calculate_newton_interpolation(divided_differences): polynomial = [] for i, divided_differences_row in enumerate(divided_differences): polynomial_part = '({0})'.format(divided_differences_row[0].divided_difference) for j in range(0, i): polynomial_part += '*(x-{0})'.format(divided_differences[0][j].x) polynomial_part += '+' polynomial.append(polynomial_part) polynomial_str = ''.join(polynomial)[:-1] print('Calculated polynomial: {0}'.format(polynomial_str)) # Heuristic simplification of calculated polynomial simplified_polynomial = sy.simplify(polynomial_str) print("Simplified polynomial: {0}".format(simplified_polynomial)) return simplified_polynomial
[ "def list_to_poly(polynomial_list):\n max_degree = len(polynomial_list) - 1\n strings = []\n opts = ['x', '']\n for index, num in enumerate(polynomial_list):\n if num == 0:\n continue\n if index < max_degree - 1:\n string = '{}x^{}'.format(num, max_degree - index)\n strings.append(string)\n else:\n strings.append(str(num) + opts[index - (max_degree - 1)])\n polynomial = ' + '.join(strings).replace('+ -', '- ')\n return polynomial", "def generate_polynomial(coefs, variable='x'):\n power = len(coefs) - 1\n polynomial = ''\n\n first = coefs[0]\n coefs = coefs[1:]\n\n if first == 1:\n polynomial = variable + \"^\" + str(power)\n else:\n polynomial = str(first) + variable + \"^\" + str(power)\n\n power = power - 1\n\n for term in coefs:\n if term == 0:\n power = power - 1\n continue\n\n if term == 1 and power != 0:\n operator_term = \" + \"\n value_term = ''\n elif term == -1 and power != 0:\n operator_term = \" - \"\n value_term = ''\n elif term < 0:\n operator_term = \" - \"\n value_term = str(abs(term))\n elif term > 0:\n operator_term = \" + \"\n value_term = str(term)\n\n else:\n continue\n\n if power == 1:\n power_term = variable\n elif power == 0:\n power_term = ''\n else:\n power_term = variable + '^' + str(power)\n\n polynomial = polynomial + operator_term + value_term + power_term\n\n power = power - 1\n\n return polynomial", "def generate_latex_for_polynomial(num,\n den,\n variable):\n\n # Build the numerator string\n\n def build_string(values):\n string = ''\n # First I need a list of values in descending order.\n # Then I need to enumerate them to create the (degree, value) vector,\n # and finally I need this in descending order.\n # TODO: use generators properly.\n\n tuples = list(reversed(list(enumerate(reversed(values)))))\n # Special case for the first element:\n (first_degree, first_value) = tuples[0]\n\n if len(values) == 1: # Handle the special case first\n return str(values[0])\n\n if first_value != 0:\n if first_value < 0:\n string = string + '-'\n if first_value != 1:\n string = string + str(first_value)\n string = string + variable + '^'\n if first_degree >= 10:\n string = string + '{' + str(first_degree) + '}'\n else:\n string = string + str(first_degree)\n\n for (order, value) in tuples[1:]:\n # Correctly add the +/-.\n if value == 0:\n continue\n elif value < 0:\n string = string + ' - '\n else:\n string = string + ' + '\n if not (value == 1 or value == -1) and order != 0:\n string = string + str(value)\n\n # And the variable and power.\n if order == 1:\n string = string + variable\n elif order >= 10:\n string = string + variable + '^{' + str(order) + '}'\n elif order == 0:\n string = string + str(value)\n elif order != 0:\n string = string + variable + '^' + str(order)\n\n return string\n\n num_string = build_string(num)\n den_string = build_string(den)\n return \"\\\\frac{\" + num_string + \"}{\" + den_string + \"}\"", "def make_polynomial_function(coeffs):\n pass", "def eval_poly(coeff, x):\n return reduce(lambda a, b: a*x+b, coeff[::-1])", "def coefficients_from_Weierstrass_polynomial(f):\n R = f.parent()\n cubic_variables = [x for x in R.gens() if f.degree(x) == 3]\n quadratic_variables = [y for y in R.gens() if f.degree(y) == 2]\n try:\n x = cubic_variables[0]\n y = quadratic_variables[0]\n except IndexError:\n raise ValueError('polynomial is not in long Weierstrass form')\n\n a1 = a2 = a3 = a4 = a6 = 0\n x3 = y2 = None\n for coeff, mon in f:\n if mon == x**3:\n x3 = coeff\n elif mon == x**2:\n a2 = coeff\n elif mon == x:\n a4 = coeff\n elif mon == 1:\n a6 = 
coeff\n elif mon == y**2:\n y2 = -coeff\n elif mon == x*y:\n a1 = -coeff\n elif mon == y:\n a3 = -coeff\n else:\n raise ValueError('polynomial is not in long Weierstrass form')\n\n if x3 != y2:\n raise ValueError('the coefficient of x^3 and -y^2 must be the same')\n elif x3 != 1:\n a1, a2, a3, a4, a6 = a1/x3, a2/x3, a3/x3, a4/x3, a6/x3\n return [a1, a2, a3, a4, a6]", "def polynomial(self) -> str:\r\n char = str()\r\n for i, element in enumerate(self.poly_coef[::-1]):\r\n ele = np.round(element, 4)\r\n if i == 0:\r\n char += '{}'.format(ele)\r\n elif i == 1:\r\n if ele > 0.0:\r\n char += '+'\r\n char += (\r\n '{0:.4f}'.format(ele)\r\n + 'B')\r\n else:\r\n if ele > 0.0:\r\n char += '+'\r\n char += (\r\n '{0:.4f}'.format(ele) \r\n + 'B^' \r\n + str(i))\r\n return char", "def gen_factorised_polynomial(constants, coefficient=1, var=\"x\"):\n if coefficient == 0:\n coefficient = 1\n x = sympy.Symbol(var)\n expression = 1\n for c in constants:\n expression = expression * ((coefficient * x) + c)\n coefficient = 1\n return expression", "def latex_polynomial(expression):\n return weblatex.escape_latex_for_web(sympy.latex(expression))", "def evalPolynomial(n,a,b):\n return n**2 + a*n + b", "def fourth_poly(a, b, c, d, e):\n return lambda z: a*z**4 + b*z**3 + c*z**2 + d*z + e", "def poly(x, order, **kwargs):\n\n\n return x**order", "def linear_simplify_poly(poly):\n if len(poly) < 4:\n return poly\n\n q = Queue()\n for v in poly:\n q.put(v)\n\n new_poly = []\n a = q.get()\n b = q.get()\n while True:\n if q.empty():\n new_poly += [a,b]\n break\n c = q.get()\n e1 = (b-a).normalized()\n e2 = (c-b).normalized()\n if abs(1.0 - e1.dot(e2)) < 1e-2:\n # colinear. skip b.\n a = a\n b = c\n else:\n # a,b needed.\n new_poly += [a]\n a = b\n b = c\n return new_poly", "def polyLS(pd, x, y, f, X, Y \\\n, coeff = [], xmc = [], ymc = [], ell = [], w = [], ELL = [], W = []) :\n xmc, ymc, ell, w, ELL, W = assignDefaults(x, y, xmc, ymc, ell, w, ELL, W)\n \n numP = int((pd + 1) * (pd + 2) / 2)\n \n if (len(xmc) == 1) and (len(ymc) == 1) :\n \n\n if coeff == [] :\n p = poly(x, y, pd)\n coeff = np.linalg.lstsq(p, f, rcond=None)[0]\n\n B = poly(X, Y, pd)\n approx = B.dot(coeff).flatten()\n coeff_copy = coeff\n \n else :\n \n approx = np.zeros(len(X), float)\n \n if coeff == [] :\n for i in range(len(xmc)) :\n IND = inSquare(x, y, xmc[i], ymc[i], ELL, W)\n if len(IND) < int(1.5 * numP) :\n raise ValueError(\"Not enough data for this polynomial \" \\\n + \"degree.\\nEither lower the polynomial degree or \" \\\n + \"decrease the number of subdivisions.\")\n p = poly(x[IND], y[IND], pd)\n lam = np.linalg.lstsq(p, f[IND], rcond=None)[0]\n coeff.append(lam)\n\n coeff_copy = coeff.copy()\n\n for i in range(len(xmc) - 1, -1, -1) :\n IND = inSquare(X, Y, xmc[i], ymc[i], ell, w)\n B = poly(X[IND], Y[IND], pd)\n lam = coeff.pop()\n approx[IND] = B.dot(lam).flatten()\n \n return approx, coeff_copy", "def linear_polynomial(self, e: 'PFElement') -> Polynomial:\n poly = self.polynomial(-e)\n poly += poly.monic(1)\n return poly", "def eval_polynomial(x, coeffs):\n pass", "def parse_poly(self, expr: str) -> Polynomial:\n return symbolic_polynomial(expr, self)", "def polynomial(self, *args, indeterminate: str = 'X') -> Polynomial:\n return Polynomial([self.element(c) for c in args], base_field=self, indeterminate=indeterminate)", "def build_poly(x, degree):\n poly=np.ones([x.shape[0],degree+1])\n for i in range(0,x.shape[0]):\n for j in range(1, degree+1):\n poly[i][j]=math.pow(x[i],j)\n return poly" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draws an interpolation plot for the given interpolation polynomial and nodes.
def draw_interpolation_plot(start_x, end_x, interpolation_polynomial, nodes, freq=200, additional_polynomial=None, additional_nodes=None): # TODO: calculate figure size dynamically plt.figure(figsize=(8, 6), dpi=80) x = numpy.linspace(start_x, end_x, freq) # TODO: eval should be changed to something more secure (like numexpr evaluate())... y = eval(str(interpolation_polynomial)) plt.subplot(211) plt.plot(x, y, [node[0] for node in nodes], [node[1] for node in nodes], 'ro') plt.grid(True) if additional_polynomial: poly_values = eval(str(additional_polynomial)) plt.subplot(212) plt.plot(x, poly_values, [node[0] for node in additional_nodes], [node[1] for node in additional_nodes], 'ro') plt.grid(True) plt.show()
[ "def create_lookup_plot():\n\n # init data based on function of this module\n temp_points = [30, 55, 80, 105, 130]\n delta_surface = [2.04, 4.04, 7.01, 11.01, 14.64]\n delta_air = [2.89, 6.37, 12.16, 15.41, 18.52]\n delta_electrode = [3.08, 7.24, 13.28, 15.75, 20.80]\n\n # calculate coefficients for interpolation\n delta_surface_fit = numpy.polyfit(temp_points, delta_surface, 1)\n delta_air_fit = numpy.polyfit(temp_points, delta_air, 1)\n delta_electrode_fit = numpy.polyfit(temp_points, delta_electrode, 1)\n\n # define functions based on polynomial fit\n x = numpy.linspace(30, 130, 130)\n func_surface = delta_surface_fit[0]*x + delta_surface_fit[1]\n func_air = delta_air_fit[0] * x + delta_air_fit[1]\n func_electrode = delta_electrode_fit[0] * x + delta_electrode_fit[1]\n\n # create plot\n plt.rcParams[\"font.family\"] = \"Times New Roman\"\n # plt.title(\"Control temperature deviation\")\n plt.plot(x, func_surface, \"r\")\n plt.plot(x, func_air, \"b\")\n plt.plot(x, func_electrode, \"g\")\n plt.scatter(temp_points, delta_surface, c='None', edgecolors=\"red\", marker='o', label=\"Inner surface of test cell\")\n plt.scatter(temp_points, delta_air, c='None', edgecolors=\"blue\", marker='^', label=\"Air at medium height\")\n plt.scatter(temp_points, delta_electrode, c='None', edgecolors=\"green\", marker='d', label=\"Between the electrodes\")\n plt.xlabel(\"Control temperature in °C\")\n plt.ylabel(\"Temperature drop in °C\")\n plt.legend(loc=\"lower right\")\n plt.grid()\n plt.savefig(\"temperature_lookup.png\", dpi=300)\n plt.show()", "def drawPolynomial(self, index, color, precision=200):\n graph = self.graphs[index]\n if len(graph) > 1:\n p = PolynomialInterpolation(graph, color)\n p.show(self.context, precision)", "def plot_nodes(nodes):\n x = [node.x for node in nodes]\n y = [node.y for node in nodes]\n plt.plot(x, y, 'k.')\n# plot_nodes_id(nodes)\n plot_nodes_energy(nodes)", "def plot_interpolation(self):\r\n self.plot_all_logcalls(True)\r\n print_log('info', 'Interpolation was finished.')", "def plot_interpolation(ddG_x,ddG_y,ddG_x2,ddG_y2,ddG_x_interp,ddG_y_interp,close=True,save=True):\n\tplt.rc('text', usetex=True)\n\tplt.plot(ddG_x,ddG_y,'*',markersize=5,fillstyle='none')\n\tplt.plot(ddG_x2,ddG_y2,'o',markersize=5,fillstyle='none')\n\tplt.plot(ddG_x_interp, ddG_y_interp, '-')\n\tplt.legend(['input lambdas', 'cub/lin interpolated lambdas','cubic interpolation func'], loc='best')\n\tplt.xlabel(r'$\\lambda$') \n\tplt.ylabel(r\"$\\Delta \\Delta G / \\mathrm{kJ mol^{-1}}$\")\n\tif save:\n\t\tplt.savefig('ddG_interpolation.pdf')\n\tif close:\n\t\tplt.close()", "def make_plot(x,y):", "def trace_interpolation(x, y):\n\n liste_a, liste_b = liste_matrices_a(x, y), liste_matrices_b(x, y)\n c = np.linalg.solve(liste_a, liste_b)\n\n x_spline = np.linspace(x[0], x[-1], 200)\n\n y_spline = np.zeros(len(x_spline))\n\n for i in range(len(x) - 1):\n mask = (x_spline >= x[i]) & (x_spline <= x[i + 1])\n x_i = x_spline[mask]\n y_spline[mask] = c[i, 0] * (x_i**3) + c[i, 1] * (\n x_i**2) + c[i, 2] * x_i + c[i, 3]\n\n plt.scatter(x, y)\n plt.plot(x_spline, y_spline)", "def _plot_interpolation(x, y, x_new, y_new, title=\"\"):\n f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)\n axes = (ax1, ax2, ax3)\n coord = [\"X\", \"Y\", \"Z\"]\n\n for idx, ax in enumerate(axes):\n ax.set_title(title + \" (\" + coord[idx] + \" coordinate)\", fontsize=12)\n ax.set_ylabel(\"m\")\n ax.plot(x, y[:, idx], \"bo\", label=\"Original data\")\n ax.plot(x_new, y_new[:, idx], \"ro\", label=\"Interpolated 
data\")\n\n ax3.set_xlabel(\"Time\")\n ax1.legend(fontsize=8, loc=1)\n f.subplots_adjust(hspace=0.3)\n plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)\n plt.show()", "def solve_i():\r\n x = np.array([ -2.1, -1.45, -1.3, -0.2, 0.1, 0.15, 0.8, 1.1, 1.5, 2.8, 3.8 ])\r\n y = np.array([0.012155, 0.122151, 0.184520, 0.960789, 0.990050, 0.977751,\r\n 0.527292, 0.298197, 0.105399, 3.936690E-4, 5.355348E-7])\r\n # find and plot both interpolations and the oiginal points\r\n plt.figure(1)\r\n cubic_interpol(x,y)\r\n lin_interpol(x,y)\r\n plt.plot(x, y, 'rx', ms = 10, label = 'Points')\r\n # plot settings\r\n plt.title('Cubic & Linear Interpolation Given Points')\r\n plt.xlabel('x',fontsize = 14)\r\n plt.ylabel('y',fontsize = 14)\r\n plt.legend()", "def plot_all_logcalls(self, interpolation=False):\r\n\r\n # instead of ax.hold(False)\r\n self.figure.clear()\r\n\r\n # create an axis\r\n self.ax1 = self.figure.add_subplot(111)\r\n self.ax1.set_title('Logcalls pattern')\r\n\r\n any_node_logcalls = dict()\r\n sr_x = list()\r\n\r\n for e in self.called_func_route:\r\n for i in self.nodes:\r\n (i not in sr_x) and sr_x.append(i)\r\n if e not in any_node_logcalls:\r\n any_node_logcalls[e] = list()\r\n any_node_logcalls[e].append(self.nodes[i].get_func_count(e))\r\n\r\n progress_value = 0\r\n self.progressBar.setValue(0)\r\n for e in self.called_func_route:\r\n sr_fx = any_node_logcalls[e]\r\n self.ax1.plot(sr_x, sr_fx, linestyle='', marker='o', color='b')\r\n\r\n if interpolation:\r\n Lx = self.get_sub_two_interpolation_func(sr_x, sr_fx)\r\n # Enlargement the range to 10 folds for drawing the result of interpolation.\r\n self.tmp_x = [\r\n i / 10.0 for i in range(sr_x[0] * 10, sr_x[-1] * 10 + 1)\r\n ]\r\n self.tmp_y = [Lx(i) for i in self.tmp_x]\r\n self.ax1.plot(\r\n self.tmp_x, self.tmp_y, linestyle='--', marker='', label=e)\r\n else:\r\n self.ax1.plot(sr_x, sr_fx, linestyle='--', marker='o', label=e)\r\n\r\n progress_value += 1\r\n self.progressBar.setValue(\r\n float(progress_value) / len(self.called_func_route) * 100)\r\n\r\n self.ax1.legend(loc='best')\r\n\r\n # refresh canvas\r\n self.canvas.draw()", "def plot(self, show=True):\n \n curve_x = [node.x for node in self.nodeList]\n curve_y = [node.y for node in self.nodeList]\n pl.plot(curve_x, curve_y)\n \n if show:\n pl.show()", "def cubic_interpol(X_P, Y_P):\r\n y_derivs = derivatives( X_P, Y_P ).flatten() # flatten as FB_sub returns 2d array\r\n \r\n for j in np.arange( X_P.shape[0] - 1 ): # for every x[i] and x[i+1] pair\r\n plot_points = np.linspace( X_P[j], X_P[j+1], 20) # points to plot in the interval\r\n params = [ X_P[j], X_P[j+1], Y_P[j], Y_P[j+1],\r\n y_derivs[j], y_derivs[j+1]]\r\n f_points = f(plot_points, params)\r\n plt.plot(plot_points, f_points, 'b-', ms = .5, label = 'Cubic'if j==0 else \"\") # only label one plot\r", "def plot_mpl(self, prog=\"dot\"):\n nx.draw(self.graph, self.pretty_plot_coordinates(prog=prog))\n plt.show()", "def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], 
(x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()", "def plot_graph(self) -> None:", "def plotTI():\n min_dl = dlam[dlam != 0].min()\n S = int(0.4/min_dl)\n fig = pl.figure(figsize = (8,6))\n ax = fig.add_subplot(1,1,1)\n ax.spines['bottom'].set_position('zero')\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n for k, spine in ax.spines.items():\n spine.set_zorder(12.2)\n\n xs, ndx, dx = [0], 0, 0.001\n colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y']\n min_y, max_y = 0, 0\n\n lines = tuple()\n ## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper\n lv_names2 = []\n for j in range(n_components):\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n lv_names2.append(r'$%s$' % P.lv_names[j].capitalize())\n\n for j in range(n_components):\n\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n\n # Get the coordinates.\n lj = lchange[:,j]\n x = lv[:,j][lj]\n y = y[lj]/P.beta_report\n\n if 'TI' in P.methods:\n # Plot the TI integration area.\n ss = 'TI'\n for i in range(len(x)-1):\n min_y = min(y.min(), min_y)\n max_y = max(y.max(), max_y)\n #pl.plot(x,y)\n if i%2==0:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5)\n xlegend = [-100*wnum for wnum in range(len(lv_names2))]\n pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper\n\n if 'TI-CUBIC' in P.methods and not cubspl[j]==0:\n # Plot the TI-CUBIC interpolation curve.\n ss += ' and TI-CUBIC'\n xnew = numpy.arange(0, 1+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0)\n\n else:\n # Plot the TI-CUBIC integration area.\n ss = 'TI-CUBIC'\n for i in range(len(x)-1):\n xnew = numpy.arange(x[i], x[i+1]+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n ynew[0], ynew[-1] = y[i], y[i+1]\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n if i%2==0:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5)\n\n # Store the abscissa values and update the subplot index.\n xs += (x+ndx).tolist()[1:]\n ndx += 1\n\n # Make sure the tick labels are not overcrowded.\n xs = numpy.array(xs)\n dl_mat = numpy.array([xs-i for i in xs])\n ri = range(len(xs))\n\n def getInd(r=ri, z=[0]):\n primo = r[0]\n min_dl=ndx*0.02*2**(primo>10)\n if dl_mat[primo].max()<min_dl:\n return z\n for i in r:\n for j in range(len(xs)):\n if dl_mat[i,j]>min_dl:\n z.append(j)\n return getInd(ri[j:], z)\n\n xt = [i if (i in getInd()) else '' for i in range(K)]\n pl.xticks(xs[1:], xt[1:], fontsize=10)\n pl.yticks(fontsize=10)\n #ax = pl.gca()\n #for label in ax.get_xticklabels():\n # label.set_bbox(dict(fc='w', ec='None', alpha=0.5))\n\n # Remove the abscissa ticks and set 
up the axes limits.\n for tick in ax.get_xticklines():\n tick.set_visible(False)\n pl.xlim(0, ndx)\n min_y *= 1.01\n max_y *= 1.01\n pl.ylim(min_y, max_y)\n\n for i,j in zip(xs[1:], xt[1:]):\n pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', ha='center', color='#151B54')\n if ndx>1:\n lenticks = len(ax.get_ymajorticklabels()) - 1\n if min_y<0: lenticks -= 1\n if lenticks < 5:\n from matplotlib.ticker import AutoMinorLocator as AML\n ax.yaxis.set_minor_locator(AML())\n pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12)\n pl.ylabel(r'$\\mathrm{\\langle{\\frac{ \\partial U } { \\partial \\lambda }}\\rangle_{\\lambda}\\/%s}$' % P.units, fontsize=20, color='#151B54')\n pl.annotate('$\\mathit{\\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54')\n if not P.software.title()=='Sire':\n lege = ax.legend(prop=FP(size=14), frameon=False, loc=1)\n for l in lege.legendHandles:\n l.set_linewidth(10)\n pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf'))\n pl.close(fig)\n return", "def spline_plot( spline, *args, **kwargs ):\n xmin = kwargs.pop( 'xmin', spline.GetXmin() )\n xmax = kwargs.pop( 'xmax', spline.GetXmax() )\n n = kwargs.pop( 'n', spline.GetNp() )\n\n x = N.linspace( xmin, xmax, n )\n fcn = N.frompyfunc( spline.Eval, 1, 1 )\n y = fcn( x )\n\n return P.plot( x, y, *args, **kwargs )", "def create_plots(self):\n x_values = nmp.array(range(self.button_start_var.get(), self.button_end_var.get()))\n # the 'step' below ensures that the list of points will not be too long\n # to help graphs load faster\n step = 2 * int( nmp.size(x_values) / 100) + 1\n x_values = x_values[::step]\n plt.style.use('Solarize_Light2')\n plt.plot([x_values[0],x_values[-1]], [nmp.pi, nmp.pi], linestyle='-.', label='Actual value of PI (16 s.f)')\n plt.annotate('PI: {}'.format(nmp.pi),(x_values[0], nmp.pi),\n bbox=dict(boxstyle=\"round,pad=0.25\"), fontsize=9)\n plt.xlabel('Number of terms used to approximate Pi')\n plt.ylabel('Approximated value of Pi')\n if self.options_selected[0].get()==1:\n plt.plot(x_values, nmp.array([basel(x) for x in x_values]),\n label='Basel:\\n{}'.format(basel(x_values[-1])), linewidth=1)\n if self.options_selected[1].get()==1:\n plt.plot(x_values, nmp.array([wallis(x) for x in x_values]),\n label='Wallis:\\n{}'.format(wallis(x_values[-1])), linewidth=1)\n if self.options_selected[2].get()==1:\n y_values = [leibniz_madhava(x) for x in x_values]\n plt.plot(x_values, nmp.array([leibniz_madhava(x) for x in x_values]),\n label='Leibniz-Madhava:\\n{}'.format(leibniz_madhava(x_values[-1])), linewidth=1)\n if self.options_selected[3].get()==1:\n plt.plot(x_values, nmp.array([odd_product_formula(x) for x in x_values]),\n label='Sum of Odd Products:\\n{}'.format(odd_product_formula(x_values[-1])), linewidth=1)\n # control statements below change y-axis zoom based on start/end points\n # to give it a cleaner look\n if self.button_end_var.get() > 500:\n plt.ylim(3.140,3.143)\n elif self.button_end_var.get() > 50:\n plt.ylim(3.11,3.17)\n elif self.button_end_var.get() > 10:\n plt.ylim(3,3.3)\n plt.title('Approximating Pi using infinite series')\n plt.legend(loc=1, fontsize=7)\n plt.show()", "def plot_partition(g,part,title,fname='figure',nod_labels = None, pos = None,\n within_mod = 'none', part_coeff = 'none',les_dam='none'):\n\n\n write_labels = False\n nnod = g.number_of_nodes()\n\n if nod_labels == 
None:\n nod_labels = dict(zip(range(nnod),range(nnod)))\n else:\n nod_labels = dict(zip(range(nnod),nod_labels))\n\n\n plt.figure()\n plt.subplot(111)\n plt.axis('off')\n\n if pos == None:\n pos=nx.circular_layout(g)\n\n #col=colors.cnames.keys()\n col = ['r','g','b','m','c','y']\n col2 = ['#000066','#000099','#660000','#CC6633','#FF0099','#FF00FF','#33FFFF','#663366','#FFCC33','#CCFF66','#FFCC99','#33CCCC','#FF6600','#FFCCFF','#CCFFFF','#CC6699','#CC9900','#FF6600','#99FF66','#CC0033','#99FFFF','#CC00CC','#CC99CC','#660066','#33CC66','#336699','#3399FF','#339900','#003300','#00CC00','#330033','#333399','#0033CC','#333333','#339966','#333300']\n\n niter = 0\n edge_list_between = []\n for m,val in part.iteritems():\n\n if niter <len(col):\n if within_mod == 'none': #note: assumes part_coeff also there\n for v in val:\n if les_dam != 'none':\n plt.scatter(pos[v][0],pos[v][1],s=100*les_dam[v],c='orange',marker=(10,1,0))\n\n nx.draw_networkx_nodes(g,pos,nodelist=list(val),node_color=col[niter],node_size=50)\n else:\n for v in val:\n if les_dam != 'none':\n plt.scatter(pos[v][0],pos[v][1],s=500*les_dam[v],c='orange',marker=(10,1,0))\n\n if within_mod[v] > 1:\n nx.draw_networkx_nodes(g,pos,nodelist=[v],node_color=col[niter],node_size=part_coeff[v] * 500+50,node_shape='s',linewidths=2)\n else:\n nx.draw_networkx_nodes(g,pos,nodelist=[v],node_color=col[niter],node_size=part_coeff[v] * 500+50,node_shape='o',linewidths=0.5)\n\n else:\n #print 'out of colors!!'\n if within_mod == 'none': #note: assumes part_coeff also there\n for v in val:\n if les_dam != 'none':\n plt.scatter(pos[v][0],pos[v][1],s=100*les_dam[v],c='orange',marker=(10,1,0))\n\n nx.draw_networkx_nodes(g,pos,nodelist=list(val),node_color=col2[niter],node_size=50)\n else:\n for v in val:\n if les_dam != 'none':\n plt.scatter(pos[v][0],pos[v][1],s=500*les_dam[v],c='orange',marker=(10,1,0))\n\n if within_mod[v] > 1:\n nx.draw_networkx_nodes(g,pos,nodelist=[v],node_color=col2[niter],node_size=part_coeff[v] * 500+50,node_shape='s',linewidths=2)\n else:\n nx.draw_networkx_nodes(g,pos,nodelist=[v],node_color=col2[niter],node_size=part_coeff[v] * 500+50,node_shape='o',linewidths=0.5)\n\n\n\n val_array = np.array(val)\n edge_list_within = []\n for edg in g.edges():\n #temp = np.array(edge_list_between)\n n1_ind = np.where(val_array == edg[0])[0]\n n2_ind = np.where(val_array == edg[1])[0]\n #edg_ind = np.where(temp == edg)\n\n if len(n1_ind) > 0 and len(n2_ind) > 0:\n #add on the edge if it is within the partition\n edge_list_within.append(edg)\n elif len(n1_ind)>0 and len(n2_ind) == 0:\n #add on the edge if it hasn't been seen before\n edge_list_between.append(edg)\n elif len(n2_ind)>0 and len(n1_ind) == 0:\n edge_list_between.append(edg)\n\n\n if niter <len(col):\n nx.draw_networkx_edges(g,pos,edgelist=edge_list_within,edge_color=col[niter])\n else:\n nx.draw_networkx_edges(g,pos,edgelist=edge_list_within,edge_color=col2[niter])\n niter += 1\n\n\n #nx.draw_networkx_edges(g,pos,edgelist=nx.edges(g))\n nx.draw_networkx_edges(g,pos,edgelist=edge_list_between,edge_color='k')\n if write_labels:\n nx.draw_networkx_labels(g,pos,nod_labels,font_size=6)\n\n #add loop for damage labels\n if les_dam != 'none':\n for m,val in part.iteritems():\n for v in val:\n if les_dam[v] > 0:\n plt.scatter(pos[v][0],pos[v][1],s=500*les_dam[v]+100,c='orange',marker=(10,1,0))\n\n plt.title(title)\n #plt.savefig(fname)\n #plt.close()\n #plt.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method generates a header file containing the data from the provided numpy array. It is used to capture the tensor data (for both inputs and expected outputs) to be bundled into the standalone application.
def _create_header_file(tensor_name, npy_data, output_path, data_linkage): file_path = pathlib.Path(f"{output_path}/" + tensor_name).resolve() # create header file raw_path = file_path.with_suffix(".h").resolve() with open(raw_path, "w") as header_file: header_file.write("#include <stddef.h>\n") header_file.write("#include <stdint.h>\n") header_file.write("#include <dlpack/dlpack.h>\n") header_file.write(f"const size_t {tensor_name}_len = {npy_data.size};\n") _emit_data_linkage(header_file, data_linkage) header_file.write(f"{NP_TYPE_TO_C[str(npy_data.dtype)]} {tensor_name}[] =") header_file.write("{") for i in np.ndindex(npy_data.shape): header_file.write(f"{npy_data[i]}, ") header_file.write("};\n\n")
[ "def create_header_file(name, tensor_name, tensor_data, output_path):\n file_path = pathlib.Path(f\"{output_path}/\" + name).resolve()\n # Create header file with npy_data as a C array\n raw_path = file_path.with_suffix(\".h\").resolve()\n with open(raw_path, \"w\") as header_file:\n header_file.write(\n \"\\n\"\n + f\"const size_t {tensor_name}_len = {tensor_data.size};\\n\"\n + f'__attribute__((section(\".data.tvm\"), aligned(16))) int8_t {tensor_name}[] = \"'\n )\n\n data_hexstr = tensor_data.tobytes().hex()\n for i in range(0, len(data_hexstr), 2):\n header_file.write(f\"\\\\x{data_hexstr[i:i+2]}\")\n header_file.write('\";\\n\\n')", "def generate_header(data, outfile):\n ts_count = count_timesteps(outfile.name)\n header = {'numnodes': len(data),\n 'numdims': 4,\n 'numtimesteps': ts_count\n }\n\n return header", "def _gen_header():\n\n shared_fields = [\n ('nx', numpy.int32),\n ('ny', numpy.int32),\n ('nz', numpy.int32),\n ('mode', numpy.int32),\n ('nxstart', numpy.int32),\n ('nystart', numpy.int32),\n ('nzstart', numpy.int32),\n ('mx', numpy.int32),\n ('my', numpy.int32),\n ('mz', numpy.int32),\n ('xlen', numpy.float32),\n ('ylen', numpy.float32),\n ('zlen', numpy.float32),\n ('alpha', numpy.float32), # defocus\n ('beta', numpy.float32), # astig_ang\n ('gamma', numpy.float32), # astig_mag\n ('mapc', numpy.int32),\n ('mapr', numpy.int32),\n ('maps', numpy.int32),\n ('amin', numpy.float32),\n ('amax', numpy.float32),\n ('amean', numpy.float32),\n ('ispg', numpy.int32),\n ('nsymbt', numpy.int32),\n ]\n\n header_image_dtype = numpy.dtype(shared_fields + [\n ('extra', 'S100'),\n ('xorigin', numpy.float32), # 208 320 4 char cmap; Contains \"MAP \"\n ('yorigin', numpy.float32),\n ('zorigin', numpy.float32),\n ('map', 'S4'),\n ('byteorder', numpy.int32),\n ('rms', numpy.float32),\n ('nlabels', numpy.int32),\n ('label0', 'S80'),\n ('label1', 'S80'), # Image Type\n # Allow image `type`s are:\n # - Power Spectra: 'P'\n # - Windowed Particle: 'W'\n # - Micrograph: 'M'\n ('label2', 'S80'),\n ('label3', 'S80'),\n ('label4', 'S80'),\n ('label5', 'S80'),\n ('label6', 'S80'),\n ('label7', 'S80'),\n ('label8', 'S80'),\n ('label9', 'S80'),\n ])\n\n return header_image_dtype", "def write_headers(outfile, header):\n from struct import pack\n\n outfile.write(pack('fff',\n header['numnodes'],\n header['numdims'],\n header['numtimesteps']\n )\n )", "def build_header(self):\n self._get_inputs()\n self._build_enums()\n self._build_structs()\n with open(self.config_dict['output_header_file'], 'w') as fd:\n #_build_structs()\n\n # write file boiler plate stuff\n hdr_name = self.config_dict[\"output_header_file\"]\n hdr_name = os.path.basename(hdr_name)\n hdr_name = hdr_name.replace('.', '_')\n hdr_name = hdr_name.replace('/', '_')\n hdr_name = hdr_name.replace('\\\\', '_')\n # include guards for file.\n print(\"// auto generated file, edits may be overwritten.\", file=fd)\n print(\"// generated from source json file \"+self.config_file+' at {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()), file=fd )\n print(\"#ifndef __\"+hdr_name+\"__\", file=fd)\n print(\"#define __\"+hdr_name+\"__\", file=fd)\n print(\"\", file=fd)\n print(\"// standard include files..\", file=fd);\n print(\"#include <string>\", file=fd)\n print(\"#include <cstring>\", file=fd)\n print(\"#include <cstdint>\", file=fd)\n print(\"#include <iostream>\", file=fd)\n\n print(\"\", file=fd)\n # print out all the prototypes\n print(\"////////////////////////////////////////////////////////\", file=fd)\n print(\"// Prototypes for defined 
datatypes and helper functions\", file=fd)\n print(\"////////////////////////////////////////////////////////\", file=fd)\n # write in the prototype buffer\n print(self.prototypes.getvalue(), file=fd)\n # print out all the definitions\n print(\"////////////////////////////////////////////////////////\", file=fd)\n print(\"// definitions for all data types hand helper functions\", file=fd)\n print(\"////////////////////////////////////////////////////////\", file=fd)\n print(self.header_output.getvalue(), file=fd)\n print(\"\", file=fd)\n \n #end include guard\n print(\"#endif // end include guard\", file=fd)", "def _makeHeader(self) -> bytearray:\n header = Header()\n header.width = self.width\n header.height = self.height\n header.format = int(self.format)\n header.usage = 1 # XXX check these fields\n header.magFilter = 1\n header.mipmapVar1D = 6\n return bytearray(header)", "def write_header(dot_H_code_filename):\n\n filename = dot_H_code_filename.replace(\".\", \"_\")\n filename = filename.upper()\n filename = \"__\" + filename + \"_\"\n\n first_str = \"\"\"\n#pragma once\n\n\n#ifndef \"\"\"\n\n second_str = \"#define \"\n\n third_str = \"\"\"\n \n#include <string>\n#include <vector>\n#include <map>\n\n//using namespace std;\n\n\"\"\"\n\n d_header= \"\".join((first_str,\n filename,\n \"\\n\",\n second_str,\n filename,\n \"\\n\",\n third_str) )\n return d_header", "def writeHeader(cls, f):\n s = []\n s.append(\"# basename is the base file name of the data files.\")\n s.append(\"# modelbase is the base file name of the dist, fmod, pmode files corresponding to the data files\")\n s.append(\"# $Id:$\")\n s.append(\"#\")\n s.append(\"# basename modelbase ditherx dithery seeing norm airmass\")\n s.append(\"#\")\n f.write('\\n'.join(s) + \"\\n\")", "def write_header(_metadata, rename_padding=False):\n template = \"\"\"\\\n VERSION {version}\n FIELDS {fields}\n SIZE {size}\n TYPE {type}\n COUNT {count}\n WIDTH {width}\n HEIGHT {height}\n VIEWPOINT {viewpoint}\n POINTS {points}\n DATA {data}\n \"\"\"\n str_metadata = _metadata.copy()\n\n if not rename_padding:\n str_metadata['fields'] = ' '.join(_metadata['fields'])\n else:\n new_fields = []\n for f in _metadata['fields']:\n if f == '_':\n new_fields.append('padding')\n else:\n new_fields.append(f)\n str_metadata['fields'] = ' '.join(new_fields)\n str_metadata['size'] = ' '.join(map(str, _metadata['size']))\n str_metadata['type'] = ' '.join(_metadata['type'])\n str_metadata['count'] = ' '.join(map(str, _metadata['count']))\n str_metadata['width'] = str(_metadata['width'])\n str_metadata['height'] = str(_metadata['height'])\n str_metadata['viewpoint'] = ' '.join(map(str, _metadata['viewpoint']))\n str_metadata['points'] = str(_metadata['points'])\n tmpl = template.format(**str_metadata)\n return tmpl", "def generate_header(value_type, num_elements, element_multiplier, imag, name_length, name):\n result = []\n\n result += Ensemble.int32_to_bytes(value_type) # Value Type\n result += Ensemble.int32_to_bytes(num_elements) # Number of elements\n result += Ensemble.int32_to_bytes(element_multiplier) # Element Multiplier\n result += Ensemble.int32_to_bytes(imag) # Image\n result += Ensemble.int32_to_bytes(name_length) # Name Length\n result += name.encode() # Name\n\n return result", "def generate_package_header(self):\n headerLength=80\n tsh=numpy.zeros(headerLength,dtype=numpy.uint8) # declare memory for header\n tsh[0:8]=numpy.array([self.timingstring.shape[0]],dtype=numpy.uint64).view(numpy.uint8) # timingstring length in bytes\n 
tsh[8:16]=numpy.array([self.tGroup],dtype=numpy.uint64).view(numpy.uint8) # timing group number\n tsh[16]={'DigitalOutput':0,'AnalogOutput':1,'DigitalInput':2,'AnalogInput':3,'DelayTrain':4}[self.get_tgType()] # type of hardware interface\n# tsh[17]= GOING TO IGNORE THESE AS TOO SPECIFIC FOR PARSER TO GENERATE\n# tsh[18]=\n tsh[19:23]=numpy.array([1000./self.clockgenresolution],dtype=numpy.uint32).view(numpy.uint8)\n tsh[23]=hasattr(self,'swTrigger') # whether this group software-triggers acquisition (taken directly from XTSM)\n tsh[24]=self.isSparse # whether the sparse/dense conversion should be run on this data by the acquisition hardware\n tsh[25]=1 # HEADER VERSION\n# tsh[26:32]= Reserved for future use\n tsh[32:56]=numpy.fromstring(self.tGroupNode.Name[0].PCDATA[0:24].ljust(24,u\" \"),dtype=numpy.uint8) # tGroup Name\n tsh[56:80]=numpy.fromstring(self.tGroupNode.ClockedBy[0].PCDATA[0:24].ljust(24,u\" \"),dtype=numpy.uint8) # Clock Channel Name\n return tsh", "def _make_header(self):\n header = fits.Header()\n header[\"COMP\"] = (\"Galactic supernova remnants (SNRs)\",\n \"Emission component\")\n header[\"UNIT\"] = (\"Kelvin\", \"Map unit\")\n header[\"CREATOR\"] = (__name__, \"File creator\")\n # TODO:\n history = []\n comments = []\n for hist in history:\n header.add_history(hist)\n for cmt in comments:\n header.add_comment(cmt)\n self.header = header\n logger.info(\"Created FITS header\")", "def write_header(self): # -> None:\n ...", "def generateNHDRHeader(self, inputFile):\r\n\r\n logging.info('Processing started')\r\n #initialize PCR object\r\n imagePCRFile = PCRDataObject()\r\n #import image parameters of PCR object\r\n imagePCRFile.ImportFromFile(inputFile)\r\n\r\n filePathName, fileExtension = os.path.splitext(inputFile)\r\n #The directory of the .nhdr file\r\n nhdrPathName = filePathName + \".nhdr\"\r\n\r\n if fileExtension == \".pcr\":\r\n if imagePCRFile.form == 1 or imagePCRFile.form == 5 or imagePCRFile.form == 10:\r\n with open(nhdrPathName, \"w\") as headerFile:\r\n headerFile.write(\"NRRD0004\\n\")\r\n headerFile.write(\"# Complete NRRD file format specification at:\\n\")\r\n headerFile.write(\"# http://teem.sourceforge.net/nrrd/format.html\\n\")\r\n if imagePCRFile.form == 5:\r\n headerFile.write(\"type: ushort\\n\")\r\n elif imagePCRFile.form == 10:\r\n headerFile.write(\"type: float\\n\")\r\n elif imagePCRFile.form == 1:\r\n headerFile.write(\"type: uchar\\n\")\r\n headerFile.write(\"dimension: 3\\n\")\r\n headerFile.write(\"space: left-posterior-superior\\n\")\r\n sizeX = imagePCRFile.X\r\n sizeY = imagePCRFile.Y\r\n sizeZ = imagePCRFile.Z\r\n headerFile.write(f\"sizes: {sizeX} {sizeY} {sizeZ}\\n\")\r\n volSpace = imagePCRFile.voxelSize\r\n headerFile.write(f\"space directions: ({volSpace}, 0.0, 0.0) (0.0, {volSpace}, 0.0) (0.0, 0.0, {volSpace})\\n\")\r\n headerFile.write(\"kinds: domain domain domain\\n\")\r\n headerFile.write(\"endian: little\\n\")\r\n headerFile.write(\"encoding: raw\\n\")\r\n headerFile.write(\"space origin: (0.0, 0.0, 0.0)\\n\")\r\n volPathName = filePathName + \".vol\"\r\n volPathSplit = []\r\n volPathSplit = volPathName.split('/')\r\n volFileName = volPathSplit[len(volPathSplit)-1]\r\n headerFile.write(f\"data file: {volFileName}\\n\")\r\n # print(imagePCRFile.form)\r\n print(f\".nhdr file path is: {nhdrPathName}\")\r\n #Automatically loading .vol file using the generated .nhdr file.\r\n if os.path.exists(volPathName):\r\n slicer.util.loadVolume(nhdrPathName)\r\n print(f\"{volFileName} loaded\\n\")\r\n else:\r\n print(f\"{volFileName} is 
not in the same directory\\n\")\r\n else:\r\n print(\"The format of this dataset is currently not supported by this module. Currently only float (format=10), unsigned 16 bit integer (format=5) and unsigned 8 bit integer (format=1) data types are supported. Please contact us with this dataset to enable this data type.\")\r\n else:\r\n print(\"This is not a PCR file, please re-select a PCR file\")", "def write_cpp_header(self):\n prefix = \"#include <frc/controller/\"\n headers = []\n headers.append(prefix + self.plant_coeffs_header + \".h>\")\n headers.append(prefix + self.ctrl_coeffs_header + \".h>\")\n headers.append(prefix + self.obsv_coeffs_header + \".h>\")\n headers.append(prefix + self.loop_header + \".h>\")\n\n with open(\n self.class_name + \"Coeffs.\" + self.header_extension, \"w\"\n ) as header_file:\n print(\"#pragma once\" + os.linesep, file=header_file)\n for header in sorted(headers):\n print(header, file=header_file)\n header_file.write(os.linesep)\n self.__write_cpp_func_name(\n header_file, self.plant_coeffs_type, \"PlantCoeffs\", in_header=True\n )\n self.__write_cpp_func_name(\n header_file, self.ctrl_coeffs_type, \"ControllerCoeffs\", in_header=True\n )\n self.__write_cpp_func_name(\n header_file, self.obsv_coeffs_type, \"ObserverCoeffs\", in_header=True\n )\n self.__write_cpp_func_name(\n header_file, self.loop_type, \"Loop\", in_header=True\n )", "def make_headers(self):\n f = open(rospack.get_path('e190_bot')+\"/data/\"+self.file_name, 'a+')\n f.write('{0} {1:^1} {2:^1} {3:^1} {4:^1} {5:^1} \\n'.format('TIME','X','Y','LIR','CIR','RIR'))\n f.close()", "def build_header(dictobject=\"\", version=\"2.3.x\", fileclass=\"dictionary\",\n incl_foamfile=True):\n txt = \\\n r\"\"\"/*--------------------------------*- C++ -*----------------------------------*\\\n| ========= | |\n| \\\\ / F ield | OpenFOAM: The Open Source CFD Toolbox |\n| \\\\ / O peration | Version: {} |\n| \\\\ / A nd | Web: www.OpenFOAM.org |\n| \\\\/ M anipulation | |\n\\*---------------------------------------------------------------------------*/\"\"\".format(version)\n if incl_foamfile:\n txt += \\\n r\"\"\"\nFoamFile\n{{\n version 2.0;\n format ascii;\n class {};\n object {};\n}}\"\"\".format(fileclass, dictobject)\n return txt", "def IIR_sos_header(fname_out,SOS_mat):\r\n Ns,Mcol = SOS_mat.shape\r\n f = open(fname_out,'wt')\r\n f.write('//define a IIR SOS CMSIS-DSP coefficient array\\n\\n')\r\n f.write('#include <stdint.h>\\n\\n')\r\n f.write('#ifndef STAGES\\n')\r\n f.write('#define STAGES %d\\n' % Ns)\r\n f.write('#endif\\n')\r\n f.write('/*********************************************************/\\n');\r\n f.write('/* IIR SOS Filter Coefficients */\\n');\r\n f.write('float32_t ba_coeff[%d] = { //b0,b1,b2,a1,a2,... 
by stage\\n' % (5*Ns))\r\n for k in range(Ns):\r\n if (k < Ns-1):\r\n f.write(' %+-13e, %+-13e, %+-13e,\\n' % \\\r\n (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n f.write(' %+-13e, %+-13e,\\n' % \\\r\n (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n else:\r\n f.write(' %+-13e, %+-13e, %+-13e,\\n' % \\\r\n (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n f.write(' %+-13e, %+-13e\\n' % \\\r\n (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n # for k in range(Ns):\r\n # if (k < Ns-1):\r\n # f.write(' %15.12f, %15.12f, %15.12f,\\n' % \\\r\n # (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n # f.write(' %15.12f, %15.12f,\\n' % \\\r\n # (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n # else:\r\n # f.write(' %15.12f, %15.12f, %15.12f,\\n' % \\\r\n # (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n # f.write(' %15.12f, %15.12f\\n' % \\\r\n # (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n f.write('};\\n')\r\n f.write('/*********************************************************/\\n')\r\n f.close()", "def write_header(self):\r\n if self.arguments['--out']:\r\n self.file = open(self.arguments['--out'], \"w+\")\r\n self.file.write(self.version)\r\n for list_item in self.list_of_header_objects:\r\n self.file.write(list_item.line)\r\n self.file.write(self.body_header_line.line)\r\n self.file.close()\r\n else:\r\n for list_item in self.list_of_header_objects:\r\n print(list_item.line)\r\n print(self.body_header_line.line)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a tflite model buffer into a Relay module
def convert_to_relay(tflite_model_buf, bind_params_by_name=True): # TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1 try: import tflite.Model # pylint: disable=import-outside-toplevel tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0) except AttributeError: import tflite # pylint: disable=import-outside-toplevel tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) except ImportError: raise ImportError("The tflite package must be installed") mod, params = relay.frontend.from_tflite(tflite_model) if bind_params_by_name: mod["main"] = relay.build_module.bind_params_by_name(mod["main"], params) return mod, params
[ "def create_relay_module_and_inputs_from_tflite_file(tflite_model_file, bind_params_by_name=True):\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n mod, params = convert_to_relay(tflite_model_buf, bind_params_by_name)\n\n inputs = dict()\n for param in mod[\"main\"].params:\n name = str(param.name_hint)\n data_shape = [int(i) for i in param.type_annotation.shape]\n dtype = str(param.type_annotation.dtype)\n if np.issubdtype(dtype, np.floating):\n # Since np.random.uniform only allows the ranges of float32,\n # at first float16 is used and scaled afterwards, if necessary.\n in_min, in_max = (np.finfo(\"float16\").min, np.finfo(\"float16\").max)\n data = np.random.uniform(low=in_min, high=in_max, size=data_shape).astype(dtype)\n scale = np.finfo(dtype).min / np.finfo(\"float16\").min\n data *= scale\n elif np.issubdtype(dtype, np.integer):\n in_min, in_max = (np.iinfo(dtype).min, np.iinfo(dtype).max)\n data = np.random.randint(in_min, high=in_max, size=data_shape, dtype=dtype)\n else:\n raise TypeError(f\"Type {dtype} not supported\")\n inputs[name] = data\n\n return mod, inputs, params", "def from_tflite(model, prog_name): #, shape_dict, dtype_dict):\n try:\n import tflite.Model\n import tflite.SubGraph\n import tflite.BuiltinOperator\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n assert isinstance(model, tflite.Model.Model)\n\n # keep the same as tflite\n assert model.SubgraphsLength() == 1, \"only support one subgraph (main subgraph)\"\n subgraph = model.Subgraphs(0)\n\n # model inputs / outputs\n model_inputs = subgraph.InputsAsNumpy()\n model_outputs = subgraph.OutputsAsNumpy()\n assert model_inputs.size == 1, \"Model should have only one input\"\n assert model_outputs.size == 1, \"Model should have only one output\"\n\n # op code in model\n op_converter = OperatorConverter(model, subgraph, prog_name)\n op_converter.is_dequantize = False\n op_converter.check_unsupported_ops()\n\n in_tensor = op_converter.get_tensors(model_inputs)[0]\n out_tensor = op_converter.get_tensors(model_outputs)[0]\n\n op_converter.define_model_sizes(\"IN\", in_tensor)\n op_converter.define_model_sizes(\"OUT\", out_tensor)\n\n op_converter.nn_add_input(in_tensor)\n\n output_nodes = op_converter.convert_op_to_hexagon_nn()\n\n op_converter.nn_add_output(output_nodes)\n\n op_converter.print_nn_nodes()\n\n print(\"tensor_tab:\")\n print(op_converter.tensor_tab)\n\n op_converter.close()\n print(\"Converted Hexagon Model saved to {}\".format(op_converter.h_file.name))", "def decode(self, model: bytes):\n _, path = tempfile.mkstemp()\n with open(path, \"wb\") as fd:\n fd.write(model)\n onnx_model = onnx.load(path)\n pytorch_model = ConvertModel(onnx_model)\n os.remove(path)\n return pytorch_model", "def _main(args):\n # model = keras.models.load_model(args.h5_file, custom_objects={'prelu': prelu})\n with tf.keras.utils.custom_object_scope({'prelu': prelu}):\n converter = tf.lite.TFLiteConverter.from_keras_model_file(args.h5_file)\n tflite_file = converter.convert()\n open(args.tflite_file, 'wb').write(tflite_file)\n print('='*30)\n print('tffile file save in {}.'.format(args.tflite_file))", "def load_vm_flatbuffer(\n vm_flatbuffer: bytes, *, driver: Optional[str] = None, backend: Optional[str] = None\n) -> BoundModule:\n config = _create_config(driver=driver, backend=backend)\n vm_module = _binding.VmModule.copy_buffer(config.vm_instance, vm_flatbuffer)\n return load_vm_module(vm_module, config)", "def saved_model_to_tflite(input_dir, save_dir, 
metadata_file=None):\n print(f'\\nConverting to TFLite:\\nInput:{input_dir}\\nOutput:{save_dir}\\n')\n # Convert the model.\n tflite_converter = tf.lite.TFLiteConverter.from_saved_model(input_dir)\n tflite_converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, # Enable TensorFlow Lite ops.\n tf.lite.OpsSet.SELECT_TF_OPS, # Enable extended TensorFlow ops.\n ]\n tflite_model = tflite_converter.convert() # Byte string.\n # Save the model.\n save_path = os.path.join(save_dir, 'model.tflite')\n with tf.io.gfile.GFile(save_path, 'wb') as f:\n f.write(tflite_model)\n\n if metadata_file is not None:\n populator = _metadata.MetadataPopulator.with_model_file(save_path)\n populator.load_associated_files([metadata_file])\n populator.populate()\n print('TFLite Conversion Success!')", "def __init__(self, model_name, base_path, **graph_info):\n super(SplitGraphConverter, self).__init__(graph_info[\"framework\"],\n base_path)\n print(\"{} bmodel converter init\".format(model_name))\n self.converter_config = graph_info\n self.tensors_dict = {}\n if len(self.converter_config['input_names']) == \\\n len(self.converter_config['input_shapes']):\n for input_name, input_shape in zip(self.converter_config['input_names'],\n self.converter_config['input_shapes']):\n self.tensors_dict[input_name] = np.ndarray((input_shape),\n dtype=np.float32)", "def _convert_buffer_to_features(self):\n # samples for conversion to features.\n # Add look_back to have context for the first feature\n samples = self.sample_buffer[: -(self.n_chunk_samples + self.n_chunk_look_back)]\n device = self.asr_model.device\n audio_signal = samples.unsqueeze_(0).to(device)\n audio_signal_len = torch.Tensor([samples.shape[1]]).to(device)\n features, features_len = self.raw_preprocessor(input_signal=audio_signal, length=audio_signal_len,)\n features = features.squeeze()\n self._update_feature_buffer(features[:, -self.feature_chunk_len :])", "def run_model(model):\n\n model.create_initialised_input()\n\n model.run_from_buffer()\n\n output = model.output_parse()\n return output", "def convert(input_ops, output_ops, byte_order, bigdl_type):\n\n input_names = map(lambda x: x.name.split(\":\")[0], input_ops)\n output_names = map(lambda x: x.name.split(\":\")[0], output_ops)\n temp = tempfile.mkdtemp()\n\n dump_model(path=temp)\n model_path = temp + '/model.pb'\n bin_path = temp + '/model.bin'\n\n model = Model.load_tensorflow(model_path, input_names, output_names,\n byte_order, bin_path, bigdl_type)\n\n try:\n shutil.rmtree(temp)\n except OSError as e:\n if e.errno != errno.ENOENT:\n invalidOperationError(False, str(e), cause=e)\n\n return model", "def _convert_to_tflite(saved_model_dir, num_styles, image_size, quantize,\n output_model):\n # Append filename if output_model is a directory name\n if tf.io.gfile.isdir(output_model):\n if quantize:\n filename = 'stylize_quantized.tflite'\n else:\n filename = 'stylize.tflite'\n output_model = os.path.join(output_model, filename)\n\n # Initialize TF Lite Converter\n converter = tf.lite.TFLiteConverter.from_saved_model(\n saved_model_dir=saved_model_dir,\n input_shapes={\n 'input_image': [None, image_size, image_size, 3],\n 'style_weights': num_styles\n })\n\n # Specify quantization option\n if quantize:\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n\n # Convert and save the TF Lite model\n tflite_model = converter.convert()\n with tf.io.gfile.GFile(output_model, 'wb') as f:\n f.write(tflite_model)\n tf.logging.info('Converted to TF Lite model: %s; Size: %d KB.' 
%\n (output_model, len(tflite_model) / 1024))", "def vid2tensor( self, current_frame):", "def _lower_model_to_backend(\n self,\n mod: torch.fx.GraphModule,\n inputs: Iterable[torch.Tensor]\n ):\n # Current code for lowering is place-holder, subject to future change\n # based on feeds model's actual status\n interp = TRTInterpreter(mod, InputTensorSpec.from_tensors(inputs))\n engine, input_names, output_names = interp.run(*inputs)\n return TRTModule(engine, input_names, output_names)", "def to_payload(self, model):\n return model", "def load_model(model_path):\n interpreter = tflite.Interpreter(model_path=model_path)\n interpreter.allocate_tensors()\n return interpreter", "def _convert_openvino_model_to_compressed_model(\n model: ov.Model, target_device: str\n) -> pot.graph.nx_model.CompressedModel:\n with tempfile.TemporaryDirectory(dir=tempfile.gettempdir()) as tmp_dir:\n xml_path = str(Path(tmp_dir) / \"model.xml\")\n bin_path = str(Path(tmp_dir) / \"model.bin\")\n ov.serialize(model, xml_path, bin_path)\n model_config = {\n \"model_name\": \"model\",\n \"model\": xml_path,\n \"weights\": bin_path,\n }\n pot_model = pot.load_model(model_config, target_device)\n\n return pot_model", "def convert_to_onnx(cls, model_name, output_path, task_type, convert_to_float16=False, quantize=False, opset_version=11):\n language_model_class = LanguageModel.get_language_model_class(model_name)\n if language_model_class not in ['Bert', 'Roberta', 'XLMRoberta']:\n raise Exception(\"The current ONNX conversion only support 'BERT', 'RoBERTa', and 'XLMRoberta' models.\")\n task_type_to_pipeline_map = {'question_answering': 'question-answering', 'embeddings': 'feature-extraction', 'ner': 'ner'}\n convert(pipeline_name=task_type_to_pipeline_map[task_type], framework='pt', model=model_name, output=output_path / 'model.onnx', opset=opset_version, use_external_format=True if language_model_class == 'XLMRoberta' else False)\n processor = Processor.convert_from_transformers(tokenizer_name_or_path=model_name, task_type=task_type, max_seq_len=256, doc_stride=128, use_fast=True)\n processor.save(output_path)\n model = AdaptiveModel.convert_from_transformers(model_name, device='cpu', task_type=task_type)\n model.save(output_path)\n os.remove(output_path / 'language_model.bin')\n onnx_model_config = {'task_type': task_type, 'onnx_opset_version': opset_version, 'language_model_class': language_model_class, 'language': model.language_model.language}\n with open(output_path / 'onnx_model_config.json', 'w') as f:\n json.dump(onnx_model_config, f)\n if convert_to_float16:\n config = AutoConfig.from_pretrained(model_name)\n optimized_model = optimizer.optimize_model(input=str(output_path / 'model.onnx'), model_type='bert', num_heads=config.num_hidden_layers, hidden_size=config.hidden_size)\n optimized_model.convert_model_float32_to_float16()\n optimized_model.save_model_to_file(str(output_path / 'model.onnx'))\n if quantize:\n quantize_model(output_path / 'model.onnx')", "def quantize_model(model, filename=\"quantized_model\", fullpath=\"./\",):\n converter = tf.lite.TFLiteConverter.from_keras_model(model)\n tflite_model = converter.convert()\n exportFile = fullpath + filename + \".tflite\"\n open(exportFile,\"wb\").write(tflite_model)\n print(\"Quantization Successful...! 
Exported File at \", exportFile)", "def make_model(self):\n onnx_graph = onnx.helper.make_graph(\n self._nodes, self._name, self._inputs, self._outputs, self._initializers\n )\n kwargs = {}\n kwargs[\"opset_imports\"] = self._get_opsets()\n kwargs[\"producer_name\"] = \"TVM Relay\"\n kwargs[\"producer_version\"] = tvm.__version__\n\n return onnx.helper.make_model(onnx_graph, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate reference data through executing the relay module
def generate_ref_data(mod, input_data, params=None, target="llvm"):
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        lib = relay.build(mod, target=target, params=params)
    lib_name = "mod.so"
    temp = utils.tempdir()
    lib_path = temp.relpath(lib_name)
    lib.export_library(lib_path)
    lib = tvm.runtime.load_module(lib_path)
    grt_mod = graph_executor.GraphModule(lib["default"](tvm.cpu()))
    grt_mod.set_input(**input_data)
    grt_mod.run()
    output_count = grt_mod.get_num_outputs()
    out = [grt_mod.get_output(i).numpy() for i in range(output_count)]

    if isinstance(mod, tvm.relay.Function):
        main = mod
    else:
        main = mod["main"]

    if main.attrs is None or main.attrs["output_tensor_names"] is None:
        output_tensor_names = (
            ["output"] if output_count == 1 else [f"output{i}" for i in range(output_count)]
        )
    else:
        output_tensor_names = main.attrs["output_tensor_names"]

    return dict(zip(output_tensor_names, out))
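A minimal usage sketch (not part of the original snippet): it builds a trivial Relay module and feeds it illustrative random input data; the variable names and the ReLU function are placeholders chosen only to show how the helper is called.

# Hypothetical usage of generate_ref_data with a tiny Relay module.
import numpy as np
import tvm
from tvm import relay

x = relay.var("x", shape=(1, 4), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x], relay.nn.relu(x)))
inputs = {"x": np.random.uniform(-1, 1, size=(1, 4)).astype("float32")}

# Reference outputs from the graph executor on CPU; with a single output
# and no output_tensor_names attribute, the key is "output".
ref_outputs = generate_ref_data(mod, inputs)
# Another backend's results could then be checked against ref_outputs, e.g.
# np.testing.assert_allclose(actual, ref_outputs["output"], rtol=1e-5)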
[ "def make_reference(self):\n self.make_reference2()", "def link_data(ctx, output_path='./material/'):\n run_data_linking(output_path)", "def refant() :\n return ref", "def use(self):", "def generate(self):\n self.graph_repl = self.master.graph_repl", "def do_generate(self):\n pass", "def generate_replica(self):", "def __init__(self):\n self.variables = {}\n self.connectors = {}", "def createReference(self, fromnode, tonode, edge_data='direct'):\n return super(ModuleGraph, self).createReference(fromnode, tonode, edge_data=edge_data)", "def transfer_data(self):\n pass", "def codegen_reload_data():\n reload_params = {\"package\": u\"fn_qradar_integration\",\n \"incident_fields\": [u\"qradar_id\"], \n \"action_fields\": [], \n \"function_params\": [u\"qradar_query\", u\"qradar_query_param1\", u\"qradar_query_param2\", u\"qradar_query_param3\", u\"qradar_query_param4\", u\"qradar_query_param5\", u\"qradar_query_range_end\", u\"qradar_query_range_start\", u\"qradar_reference_set_item_value\", u\"qradar_reference_set_name\"], \n \"datatables\": [u\"qradar_offense_event\", u\"qradar_reference_set\"], \n \"message_destinations\": [u\"fn_qradar_integration\"], \n \"functions\": [u\"qradar_add_reference_set_item\", u\"qradar_delete_reference_set_item\", u\"qradar_find_reference_set_item\", u\"qradar_find_reference_sets\", u\"qradar_search\"], \n \"phases\": [], \n \"automatic_tasks\": [], \n \"scripts\": [], \n \"workflows\": [u\"qradar_add_reference_set_item\", u\"qradar_delete_reference_set_item\", u\"qradar_find_reference_set_item\", u\"qradar_find_reference_sets_artifact\", u\"qradar_move_item_to_different_ref_set\", u\"qradar_search_event_offense\"], \n \"actions\": [u\"Delete from QRadar Reference Set\", u\"Find All QRadar Reference Sets\", u\"Find in QRadar Reference Set\", u\"QRadar Add to Reference Set\", u\"QRadar Move from suspect to blocked\", u\"Search QRadar for offense id\"] \n }\n return reload_params", "def codegen_reload_data():\n reload_params = {\"package\": u\"fn_cisco_amp4ep\",\n \"incident_fields\": [], \n \"action_fields\": [u\"amp_artifact_type_activities\", u\"amp_artifact_type_events\", u\"amp_artifact_type_trajectory\", u\"amp_group_name\", u\"amp_limit\", u\"amp_offset\", u\"amp_q\", u\"amp_scd_name\", u\"amp_severity\", u\"amp_start_date\"], \n \"function_params\": [u\"amp_application_sha256\", u\"amp_conn_guid\", u\"amp_detection_sha256\", u\"amp_event_type\", u\"amp_external_ip\", u\"amp_file_description\", u\"amp_file_list_guid\", u\"amp_file_sha256\", u\"amp_group_guid\", u\"amp_group_name\", u\"amp_hostname\", u\"amp_internal_ip\", u\"amp_limit\", u\"amp_offset\", u\"amp_q\", u\"amp_scd_name\", u\"amp_severity\", u\"amp_start_date\"], \n \"datatables\": [u\"amp_activity\", u\"amp_computer_trajectory\", u\"amp_computers\", u\"amp_event_types\", u\"amp_events\", u\"amp_file_list_files\", u\"amp_groups\", u\"amp_scd_file_lists\"], \n \"message_destinations\": [u\"fn_cisco_amp\"], \n \"functions\": [u\"fn_amp_delete_file_list_files\", u\"fn_amp_get_activity\", u\"fn_amp_get_computer\", u\"fn_amp_get_computer_trajectory\", u\"fn_amp_get_computers\", u\"fn_amp_get_event_types\", u\"fn_amp_get_events\", u\"fn_amp_get_file_list_files\", u\"fn_amp_get_file_lists\", u\"fn_amp_get_groups\", u\"fn_amp_move_computer\", u\"fn_amp_set_file_list_files\"], \n \"phases\": [], \n \"automatic_tasks\": [], \n \"scripts\": [u\"scr_amp_add_artifact_from_activity\", u\"scr_amp_add_artifact_from_event\", u\"scr_amp_add_artifact_from_trajectory\"], \n \"workflows\": 
[u\"wf_amp_add_artifact_from_activity\", u\"wf_amp_add_artifact_from_event\", u\"wf_amp_add_artifact_from_trajectory\", u\"wf_amp_delete_file_list_files\", u\"wf_amp_get_activity\", u\"wf_amp_get_computer_by_guid\", u\"wf_amp_get_computer_by_name\", u\"wf_amp_get_computer_refresh\", u\"wf_amp_get_computer_trajectory\", u\"wf_amp_get_computer_trajectory_by_activity\", u\"wf_amp_get_event_types\", u\"wf_amp_get_events\", u\"wf_amp_get_events_by_type\", u\"wf_amp_get_file_list_files\", u\"wf_amp_get_file_lists\", u\"wf_amp_get_group_name_by_guid\", u\"wf_amp_get_groups\", u\"wf_amp_move_computer\", u\"wf_amp_set_file_list_files\"], \n \"actions\": [u\"Example: AMP add artifact from activity\", u\"Example: AMP add artifact from event\", u\"Example: AMP add artifact from trajectory\", u\"Example: AMP delete file from list\", u\"Example: AMP get computer (refresh)\", u\"Example: AMP get computer by connector guid\", u\"Example: AMP get computer by name\", u\"Example: AMP get computer trajectory\", u\"Example: AMP get computer trajectory by activity\", u\"Example: AMP get computers with activity\", u\"Example: AMP get event types\", u\"Example: AMP get events\", u\"Example: AMP get events by type\", u\"Example: AMP get files from list\", u\"Example: AMP get group name by guid\", u\"Example: AMP get groups\", u\"Example: AMP get SCD file lists\", u\"Example: AMP move computer\", u\"Example: AMP set file in list\"], \n \"incident_artifact_types\": [] \n }\n return reload_params", "def codegen_reload_data():\n return {\n \"package\": u\"fn_utilities\",\n \"message_destinations\": [u\"fn_utilities\"],\n \"functions\": [u\"utilities_artifact_hash\", u\"utilities_attachment_hash\", u\"utilities_attachment_to_base64\", u\"utilities_attachment_zip_extract\", u\"utilities_attachment_zip_list\", u\"utilities_base64_to_artifact\", u\"utilities_base64_to_attachment\", u\"utilities_call_rest_api\", u\"utilities_domain_distance\", u\"utilities_email_parse\", u\"utilities_excel_query\", u\"utilities_expand_url\", u\"utilities_extract_ssl_cert_from_url\", u\"utilities_get_contact_info\", u\"utilities_json2html\", u\"utilities_parse_ssl_certificate\", u\"utilities_pdfid\", u\"utilities_resilient_search\", u\"utilities_shell_command\", u\"utilities_string_to_attachment\", u\"utilities_timer\", u\"utilities_xml_transformation\"],\n \"workflows\": [u\"example_artifact_attachment_to_base64\", u\"example_artifact_hash\", u\"example_attachment_hash\", u\"example_attachment_to_base64\", u\"example_call_rest_api\", u\"example_create_artifacts_from_excel_data\", u\"example_domain_distance\", u\"example_email_parsing_artifact\", u\"example_email_parsing_attachment\", u\"example_extract_ssl_cert_from_url\", u\"example_get_incident_contact_info\", u\"example_get_task_contact_info\", u\"example_json2html\", u\"example_parse_ssl_certificate\", u\"example_pdfid\", u\"example_resilient_search\", u\"example_shell_command\", u\"example_string_to_attachment\", u\"example_timer\", u\"example_timer_parallel\", u\"example_xml_transformation\", u\"example_zip_list\", u\"example_zip_to_artifact\", u\"utilities_expand_url\"],\n \"actions\": [u\"Example: (Artifact) Attachment to Base64\", u\"Example: Artifact Hash\", u\"Example: Attachment Hash\", u\"Example: Attachment to Base64\", u\"Example: Call REST API\", u\"Example: Domain Distance\", u\"Example: Email Parsing (Artifact)\", u\"Example: Email Parsing (Attachment)\", u\"Example: Expand URL\", u\"Example: Extract SSL Certificate\", u\"Example: Get Incident Contact Info\", u\"Example: 
Get Task Contact Info\", u\"Example: JSON2HTML\", u\"Example: Parse SSL Certificate\", u\"Example: PDFiD\", u\"Example: Resilient Search\", u\"Example: Shell Command\", u\"Example: String to Attachment\", u\"Example: Timer Epoch\", u\"Example: Timers in Parallel\", u\"Example: Use Excel Data\", u\"Example: XML Transformation\", u\"Example: Zip Extract\", u\"Example: Zip List\"],\n \"incident_fields\": [],\n \"incident_artifact_types\": [],\n \"incident_types\": [],\n \"datatables\": [],\n \"automatic_tasks\": [],\n \"scripts\": [u\"Convert JSON to rich text v1.0\"],\n \"playbooks\": []\n }", "def _gen_goals_reference(self):\n goals = gen_tasks_goals_reference_data()\n glopts = gen_goals_glopts_reference_data()\n\n # generate the .rst file\n template = resource_string(__name__,\n os.path.join(self._templates_dir, 'goals_reference.mustache'))\n filename = os.path.join(self._outdir, 'goals_reference.rst')\n self.context.log.info('Generating %s' % filename)\n with safe_open(filename, 'wb') as outfile:\n generator = Generator(template, goals=goals, glopts=glopts)\n generator.write(outfile)\n\n # generate the .html file\n template = resource_string(__name__,\n os.path.join(self._templates_dir, 'gref_html.mustache'))\n filename = os.path.join(self._outdir, 'goals_reference.html')\n self.context.log.info('Generating %s' % filename)\n with safe_open(filename, 'wb') as outfile:\n generator = Generator(template, goals=goals, glopts=glopts)\n generator.write(outfile)", "def main():\n insert_gateway_values(\"hermes/bin/gateways.txt\")\n return", "def create_reference_data(path,echo=True):\n # Grab reference data input files from directory\n if os.path.isdir(path):\n fnames = os.listdir(path)\n \n fnames = [name for name in fnames if '.csv' in name]\n \n grabbed_files = []\n tree_constructor= []\n for file in fnames: #glob.glob('*.csv'):\n if 'RefData' in file:\n branch = file.strip('RefData_').strip('.csv').split('_')\n tree_constructor.append(branch)\n grabbed_files.append(file)\n \n # Create my reference data construct (tips of tress are pandas dataframes)\n ref_data = Tree()\n for file, branch in zip(grabbed_files,tree_constructor):\n full_file_path = os.path.join(path,file)\n df = pd.read_csv(full_file_path)\n ref_data[branch[0]][branch[1]][branch[2]] = df\n \n # Echo\n if echo:\n for file in grabbed_files:\n print(file)\n for branch in tree_constructor:\n print(branch)\n print(\"Created reference database.\")\n return ref_data", "def codegen_reload_data():\n reload_params = {\"package\": u\"fn_ansible_tower\",\n \"incident_fields\": [], \n \"action_fields\": [u\"ansible_tower_arguments\", u\"ansible_tower_credential\", u\"ansible_tower_hosts\", u\"ansible_tower_inventory\", u\"ansible_tower_job_name\", u\"ansible_tower_module\", u\"ansible_tower_module_arguments\", u\"ansible_tower_run_tags\", u\"ansible_tower_skip_tags\", u\"job_status\", u\"last_updated\", u\"tower_project\", u\"tower_save_as\", u\"tower_template_pattern\"], \n \"function_params\": [u\"incident_id\", u\"tower_arguments\", u\"tower_credential\", u\"tower_hosts\", u\"tower_inventory\", u\"tower_job_id\", u\"tower_job_status\", u\"tower_last_updated\", u\"tower_module\", u\"tower_project\", u\"tower_run_tags\", u\"tower_save_as\", u\"tower_skip_tags\", u\"tower_template_id\", u\"tower_template_name\", u\"tower_template_pattern\"], \n \"datatables\": [u\"ansible_tower_job_templates\", u\"ansible_tower_launched_jobs\"], \n \"message_destinations\": [u\"fn_ansible_tower\"], \n \"functions\": 
[u\"ansible_tower_get_ad_hoc_command_results\", u\"ansible_tower_get_job_results\", u\"ansible_tower_launch_job_template\", u\"ansible_tower_list_job_templates\", u\"ansible_tower_list_jobs\", u\"ansible_tower_run_an_ad_hoc_command\"], \n \"phases\": [], \n \"automatic_tasks\": [], \n \"scripts\": [], \n \"workflows\": [u\"ansible_tower_get_ad_hoc_command_results\", u\"ansible_tower_get_job_results\", u\"ansible_tower_launch_job_template\", u\"ansible_tower_list_job_templates\", u\"ansible_tower_list_jobs\", u\"ansible_tower_run_an_ad_hoc_command\", u\"ansible_tower_run_job__artifact\", u\"ansible_tower_run_job__incident\"], \n \"actions\": [u\"Ansible Tower Get Ad Hoc Command Results\", u\"Ansible Tower Get Job Results\", u\"Ansible Tower List Job Templates\", u\"Ansible Tower List Jobs\", u\"Ansible Tower Run an Ad Hoc Command\", u\"Ansible Tower Run Job\", u\"Ansible Tower Run Job - Artifact\", u\"Ansible Tower Run Job - Incident\"], \n \"incident_artifact_types\": [] \n }\n return reload_params", "def onSetRelayOutput(self, event):", "def __init__(self):\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(HEReference_Copy, self).__init__(name='HEReference_Copy', num_nodes=104, edges=[])\n \n # Add the edges\n self.add_edges([[0, 3], [3, 6], [1, 4], [4, 7], [6, 8], [8, 77], [6, 9], [9, 78], [6, 10], [10, 79], [6, 11], [11, 80], [6, 12], [12, 81], [6, 13], [13, 82], [6, 14], [14, 83], [6, 15], [15, 84], [6, 16], [16, 85], [6, 17], [17, 86], [6, 18], [18, 87], [6, 19], [19, 88], [6, 20], [20, 89], [7, 21], [21, 90], [22, 23], [23, 90], [22, 24], [24, 77], [7, 25], [25, 91], [26, 27], [27, 91], [26, 28], [28, 78], [7, 29], [29, 92], [30, 31], [31, 92], [30, 32], [32, 79], [7, 33], [33, 93], [34, 35], [35, 93], [34, 36], [36, 80], [7, 37], [37, 94], [38, 39], [39, 94], [38, 40], [40, 81], [7, 41], [41, 95], [42, 43], [43, 95], [42, 44], [44, 82], [7, 45], [45, 96], [46, 47], [47, 96], [46, 48], [48, 83], [7, 49], [49, 97], [50, 51], [51, 97], [50, 52], [52, 84], [7, 53], [53, 98], [54, 55], [55, 98], [54, 56], [56, 85], [7, 57], [57, 99], [58, 59], [59, 99], [58, 60], [60, 86], [7, 61], [61, 100], [62, 63], [63, 100], [62, 64], [64, 87], [7, 65], [65, 101], [66, 67], [67, 101], [66, 68], [68, 88], [7, 69], [69, 102], [70, 71], [71, 102], [70, 72], [72, 89], [7, 73], [73, 103], [74, 75], [75, 103], [74, 76], [76, 5], [0, 2], [2, 1]])\n # Set the graph attributes\n self[\"mm__\"] = ['HimesisMM']\n self[\"name\"] = \"\"\"EReference_Copy\"\"\"\n self[\"GUID__\"] = 4769270775441325434\n \n # Set the node attributes\n self.vs[0][\"mm__\"] = \"\"\"MatchModel\"\"\"\n self.vs[0][\"GUID__\"] = 1922868571827344071\n self.vs[1][\"mm__\"] = \"\"\"ApplyModel\"\"\"\n self.vs[1][\"GUID__\"] = 3299211479905012087\n self.vs[2][\"mm__\"] = \"\"\"paired_with\"\"\"\n self.vs[2][\"GUID__\"] = 6402237704381179133\n self.vs[3][\"mm__\"] = \"\"\"match_contains\"\"\"\n self.vs[3][\"GUID__\"] = 5511911928217461672\n self.vs[4][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[4][\"GUID__\"] = 1550704846952991080\n self.vs[5][\"name\"] = \"\"\"solveRef\"\"\"\n self.vs[5][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[5][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[5][\"GUID__\"] = 5649657449024228538\n self.vs[6][\"name\"] = \"\"\"\"\"\"\n self.vs[6][\"classtype\"] = \"\"\"EReference\"\"\"\n self.vs[6][\"mm__\"] = \"\"\"EReference\"\"\"\n self.vs[6][\"cardinality\"] = \"\"\"+\"\"\"\n self.vs[6][\"GUID__\"] = 8574377317263353694\n self.vs[7][\"name\"] = \"\"\"\"\"\"\n self.vs[7][\"classtype\"] = 
\"\"\"EReference\"\"\"\n self.vs[7][\"mm__\"] = \"\"\"EReference\"\"\"\n self.vs[7][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[7][\"GUID__\"] = 8554662502437830359\n self.vs[8][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[8][\"GUID__\"] = 8766363618937894411\n self.vs[9][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[9][\"GUID__\"] = 5272865645620665093\n self.vs[10][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[10][\"GUID__\"] = 1059339475425853475\n self.vs[11][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[11][\"GUID__\"] = 2175638429619253312\n self.vs[12][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[12][\"GUID__\"] = 1608362923929882756\n self.vs[13][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[13][\"GUID__\"] = 990583573423850071\n self.vs[14][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[14][\"GUID__\"] = 5357529843741940612\n self.vs[15][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[15][\"GUID__\"] = 3566341956756435505\n self.vs[16][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[16][\"GUID__\"] = 2231709159606514738\n self.vs[17][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[17][\"GUID__\"] = 5773665996282992816\n self.vs[18][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[18][\"GUID__\"] = 6908308361778840795\n self.vs[19][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[19][\"GUID__\"] = 5535885370286525920\n self.vs[20][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[20][\"GUID__\"] = 461418003231064638\n self.vs[21][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[21][\"GUID__\"] = 2711245002324516084\n self.vs[22][\"name\"] = \"\"\"eq_\"\"\"\n self.vs[22][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[22][\"GUID__\"] = 1057657690938414118\n self.vs[23][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[23][\"GUID__\"] = 4361162298745049796\n self.vs[24][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[24][\"GUID__\"] = 5169349945393008729\n self.vs[25][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[25][\"GUID__\"] = 4064270692088133154\n self.vs[26][\"name\"] = \"\"\"eq_\"\"\"\n self.vs[26][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[26][\"GUID__\"] = 4765172655991662734\n self.vs[27][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[27][\"GUID__\"] = 8634772267261399653\n self.vs[28][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[28][\"GUID__\"] = 388552528060407336\n self.vs[29][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[29][\"GUID__\"] = 1846365277705875449\n self.vs[30][\"name\"] = \"\"\"eq_\"\"\"\n self.vs[30][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[30][\"GUID__\"] = 3534625206810400887\n self.vs[31][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[31][\"GUID__\"] = 5280951362980612591\n self.vs[32][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[32][\"GUID__\"] = 7141362869146183198\n self.vs[33][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[33][\"GUID__\"] = 3998457730141967652\n self.vs[34][\"name\"] = \"\"\"eq_\"\"\"\n self.vs[34][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[34][\"GUID__\"] = 279701156757123450\n self.vs[35][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[35][\"GUID__\"] = 4218060743020018621\n self.vs[36][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[36][\"GUID__\"] = 4800892861649237140\n self.vs[37][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[37][\"GUID__\"] = 7610247075265006329\n self.vs[38][\"name\"] = \"\"\"eq_\"\"\"\n self.vs[38][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[38][\"GUID__\"] = 633919048122780185\n self.vs[39][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[39][\"GUID__\"] = 3040558356517687748\n self.vs[40][\"mm__\"] = 
\"\"\"rightExpr\"\"\"\n self.vs[40][\"GUID__\"] = 1563836980594635755\n self.vs[41][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[41][\"GUID__\"] = 8578370926749835818\n self.vs[42][\"name\"] = \"\"\"eq_\"\"\"\n self.vs[42][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[42][\"GUID__\"] = 329114035990952966\n self.vs[43][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[43][\"GUID__\"] = 463881168622663617\n self.vs[44][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[44][\"GUID__\"] = 2095231843381003763\n self.vs[45][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[45][\"GUID__\"] = 1902247100769877430\n self.vs[46][\"name\"] = \"\"\"eq_\"\"\"\n self.vs[46][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[46][\"GUID__\"] = 6147744214864599035\n self.vs[47][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[47][\"GUID__\"] = 1283883902854289730\n self.vs[48][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[48][\"GUID__\"] = 8855228948080663456\n self.vs[49][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[49][\"GUID__\"] = 1892661364473694944\n self.vs[50][\"name\"] = \"\"\"eq_\"\"\"\n self.vs[50][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[50][\"GUID__\"] = 3909755974433384819\n self.vs[51][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[51][\"GUID__\"] = 3453276672396471470\n self.vs[52][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[52][\"GUID__\"] = 7985197132778664230\n self.vs[53][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[53][\"GUID__\"] = 3025257034221858308\n self.vs[54][\"name\"] = \"\"\"eq_\"\"\"\n self.vs[54][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[54][\"GUID__\"] = 9088394634969370109\n self.vs[55][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[55][\"GUID__\"] = 2502754991559785520\n self.vs[56][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[56][\"GUID__\"] = 6601737846321326555\n self.vs[57][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[57][\"GUID__\"] = 7511056067096504616\n self.vs[58][\"name\"] = \"\"\"eq_\"\"\"\n self.vs[58][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[58][\"GUID__\"] = 8116660845801415097\n self.vs[59][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[59][\"GUID__\"] = 1453264874158896464\n self.vs[60][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[60][\"GUID__\"] = 2808176667855399186\n self.vs[61][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[61][\"GUID__\"] = 2202557240248034298\n self.vs[62][\"name\"] = \"\"\"eq_\"\"\"\n self.vs[62][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[62][\"GUID__\"] = 5979617625886822059\n self.vs[63][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[63][\"GUID__\"] = 6124442521306908740\n self.vs[64][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[64][\"GUID__\"] = 6976076951384158687\n self.vs[65][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[65][\"GUID__\"] = 6039731227116077923\n self.vs[66][\"name\"] = \"\"\"eq_\"\"\"\n self.vs[66][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[66][\"GUID__\"] = 1286586042220571239\n self.vs[67][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[67][\"GUID__\"] = 1627913583245389102\n self.vs[68][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[68][\"GUID__\"] = 1881984517303405403\n self.vs[69][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[69][\"GUID__\"] = 1101137735845015167\n self.vs[70][\"name\"] = \"\"\"eq_\"\"\"\n self.vs[70][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[70][\"GUID__\"] = 1339306812127802523\n self.vs[71][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[71][\"GUID__\"] = 1818024385034486303\n self.vs[72][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[72][\"GUID__\"] = 4869927025024697808\n self.vs[73][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n 
self.vs[73][\"GUID__\"] = 2435808332474529975\n self.vs[74][\"name\"] = \"\"\"eq_\"\"\"\n self.vs[74][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[74][\"GUID__\"] = 605365425206687373\n self.vs[75][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[75][\"GUID__\"] = 5350905309588125391\n self.vs[76][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[76][\"GUID__\"] = 8780921805608155233\n self.vs[77][\"name\"] = \"\"\"name\"\"\"\n self.vs[77][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[77][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[77][\"GUID__\"] = 779576727200586123\n self.vs[78][\"name\"] = \"\"\"ordered\"\"\"\n self.vs[78][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[78][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[78][\"GUID__\"] = 4758250762787819523\n self.vs[79][\"name\"] = \"\"\"unique\"\"\"\n self.vs[79][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[79][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[79][\"GUID__\"] = 2619812606792492431\n self.vs[80][\"name\"] = \"\"\"lowerBound\"\"\"\n self.vs[80][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[80][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[80][\"GUID__\"] = 6665072067022886250\n self.vs[81][\"name\"] = \"\"\"upperBound\"\"\"\n self.vs[81][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[81][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[81][\"GUID__\"] = 8420167753203801035\n self.vs[82][\"name\"] = \"\"\"changeable\"\"\"\n self.vs[82][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[82][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[82][\"GUID__\"] = 5084495199701769101\n self.vs[83][\"name\"] = \"\"\"volatile\"\"\"\n self.vs[83][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[83][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[83][\"GUID__\"] = 317583742334201937\n self.vs[84][\"name\"] = \"\"\"transient\"\"\"\n self.vs[84][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[84][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[84][\"GUID__\"] = 6435363960095794317\n self.vs[85][\"name\"] = \"\"\"defaultValueLiteral\"\"\"\n self.vs[85][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[85][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[85][\"GUID__\"] = 7975235162001331376\n self.vs[86][\"name\"] = \"\"\"unsettable\"\"\"\n self.vs[86][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[86][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[86][\"GUID__\"] = 2402538856917234659\n self.vs[87][\"name\"] = \"\"\"derived\"\"\"\n self.vs[87][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[87][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[87][\"GUID__\"] = 4666605540204348480\n self.vs[88][\"name\"] = \"\"\"containment\"\"\"\n self.vs[88][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[88][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[88][\"GUID__\"] = 4156059468716113611\n self.vs[89][\"name\"] = \"\"\"resolveProxies\"\"\"\n self.vs[89][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[89][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[89][\"GUID__\"] = 9197605424513532602\n self.vs[90][\"name\"] = \"\"\"name\"\"\"\n self.vs[90][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[90][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[90][\"GUID__\"] = 6686149105550565950\n self.vs[91][\"name\"] = \"\"\"ordered\"\"\"\n self.vs[91][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[91][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[91][\"GUID__\"] = 4324121939578190455\n self.vs[92][\"name\"] = \"\"\"unique\"\"\"\n self.vs[92][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[92][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[92][\"GUID__\"] = 2464944745583296217\n self.vs[93][\"name\"] = \"\"\"lowerBound\"\"\"\n self.vs[93][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[93][\"Type\"] = 
\"\"\"'String'\"\"\"\n self.vs[93][\"GUID__\"] = 8564948989049106863\n self.vs[94][\"name\"] = \"\"\"upperBound\"\"\"\n self.vs[94][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[94][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[94][\"GUID__\"] = 6737713164432453520\n self.vs[95][\"name\"] = \"\"\"changeable\"\"\"\n self.vs[95][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[95][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[95][\"GUID__\"] = 563382532502440053\n self.vs[96][\"name\"] = \"\"\"volatile\"\"\"\n self.vs[96][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[96][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[96][\"GUID__\"] = 4209542895246991386\n self.vs[97][\"name\"] = \"\"\"transient\"\"\"\n self.vs[97][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[97][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[97][\"GUID__\"] = 7939668832417997741\n self.vs[98][\"name\"] = \"\"\"defaultValueLiteral\"\"\"\n self.vs[98][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[98][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[98][\"GUID__\"] = 2214093918514521429\n self.vs[99][\"name\"] = \"\"\"unsettable\"\"\"\n self.vs[99][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[99][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[99][\"GUID__\"] = 2498936721493940848\n self.vs[100][\"name\"] = \"\"\"derived\"\"\"\n self.vs[100][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[100][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[100][\"GUID__\"] = 5221159539423402770\n self.vs[101][\"name\"] = \"\"\"containment\"\"\"\n self.vs[101][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[101][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[101][\"GUID__\"] = 4804030453332290054\n self.vs[102][\"name\"] = \"\"\"resolveProxies\"\"\"\n self.vs[102][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[102][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[102][\"GUID__\"] = 200328547471240765\n self.vs[103][\"name\"] = \"\"\"ApplyAttribute\"\"\"\n self.vs[103][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[103][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[103][\"GUID__\"] = 7933135856126564115" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A helper function to create a Relay IRModule with inputs and params from a tflite file
def create_relay_module_and_inputs_from_tflite_file(tflite_model_file, bind_params_by_name=True):
    with open(tflite_model_file, "rb") as f:
        tflite_model_buf = f.read()
    mod, params = convert_to_relay(tflite_model_buf, bind_params_by_name)

    inputs = dict()
    for param in mod["main"].params:
        name = str(param.name_hint)
        data_shape = [int(i) for i in param.type_annotation.shape]
        dtype = str(param.type_annotation.dtype)
        if np.issubdtype(dtype, np.floating):
            # Since np.random.uniform only allows the ranges of float32,
            # at first float16 is used and scaled afterwards, if necessary.
            in_min, in_max = (np.finfo("float16").min, np.finfo("float16").max)
            data = np.random.uniform(low=in_min, high=in_max, size=data_shape).astype(dtype)
            scale = np.finfo(dtype).min / np.finfo("float16").min
            data *= scale
        elif np.issubdtype(dtype, np.integer):
            in_min, in_max = (np.iinfo(dtype).min, np.iinfo(dtype).max)
            data = np.random.randint(in_min, high=in_max, size=data_shape, dtype=dtype)
        else:
            raise TypeError(f"Type {dtype} not supported")
        inputs[name] = data

    return mod, inputs, params
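A short usage sketch, under the assumption that a .tflite file exists on disk and that the generate_ref_data helper from the previous record is in scope; the file path is a placeholder, not taken from the original.

# Hypothetical usage: load a TFLite model, build random inputs matching
# its signature, and produce reference outputs for later comparison.
tflite_file = "/tmp/model.tflite"  # placeholder path

mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_file)
ref_outputs = generate_ref_data(mod, inputs, params=params)
for name, value in ref_outputs.items():
    print(name, value.shape, value.dtype)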
[ "def create_relay_module(\n input_shape: List[int], dtype: str, ops: List[Union[OpPattern, Tuple[str, str]]]\n) -> tvm.IRModule:\n input_data = relay.var(\"input\", shape=input_shape, dtype=dtype)\n\n cur_data = input_data\n for op_info in ops:\n # Progressively build type info\n relay.transform.InferTypeLocal(cur_data)\n if isinstance(op_info, tuple):\n # layout transform case\n src_layout, dst_layout = op_info\n cur_data = apply_layout_transform(cur_data, src_layout, dst_layout)\n else:\n cur_data = pattern_level_to_op[op_info](cur_data)\n\n relay.transform.InferTypeLocal(cur_data)\n return tvm.IRModule.from_expr(cur_data)", "def convert_to_relay(tflite_model_buf, bind_params_by_name=True):\n # TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1\n try:\n import tflite.Model # pylint: disable=import-outside-toplevel\n\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)\n except AttributeError:\n import tflite # pylint: disable=import-outside-toplevel\n\n tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n mod, params = relay.frontend.from_tflite(tflite_model)\n if bind_params_by_name:\n mod[\"main\"] = relay.build_module.bind_params_by_name(mod[\"main\"], params)\n return mod, params", "def create_tflite_network(model_file: str, backends: list = ('CpuAcc', 'CpuRef')):\n net_id, parser, runtime = __create_network(model_file, backends, ann.ITfLiteParser())\n graph_id = parser.GetSubgraphCount() - 1\n\n return net_id, graph_id, parser, runtime", "def _main(args):\n # model = keras.models.load_model(args.h5_file, custom_objects={'prelu': prelu})\n with tf.keras.utils.custom_object_scope({'prelu': prelu}):\n converter = tf.lite.TFLiteConverter.from_keras_model_file(args.h5_file)\n tflite_file = converter.convert()\n open(args.tflite_file, 'wb').write(tflite_file)\n print('='*30)\n print('tffile file save in {}.'.format(args.tflite_file))", "def from_bitcode(fileobj_or_str):\r\n if isinstance(fileobj_or_str, bytes):\r\n bc = fileobj_or_str\r\n else:\r\n bc = fileobj_or_str.read()\r\n errbuf = BytesIO()\r\n context = api.llvm.getGlobalContext()\r\n m = api.llvm.ParseBitCodeFile(bc, context, errbuf)\r\n if not m:\r\n raise Exception(errbuf.getvalue())\r\n errbuf.close()\r\n return Module(m)", "def create_reader(self):\n\n py_reader = fluid.layers.py_reader(\n capacity=70,\n shapes=[f.shape for f in self.data_types.values()],\n dtypes=[f.dtype for f in self.data_types.values()],\n lod_levels=[f.lod_level for f in self.data_types.values()],\n name=self.pyreader_name,\n use_double_buffer=True)\n self.paddle_py_reader = py_reader\n\n self.fields_dict_ = collections.OrderedDict()\n input_placeholders = fluid.layers.read_file(py_reader)\n\n assert len(input_placeholders) == len(self.data_types)\n for op, (name, _) in zip(input_placeholders, self.data_types.items()):\n self.fields_dict_[name] = op\n\n return py_reader", "def load_model(model_path):\n interpreter = tflite.Interpreter(model_path=model_path)\n interpreter.allocate_tensors()\n return interpreter", "def create_test_module(\n file_format: gtirb.Module.FileFormat,\n isa: gtirb.Module.ISA,\n binary_type: Iterable[str] = None,\n) -> Tuple[gtirb.IR, gtirb.Module]:\n ir = gtirb.IR()\n m = gtirb.Module(isa=isa, file_format=file_format, name=\"test\")\n m.ir = ir\n\n add_standard_aux_data_tables(m)\n if binary_type:\n m.aux_data[\"binaryType\"].data = list(binary_type)\n\n return ir, m", "def from_tflite(model, 
prog_name): #, shape_dict, dtype_dict):\n try:\n import tflite.Model\n import tflite.SubGraph\n import tflite.BuiltinOperator\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n assert isinstance(model, tflite.Model.Model)\n\n # keep the same as tflite\n assert model.SubgraphsLength() == 1, \"only support one subgraph (main subgraph)\"\n subgraph = model.Subgraphs(0)\n\n # model inputs / outputs\n model_inputs = subgraph.InputsAsNumpy()\n model_outputs = subgraph.OutputsAsNumpy()\n assert model_inputs.size == 1, \"Model should have only one input\"\n assert model_outputs.size == 1, \"Model should have only one output\"\n\n # op code in model\n op_converter = OperatorConverter(model, subgraph, prog_name)\n op_converter.is_dequantize = False\n op_converter.check_unsupported_ops()\n\n in_tensor = op_converter.get_tensors(model_inputs)[0]\n out_tensor = op_converter.get_tensors(model_outputs)[0]\n\n op_converter.define_model_sizes(\"IN\", in_tensor)\n op_converter.define_model_sizes(\"OUT\", out_tensor)\n\n op_converter.nn_add_input(in_tensor)\n\n output_nodes = op_converter.convert_op_to_hexagon_nn()\n\n op_converter.nn_add_output(output_nodes)\n\n op_converter.print_nn_nodes()\n\n print(\"tensor_tab:\")\n print(op_converter.tensor_tab)\n\n op_converter.close()\n print(\"Converted Hexagon Model saved to {}\".format(op_converter.h_file.name))", "def resnext34(**kwargs):\n model = ResNeXt(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def resnext18( **kwargs):\n model = ResNeXt(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def compile_ir(engine, llvm_ir):\n # Create a LLVM module object from the IR\n mod = llvm.parse_assembly(llvm_ir)\n mod.verify()\n # Now add the module and make sure it is ready for execution\n engine.add_module(mod)\n engine.finalize_object()\n return mod", "def read_module(stream):\n data = stream.read()\n if len(data) % 4 != 0:\n raise ParseError('File length is not divisible by 4')\n words = array.array('I', data)\n binary = SpirvBinary(words)\n\n module = ir.Module()\n module.value_to_id = {}\n try:\n parse_global_instructions(binary, module)\n parse_functions(binary, module)\n return module\n finally:\n del module.value_to_id", "def construct_model(attribute_vector_file,\n img_height=220,\n img_width=176,\n TV_weight=50,\n alpha=4,\n vgg19_weights_file=\"models/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5\",\n vgg_output_layers=[\"block3_conv1\", \"block4_conv1\", \"block5_conv1\"],\n vgg_output_layer_weights=[1.0, 1.0, 1.0]):\n \n # LOAD PREVIOUSLY GENERATED ATTRIBUTE VECTOR\n w = np.load(attribute_vector_file)\n \n \n \n \n # CREATE CUSTOM LAYERS FOR USE IN MODEL:\n \n # Create Normalization layer\n class Normalize(Layer):\n def __init__(self, **kwargs):\n super(Normalize, self).__init__(**kwargs)\n\n def call(self, x, mask=None):\n return x / 255.\n \n # Create Denormalization layer\n class Denormalize(Layer):\n def __init__(self, **kwargs):\n super(Denormalize, self).__init__(**kwargs)\n\n def call(self, x, mask=None):\n return x * 255\n \n\n # Allows network to learn identify function, (NOT SURE IF THIS IS NECESSARY)\n def residual_block(ip, id):\n init = ip\n\n x = ReflectionPadding2D()(ip)\n x = Conv2D(128, (3, 3), activation='linear', padding='valid',\n name='res_conv_' + str(id) + '_1')(x)\n x = BatchNormalization(axis=1, name=\"res_batchnorm_\" + str(id) + \"_1\")(x)\n x = Activation('relu', name=\"res_activation_\" + str(id) + \"_1\")(x)\n\n x = ReflectionPadding2D()(x)\n x = Conv2D(128, (3, 3), 
activation='linear', padding='valid',\n name='res_conv_' + str(id) + '_2')(x)\n x = BatchNormalization(axis=1, name=\"res_batchnorm_\" + str(id) + \"_2\")(x)\n\n m = Add()([x, init])\n #m = Activation('relu', name=\"res_activation_\" + str(id))(m)\n\n return m\n \n \n \n # create ReflectionPadding layer\n # https://github.com/misgod/fast-neural-style-keras/blob/master/layers.py\n class ReflectionPadding2D(Layer):\n def __init__(self, padding=(1, 1), dim_ordering='default', **kwargs):\n super(ReflectionPadding2D, self).__init__(**kwargs)\n\n if dim_ordering == 'default':\n dim_ordering = K.image_dim_ordering()\n\n self.padding = padding\n if isinstance(padding, dict):\n if set(padding.keys()) <= {'top_pad', 'bottom_pad', 'left_pad', 'right_pad'}:\n self.top_pad = padding.get('top_pad', 0)\n self.bottom_pad = padding.get('bottom_pad', 0)\n self.left_pad = padding.get('left_pad', 0)\n self.right_pad = padding.get('right_pad', 0)\n else:\n raise ValueError('Unexpected key found in `padding` dictionary. '\n 'Keys have to be in {\"top_pad\", \"bottom_pad\", '\n '\"left_pad\", \"right_pad\"}.'\n 'Found: ' + str(padding.keys()))\n else:\n padding = tuple(padding)\n if len(padding) == 2:\n self.top_pad = padding[0]\n self.bottom_pad = padding[0]\n self.left_pad = padding[1]\n self.right_pad = padding[1]\n elif len(padding) == 4:\n self.top_pad = padding[0]\n self.bottom_pad = padding[1]\n self.left_pad = padding[2]\n self.right_pad = padding[3]\n else:\n raise TypeError('`padding` should be tuple of int '\n 'of length 2 or 4, or dict. '\n 'Found: ' + str(padding))\n\n if dim_ordering not in {'tf'}:\n raise ValueError('dim_ordering must be in {tf}.')\n self.dim_ordering = dim_ordering\n self.input_spec = [InputSpec(ndim=4)] \n\n\n def call(self, x, mask=None):\n top_pad=self.top_pad\n bottom_pad=self.bottom_pad\n left_pad=self.left_pad\n right_pad=self.right_pad \n\n\n paddings = [[0,0],[left_pad,right_pad],[top_pad,bottom_pad],[0,0]]\n\n\n return tf.pad(x,paddings, mode='REFLECT', name=None)\n\n def compute_output_shape(self,input_shape):\n if self.dim_ordering == 'tf':\n rows = input_shape[1] + self.top_pad + self.bottom_pad if input_shape[1] is not None else None\n cols = input_shape[2] + self.left_pad + self.right_pad if input_shape[2] is not None else None\n\n return (input_shape[0],\n rows,\n cols,\n input_shape[3])\n else:\n raise ValueError('Invalid dim_ordering:', self.dim_ordering)\n\n\n def get_config(self):\n config = {'padding': self.padding}\n base_config = super(ReflectionPadding2D, self).get_config()\n return dict(list(base_config.items()) + list(config.items())) \n\n \n \n # Build mask generation net\n def generate_mask_net():\n \n # Build model architecture\n input_img = Input(shape=(img_height, img_width, 3))\n\n x = Normalize()(input_img)\n\n x = ReflectionPadding2D(padding=(4,4))(x)\n x = Conv2D(32, (9, 9), strides=(1,1), activation='linear', padding='valid')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n x = Conv2D(64, (3, 3), strides=(2,2), activation='linear', padding='same')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n x = Conv2D(128, (3, 3), strides=(2,2), activation='linear', padding='same')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n\n r1 = residual_block(x, 1)\n r2 = residual_block(r1, 2)\n r3 = residual_block(r2, 3)\n r4 = residual_block(r3, 4)\n x = residual_block(r4, 5)\n\n\n x = Conv2DTranspose(64, (3, 3), strides=(2,2), padding='same')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n x = 
Conv2DTranspose(32, (3, 3), strides=(2,2), padding='same')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n x = ReflectionPadding2D((4, 4))(x)\n y = Conv2D(3, (9, 9), strides=(1,1), activation='tanh', padding='valid',\n name='mask_output')(x)\n\n\n model_mask_net = Model(inputs=input_img, outputs=y)\n print(\"Mask model architecture is loaded\")\n\n return model_mask_net\n \n \n \n \n # add vgg19 convolutional base to autoencoder\n def vgg_net(mask, orig_input):\n \n orig_input_norm = Normalize()(orig_input)\n masked_input = Add()([mask, orig_input_norm])\n \n \n # create new tensor of original and masked inputs\n input_tensor = Concatenate(axis=0)([masked_input, orig_input_norm]) \n\n # Build out VGG19 Architecture\n x = Conv2D(64, (3, 3), activation='relu', name='block1_conv1', padding='same')(input_tensor)\n x = Conv2D(64, (3, 3), activation='relu', name='block1_conv2', padding='same')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2))(x)\n\n x = Conv2D(128, (3, 3), activation='relu', name='block2_conv1', padding='same')(x)\n x = Conv2D(128, (3, 3), activation='relu', name='block2_conv2', padding='same')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2))(x)\n\n x = Conv2D(256, (3, 3), activation='relu', name='block3_conv1', padding='same')(x)\n x = Conv2D(256, (3, 3), activation='relu', name='block3_conv2', padding='same')(x)\n x = Conv2D(256, (3, 3), activation='relu', name='block3_conv3', padding='same')(x)\n x = Conv2D(256, (3, 3), activation='relu', name='block3_conv4', padding='same')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2))(x)\n\n x = Conv2D(512, (3, 3), activation='relu', name='block4_conv1', padding='same')(x)\n x = Conv2D(512, (3, 3), activation='relu', name='block4_conv2', padding='same')(x)\n x = Conv2D(512, (3, 3), activation='relu', name='block4_conv3', padding='same')(x)\n x = Conv2D(512, (3, 3), activation='relu', name='block4_conv4', padding='same')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2))(x)\n\n x = Conv2D(512, (3, 3), activation='relu', name='block5_conv1', padding='same')(x)\n x = Conv2D(512, (3, 3), activation='relu', name='block5_conv2', padding='same')(x)\n x = Conv2D(512, (3, 3), activation='relu', name='block5_conv3', padding='same')(x)\n x = Conv2D(512, (3, 3), activation='relu', name='block5_conv4', padding='same')(x)\n z = MaxPooling2D((2, 2), strides=(2, 2))(x) \n\n\n full_model = Model(inputs=orig_input, outputs=z)\n print(\"VGG Model has been loaded and appended to network\")\n\n # Download and set weights\n f = h5py.File(vgg19_weights_file)\n layer_names = [name for name in f.attrs['layer_names']][1:] # chop off input layer\n\n for i, layer in enumerate(full_model.layers[-21:]):\n g = f[layer_names[i]]\n weights = [g[name] for name in g.attrs['weight_names']]\n layer.set_weights(weights)\n print(\"VGG19 Weights have been set successfully\")\n\n\n\n\n\n\n #Add losses as regulizers\n add_interpolation_loss(full_model, orig_input_norm, alpha, w)\n add_total_variation_loss(mask_net.layers[-1], weight=TV_weight)\n \n # Freeze all VGG layers\n for layer in full_model.layers[-21:]:\n layer.trainable = False\n\n return full_model\n \n \n \n \n def add_interpolation_loss(full_model, orig_input, alpha, w):\n\n vgg_layers = dict([(layer.name, layer) for layer in full_model.layers[-21:]])\n \n # output layers\n output_layers = vgg_output_layers\n\n layers = [vgg_layers[layer] for layer in output_layers]\n interpolation_regularizer = FeatureInterpolationRegularizer()(layers, alpha, w)\n \n # add_loss function to apply regularization loss to any layer\n 
layers[2].add_loss(interpolation_regularizer)\n \n \n \n # Loss function to be applied as a Regularizer\n class FeatureInterpolationRegularizer(Regularizer):\n\n def __init__(self):\n super(FeatureInterpolationRegularizer, self).__init__()\n\n def __call__(self, layer, a, w):\n phi_x_r = [K.flatten(layer[i].output[0])*vgg_output_layer_weights[i] for i in range(3)]\n phi_x_r = K.concatenate(phi_x_r) # Image + mask feature\n\n phi_x = [K.flatten(layer[i].output[1])*vgg_output_layer_weights[i] for i in range(3)]\n phi_x = K.concatenate(phi_x) # Original image features\n\n delta = phi_x_r - (phi_x + w * a)\n loss = K.sum(K.square(delta))\n\n return loss\n \n \n \n def add_total_variation_loss(transform_output_layer,weight):\n # Total Variation Regularization\n layer = transform_output_layer # Output layer\n tv_regularizer = TVRegularizer(weight)(layer)\n layer.add_loss(tv_regularizer)\n \n \n class TVRegularizer(Regularizer):\n \"\"\" Enforces smoothness in image output. \"\"\"\n\n def __init__(self, weight):\n self.weight = weight\n self.uses_learning_phase = False\n super(TVRegularizer, self).__init__()\n\n def __call__(self, x):\n assert K.ndim(x.output) == 4\n x_out = x.output \n\n shape = K.shape(x_out)\n img_width, img_height,channel = (shape[1],shape[2], shape[3])\n size = img_width * img_height * channel \n if K.image_dim_ordering() == 'th':\n a = K.square(x_out[:, :, :img_width - 1, :img_height - 1] - x_out[:, :, 1:, :img_height - 1])\n b = K.square(x_out[:, :, :img_width - 1, :img_height - 1] - x_out[:, :, :img_width - 1, 1:])\n else:\n a = K.square(x_out[:, :img_width - 1, :img_height - 1, :] - x_out[:, 1:, :img_height - 1, :])\n b = K.square(x_out[:, :img_width - 1, :img_height - 1, :] - x_out[:, :img_width - 1, 1:, :])\n loss = self.weight * K.sum(K.pow(a + b, 1.25)) \n return loss\n \n \n \n \n \n \n \n mask_net = generate_mask_net()\n model = vgg_net(mask_net.output,mask_net.input)\n model.summary()\n \n return model, mask_net", "def _create_flax_module(self) -> flax_nn.Module:\n raise NotImplementedError()", "def compile_ir(engine, llvm_ir):\n # Create a LLVM module object from the IR\n mod = llvm.parse_assembly(llvm_ir)\n mod.verify()\n # Optimize the module\n pmb = llvm.create_pass_manager_builder()\n pmb.opt_level = 2\n pm = llvm.create_module_pass_manager()\n pmb.populate(pm)\n pm.run(mod)\n # Now add the module and make sure it is ready for execution\n engine.add_module(mod)\n engine.finalize_object()\n return mod", "def setup_from_file(self, dir):\n self.shared_resources.load(os.path.join(dir, \"shared_resources\"))\n self.input_module.setup()\n self.input_module.load(os.path.join(dir, \"input_module\"))\n self.model_module.setup(self.is_train)\n self.sess.run([v.initializer for v in self.model_module.variables])\n self.model_module.load(self.sess, os.path.join(dir, \"model_module\"))\n self.output_module.setup()\n self.output_module.load(os.path.join(dir, \"output_module\"))", "def add_ir_module(self, ir_module):", "def new(id):\r\n context = api.llvm.getGlobalContext()\r\n m = api.llvm.Module.new(id, context)\r\n return Module(m)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
2. SELECTION PHASE. If a tree does not reproduce, it becomes extinct. This motivates a competitive-exclusion step that eliminates trees with poorer metric values, so the number of trees in the forest stays bounded. Initially, trees reproduce quickly and all of them are included in the forest; fitter trees reproduce more than undesirable ones, where "fitter" is measured either by the objective value or by novelty (in novelty search). The elimination mechanism is activated once the population exceeds the preselected maximum number of trees in the forest: the trees and their seeds are ranked, and those with lower fitness values are removed to keep the tree population manageable.
def select(self):

    def truncate(self):
        """ Truncates forest to maximum number of trees. """
        self.population = self.population[:self.max_number_trees]

    def SortOnItem(list_, item_loc):
        """ Sorts based on a given item. """
        templist = [elmt[item_loc] for elmt in list_]
        index = np.argsort(templist)
        return [list_[i] for i in index]

    # adds current seedlings to forest
    for tree in self.seedlings:
        # if the tree does not compete with an existing one, add it
        if tree not in self.population:
            self.population.append(tree)

    # sorts the trees of the forest in ascending fitness values - minimization
    self.population = SortOnItem(self.population, item_loc=0)

    # removes unfit trees from forest
    truncate(self)
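To make the competitive-exclusion step concrete, here is a small standalone sketch with illustrative tree values (not taken from the original class): seedlings are merged into the population, trees are ranked by fitness, and the forest is truncated to the maximum size.

# Standalone sketch of the selection step; each tree is a [fitness, label]
# pair and lower fitness is better (minimization).
import numpy as np

max_number_trees = 3
population = [[0.9, "tree_a"], [0.2, "tree_b"]]
seedlings = [[0.5, "tree_c"], [0.7, "tree_d"], [0.1, "tree_e"]]

# competitive exclusion: merge, rank by fitness, truncate to the cap
for tree in seedlings:
    if tree not in population:
        population.append(tree)
order = np.argsort([t[0] for t in population])
population = [population[i] for i in order][:max_number_trees]
print(population)  # [[0.1, 'tree_e'], [0.2, 'tree_b'], [0.5, 'tree_c']]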
[ "def hyperparameter_selection_rf():\n # Number of trees in random forest\n n_estimators = [int(x) for x in np.linspace(start=100, stop=1200, num=12)]\n # Number of features to consider at every split\n max_features = ['auto', 'sqrt']\n # Maximum number of levels in tree\n max_depth = [int(x) for x in np.linspace(5, 30, num=6)]\n # Minimum number of samples required to split a node\n min_samples_split = [2, 5, 10, 15, 100]\n # Minimum number of samples required at each leaf node\n min_samples_leaf = [1, 2, 5, 10]\n\n random_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf}\n\n return(random_grid)", "def prune_tree(self, tree):\n tips = tree.get_terminals()\n try:\n tips2 = random.sample(tips, self.ntips)\n except ValueError:\n tips2 = tips\n\n for tip in tips:\n if tip in tips2:\n continue\n _ = tree.prune(tip)\n\n return tree", "def subtree_reconfigure_forest(\r\n self,\r\n num_trees=8,\r\n num_restarts=10,\r\n restart_fraction=0.5,\r\n subtree_maxiter=100,\r\n subtree_size=10,\r\n subtree_search=('random', 'bfs'),\r\n subtree_select=('random',),\r\n subtree_weight_what=('flops', 'size'),\r\n subtree_weight_pwr=(2,),\r\n parallel='auto',\r\n parallel_maxiter_steps=4,\r\n minimize='flops',\r\n progbar=False,\r\n inplace=False,\r\n ):\r\n tree = self if inplace else self.copy()\r\n\r\n # candidate trees\r\n num_keep = max(1, int(num_trees * restart_fraction))\r\n\r\n # how to rank the trees\r\n score = get_score_fn(minimize)\r\n\r\n # set up the initial 'forest' and parallel machinery\r\n pool = parse_parallel_arg(parallel)\r\n if pool is not None:\r\n is_worker = maybe_leave_pool(pool)\r\n # store the trees as futures for the entire process\r\n forest = [pool.scatter(tree)]\r\n maxiter = subtree_maxiter // parallel_maxiter_steps\r\n else:\r\n forest = [tree]\r\n maxiter = subtree_maxiter\r\n\r\n if progbar:\r\n import tqdm\r\n pbar = tqdm.tqdm(total=num_restarts)\r\n pbar.set_description(_describe_tree(tree))\r\n\r\n try:\r\n for _ in range(num_restarts):\r\n\r\n # on the next round take only the best trees\r\n forest = itertools.cycle(forest[:num_keep])\r\n\r\n # select some random configurations\r\n saplings = [{\r\n 'tree': next(forest),\r\n 'maxiter': maxiter,\r\n 'minimize': minimize,\r\n 'subtree_size': subtree_size,\r\n 'subtree_search': random.choice(subtree_search),\r\n 'select': random.choice(subtree_select),\r\n 'weight_pwr': random.choice(subtree_weight_pwr),\r\n 'weight_what': random.choice(subtree_weight_what),\r\n } for _ in range(num_trees)]\r\n\r\n if pool is None:\r\n forest = [_reconfigure_tree(**s) for s in saplings]\r\n res = [{'tree': t, **_get_tree_info(t)} for t in forest]\r\n else:\r\n # submit in smaller steps to saturate processes\r\n for _ in range(parallel_maxiter_steps):\r\n for s in saplings:\r\n s['tree'] = submit(pool, _reconfigure_tree, **s)\r\n\r\n # compute scores remotely then gather\r\n forest_futures = [s['tree'] for s in saplings]\r\n res_futures = [submit(pool, _get_tree_info, t)\r\n for t in forest_futures]\r\n res = [{'tree': tree_future, **res_future.result()}\r\n for tree_future, res_future in\r\n zip(forest_futures, res_futures)]\r\n\r\n # update the order of the new forest\r\n res.sort(key=score)\r\n forest = [r['tree'] for r in res]\r\n\r\n if progbar:\r\n pbar.update()\r\n if pool is None:\r\n d = _describe_tree(forest[0])\r\n else:\r\n d = submit(pool, _describe_tree, forest[0]).result()\r\n 
pbar.set_description(d)\r\n\r\n finally:\r\n if progbar:\r\n pbar.close()\r\n\r\n if pool is None:\r\n tree.set_state_from(forest[0])\r\n else:\r\n tree.set_state_from(forest[0].result())\r\n maybe_rejoin_pool(is_worker, pool)\r\n\r\n return tree", "def prune(tree, testSet, res, technique):\n assert technique in [\"reduced_error\"]\n if technique == \"reduced_error\":\n tbSet = testSet[testSet[tree.col] >= tree.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[tree.col] < tree.value] #find which test observations belong to this tree's false branch\n \n if tree.tb.results is None: #Check if the true branch of this sub-tree is a leaf\n ptb = prune(tree.tb, tbSet, res, technique) #If not, recursively travel down the true branch and prune it.\n else:\n ptb = tree.tb #If the true branch is a leaf, then the true branch has--in essence--already been pruned.\n if tree.fb.results is None: #Check if the false branch of this sub-tree is a leaf\n pfb = prune(tree.fb, fbSet, res, technique) #If not, recursively travel down the false branch and prune it.\n else:\n pfb = tree.fb #If the false branch is a leaf, then the false branch has--in essence--already been pruned.\n \n #Sum the number of misclassifications of the test data at each of the leaves of this node\n wrong_in_leaves = __deep_count_errors(ptb, tbSet, res) + __deep_count_errors(pfb, fbSet, res)\n \n #Count the number of misclassificationsof the test data that would occur if this node were treated as a leaf\n wrong_at_node = __count_errors(tree, testSet, res)\n \n #Assess whether or not treating the node as a leaf improves the accuracy on the test set\n if wrong_at_node <= wrong_in_leaves: \n #NOTE:The following line of code seems slightly redundant since count_errors(tree, testSet, res) had to call \n #__get_results(tree). I should set up some way to save the output of that function call instead of calling it twice.\n return decisionNode(results = __get_results(tree)) #If so, return a decisionNode where the node is a leaf\n else:\n #If not, return a decisionNode where the node splits on the same column and value as before, but the \n #true and false branches are the pruned-versions of the original true and false branches. See above for\n #definition of ptb and pfb\n return decisionNode(col = tree.col, value = tree.value, tb = ptb, fb = pfb)", "def test_random_forest_max_depth_parameter(params, X_train, X_test, y_train, y_test):", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. 
If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def forestPandas(data, resCol, maxDepth=None, percentage=70, numfeats = 15, fsize=5, selected=None):\n indices = data.index.tolist()\n trainingSets = {}\n percent = float(percentage)/100\n split = int(percent * len(indices) + 0.5)\n cols = data.columns.tolist() \n for i in range(fsize + 1):\n if selected == None:\n np.random.shuffle(cols)\n selected = cols[:15]\n selected.append(\"spam\")\n np.random.shuffle(indices)\n trainingSets[i] = {}\n trainingSets[i][\"data\"]= data[selected].loc[indices[:split + 1]]\n trainingSets[i][\"tree\"]= buildTreePandas(trainingSets[i][\"data\"], resCol, maxDepth=maxDepth) \n return trainingSets", "def grow_forest(forest, X, y, seeds, labels=None):\n # Convert data\n X, = check_arrays(X, dtype=DTYPE, sparse_format=\"dense\")\n # Make a list container for grown trees\n n_trees = forest.n_estimators\n trees = []\n # For each tree in the forest\n for i in range(n_trees):\n # Make a np.random.RandomState instance from the tree's planting seed\n random_state = check_random_state(seeds[i])\n # generate a random seed for a branching seed\n seed = random_state.randint(MAX_INT)\n # Make a decision tree object\n tree = forest._make_estimator(append=False)\n # Init the tree's RandomState instance with generated seed\n # this will randomize what features the tree will use\n tree.set_params(random_state=check_random_state(seed))\n # If we are bootstraping\n if forest.bootstrap:\n # If we are given labels\n if labels is not None:\n # Then need to bootstrap via labels\n # We can do this by using StratifiedShuffleSplit\n # to gain a random sample from each lable\n sss = cross_validation.StratifiedShuffleSplit(labels, \n n_iter=1, \n test_size=np.unique(labels).size, \n 
random_state=check_random_state(seed))\n # Then we'll bootstrap our X and y for the lable samples chosen\n for train, test in sss:\n X_lbs = X[test]\n y_lbs = y[test]\n break\n \n # Then get the number of samples\n n_samples = X_lbs.shape[0]\n # To generate a uniform sample weight\n curr_sample_weight = np.ones((n_samples,), dtype=np.float64)\n # Then randomly choses n_samples from all samples with replacement \n indices = random_state.randint(0, n_samples, n_samples)\n # Use this method of bincount to make a randome benning histogram\n # that will sum up to n_samples\n sample_counts = bincount(indices, minlength=n_samples)\n # Apply these randomized counts to the old uniform weights\n curr_sample_weight *= sample_counts\n # Fit the tree using these new sample weights\n tree.fit(X_lbs, y_lbs, sample_weight=curr_sample_weight, check_input=False)\n # Then set the indices of the tree only to the samples that had non-zero weights\n tree.indices_ = sample_counts > 0.\n else:\n # Then get the number of samples\n n_samples = X.shape[0]\n # To generate a uniform sample weight\n curr_sample_weight = np.ones((n_samples,), dtype=np.float64)\n # Then randomly choses n_samples from all samples with replacement \n indices = random_state.randint(0, n_samples, n_samples)\n # Use this method of bincount to make a randome benning histogram\n # that will sum up to n_samples\n sample_counts = bincount(indices, minlength=n_samples)\n # Apply these randomized counts to the old uniform weights\n curr_sample_weight *= sample_counts\n # Fit the tree using these new sample weights\n tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)\n # Then set the indices of the tree only to the samples that had non-zero weights\n tree.indices_ = sample_counts > 0.\n # If we aren't bootstraping\n else:\n # This just fit the data with no random weights\n tree.fit(X, y, check_input=False)\n # Add the grown tree to the container \n trees.append(tree)\n # return all of the trained trees\n return trees", "def depth_of_trees(self):\n\n #Get depth of trees\n max_depth_list = []\n \n rf = RandomForestRegressor(n_estimators=2500,max_features=0.35)\n \n feat_tsf = self.feat_tsf_dataset\n labels = self.labels_dataset\n\n rf.fit(feat_tsf,labels)\n \n for i in rf.estimators_:\n \n max_depth_list.append(i.get_depth())\n \n print(\"Max depht: %i trees\" % max(max_depth_list)) \n \n return pd.DataFrame(max_depth_list,columns=['trees'])", "def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. 
\"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))", "def test_lambda_infinity(self):\n for n in [10, 100, 1000, 10000]:\n for max_leaf_nodes in [3, 5, 7, 9, 11]:\n X, y = DGP_limit_test(n)\n _, values = np.unique(y, return_counts=True)\n dist = values / n\n\n skmodel = DecisionTreeClassifier(max_leaf_nodes=max_leaf_nodes)\n skmodel.fit(X, y)\n imodel = HSTreeClassifier(deepcopy(skmodel), reg_param=1e4*n)\n i_tree = imodel.estimator_.tree_\n\n distribution = np.tile(dist, (i_tree.value.shape[0], 1, 1))\n\n np.testing.assert_almost_equal(distribution, i_tree.value, decimal=3)", "def improve_tree(tree, freq_dict):\n # todo", "def cut_feas_bytree(data, feas, target, cut_params={'max_depth': 4, 'thred': 2}):\n df = data[[feas] + [target]].sort_values(feas).reset_index(drop=True)\n tmp = df.dropna(subset=[feas]).reset_index(drop=True)\n\n def tree_fit_bins(tmp, feas, target, cut_params):\n model = mf.tree_ana.TreeAna().fit_save_tree(tmp, target, feature_names=[feas],\n filename=None,\n max_depth=cut_params.get('max_depth', 3), \\\n min_samples_leaf=cut_params.get('min_samples_leaf', 0.05),\n proportion=False)\n\n left = model.tree_.children_left\n idx = np.argwhere(left != -1)[:, 0]\n bins = list(model.tree_.threshold[idx])\n bins = sorted(bins + [tmp[feas].min(), tmp[feas].max()])\n\n return bins\n\n def merge_by_chi2(data, feas, target, bins):\n df, st, bins = cut_feas(data.dropna(subset=[feas]), feas, target, cut_params=bins)\n st['index'] = range(st.shape[0])\n st['good'] = st[feas] * (1 - st[target])\n st['bad'] = st[feas] * st[target]\n chi_list = []\n for i in range(len(st) - 1):\n chi_list.append(chi2_contingency(np.array(st[i:i + 2].iloc[:, -2:]))[0])\n st['chi'] = chi_list + [np.nan]\n min_chi = st['chi'].min()\n\n return st, min_chi, st.shape\n\n bins = tree_fit_bins(tmp, feas, target, cut_params)\n\n # get thred&max_bins\n thred = cut_params.get('thred', -1)\n max_bins = cut_params.get('max_bins', 9999)\n\n if len(bins) == 2:\n bins = tree_fit_bins(tmp, feas, target, {'max_depth': 1, 'min_samples_leaf': 0.01})\n thred, max_bins = -1, 9999\n\n st, min_chi, st_shape = merge_by_chi2(tmp, feas, target, bins)\n\n while (min_chi < thred) | (st_shape[0] > max_bins):\n try:\n idx = st[st['chi'] == min_chi]['index'].values[0]\n bins = bins[:idx + 1] + bins[idx + 2:]\n except IndexError:\n bins = bins\n st, min_chi, st_shape = merge_by_chi2(tmp, feas, target, bins)\n\n return bins", "def DecisionTree(df, n, TYPE, save_name, SCORES):\n from sklearn.ensemble import RandomForestClassifier\n from sklearn.ensemble import RandomForestRegressor\n from math import sqrt\n\n X_all = df.drop('Class', axis=1).values \n Y_all = df.loc[:, 'Class'].values\n\n fean_num_feat_sel = len(list(df.columns.values)[1:])\n if TYPE.lower() == 'c':\n feat_sel_forest = RandomForestClassifier(criterion='entropy', max_features= round(sqrt(fean_num_feat_sel)), n_estimators=500, n_jobs=1)\n elif TYPE.lower() == 'r':\n Y_all = Y_all.astype('float')\n feat_sel_forest = RandomForestRegressor(max_features= round(sqrt(fean_num_feat_sel)), n_estimators=500, n_jobs=1)\n else:\n print('Need to specify -type r/c (regression/classification)')\n 
exit()\n\n print(\"=====* Running decision tree based feature selection *=====\")\n\n #Train the model & derive importance scores\n feat_sel_forest = feat_sel_forest.fit(X_all, Y_all)\n importances = feat_sel_forest.feature_importances_\n\n # Sort importance scores and keep top n\n feat_names = list(df.columns.values)[1:]\n temp_imp = pd.DataFrame(importances, columns = [\"imp\"], index=feat_names) \n indices = np.argsort(importances)[::-1]\n\n if SCORES.lower() != 'f':\n save_scores = save_name + '_RFScores.txt'\n temp_imp.to_csv(save_scores)\n\n for n_size in n:\n indices_keep = indices[0:int(n_size)]\n fixed_index = []\n # Translate keep indices into the indices in the df\n for i in indices_keep:\n new_i = i + 1\n fixed_index.append(new_i)\n fixed_index = [0] + fixed_index\n good = [df.columns[i] for i in fixed_index]\n print(\"Features selected using DecisionTree feature selection: %s\" % str(good.remove('Class')))\n\n save_name2 = save_name + \"_\" + str(n_size)\n SaveTopFeats(good, save_name2)", "def fewer_spanningtrees_mirrors(G,e_m, print_statement = False):\n start = timer()\n all_spanning_trees = []\n all_mirror_trees = []\n num_v = len(G.nodes())\n\n must_have_edges,all_edges_list = find_musthave_edges(G) #edges that will be in tree for sure\n num_missing_edges = (n-1)-len(must_have_edges)# n is the number of nodes, n-1 is the number of edges in a spanning tree\n\n if print_statement == True:\n print (\"found %s must-have edges out of %s of the tree\" %(len(must_have_edges),n-1))\n\n # if the must have edges are already all the edges that we need for the spanning tree\n #solution found\n if num_missing_edges == 0: # extreme case that a graph is already a spanning tree\n all_spanning_trees.append(G)\n all_mirror_trees.append(G)\n\n # we need to pick the difference from the remaining unchoosen edges, \"random_edges\"\n else:\n random_edges = [item for item in all_edges_list if item not in must_have_edges]\n #random_edges = list((set(all_edges_list)-set(must_have_edges))) #bunch of edges to choose from\n all_missing_combo = list(combinations(random_edges,num_missing_edges))\n\n combos = []\n counter = 0\n rej_counter =0\n if print_statement == True:\n print (\"there are in total %s possible combination that could give spanning trees\" %len(all_missing_combo))\n print (\"calculating spanning trees of lenght %s for G with %s nodes\"%(num_v-1,num_v))\n\n for i in range(len(all_missing_combo)):\n selected_edges = must_have_edges + list(all_missing_combo[i]) # create ONE list with all the edges\n\n selected_edges_noweights = [x[:2] for x in selected_edges]\n all_edges_flat = [element for tupl in selected_edges_noweights for element in tupl]\n selected_nodes = set(all_edges_flat)\n\n sG = G.edge_subgraph(selected_edges_noweights)\n\n # if the combination gives a spanning tree\n if len(selected_nodes) == len(G.nodes()) and nx.is_connected(sG):\n\n try: # search for cycles\n nx.find_cycle(sG)\n except:#if none continue\n counter +=1\n mirror_edges = []\n for edge in selected_edges:\n if edge[0]< edge[1]: # in the dictionary e_m the first node is always the smallest\n u,v = edge[0], edge[1]\n else:\n u,v = edge[1],edge[0],\n mirror_edge = e_m[(u,v,edge[2]['weight'])]\n mirror_edge_noweights = mirror_edge[:2]\n mirror_edges.append(mirror_edge_noweights)\n\n # add the tree and his mirror to a list\n if print_statement == \"mirror_details\":\n print (\"\")\n print (\"edges: \",selected_edges_noweights)\n print (\"mirror_edges: \",mirror_edges)\n\n #sG = 
G.edge_subgraph(selected_edges_noweights)\n sG_mirror = G.edge_subgraph(mirror_edges)\n all_spanning_trees.append(sG)\n all_mirror_trees.append(sG_mirror)\n else:\n rej_counter +=1\n continue\n else:\n rej_counter +=1\n continue\n end = timer()\n delta = end - start\n if print_statement == True:\n print (\"%s spanning trees found\" %(len(all_spanning_trees)))\n print (\"%s selected edges excluded \" %(rej_counter))\n print (\"time required for computation of fewer_spanningtrees_mirrors : \",delta)\n return all_spanning_trees,all_mirror_trees,delta", "def gp(threshold):\n\n # Create test cases:\n test_cases = make_test_cases()\n\n # Create a population\n population = [Individual(initialize_tree(2, 5)) for _ in range(POPULATION_SIZE)]\n\n for generation in range(MAX_GENERATIONS):\n\n # Evaluate the population\n best_ind = population[0]\n for ind in population:\n ind.evaluate_individual(test_cases)\n #print(ind)\n\n if ind.total_error < best_ind.total_error:\n best_ind = ind\n\n\n # Report about generation\n report(generation, best_ind)\n\n if best_ind.is_solution(threshold):\n return best_ind\n\n # Create children\n old_population = population\n population = []\n\n for _ in range(POPULATION_SIZE):\n # Use 50% mutation, 50% crossover\n\n if random.random() < 0.5:\n parent = tournament_selection(old_population, TOURNAMENT_SIZE)\n child = mutation(parent)\n else:\n parent1 = tournament_selection(old_population, TOURNAMENT_SIZE)\n parent2 = tournament_selection(old_population, TOURNAMENT_SIZE)\n child = crossover(parent1, parent2)\n\n population.append(Individual(child))\n\n\n return \"FAILURE\"", "def forest_classify(forest, obs):\n votes = {}\n for l in LABELS:\n votes[l] = 0\n for tree in forest:\n l = tree.classify(obs)\n votes[l] += 1\n return max(votes.keys(), key=(lambda key: votes[key]))\n # leader = None\n # for l in LABELS:\n # if leader == None:\n # leader = [l]\n # else:\n # if votes[l]>leader:\n # leader = [l]\n # elif votes[l]==leader:\n # leader.append(l)\n # return random.choice(leader)", "def search_best_rf(self,n_trees = 2500,\n saveStats = True):\n #Process Time\n start = time.time()\n \n #Datasets\n feat_tsf = self.feat_tsf_dataset\n labels = self.labels_dataset\n \n #Generate random state\n #min_samples_split_values to test \n max_features_list = np.arange(0.20,0.66,0.01).tolist()\n max_features_list = [ round(elem, 2) for elem in max_features_list ]\n \n max_features_list.append('sqrt')\n max_features_list.append('auto')\n \n #Get max n_trees\n max_n_trees = self.depth_of_trees.max()[0]\n max_depth_list = np.arange(int(max_n_trees/4),\n max_n_trees,\n 1).tolist()\n max_depth_list.append(None)\n \n #min_impurity_decrease\n min_impurity_decrease_list = np.arange(0.01,0.26,0.01).tolist()\n min_impurity_decrease_list = [ round(elem, 2) for elem in min_impurity_decrease_list ]\n \n #min_samples_leaf_list.append(None)\n \n param_grid = {\"max_features\":max_features_list,\n \"max_depth\":max_depth_list,\n \"min_impurity_decrease\":min_impurity_decrease_list}\n \n #RF Model to test\n rf = RandomForestRegressor(\n bootstrap = True,\n oob_score = True,\n n_estimators = n_trees,\n random_state=7)\n \n \n #Define and execute pipe \n grid_cv= HalvingRandomSearchCV(estimator=rf,\n param_distributions=param_grid,\n random_state=7,\n max_resources='auto',\n verbose = 3).fit(feat_tsf,labels)\n \n \n df_results = pd.DataFrame(grid_cv.cv_results_)\n \n #Save CV Results\n if saveStats:\n \n df_results.to_csv('data/cv_hyperparams_model.csv')\n \n \n print (\"Best Params:\") \n 
print(grid_cv.best_params_)\n \n print(\"Saving model in 'model_params.joblib'\")\n # Writing joblibfile with best model \n dump(grid_cv.best_estimator_, 'model_params.joblib')\n \n #Save json file with params best model\n json_txt = json.dumps(grid_cv.best_params_, indent=4)\n with open('model_params', 'w') as file:\n file.write(json_txt)\n \n #End Time\n end = time.time()\n time_elapsed = round((end - start)/60,1)\n\n return ('Time elapsed minutes: %1.f' % \n (time_elapsed))", "def warn_treedepth(idata: arviz.InferenceData) -> List[SamplerWarning]:\n sampler_stats = idata.get(\"sample_stats\", None)\n if sampler_stats is None:\n return []\n\n rmtd = sampler_stats.get(\"reached_max_treedepth\", None)\n if rmtd is None:\n return []\n\n warnings = []\n for c in rmtd.chain:\n if sum(rmtd.sel(chain=c)) / rmtd.sizes[\"draw\"] > 0.05:\n warnings.append(\n SamplerWarning(\n WarningType.TREEDEPTH,\n f\"Chain {int(c)} reached the maximum tree depth.\"\n \" Increase `max_treedepth`, increase `target_accept` or reparameterize.\",\n \"warn\",\n )\n )\n return warnings" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Truncates forest to maximum number of trees.
def truncate(self): self.population = self.population[:self.max_number_trees]
[ "def delete_max(self):\n if len(self) == 0:\n raise IndexError('underflow')\n self.root = self._delete_max_node(self.root)", "def prune_overly_long_traces(self):\n for trace in self.trace_pool:\n if len(trace.nodes) > self.max_trace_length:\n trace.nodes = trace.nodes[-self.max_trace_length:]", "def drop_fossils(self, max_age=0.):\n if max_age == np.inf:\n return\n\n leafs = list(self.iter_leafs())\n t_final = self.root().height()\n\n too_old = []\n for node in leafs:\n if node.depth < t_final - max_age:\n too_old.append(node)\n\n self.remove_nodes(too_old)", "def reset_max_depth(self) -> None:\n # The max depth is now calculated on the fly, so this is a no-op.\n pass", "def depth_of_trees(self):\n\n #Get depth of trees\n max_depth_list = []\n \n rf = RandomForestRegressor(n_estimators=2500,max_features=0.35)\n \n feat_tsf = self.feat_tsf_dataset\n labels = self.labels_dataset\n\n rf.fit(feat_tsf,labels)\n \n for i in rf.estimators_:\n \n max_depth_list.append(i.get_depth())\n \n print(\"Max depht: %i trees\" % max(max_depth_list)) \n \n return pd.DataFrame(max_depth_list,columns=['trees'])", "def maximumDistance(self):\n from ete2 import Tree\n t = Tree(name='LUCA_root')\n empty_forest = {'sp':t,'gns':t,'fam':t,'ord':t,'cls':t,'phy':t,'kng':t}\n return self.distanceToTree(empty_forest,update_inner_attributes=False)", "def prune(self, threshold = 0.02):\n\n\n limits, quantiles = self.eval_limit(\"prune_whole\")\n total_limit = limits[2]\n\n n_pruned_away = 0\n\n for l in self.get_leaves():\n \n # Store the discriminator axis\n original_discriminator = l.discriminator_axis\n # Deactivate for now\n l.discriminator_axis = None\n \n # Important - eval the limit starting from the node under\n # study - not just the leave limit\n limits, quantiles = self.eval_limit(\"prune_\" + str(l))\n limit = limits[2]\n\n # Deactivating this category is too costly so we turn it\n # back on\n if (limit-total_limit)/total_limit > threshold:\n l.discriminator_axis = original_discriminator\n else:\n n_pruned_away += 1\n # End of loop over leaves\n \n print \"Pruned away\", n_pruned_away, \"leaves\"\n self.print_tree(verbose=True)", "def update_max_fringe(self):\n fringe_length = self.fringe_size()\n if fringe_length > self.max_fringe_size:\n self.max_fringe_size = fringe_length", "def _check_trees(tree, specified_maxlen=-1):\n try:\n getattr(tree, '_index')\n except AttributeError:\n raise AttributeError('tree needs to be indexed')\n\n assert(np.all([type(leaf) in [int, np.intp] for leaf in tree.leaves()]))\n max_len = np.max([tree[pos[:-1]]._index for pos in\n tree.treepositions('leaf')]) + 1\n return(np.max([max_len, specified_maxlen]))", "def _subtree_below_maximum_leaves(self, root, threshold):\r\n\r\n nodes = root.get_terminals()\r\n return len(nodes) <= threshold", "def _cleanup(zkclient, path, max_count):\n nodes = sorted(zkclient.get_children(path))\n extra = len(nodes) - max_count\n if extra > 0:\n for node in nodes[0:extra]:\n zkutils.ensure_deleted(zkclient,\n z.join_zookeeper_path(path, node))", "def _prune(self):\n while len(self.data) > self.limit:\n self.data.popleft()", "def delete_top_from_max_heap(x):\n last = x[-1]\n x = x.at[0].set(last)[:-1]\n return heapify_subtree(x, 0)", "def _delete_max_node(self, node):\n if node.right is None:\n return node.left\n node.right = self._delete_max_node(node.right)\n node.size = 1 + self._get_size(node.left) + self._get_size(node.right)\n return node", "def select(self):\n\n def truncate(self):\n \"\"\" Truncates forest to maximum number of trees. 
\"\"\"\n\n self.population = self.population[:self.max_number_trees]\n\n def SortOnItem(list_, item_loc):\n \"\"\" Sorts based on a given item. \"\"\"\n\n templist = [elmt[item_loc] for elmt in list_]\n index = np.argsort(templist)\n return [list_[i] for i in index]\n\n # adds current seedlings to forest\n for tree in self.seedlings:\n\n # if tree does not competes with another existing one, adds it\n if tree not in self.population:\n self.population.append(tree)\n\n # sorts the trees of the forest in ascending values - minimization\n self.population = SortOnItem(self.population, item_loc=0)\n\n # removes unfit trees from forest\n truncate(self)", "def test_max_depth(self):\n sys.setrecursionlimit(200)\n code = 'endlessly_recursive_func(0)'\n suggs = [\"increase the limit with `sys.setrecursionlimit(limit)`\"\n \" (current value is 200)\", AVOID_REC_MSG]\n self.throws(code, MAXRECURDEPTH, suggs)\n sys.setrecursionlimit(initial_recursion_limit)", "def truncation(x, N):\r\n f = ClassTruncation.apply\r\n x = f(x, N)\r\n return x", "def prune_features(self):\r\n for i, features in enumerate(self.curr_features):\r\n # Continue if the number of features in this grid does\r\n # not exceed the upper bound.\r\n if len(features) <= self.config.grid_max_feature_num:\r\n continue\r\n self.curr_features[i] = sorted(features, key=lambda x:x.lifetime, \r\n reverse=True)[:self.config.grid_max_feature_num]", "def _truncate(self):\n dif = len(self) - self._maxLen\n if dif > 0:\n #return\n self[:dif] = []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
3. REPRODUCTION PHASE. The trees will produce seeds based on their relative fitness which will then be spread over the problem space. Each seed, in turn, will grow into a new tree depending on external factors. A linear increase in the number of seeds produced by the trees of the forest is considered from max_seeds for the tree with the lowest value to min_seeds for the one with the highest value (i.e. minimization problem).
def reproduce(self): def compute_seeds(fitness): """ Computes the number of seeds given a fitness value. """ seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \ (self.max_seeds-self.min_seeds) + self.min_seeds return round(seeds) # evaluates max and min fitness for current year max_fitness = max(tree[0] for tree in self.population) min_fitness = min(tree[0] for tree in self.population) # computes the number of seeds produced per tree for tree in self.population: tree[1].seeds = int(compute_seeds(tree[0]))
[ "def grow_forest(forest, X, y, seeds, labels=None):\n # Convert data\n X, = check_arrays(X, dtype=DTYPE, sparse_format=\"dense\")\n # Make a list container for grown trees\n n_trees = forest.n_estimators\n trees = []\n # For each tree in the forest\n for i in range(n_trees):\n # Make a np.random.RandomState instance from the tree's planting seed\n random_state = check_random_state(seeds[i])\n # generate a random seed for a branching seed\n seed = random_state.randint(MAX_INT)\n # Make a decision tree object\n tree = forest._make_estimator(append=False)\n # Init the tree's RandomState instance with generated seed\n # this will randomize what features the tree will use\n tree.set_params(random_state=check_random_state(seed))\n # If we are bootstraping\n if forest.bootstrap:\n # If we are given labels\n if labels is not None:\n # Then need to bootstrap via labels\n # We can do this by using StratifiedShuffleSplit\n # to gain a random sample from each lable\n sss = cross_validation.StratifiedShuffleSplit(labels, \n n_iter=1, \n test_size=np.unique(labels).size, \n random_state=check_random_state(seed))\n # Then we'll bootstrap our X and y for the lable samples chosen\n for train, test in sss:\n X_lbs = X[test]\n y_lbs = y[test]\n break\n \n # Then get the number of samples\n n_samples = X_lbs.shape[0]\n # To generate a uniform sample weight\n curr_sample_weight = np.ones((n_samples,), dtype=np.float64)\n # Then randomly choses n_samples from all samples with replacement \n indices = random_state.randint(0, n_samples, n_samples)\n # Use this method of bincount to make a randome benning histogram\n # that will sum up to n_samples\n sample_counts = bincount(indices, minlength=n_samples)\n # Apply these randomized counts to the old uniform weights\n curr_sample_weight *= sample_counts\n # Fit the tree using these new sample weights\n tree.fit(X_lbs, y_lbs, sample_weight=curr_sample_weight, check_input=False)\n # Then set the indices of the tree only to the samples that had non-zero weights\n tree.indices_ = sample_counts > 0.\n else:\n # Then get the number of samples\n n_samples = X.shape[0]\n # To generate a uniform sample weight\n curr_sample_weight = np.ones((n_samples,), dtype=np.float64)\n # Then randomly choses n_samples from all samples with replacement \n indices = random_state.randint(0, n_samples, n_samples)\n # Use this method of bincount to make a randome benning histogram\n # that will sum up to n_samples\n sample_counts = bincount(indices, minlength=n_samples)\n # Apply these randomized counts to the old uniform weights\n curr_sample_weight *= sample_counts\n # Fit the tree using these new sample weights\n tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)\n # Then set the indices of the tree only to the samples that had non-zero weights\n tree.indices_ = sample_counts > 0.\n # If we aren't bootstraping\n else:\n # This just fit the data with no random weights\n tree.fit(X, y, check_input=False)\n # Add the grown tree to the container \n trees.append(tree)\n # return all of the trained trees\n return trees", "def select(self):\n\n def truncate(self):\n \"\"\" Truncates forest to maximum number of trees. \"\"\"\n\n self.population = self.population[:self.max_number_trees]\n\n def SortOnItem(list_, item_loc):\n \"\"\" Sorts based on a given item. 
\"\"\"\n\n templist = [elmt[item_loc] for elmt in list_]\n index = np.argsort(templist)\n return [list_[i] for i in index]\n\n # adds current seedlings to forest\n for tree in self.seedlings:\n\n # if tree does not competes with another existing one, adds it\n if tree not in self.population:\n self.population.append(tree)\n\n # sorts the trees of the forest in ascending values - minimization\n self.population = SortOnItem(self.population, item_loc=0)\n\n # removes unfit trees from forest\n truncate(self)", "def _grow_random_tree(self):\n # selected the bootstrap sample and random subspace\n bootstrap_sample = self.df.sample(n=self.bootstrap_size, replace=False)\n random_subspace = random.sample(self.feature_space, self.subspace_size)\n\n # build tree based on the bootstrap sample and random subspace\n # top-level branch depth is set at 1\n logging.info('current training with subspace: {}'.format(json.dumps(random_subspace, indent=4)))\n return self._fork_tree(data=bootstrap_sample, subspace=random_subspace, current_branch_depth=1)", "def initialize_tree(min_depth, max_depth):\n depth = random.randint(min_depth, max_depth)\n if random.random() < 0.5:\n return generate_tree_full(depth)\n else:\n return generate_tree_grow(depth)", "def subtree_reconfigure_forest(\r\n self,\r\n num_trees=8,\r\n num_restarts=10,\r\n restart_fraction=0.5,\r\n subtree_maxiter=100,\r\n subtree_size=10,\r\n subtree_search=('random', 'bfs'),\r\n subtree_select=('random',),\r\n subtree_weight_what=('flops', 'size'),\r\n subtree_weight_pwr=(2,),\r\n parallel='auto',\r\n parallel_maxiter_steps=4,\r\n minimize='flops',\r\n progbar=False,\r\n inplace=False,\r\n ):\r\n tree = self if inplace else self.copy()\r\n\r\n # candidate trees\r\n num_keep = max(1, int(num_trees * restart_fraction))\r\n\r\n # how to rank the trees\r\n score = get_score_fn(minimize)\r\n\r\n # set up the initial 'forest' and parallel machinery\r\n pool = parse_parallel_arg(parallel)\r\n if pool is not None:\r\n is_worker = maybe_leave_pool(pool)\r\n # store the trees as futures for the entire process\r\n forest = [pool.scatter(tree)]\r\n maxiter = subtree_maxiter // parallel_maxiter_steps\r\n else:\r\n forest = [tree]\r\n maxiter = subtree_maxiter\r\n\r\n if progbar:\r\n import tqdm\r\n pbar = tqdm.tqdm(total=num_restarts)\r\n pbar.set_description(_describe_tree(tree))\r\n\r\n try:\r\n for _ in range(num_restarts):\r\n\r\n # on the next round take only the best trees\r\n forest = itertools.cycle(forest[:num_keep])\r\n\r\n # select some random configurations\r\n saplings = [{\r\n 'tree': next(forest),\r\n 'maxiter': maxiter,\r\n 'minimize': minimize,\r\n 'subtree_size': subtree_size,\r\n 'subtree_search': random.choice(subtree_search),\r\n 'select': random.choice(subtree_select),\r\n 'weight_pwr': random.choice(subtree_weight_pwr),\r\n 'weight_what': random.choice(subtree_weight_what),\r\n } for _ in range(num_trees)]\r\n\r\n if pool is None:\r\n forest = [_reconfigure_tree(**s) for s in saplings]\r\n res = [{'tree': t, **_get_tree_info(t)} for t in forest]\r\n else:\r\n # submit in smaller steps to saturate processes\r\n for _ in range(parallel_maxiter_steps):\r\n for s in saplings:\r\n s['tree'] = submit(pool, _reconfigure_tree, **s)\r\n\r\n # compute scores remotely then gather\r\n forest_futures = [s['tree'] for s in saplings]\r\n res_futures = [submit(pool, _get_tree_info, t)\r\n for t in forest_futures]\r\n res = [{'tree': tree_future, **res_future.result()}\r\n for tree_future, res_future in\r\n zip(forest_futures, res_futures)]\r\n\r\n # 
update the order of the new forest\r\n res.sort(key=score)\r\n forest = [r['tree'] for r in res]\r\n\r\n if progbar:\r\n pbar.update()\r\n if pool is None:\r\n d = _describe_tree(forest[0])\r\n else:\r\n d = submit(pool, _describe_tree, forest[0]).result()\r\n pbar.set_description(d)\r\n\r\n finally:\r\n if progbar:\r\n pbar.close()\r\n\r\n if pool is None:\r\n tree.set_state_from(forest[0])\r\n else:\r\n tree.set_state_from(forest[0].result())\r\n maybe_rejoin_pool(is_worker, pool)\r\n\r\n return tree", "def createNewForest():\n forest = {'width': WIDTH, 'height': HEIGHT}\n for x in range(WIDTH):\n for y in range(HEIGHT):\n if (random.randint(1, 10000) / 100) <= INITIAL_TREE_DENSITY:\n forest[(x, y)] = TREE # Start as a tree.\n else:\n forest[(x, y)] = EMPTY # Start as an empty space.\n return forest", "def compute_seeds(fitness):\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)", "def generateAndTrain(generationParameters, trainingParameters,\n resultsDir): \n \n \n generatedObjectTree = simpleRandomBifurcations(generationParameters[\"numberOfLevels\"],\n generationParameters[\"maxNumberOfNodes\"],\n bifurcationFunctionArguments=generationParameters[\"bifurcationsParameter\"],\n gaussian=generationParameters[\"gaussian\"])\n \n \n # ['BranchSizes', 'RootName', 'LevelSize', 'NumberOfNodes', 'NumberOfLevels']\n # print treeStats.allStats(generatedObjectTree)[\"NumberOfLevels\"]\n \n tuples = generateNTuples(generatedObjectTree,\n generationParameters[\"numberOfTuples\"],\n verbose=False,\n withChildren=generationParameters[\"withChildren\"],\n withNoise=generationParameters[\"withNoise\"])\n \n \n # trainedTree = obtainTree(tuples)\n allTrees, homeless, parentToDescendantStats, bestParentToDescendantStats = obtainTreeNoise2(tuples,\n trainingParameters[\"threshold\"],\n verbose=False)\n \n purity = purityFromGroundTruth(generatedObjectTree,\n allTrees,\n verbose=False)\n \n return generatedObjectTree, allTrees, purity", "def generate(seed_generation):\n length = len(seed_generation)\n # Keep the fittest 50%\n new_generation = seed_generation[:length/2]\n \n offspring = []\n while len(offspring) < length / 2:\n mum = roulette_wheel_selection(seed_generation)\n dad = roulette_wheel_selection(seed_generation)\n if similarity(mum, dad) > len(mum) /2:\n continue\n children = mum.breed(dad)\n offspring.extend(children)\n \n \n \n # Ensure the new generation is the right length\n new_generation.extend(offspring)\n new_generation = new_generation[:length]\n \n return new_generation", "def genetic_algorithm(self, data, label_col, tree_constructors, population_size=15, num_crossovers=3, val_fraction=0.25,\n num_iterations=5, seed=1337, tournament_size=3, prune=False, max_samples=3,\n nr_bootstraps=5, mutation_prob=0.25):\n np.random.seed(seed)\n\n feature_mins = {}\n feature_maxs = {}\n feature_column_names = list(set(data.columns) - set([label_col]))\n\n for feature in feature_column_names:\n feature_mins[feature] = np.min(data[feature])\n feature_maxs[feature] = np.max(data[feature])\n\n labels_df = DataFrame()\n labels_df[label_col] = data[label_col].copy()\n features_df = data.copy()\n features_df = features_df.drop(label_col, axis=1)\n\n data = features_df.copy()\n data[label_col] = labels_df[label_col]\n\n sss = StratifiedShuffleSplit(labels_df[label_col], 1, test_size=val_fraction, random_state=seed)\n\n for train_index, test_index in sss:\n train_features_df, test_features_df = 
features_df.iloc[train_index, :].copy(), features_df.iloc[test_index,\n :].copy()\n train_labels_df, test_labels_df = labels_df.iloc[train_index, :].copy(), labels_df.iloc[test_index,\n :].copy()\n train_features_df = train_features_df.reset_index(drop=True)\n test_features_df = test_features_df.reset_index(drop=True)\n train_labels_df = train_labels_df.reset_index(drop=True)\n test_labels_df = test_labels_df.reset_index(drop=True)\n train = data.iloc[train_index, :].copy().reset_index(drop=True)\n\n tree_list = bootstrap(train, label_col, tree_constructors, boosting=True, nr_classifiers=nr_bootstraps)\n for constructor in tree_constructors:\n tree = constructor.construct_classifier(train, train_features_df.columns, label_col)\n tree.populate_samples(train_features_df, train_labels_df[label_col].values)\n tree_list.append(tree)\n\n\n # Adding the random forest trees to the population\n rf = RFClassification()\n xgb = XGBClassification()\n\n feature_cols = list(train_features_df.columns)\n rf.construct_classifier(train, feature_cols, label_col)\n xgb_model = xgb.construct_classifier(train, feature_cols, label_col)\n\n # print 'Random forest number of estimators:', len(rf.clf.estimators_)\n\n for i, estimator in enumerate(rf.clf.estimators_):\n tree = self._convert_sklearn_to_tree(estimator, feature_cols)\n tree.populate_samples(train_features_df, train_labels_df[label_col].values)\n predicted_labels = tree.evaluate_multiple(test_features_df).astype(int)\n # accuracy = accuracy_score(test_labels_df[label_col].values.astype(str), predicted_labels.astype(str))\n # print 'RF tree', i, '/', len(rf.clf.estimators_), ':', accuracy\n tree_list.append(tree)\n\n n_classes = len(np.unique(train[label_col].values))\n if n_classes > 2:\n for idx, tree_string in enumerate(xgb_model.clf._Booster.get_dump()):\n tree = self.parse_xgb_tree_string(tree_string, train, feature_cols, label_col,\n np.unique(train[label_col].values)[idx % n_classes])\n tree_list.append(tree)\n else:\n for tree_string in xgb_model.clf._Booster.get_dump():\n tree = self.parse_xgb_tree_string(tree_string, train, feature_cols, label_col, 0)\n tree_list.append(tree)\n\n tree_list = [tree for tree in tree_list if tree is not None ]\n\n start = time.clock()\n\n for k in range(num_iterations):\n print \"Calculating accuracy and sorting\"\n tree_accuracy = []\n for tree in tree_list:\n predicted_labels = tree.evaluate_multiple(test_features_df)\n accuracy = accuracy_score(test_labels_df[label_col].values.astype(int), predicted_labels.astype(int))\n tree_accuracy.append((tree, accuracy, tree.count_nodes()))\n\n tree_list = [x[0] for x in sorted(tree_accuracy, key=lambda x: (-x[1], x[2]))[:min(len(tree_list), population_size)]]\n print(\"----> Best tree till now: \", [(x[1], x[2]) for x in sorted(tree_accuracy, key=lambda x: (-x[1], x[2]))[:min(len(tree_list), population_size)]])\n\n # Crossovers\n mngr = multiprocessing.Manager()\n return_dict = mngr.dict()\n jobs = []\n for i in range(num_crossovers):\n p = multiprocessing.Process(target=self._tournament_selection_and_merging, args=[tree_list, train_features_df, train_labels_df,\n test_features_df, test_labels_df, label_col,\n feature_column_names, feature_maxs, feature_mins,\n max_samples, return_dict, k * i + i, tournament_size])\n jobs.append(p)\n p.start()\n\n\n for proc in jobs:\n proc.join()\n\n for new_tree in return_dict.values():\n if new_tree is not None:\n print 'new tree added', accuracy_score(test_labels_df[label_col].values.astype(int), 
new_tree.evaluate_multiple(test_features_df).astype(int))\n tree_list.append(new_tree)\n\n if prune:\n print 'Pruning the tree...', new_tree.count_nodes()\n new_tree = new_tree.cost_complexity_pruning(train_features_df, train_labels_df[label_col], None, cv=False,\n val_features=test_features_df,\n val_labels=test_labels_df[label_col])\n print 'Done', new_tree.count_nodes(), accuracy_score(test_labels_df[label_col].values.astype(int), new_tree.evaluate_multiple(test_features_df).astype(int))\n tree_list.append(new_tree)\n\n # Mutation phase\n for tree in tree_list:\n value = np.random.rand()\n if value < mutation_prob:\n new_tree1 = self._mutate_shift_random(tree, train_features_df, train_labels_df[label_col].values)\n print 'new mutation added', accuracy_score(test_labels_df[label_col].values.astype(int),\n new_tree1.evaluate_multiple(test_features_df).astype(int))\n new_tree2 = self._mutate_swap_subtrees(tree, train_features_df, train_labels_df[label_col].values)\n print 'new mutation added', accuracy_score(test_labels_df[label_col].values.astype(int),\n new_tree2.evaluate_multiple(test_features_df).astype(int))\n tree_list.append(new_tree1)\n tree_list.append(new_tree2)\n\n\n end = time.clock()\n print \"Took \", (end - start), \" seconds\"\n start = end\n\n tree_accuracy = []\n for tree in tree_list:\n predicted_labels = tree.evaluate_multiple(test_features_df)\n accuracy = accuracy_score(test_labels_df[label_col].values.astype(int), predicted_labels.astype(int))\n tree_accuracy.append((tree, accuracy, tree.count_nodes()))\n\n\n print [x for x in sorted(tree_accuracy, key=lambda x: (-x[1], x[2]))[:min(len(tree_list), population_size)]]\n\n best_tree = sorted(tree_accuracy, key=lambda x: (-x[1], x[2]))[0][0]\n return best_tree", "def hyperparameter_selection_rf():\n # Number of trees in random forest\n n_estimators = [int(x) for x in np.linspace(start=100, stop=1200, num=12)]\n # Number of features to consider at every split\n max_features = ['auto', 'sqrt']\n # Maximum number of levels in tree\n max_depth = [int(x) for x in np.linspace(5, 30, num=6)]\n # Minimum number of samples required to split a node\n min_samples_split = [2, 5, 10, 15, 100]\n # Minimum number of samples required at each leaf node\n min_samples_leaf = [1, 2, 5, 10]\n\n random_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf}\n\n return(random_grid)", "def grow_depth_first(self, \n o, a, r=[], p=[], n=[], w=[], # Dataset.\n split_by = 'weighted', # Attribute to split by: action, value or both.\n impurity_weights = [1.,1.,1.], # Weight of each impurity component (if by='weighted').\n max_depth = np.inf, # Depth at which to stop splitting. \n min_samples_split = 2, # Min samples at a node to consider splitting. \n min_weight_fraction_split = 0, # Min weight fraction at a node to consider splitting.\n min_samples_leaf = 1, # Min samples at a leaf to accept split.\n min_split_quality = 0, # Min relative impurity gain to accept split.\n stochastic_splits = False, # Whether to samples splits proportional to impurity gain. 
Otherwise deterministic argmax.\n ):\n assert split_by in ('action','value','derivative','pick','weighted')\n if split_by in ('value','pick','weighted'): assert r != [], 'Need reward information to split by value.'\n if split_by in ('derivative','pick','weighted'): assert n != [], 'Need successor information to split by derivatives.'\n self.split_by = split_by\n self.impurity_weights = np.array(impurity_weights).astype(float) \n self.max_depth = max_depth\n self.min_samples_split = min_samples_split \n self.min_samples_leaf = min_samples_leaf\n self.min_weight_fraction_split = min_weight_fraction_split\n self.stochastic_splits = stochastic_splits\n self.min_split_quality = min_split_quality\n self.load_data(o, a, r, p, n, w)\n self.seed()\n def recurse(node, depth):\n if depth < self.max_depth and self.split(node):\n recurse(node.left, depth+1)\n recurse(node.right, depth+1)\n print('Growing depth first...')\n recurse(self.tree, 0)\n # List all the leaf integers.\n self.leaf_nints = self.get_leaf_nints()\n # Compute leaf transition probabilities, both marginal and conditional.\n self.compute_all_leaf_transition_probs()", "def simulate_graph(seed, cluster_sizes, del_factor, ins_factor):\n rand.seed(seed)\n cluster_boundaries = np.cumsum(cluster_sizes)\n print(\"#seed:\", seed)\n print(\"#deletion factor:\", del_factor)\n print(\"#insertion factor:\", ins_factor)\n optimal_costs = np.array([0])\n for c in range(0, len(cluster_sizes)-1):\n n_c = cluster_sizes[c+1]\n offset_c = cluster_boundaries[c]\n edges_c = generate_edges(n_c, offset_c)\n disturb_cluster(n_c, offset_c, edges_c, del_factor, optimal_costs)\n additional_edges(cluster_boundaries, ins_factor, optimal_costs)\n print(\"#optimal costs:\", optimal_costs)", "def grow_tree(self):\n\n decision_node = self.root\n internal_env = copy.copy(self.env)\n\n while (not decision_node.is_final) and decision_node.visits > 1:\n\n a = self.select(decision_node)\n\n new_random_node = decision_node.next_random_node(a, self._hash_action)\n\n (new_decision_node, r) = self.select_outcome(internal_env, new_random_node)\n\n new_decision_node = self.update_decision_node(new_decision_node, new_random_node, self._hash_space)\n\n new_decision_node.reward = r\n new_random_node.reward = r\n\n decision_node = new_decision_node\n\n decision_node.visits += 1\n cumulative_reward = self.evaluate(internal_env)\n\n while not decision_node.is_root:\n random_node = decision_node.father\n cumulative_reward += random_node.reward\n random_node.cumulative_reward += cumulative_reward\n random_node.visits += 1\n decision_node = random_node.father\n decision_node.visits += 1", "def test_random_forest_max_depth_parameter(params, X_train, X_test, y_train, y_test):", "def top_down_random(self, random_seed=None):\n configure = {k: -1 for k in self.features}\n\n # set root first\n configure[self.root] = 1\n\n def fill_child(node):\n if configure[node] == -1:\n assert False, \"check here\"\n\n if configure[node] == 0:\n for c in node.children:\n configure[c] = 0\n return\n\n if node.node_type == 'g': # grantee the group limits\n samples = random.sample(node.children, random.randint(node.g_d, node.g_u))\n for c in node.children:\n configure[c] = 1 if c in samples else 0\n return\n\n for c in node.children:\n if c.node_type == 'r' or c.node_type == 'm' or c.node_type == 'g':\n configure[c] = 1\n else:\n configure[c] = random.choice([0, 1])\n return\n\n self.pre_order(self.root, fill_child)\n\n return configure", "def evolve(self, elitism='on', save='off', probability=0.05, 
rate=0.05):\n if self.state == 'dead':\n\n self.member_fitness = [self.members[i].fitness for i in range(self.size)]\n\n self.fittest_brain = self.members[self.member_fitness.index(max(self.member_fitness))]\n\n if save == 'on':\n self.fittest_brain.save_as('fittest_brain')\n\n self.total_population_fitness = sum(self.member_fitness)\n\n print('Total population fitness is %s' % (self.total_population_fitness))\n\n self.mating_pool = [[self.members[i]] * round(self.member_fitness[i] * 1000 / self.total_population_fitness) for i in range(self.size)]\n\n self.mating_pool = [brain for sublist in self.mating_pool for brain in sublist]\n\n self.children = []\n\n if elitism == 'on':\n\n self.children.append(self.fittest_brain)\n\n for i in range(self.size - 1):\n parent1 = random.choice(self.mating_pool)\n parent2 = random.choice(self.mating_pool)\n child = crossover(parent1, parent2)\n child.mutate(probability, rate)\n self.children.append(child)\n else:\n for i in range(self.size):\n parent1 = random.choice(self.mating_pool)\n parent2 = random.choice(self.mating_pool)\n child = crossover(parent1, parent2)\n child.mutate(probability, rate)\n self.children.append(child)\n\n self.members = self.children\n\n '''\n We need to set the state of the first bird to alive as it has been taken from the previous generation and thus its state is dead from the previous generation\n '''\n self.members[0].state = 'alive'\n\n self.state = 'alive'\n self.generation += 1", "def greedy_build(nodes, priors=None, cutoff=200, considered=set(), uniq='', targets=[]):\n\n\t# Tracks frequency of states for each character in nodes\n\tcharacter_mutation_mapping = defaultdict(int)\n\n\t# G models the network that is returned recursively\n\tG = nx.DiGraph()\n\n\troot = root_finder(nodes)\n\n\t# Base case check for recursion, returns a graph with one node corresponding to the root of the remaining nodes\n\tif len(nodes) <= cutoff or len(nodes) == 1:\n\t\troot = root_finder(nodes)\n\t\tG.add_node(root)\n\t\treturn G, [[root, nodes]]\n\n\t# Accounting for frequency of mutated states per character, in order to choose the best split\n\tfor node in nodes:\n\t\tnode_list = node.split(\"_\")[0].split('|')\n\t\tfor i in range(0, len(node_list)):\n\t\t\tchar = node_list[i]\n\t\t\tif char != '0' and char != '-':\n\t\t\t\tcharacter_mutation_mapping[(str(i), char)] += 1\n #if char != '0':\n # if char == \"-\":\n # character_mutation_mapping[(str(i), char)] -= 1\n # else:\n # character_mutation_mapping[(str(i), char)] += 1\n\n\t# Choosing the best mutation to split on (ie character and state)\n\tcharacter, state = 0, 0\n\tmax_cost = 0\n\n\tmin_prior = 1\n\tif priors:\n\t\tfor i in priors.keys():\n\t\t\tfor j in priors[i].keys():\n\t\t\t\tmin_prior = min(min_prior, priors[i][j])\n\n\tfor i,j in character_mutation_mapping:\n\t\tif not (i,j) in considered:\n\t\t\tif not priors:\n\t\t\t\tif max_cost < character_mutation_mapping[(i, j)]:\n\t\t\t\t\tmax_cost = character_mutation_mapping[(i, j)]\n\t\t\t\t\tcharacter, state = i, j\n\t\t\telse:\n\t\t\t\tif j not in priors[int(i)]:\n\t\t\t\t\tpriors[int(i)][j] = min_prior\n\t\t\t\tif max_cost < -np.log(priors[int(i)][j]) * character_mutation_mapping[(i, j)]:\n\t\t\t\t\tmax_cost = -np.log(priors[int(i)][j]) * character_mutation_mapping[(i, j)]\n\t\t\t\t\tcharacter, state = i, j\n\tcharacter = int(character)\n\n\n\t# If there is no good split left, stop the process and return a graph with the remainder of nodes\n\tif character == 0 and state == 0:\n\t\tif len(nodes) == 
1:\n\t\t\tG.add_node(nodes[0])\n\t\telse:\n\t\t\tfor i in range(0, len(nodes)):\n\t\t\t\tif nodes[i] != root:\n\t\t\t\t\tG.add_edge(root, nodes[i])\n\t\treturn G, []\n\n\t# Splitting nodes based on whether they have the mutation, don't have the mutation, or are NA('-') in that character\n\t# Right split is where nodes with the mutation go, everyone else goes to left split or NA chars\n\tleft_split, right_split, NA_chars = [], [], []\n\tright_split_temp = []\n\tleft_split_temp = []\n\tfor node in nodes:\n\t\tnode_list = node.split('|')\n\t\tif node_list[character] == state:\n\t\t\tright_split.append(node)\n\t\telif node_list[character] == '-':\n\t\t\tNA_chars.append(node)\n\t\telse:\n\t\t\tleft_split.append(node)\n\n\n\t# Seperates all nodes with NA in the character chosen to be split upon\n\t# Puts in right split or left split based on which list shares more mutated characters with this string\n\tfor node in NA_chars:\n\t\tright_split_score = 0\n\t\tleft_split_score = 0\n\t\tnode_list = node.split('|')\n\t\tnum_not_missing = len([n for n in node_list if n != \"-\"])\n\t\tfor i in range(0, len(node_list)):\n\t\t\tif node_list[i] != '0' and node_list[i] != '-':\n\t\t\t\tfor node_2 in left_split:\n\t\t\t\t\tnode2_list = node_2.split('|')\n\t\t\t\t\tif node_list[i] == node2_list[i]:\n\t\t\t\t\t\tleft_split_score += 1\n\t\t\t\tfor node_2 in right_split:\n\t\t\t\t\tnode2_list = node_2.split('|')\n\t\t\t\t\tif node_list[i] == node2_list[i]:\n\t\t\t\t\t\tright_split_score += 1\n\n\t\tavg_left_split_score = left_split_score / float(len(left_split) * num_not_missing + 1)\n\t\tavg_right_split_score = right_split_score / float(len(right_split) * num_not_missing + 1)\n\n\t\tif avg_left_split_score < avg_right_split_score:\n\t\t\tright_split_temp.append(node)\n\t\telse:\n\t\t\tleft_split_temp.append(node)\n\n\tright_split += right_split_temp\n\tleft_split += left_split_temp\n\n\t# Add character, state that split occurred to already considered mutations\n\tconsidered.add((str(character), state))\n\tG = nx.DiGraph()\n\t#splitter = str(character) + \" \" + str(state) + \" (\" + uniq + \")\"\n\tsplitter = root\n\n\t# Recursively build left side of network (ie side that did not mutation at the character with the specific state)\n\tG.add_node(splitter)\n\tleft_subproblems = []\n\tleft_network = None\n\tif len(left_split) != 0:\n\t\tleft_root = root_finder(left_split)\n\t\t# if left_root not in left_split and left_root in targets:\n\t\t# \tleft_root = left_root + \"_unique\"\n\n\t\tleft_network, left_subproblems = greedy_build(left_split, priors, cutoff, considered.copy(), uniq + \"0\", targets=targets)\n\n\t\tleft_nodes = [node for node in left_network.nodes() if left_network.in_degree(node) == 0]\n\t\tdup_dict = {}\n\t\tfor n in left_network:\n\t\t\tif n in list(G.nodes()) and n != left_root:\n\t\t\t\tdup_dict[n] = n + \"_\" + str(hashlib.md5(left_root.encode('utf-8')).hexdigest())\n\t\tleft_network = nx.relabel_nodes(left_network, dup_dict)\n\t\tG = nx.compose(G, left_network)\n\t\tif root != left_root:\n\t\t\tG.add_edge(splitter, left_root, weight=0, label=\"None\")\n\n\t# Recursively build right side of network\n\tright_network, right_subproblems = greedy_build(right_split, priors, cutoff, considered.copy(), uniq + \"1\", targets=targets)\n\tright_nodes = [node for node in right_network.nodes() if right_network.in_degree(node) == 0]\n\tright_root = root_finder(right_split)\n\n\tdup_dict = {}\n\tfor n in right_network:\n\t\tif n in list(G.nodes()) and n != right_root:\n\t\t\tdup_dict[n] = n + \"_\" + 
str(hashlib.md5(right_root.encode('utf-8')).hexdigest())\n\tfor n in dup_dict:\n\t\trename_dict = {n: dup_dict[n]}\n\t\tif right_network.out_degree(n) != 0:\n\t\t\tright_network = nx.relabel_nodes(right_network, rename_dict)\n\t\telse:\n\t\t\trename_dict = {n: dup_dict[n]}\n\t\t\tG = nx.relabel_nodes(G, rename_dict)\n\n\tG = nx.compose(G, right_network)\n\t# if right_root not in right_split and right_root in targets:\n\t# \tright_root = right_root + \"_unique\"\n\t#for node in right_nodes:\n\tif root != right_root:\n\t\tif not priors:\n\t\t\tG.add_edge(splitter, right_root, weight=1, label = str(character) + \": 0 -> \" + str(state))\n\t\telse:\n\t\t\tG.add_edge(splitter, right_root, weight=-np.log(priors[int(character)][state]), label=str(character) + \": 0 -> \" + str(state))\n\n\n\treturn G, left_subproblems + right_subproblems", "def prejudge_stack_tree_train(self, train_seed, cv_seed):\n if self.save_auto_train_results:\n auto_train_path = auto_train_pred_path\n else:\n auto_train_path = None\n\n # useful_feature_list_l1 = [2, 7, 8, 10, 15, 23, 26, 32, 51, 53, 64, 69, 70, 73, 77, 86]\n useful_feature_list_l1 = None\n reuse_feature_list_final = range(87)\n\n era_list_n = preprocess.negative_era_list\n era_sign_train = np.array([0 if era in era_list_n else 1 for era in self.e_train])\n era_sign_test = utils.load_pkl_to_data(prejudged_data_path + 'binary_era_sign_test.p')\n\n hyper_params = {'n_valid': (4, 4),\n 'n_era': (20, 20),\n 'n_epoch': (1, 8),\n 'final_n_cv': 20,\n 'train_seed': train_seed,\n 'cv_seed': cv_seed,\n 'models_l1': ('lgb', 'xgb', 'dnn'),\n 'models_l2': (),\n 'model_final': 'lgb',\n 'num_boost_round_lgb_l1': 108,\n 'num_boost_round_xgb_l1': 110,\n 'num_boost_round_final': 88,\n 'useful_feature_list_l1': useful_feature_list_l1,\n 'reuse_feature_list_final': reuse_feature_list_final,\n 'scale_blender_final': True,\n 'save_epoch_results': False}\n\n layer1_params = ModelStacking.get_layer1_params(train_seed)\n # layer2_params = ModelStacking.get_layer2_params(train_seed)\n final_layer_params = ModelStacking.get_final_layer_params(train_seed)\n\n layers_params = [layer1_params,\n # layer2_params,\n final_layer_params]\n\n idx = self.train_args['idx']\n\n STK = stacking.PrejudgeStackTree(self.x_train, self.y_train, self.w_train, self.e_train,\n self.x_test, self.id_test, self.x_g_train, self.x_g_test,\n layers_params=layers_params, hyper_params=hyper_params,\n options=self.train_options)\n\n STK.stack(pred_path=stack_pred_path, auto_train_pred_path=auto_train_path, loss_log_path=loss_log_path,\n stack_output_path=stack_output_path, csv_log_path=csv_log_path+'stack_final_',\n era_list_n=era_list_n, era_sign_train=era_sign_train, era_sign_test=era_sign_test, csv_idx=idx)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the number of seeds given a fitness value.
def compute_seeds(fitness): seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \ (self.max_seeds-self.min_seeds) + self.min_seeds return round(seeds)
[ "def iterations(self, n, fitness_function):", "def calcFitness (self) :\n fitnessArray = [[8, 4, 2, 1],\n [16, 8, 4, 2],\n [32, 16, 8, 4],\n [64, 32, 16, 8]]\n # fitnessArray = [[160, 80, 5, 4],\n # [320, 40, 4, 3],\n # [640, 20, 3, 2],\n # [1280, 10, 2, 1]]\n fitness = 0\n for k in range(4) :\n for i in range (4) :\n fitness += self.grid[k,i] * fitnessArray[k][i]\n return (fitness / 100)", "def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. \"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))", "def calculate_fitness(self, phenotype) -> int:\n pass", "def evaluate(self, state):\n\n fitness = np.sum(state)\n self.num_evals += 1\n #print(self.num_evals)\n return fitness", "def fitness_function(self):\n fitness = 0\n weight = 0\n size_arti = 0\n for i in range(Number_Of_Items ):\n if self.gene[i] == 1:\n weight += items[i][0]\n fitness += items[i][1]\n size_arti += items[i][2]\n if (weight > Maximum_Weight and size_arti < Maximum_Size) or (weight < Maximum_Weight and size_arti > Maximum_Size):\n fitness = int(fitness / 2)\n elif weight < Maximum_Weight and size_arti < Maximum_Size:\n fitness = fitness\n elif weight > Maximum_Weight or size_arti > Maximum_Size:\n fitness = 0\n return fitness", "def fitness(individual):\n\n delays = simulate(individual)\n fitness = sum(delays)\n print(fitness)\n return (fitness,)", "def fitness(problem, population, point, dom_func):\n return len([1 for another in population if dom_func(problem, point, another)])", "def count(self,value = 1):\n n = 0\n for s in self.sample:\n if s == value:\n n += 1\n return n", "def calculate_fitness_value(self):\n sequence = ''.join(self.genes)\n if sequence in seq_to_fitness:\n self.fitness_value = seq_to_fitness[sequence]\n else:\n self.fitness_value = polly_stats.get_amount_of_bad_regions(\n self.genes, self.environment)", "def numberOfNodes( gen ):\n return int( scipy.sum( [ 3.**i for i in range( 1, gen + 2 ) ] ) )", "def set_total_fitness(self,value):\n \n #Se actualiza el valor de la variable correspondiente.\n self.__total_fitness = value", "def evaluate(self, state):\n\n fitness = 0\n\n for i in range(1, len(state)):\n if state[i] != state[i - 1]:\n fitness += 1\n\n return fitness", "def get_fitness_evaluation_count(self):\n return self._ea.evaluation.eval_count", "def calculate_fitness_test(self, **kwargs):\n if self.genes_test is None:\n raise ValueError(\"Genes test is not set!\")\n\n self.__fitness_test = self.fitness_function.calculate(self.__genes_test, **kwargs)\n self.num_fitness_eval += 1", "def fitness(self):\r\n history = self.history\r\n return sum(history) / len(history)", "def calc_sum_fitness(self):\n fitness: float = 0\n for c in self.characters:\n fitness += c.fitness\n self.sum_fitness = round(fitness, 3)", "def fitness_function(taken_loot):\n value_sum = 0\n for item in taken_loot:\n value_sum += item.value\n return value_sum", "def fitness(self) -> float:\n return self._fitness" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draws a random float number from a uniform distribution given by U[lower, upper].
def uniform(lower, upper): return lower + random.random() * (upper - lower)
[ "def uniform(lower: float, upper: float):\n return Float(lower, upper).uniform()", "def _rand_float(self, low, high):\n\n return self.np_random.uniform(low, high)", "def rand_uni_val() -> float:\n return random.uniform(0, 1)", "def random_float(low: float, high: float):\n seed = time.time()\n random.seed(seed)\n return random.uniform(low, high)", "def random_float():\n return (random() - 0.5) * 2", "def discrete_uniform_sampler(upper_value):\n return int(np.random.random() * upper_value)", "def _random_float(self):\n return random.uniform(1, 2)", "def uniform_(self, low=0, high=1):\n return init_funcs.uniform_fill(self, low, high)", "def random_value(low, high, decimals=2):\n return round(random.uniform(low, high),decimals)", "def random_value(low, high, decimals=2):\n return round(random.uniform(low, high), decimals)", "def uniform(*args):\n return _yarp.Random_uniform(*args)", "def rand_uniform(a, b):\n\n\treturn a + lcg.draw_rand_number() * (b - a)", "def sample_log_float(rng, low, high):\n return float(np.exp(rng.uniform(np.log(float(low)), np.log(float(high)))))", "def uniform_sample(upper, num):\n sample = []\n for i in range(num):\n value = random.randint(0, upper - 1)\n sample.append(value)\n return sample", "def _uniform(val_range):\r\n return np.random.uniform(val_range[0], val_range[1])", "def uniform(num_values=100000):\n data = generator.random(num_values)\n\n def distribution_function(x):\n return np.where((x >= 0) & (x <= 1), 1, 0)\n\n plot_histogram(data, r\"Rovnoměrné rozdělení na $\\langle0,1\\rangle$\", distribution_function, normalize=True)", "def generate_float(self):\n\n value = random()\n return value", "def random() -> float:\n ...", "def floatGenerator(min=-2 ** 31, max=2 ** 31):\n while True:\n yield random.uniform(min, max)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }