query (stringlengths 9–9.05k) | document (stringlengths 10–222k) | negatives (sequencelengths 19–20) | metadata (dict) |
---|---|---|---|
Determines the convective heat transfer coefficient, either free or forced. In the absence of any wind, the "free" wind_heat_transfer_coefficient is returned. If wind is present, this parameter is known as the "forced" wind_heat_transfer_coefficient. | def wind_heat_transfer_coefficient(self) -> float:
return 3.8 + 2 * self.wind_speed
# return 4.5 + 2.9 * self.wind_speed | [
"def thermal_conductivity(self):\n return self.fluid.conductivity(self.T_C)",
"def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.31916e-05 * Tk**2\n + 1.11629e-01 * Tk\n - 2.00043e00\n )\n return thermalConductivity * 1e-3",
"def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n\n return 2.45 * Tk / (86.334 + 0.0511 * Tk)",
"def thermal_conductivity(self):\n return self._thermal_conductivity",
"def compute_heat_transfer_coefficient(self, compute_heat_transfer_coefficient):\n\n self._compute_heat_transfer_coefficient = compute_heat_transfer_coefficient",
"def get_chiller_temperature(self) -> float:\n\n return self.send(self.cmd.GET_COOLING_ACT)",
"def getHeatFlux(self, T):\n\t\tQ = self.heat_transfer_coefficient * (self.T_wall - T)\n\t\treturn Q",
"def calc_mf_exchange_coupling(self):\n try:\n self.mf_exch_coupling = 3*self.eff_spin/(self.eff_spin+1)*constants.k*self._curie_temp\n except AttributeError:\n # on initialization self._curie_temp\n self.mf_exch_coupling = 0",
"def h_cp(self, temp=None):\n\t\t_t = temp or self.ref_temp\n\t\ttemp_term = (1.0/_t) - (1.0/self.ref_temp)\n\t\texp_term = np.exp(self._t_dep * temp_term)\n\t\treturn self._ref_h_cp * exp_term",
"def conductive_heat_flux(discr, eos, cv, grad_t):\n transport = eos.transport_model()\n return -transport.thermal_conductivity(eos, cv)*grad_t",
"def target_flow_temperature(self) -> Optional[float]:\n op_mode = self.operation_mode\n if op_mode is None:\n return None\n\n if op_mode in [\n ZONE_OPERATION_MODE_COOL_THERMOSTAT,\n ZONE_OPERATION_MODE_COOL_FLOW,\n ]:\n return self.target_cool_flow_temperature\n\n return self.target_heat_flow_temperature",
"def getHeatTransferCoefficient(self, FluidDensity):\n if FluidDensity > 5:\n return 5000\n else:\n return 100",
"def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646",
"def setHeatTransferCoeff(self, u):\n return _cantera.wall_setHeatTransferCoeff(self.__wall_id, u)",
"def get_concentration(self, e_fermi: float, temperature: float) -> float:\n if self.fixed_concentration is None:\n expfac = -self.get_formation_energy(e_fermi) / (kboltz * temperature)\n concentration = self.degeneracy * np.exp(expfac)\n else:\n concentration = self.fixed_concentration\n return concentration",
"def heatFlowRate(self):\n return _cantera.wall_Q(self.__wall_id)",
"def comp_fill_factor(self):\n if self.winding is None or self.winding.qs == 0:\n return 0\n else:\n (Nrad, Ntan) = self.winding.get_dim_wind()\n S_slot_wind = self.slot.comp_surface_wind()\n S_wind_act = (\n self.winding.conductor.comp_surface_active()\n * self.winding.Ntcoil\n * Nrad\n * Ntan\n )\n\n return S_wind_act / S_slot_wind",
"def calc_maintenance_cost (self):\n\n if str(self.comp_specs['operational costs']) \\\n != 'UNKNOWN':\n self.maintenance_cost = \\\n self.comp_specs['operational costs']\n else:\n self.maintenance_cost = \\\n (self.comp_specs['percent o&m'] / 100.0) * self.capital_costs\n #~ print 'self.maintenance_cost',self.maintenance_cost",
"def fixed_concentration(self) -> Optional[float]:\n return self._fixed_concentration"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a nice representation of the weather conditions. | def __repr__(self) -> str:
return (
"WeatherConditions("
f"ambient_temperature: {self.ambient_temperature:.3f}K, "
f"azimuthal_angle: {self.azimuthal_angle}deg, "
f"declination: {self.declination}deg, "
f"density: {self.density_of_air:.3f}kg/m^3, "
f"dynamic_viscosity: {self.dynamic_viscosity_of_air:.3f}kg/m*s, "
f"heat_capacity: {self.heat_capacity_of_air}:.3fJ/kg*K, "
f"irradiance: {self.irradiance:.3f}W/m^2, "
f"kinematic_viscosity: {self.kinematic_viscosity_of_air:.3f}m^2/s, "
f"sky_temperature: {self.sky_temperature:.3f}K, "
f"thermal_conductivity: {self.thermal_conductivity_of_air:.3f}W/m*K, "
f"thermal_expansion_coefficient: {self.thermal_expansivity_of_air:.3f}K^-1, "
f"wind_heat_transfer_coefficient: {self.wind_heat_transfer_coefficient:2f}W/m*K, "
f"wind_speed: {self.wind_speed:.3f}m/s, "
")"
) | [
"def print_conditions(self):\n _outstr = \"\"\n first = True\n for cond in self._conditions:\n if not first:\n _outstr += \", \"\n if cond in ThresholdCheck._default_min_conditions:\n _outstr += \"{:s}={:.2e}\".format(cond, self._conditions[cond])\n elif cond in ThresholdCheck._default_max_conditions:\n _outstr += \"{:s}={:d}\".format(cond, self._conditions[cond])\n first = False\n return _outstr",
"def stop_conditions_str(self):\n s = 'Stop Conditions'\n if self.stop_conditions:\n for sc in self.stop_conditions:\n s += '\\n- {0}'.format(sc)\n else:\n s += ' - N/A'\n return s",
"def weather(self) -> str:\n return self.period[\"weather\"]",
"def build_weather_string(observation):\n t = observation[\"time\"]\n top_line = \"%2.0d.%2.0d.%4.0d %2.0d:%2.0d\"\n top_data = (t.day, t.month, t.year, t.hour, t.minute)\n bottom_line =\"%3.0fC %s\"\n bottom_data = (observation[\"temp\"], observation[\"status\"])\n return (top_line % top_data, bottom_line % bottom_data)",
"def conditions(self, json):\n conditions = str(json['forecast']['simpleforecast']['forecastday'][0]['conditions'])\n return conditions",
"def condition_cn(self):\n if self._now_weather_data:\n return self._now_weather_data[\"cond\"][\"txt\"]\n else:\n return self._msg",
"def genWeather():\n\n weather = random.choice(weather_conditions.keys())\n condition = weather_conditions[weather]\n (tMax, tMin) = condition[\"temperature\"]\n (pMax, pMin) = condition[\"pressure\"]\n (hMax, hMin) = condition[\"humidity\"]\n\n return weather + \"|\" + str(round(random.uniform(tMax, tMin), 1)) + \"|\" + \\\n str(round(random.uniform(pMax, pMin), 1)) + \"|\" + \\\n str(random.randrange(hMax, hMin, -1))",
"def condition(self):\n if self._now_weather_data:\n return CONDITION_MAP[self._now_weather_data[\"cond\"][\"code\"]]\n else:\n return self._msg",
"def format_response(weather):\n try:\n name = weather['name']\n desc = weather['weather'][0]['description']\n temp = weather['main']['temp']\n final_str = 'City: %s \\nConditions: %s \\nTemperature(°F): %s ' % (name, desc, temp) \n except:\n final_str = 'There was a problem \\n getting that information'\n \n return final_str",
"def get_current_weather(self) -> str:\n desc = self.raw_current[\"weather\"][0][\"description\"].capitalize()\n temp = self._generate_temp_report(int(self.raw_current[\"temp\"]), int(self.raw_current[\"feels_like\"]))\n humidity = self.raw_current[\"humidity\"]\n wind = self._generate_wind_report(self.raw_current[\"wind_speed\"], self.raw_current[\"wind_deg\"])\n report = (\n f\"\\nCurrently in {self.loc_name}: \"\n f\"{desc} | \"\n f\"{temp} | \"\n f\"{humidity}% humid | \"\n f\"{wind}\"\n )\n return report",
"def __str__(self) -> str:\n return f'Average: {self.avg_temp} Low: {self.low_temp} ' \\\n f'High: {self.high_temp} Precipitation: {self.precipitation} ' \\\n f'Snow: {self.snowfall} Rain: {self.rainfall}'",
"def format_condition(self):\n condition_format = conditionFormat(self.dict_questions, self.dict_countries, self.year, self.list_bool)\n self.dict_questions = condition_format.run()",
"def sky_conditions(self, sep=\"; \"):\n text_list = []\n for skyi in self.sky:\n (cover, height, cloud) = skyi\n if cover in [\"SKC\", \"CLR\", \"NSC\"]:\n text_list.append(SKY_COVER[cover])\n else:\n if cloud:\n what = CLOUD_TYPE.get(cloud, \"unknown CLOUD_TYPE of %s\" % (cloud,))\n elif SKY_COVER[cover].endswith(\" \"):\n what = \"clouds\"\n else:\n what = \"\"\n label = \"%s %s\" % (SKY_COVER[cover], what)\n # HACK here to account for 'empty' entries with above format\n label = \" \".join(label.strip().split())\n if cover == \"VV\":\n label += \", vertical visibility to %s\" % (str(height),)\n else:\n label += \" at %s\" % (str(height),)\n text_list.append(label)\n return sep.join(text_list)",
"def weather(self):\r\n try:\r\n return str(self.connect()['weather'][0]['description'])\r\n except:\r\n return '@weather'",
"def get_weather_description(self):\n return self.weather_description",
"def get_condition(humidity, temperature):\n\n if (humidity > 80) & (temperature > 0):\n condition = \"Rain\"\n elif (humidity > 70) & (temperature < 0):\n condition = \"Snow\"\n else:\n condition = \"Sunny\"\n\n return condition",
"def conditions(self) -> Sequence['outputs.DiagnosticConditionResponse']:\n return pulumi.get(self, \"conditions\")",
"def get_hourly_weather(self) -> str:\n raw_dicts = self.raw_hourly[:24]\n hourly_dicts = [self._generate_hourly_report(d) for d in raw_dicts]\n reports = self._format_hourly_reports(hourly_dicts)\n report_string = (\n f\"\\nNext 24 hours in {self.loc_name}:\\n\"\n )\n for report in [self._generate_hourly_report_string(report) for report in reports]:\n report_string += report + \"\\n\"\n return report_string",
"def getWaterConditions(self):\n return self._getConditions(restrict=['CS-Eau'])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a Model from a formula and dataframe. | def from_formula(cls, formula, data, subset=None,
drop_cols=None, *args, **kwargs):
# TODO: provide a docs template for args/kwargs from child models
# TODO: subset could use syntax. GH#469.
if subset is not None:
data = data.loc[subset]
eval_env = kwargs.pop('eval_env', None)
if eval_env is None:
eval_env = 2
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
else:
eval_env += 1 # we're going down the stack again
missing = kwargs.get('missing', 'drop')
if missing == 'none': # with patsy it's drop or raise. let's raise.
missing = 'raise'
tmp = handle_formula_data(data, None, formula, depth=eval_env,
missing=missing)
((endog, exog), missing_idx, design_info) = tmp
if drop_cols is not None and len(drop_cols) > 0:
# TODO: not hit in tests
cols = [x for x in exog.columns if x not in drop_cols]
if len(cols) < len(exog.columns):
exog = exog[cols]
cols = list(design_info.term_names)
for col in drop_cols:
try:
cols.remove(col)
except ValueError:
pass # OK if not present
design_info = design_info.subset(cols)
kwargs.update({'missing_idx': missing_idx,
'missing': missing,
'formula': formula, # attach formula for unpickling
'design_info': design_info})
mod = cls(endog, exog, *args, **kwargs)
mod.formula = formula
# since we got a dataframe, attach the original
mod.data.frame = data
return mod | [
"def from_formula(\n cls, formula, data, subset=None, drop_cols=None, *args, **kwargs\n ):\n raise NotImplementedError(\"formulas are not supported for VAR models.\")",
"def convert(self, df):\n return convert_df_to_model(\n model_type=self.model_type, df=df,\n outcome_variables=self.outcome_variables,\n fixed_effects=self.fixed_effects,\n random_effect=self.random_effect,\n spline=self.spline,\n offset=self.offset,\n weight=self.weight\n )",
"def build_model(x,y):\n model = LinearRegression(normalize=True,fit_intercept=True)\n model.fit(x,y)\n return model",
"def create_lr() -> LogisticRegression:\n dataframe: pd.DataFrame = load_dataframe()\n preprocess_df(dataframe)\n x_train, x_test, y_train, y_test = get_train_test(dataframe)\n\n return compute_logistic_model(x_train, x_test, y_train, y_test)",
"def build_model():",
"def load_model():\n\n def model(row):\n raw_title = row[\"job_title_raw\"]\n clean_title = clean_raw_job_title(raw_title)\n return predict_soc_and_title(clean_title)\n\n return model",
"def apply_model(row):\n model = load_model()\n return model(row)",
"def from_dataframe(cls: 'BaseModel', data: 'DataFrame', *args, **kwargs) -> 'BaseModel':\n return cls(list(data.index),\n *args,\n **{k: v.values for k, v in data.items()},\n **kwargs)",
"def get_trained_model(dataframe, features, target, method='logistic'):\n if method == 'logistic':\n model = LogisticRegression()\n model.fit(dataframe[features], dataframe[target])\n return model\n else:\n raise NotImplementedError",
"def run_model(df_requests):\n \n # Get the list of POI IDs\n poi_list = df_requests.POIID.unique()\n \n # Initializing the list\n results = []\n # Loop through each POI\n for i in poi_list:\n # Apply the Mathematical model\n results.append(mathematical_model(df_requests,poi = i))\n # Scale the results\n scaled = minmaxscaler(results,-10,10)\n # Build the dataframe\n df_results = pd.DataFrame()\n df_results = df_results.assign(POIID=poi_list)\n df_results = df_results.assign(Score=scaled)\n return df_results",
"def dmatrix_from_formula(formula: str, df: pd.DataFrame) -> pd.DataFrame:\n\n # Parse the formula into a list of dimensions\n dimensions = formula.replace(\" \", \"\").replace(\"~-1+\", \"\").replace(\n \"~0+\", \"\").replace(\"~1+\", \"\").replace(\"~\", \"\").split(\"+\")\n return pd.concat([\n patsy.highlevel.dmatrix(\n f\"~ 0 + {dimension}\", df,\n return_type=\"dataframe\") for dimension in dimensions\n ], axis=1)",
"def convert(self, formula):\n raise NotImplementedError",
"def from_formula(cls, formula, data, *, sigma=None, weights=None):\n return super(SUR, cls).from_formula(formula, data, sigma=sigma, weights=weights)",
"def makeCalc(self, dataSet):\n\n #cyl = sasmodels.core.load_model_info('cylinder')\n #hs = sasmodels.core.load_model_info('hardsphere')\n #cylhs = sasmodels.core.load_model_info('cylinder@hardsphere')\n cylhmsa = sasmodels.core.load_model_info('cylinder@hayter_msa')\n\n # Build using c version instead of python. Avoids pyopencl\n model = sasmodels.core.build_model(cylhmsa, platform='dll')\n self.calculator = sasmodels.direct_model.DirectModel(dataSet, model)\n\n return",
"def r_formula(rdf: RDataFrame, target: str, predictors: List[str]) -> RFormula:\n\n lhs_items = [target, '~']\n rhs_items = []\n\n for predictor in predictors:\n rhs_items.append(predictor)\n rhs_items.append('+')\n\n rhs_items = rhs_items[:-1] # remove the last '+'\n all_items = lhs_items + rhs_items\n formula_string = ' '.join(all_items)\n\n formula = RFormula(formula_string)\n\n for predictor in predictors:\n formula.environment[predictor] = rdf.rx(predictor)\n\n return formula",
"def build_numeric_model(movie_df):\n import statsmodels.formula.api as smf\n #build a multivariate reg model\n linmodel_multi_f = smf.ols(formula='domestic_gross ~ opening_per_theater + opening_weekend_take + production_budget + widest_release + worldwide_gross', data=movie_df).fit()\n linmodel_multi_f.summary()",
"def calcModel(self):\n pass",
"def apply_model(model_obj, x_div, y_div=None):\n # List containing the available model that can be applied\n # model_obj must be a Foo class with info field containing one of the model type in AVAILABLE_MODEL\n AVAILABLE_MODEL = ['ridge_regression']\n\n if type(model_obj).__name__ != 'Foo':\n raise ValueError('model_obj argument is not an instance of the Foo class')\n if 'info' not in model_obj.field():\n raise ValueError('info about model_obj is not find: the model_obj you provide is not a model.')\n if model_obj.info.type not in AVAILABLE_MODEL:\n raise ValueError(str(model_obj.info.type) + ' is unknown for model application.')\n\n \n X = x_div.d\n if y_div is not None:\n Y = y_div.d\n else:\n Y = None\n n, p = X.shape\n\n # Model application\n if model_obj.info.type == 'ridge_regression':\n Beta = model_obj.beta\n mean_y = model_obj.mean_y\n Predy = np.dot(X, Beta.d) + mean_y.d\n if Y is not None:\n Error = Predy - Y\n Rmset = np.sqrt(np.mean(np.square(Error),axis=0))\n R2t = []\n for i in range(Beta.v.shape[0]):\n y_corr = np.corrcoef(np.ravel(Y), Predy[:,i])\n R2t.append(np.square(y_corr[0,1]))\n R2t = np.array(R2t)\n # Outputs\n rmset_div = util.Div(d=Rmset, i='RMSET', v=Beta.v)\n r2t_div = util.Div(d=R2t, i='R2T', v=Beta.v)\n predy_div = util.Div(d=Predy, i=x_div.i, v=Beta.v)\n\n out_obj = util.Foo(predy=predy_div, rmset=rmset_div, r2t=r2t_div)\n\n \n return out_obj",
"def parseL3FormulaWithModel(*args):\n return _libsbml.parseL3FormulaWithModel(*args)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Score vector of the model. The default implementation sums score_obs. This is the gradient of the log-likelihood with respect to each parameter. | def score(self, params, *args, **kwargs):
try:
# If an analytic score_obs is available, try this first before
# falling back to numerical differentiation below
return self.score_obs(params, *args, **kwargs).sum(0)
except NotImplementedError:
# Fallback in case a `loglike` is implemented but `loglikeobs`
# is not.
approx_func = (approx_fprime_cs
if self._use_approx_cs else approx_fprime)
return approx_func(params, self.loglike, args=args, kwargs=kwargs) | [
"def score_full(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n\n score_fe = np.zeros(self.k_fe, dtype=np.float64)\n score_re = np.zeros(self.k_re2, dtype=np.float64)\n\n # Handle the covariance penalty.\n if self.cov_pen is not None:\n score_re -= self.cov_pen.grad(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty.\n if self.fe_pen is not None:\n score_fe -= self.fe_pen.grad(fe_params)\n\n # resid' V^{-1} resid, summed over the groups (a scalar)\n rvir = 0.\n\n # exog' V^{-1} resid, summed over the groups (a k_fe\n # dimensional vector)\n xtvir = 0.\n\n # exog' V^{_1} exog, summed over the groups (a k_fe x k_fe\n # matrix)\n xtvix = 0.\n\n # V^{-1} exog' dV/dQ_jj exog V^{-1}, where Q_jj is the jj^th\n # covariance parameter.\n xtax = [0.,] * self.k_re2\n\n # Temporary related to the gradient of log |V|\n dlv = np.zeros(self.k_re2, dtype=np.float64)\n\n # resid' V^{-1} dV/dQ_jj V^{-1} resid (a scalar)\n rvavr = np.zeros(self.k_re2, dtype=np.float64)\n\n for k in range(self.n_groups):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n\n # The residuals\n expval = np.dot(exog, fe_params)\n resid = self.endog_li[k] - expval\n\n if self.reml:\n viexog = _smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, exog)\n xtvix += np.dot(exog.T, viexog)\n\n # Contributions to the covariance parameter gradient\n jj = 0\n vex = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n ex_r)\n vir = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n resid)\n for jj,mat in self._gen_dV_dPsi(ex_r):\n dlv[jj] = np.trace(_smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, mat))\n rvavr[jj] += np.dot(vir, np.dot(mat, vir))\n if self.reml:\n xtax[jj] += np.dot(viexog.T, np.dot(mat, viexog))\n\n # Contribution of log|V| to the covariance parameter\n # gradient.\n score_re -= 0.5 * dlv\n\n # Needed for the fixed effects params gradient\n rvir += np.dot(resid, vir)\n xtvir += np.dot(exog.T, vir)\n\n fac = self.n_totobs\n if self.reml:\n fac -= self.exog.shape[1]\n\n score_fe += fac * xtvir / rvir\n score_re += 0.5 * fac * rvavr / rvir\n\n if self.reml:\n for j in range(self.k_re2):\n score_re[j] += 0.5 * np.trace(np.linalg.solve(\n xtvix, xtax[j]))\n\n score_vec = np.concatenate((score_fe, score_re))\n\n if self._freepat is not None:\n return self._freepat.get_packed() * score_vec\n else:\n return score_vec",
"def score_model(self, length):\n train_score = self.dtr.score(self.X_train, self.y_train)\n test_score = self.dtr.score(self.X_test, self.y_test)\n self.scores.append([length, train_score, test_score])",
"def SVM_loss(scores, y, delta = 1.0):\n n_examples = scores.shape[1] \n errors = scores - scores[y,list(range(n_examples))] \n errors += delta # adding the delta\n errors = errors.clamp(0) # set to zero all elements smaller than zero\n errors[y,list(range(n_examples))] = 0 # set to zero all elements which were just (scores[i][j] - scores[i][j] + delta)\n loss = (errors.sum())/n_examples\n\n return loss",
"def prob_calibration_function(truthvec, scorevec, reg_param_vec='default', knots='sample',\n method='logistic', force_prob=True, eps=1e-15, max_knots=200,\n transform_fn='none', random_state=942, verbose=False, cv_folds=5,\n unity_prior_weight=1, unity_prior_gridsize=20):\n from sklearn import linear_model\n from sklearn.metrics import log_loss, make_scorer\n\n if (unity_prior_weight>0):\n scorevec_coda, truthvec_coda = create_yeqx_bias_vectors(unity_prior_gridsize)\n coda_wt = unity_prior_weight/unity_prior_gridsize\n weightvec = np.concatenate((np.ones(len(scorevec)), coda_wt * np.ones(len(scorevec_coda))))\n scorevec = np.concatenate((scorevec, scorevec_coda))\n truthvec = np.concatenate((truthvec, truthvec_coda))\n\n if transform_fn != 'none':\n scorevec = transform_fn(scorevec)\n\n knot_vec = np.unique(scorevec)\n if (knots == 'sample'):\n num_unique = len(knot_vec)\n if (num_unique > max_knots):\n smallest_knot, biggest_knot = knot_vec[0], knot_vec[-1]\n inter_knot_vec = knot_vec[1:-1]\n random.seed(random_state)\n random.shuffle(inter_knot_vec)\n reduced_knot_vec = inter_knot_vec[:(max_knots-2)]\n reduced_knot_vec = np.concatenate((reduced_knot_vec, [smallest_knot, biggest_knot]))\n reduced_knot_vec = np.concatenate((reduced_knot_vec, np.linspace(0, 1, 21)))\n if (unity_prior_weight>0):\n reduced_knot_vec = np.concatenate((reduced_knot_vec, scorevec_coda))\n knot_vec = np.unique(reduced_knot_vec)\n if verbose:\n print(\"Originally there were {} knots. Reducing to {} while preserving first and last knot.\".format(num_unique, len(knot_vec)))\n X_mat = _natural_cubic_spline_basis_expansion(scorevec, knot_vec)\n\n if (method == 'logistic'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 5, 61)\n if verbose:\n print(\"Trying {} values of C between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec), np.max(reg_param_vec)))\n reg = linear_model.LogisticRegressionCV(Cs=reg_param_vec, cv=StratifiedKFold(cv_folds, shuffle=True),\n scoring=make_scorer(log_loss, needs_proba=True, greater_is_better=False))\n if (unity_prior_weight>0):\n reg.fit(X_mat, truthvec, weightvec)\n else:\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found C = {}\".format(reg.C_))\n\n if (method == 'ridge'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 7, 71)\n if verbose:\n print(\"Trying {} values of alpha between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec),np.max(reg_param_vec)))\n reg = linear_model.RidgeCV(alphas=reg_param_vec, cv=KFold(cv_folds, shuffle=True), scoring=make_scorer(mean_squared_error_trunc,needs_proba=False, greater_is_better=False))\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found alpha = {}\".format(reg.alpha_))\n\n def calibrate_scores(new_scores):\n new_scores = np.maximum(new_scores,knot_vec[0]*np.ones(len(new_scores)))\n new_scores = np.minimum(new_scores,knot_vec[-1]*np.ones(len(new_scores)))\n if transform_fn != 'none':\n new_scores = transform_fn(new_scores)\n basis_exp = _natural_cubic_spline_basis_expansion(new_scores,knot_vec)\n if (method == 'logistic'):\n outvec = reg.predict_proba(basis_exp)[:,1]\n if (method == 'ridge'):\n outvec = reg.predict(basis_exp)\n if force_prob:\n outvec = np.where(outvec < eps, eps, outvec)\n outvec = np.where(outvec > 1-eps, 1-eps, outvec)\n return outvec\n\n return calibrate_scores",
"def combine_model_scores(self, scores):\n\n assert len(scores) == self.num_models, 'we need a vector of scores for each model in the ensemble'\n # this hack lets us do ad-hoc truncation of the vocabulary if we need to\n scores = [a[:, :self.word_dicts[i]['trg_size']-1] if self.word_dicts[i]['trg_size'] is not None else a\n for i, a in enumerate(scores)]\n scores = numpy.array(scores)\n\n # Note: this is another implicit batch size = 1 assumption\n scores = numpy.squeeze(scores, axis=1)\n\n # multiply weights along each row (rows correspond to the softmax output for a particular model)\n # Note the negative sign here, letting us treat the score as a cost to minimize\n all_weighted_scores = -numpy.log(scores) * self.model_weights[:, numpy.newaxis]\n\n # we pass these through so they can be used for optimization\n unweighted_scores = -(numpy.log(scores))\n\n combined_weighted_scores = numpy.sum(all_weighted_scores, axis=0)\n\n # We don't use the model weights with probs because we want them to sum to 1\n probs = numpy.sum(scores, axis=0) / float(self.num_models)\n\n return combined_weighted_scores, unweighted_scores, probs",
"def scoring_function(self, model, y_true, y_predicted_probability):",
"def score(self,x,**kwargs):\r\n if self.kfun != 'matrix' and len(self.sv): \r\n k = self.kfun(x,self.sv,**self.cparam)\r\n #print \"Kernel after test: \", k\r\n else:\r\n k = x\r\n \r\n \r\n self.W=self.alphas \r\n self.mat=self.kfun(np.array([self.sv[1]]), self.sv,**self.cparam) \r\n self.bias=self.svLabels[1]- np.dot((self.alphas*self.svLabels).T,self.mat.T) \r\n z=np.dot((self.alphas*self.svLabels).T,k.T)+self.bias\r\n \r\n #print \"bias: \", self.bias, \"\\nZ: \",z\r\n \r\n \r\n return z",
"def get_score(self, solution: np.array) -> float:\n pass",
"def score_funct(x):\n network.copy_memory(x)\n actual_output = []\n for input_data in training_input:\n output_data = network.compute_regression(input_data)\n actual_output.append(output_data)\n return ErrorCalculation.mse(np.array(actual_output), training_ideal)",
"def lm(self, lm_para=LmPara()):\r\n if self.doc_len == 0:\r\n return np.log(MIN_LM_SCORE)\r\n v_tf = np.maximum(self.v_tf, lm_para.min_tf)\r\n v_tf /= self.doc_len\r\n v_tf = np.maximum(v_tf, MIN_LM_SCORE)\r\n score = np.log(v_tf).dot(self.v_q_tf)\r\n\r\n return score",
"def loss(self, data, labels):\n data = self.transform_data(data)\n \n y_n = LogisticRegression.sigmoid(np.dot(data, self.w))\n t_n = labels\n \n return ((-t_n * np.log(y_n) - (1 - t_n) * np.log(1 - y_n)).sum() + self.l / 2 * np.dot(self.w.T, self.w)) / len(data)",
"def rslvq_loss(probabilities, targets, prototype_labels):\n whole, correct, _ = _get_class_probabilities(probabilities, targets,\n prototype_labels)\n\n likelihood = correct / whole\n log_likelihood = torch.log(likelihood)\n return -1.0 * log_likelihood",
"def objective(labels, probs):\n\n sum = 0\n N = 0\n for (l, p) in zip(labels, probs):\n sum += l*np.log(p) + (1-l)*np.log(1-p)\n N += 1\n return -sum/N",
"def scoreModel(self, model):\n\n # Compile the data to fit with the regression method\n x = self.parent.proc_xTraining[:, list(model)]\n\n # Check if we're using a regression model that requires non-NaN data\n if any(map(lambda x: self.regressionName in x, [\"Regr_ZScore\"])):\n y = self.parent.proc_yTraining\n\n else:\n y = self.parent.proc_yTraining[~np.isnan(x).any(axis=1)]\n x = x[~np.isnan(x).any(axis=1)]\n\n # Fit the model with the regression method and get the resulting score\n try:\n _, _, score, _ = self.regression.fit(x, y, crossValidate = True)\n\n except Exception as E:\n print(E)\n score = {self.regression.scoringParameters[i]: np.nan for i, scorer in enumerate(self.regression.scorers)}\n\n return score",
"def sklearn_model(model_log,folds,total_data,y):\r\n \r\n cv= cross_validation.StratifiedKFold(y,n_folds=folds)\r\n cv=list(cv)\r\n\r\n\r\n\r\n results=np.zeros(folds)\r\n\r\n for j in range(folds):\r\n \r\n data_train=total_data.loc[cv[j][0]]\r\n data_test=total_data.loc[cv[j][1]]\r\n \r\n data_train_label = y[cv[j][0]]\r\n data_test_label = y[cv[j][1]]\r\n \r\n #model_log=LogisticRegression(penalty='l2', random_state=2123)\r\n \r\n model_log.fit(data_train,data_train_label)\r\n p_log=model_log.predict(data_test)\r\n \r\n score=metrics.accuracy_score(y[cv[j][1]],p_log)\r\n results[j]=score\r\n #print(score)\r\n\r\n avg_score=np.mean(results)\r\n return(avg_score,results)",
"def log_likelihood_grad_rew(self, data, reward_model, bias_params):",
"def loss_gradient(self, targets, scores):\n m = targets * scores\n numer = 4. * (2. * numpy.arctan(m) - 1.)\n denom = 1. + m**2\n return numer/denom",
"def score(self, X):\n return _betadiv(X, parafac(self.factors_), self.beta).sum()",
"def score(self, model, context):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If no start_params are given, use reasonable defaults. | def _get_start_params(self, start_params=None):
if start_params is None:
if hasattr(self, 'start_params'):
start_params = self.start_params
elif self.exog is not None:
# fails for shape (K,)?
start_params = [0] * self.exog.shape[1]
else: # pragma: no cover
raise ValueError("If exog is None, then start_params should "
"be specified")
return start_params | [
"def _sets_default_params(self):\n pass",
"def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)",
"def add_default_params(self):\r\n self.params = class_from_string(\r\n BaseFramework._configuration._default_param_type\r\n )()",
"def test_construct_with_start(self):\n paginator = DummyAPIPaginator(\n client=None,\n url='http://example.com/api/list/?foo=1',\n start=10)\n query_params = paginator.request_kwargs['query']\n\n self.assertEqual(query_params['start'], 10)",
"def prep_streaming_params(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:\n params = self._invocation_params\n # if params[\"best_of\"] != 1:\n # raise ValueError(\"OpenAI only supports best_of == 1 for streaming\")\n if stop is not None:\n if \"stop\" in params:\n raise ValueError(\"`stop` found in both the input and default params.\")\n params[\"stop\"] = stop\n params[\"stream\"] = True\n return params",
"def set_params(self, **params):\n if not \"min_child_samples\" in params.keys():\n params.update({\"min_child_samples\":2})\n \n self.base_estimator.set_params(**params)",
"def set_params(self,\n **kwargs) -> None:\n # Warm start should be true to get .fit() to keep existing estimators.\n kwargs['warm_start'] = True\n\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def apply_startup_params(self):\n config = self._protocol.get_startup_config()\n \n if not isinstance(config, dict):\n raise InstrumentParameterException(\"Incompatible initialization parameters\")\n \n self.set_resource(config, NotUserRequested=True)",
"def test_startup_params(self):\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 10)\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 15)\n # make sure some value isnt the default value\n self.driver._protocol._param_dict.update(\"bar=20\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 20)\n self.driver._protocol._param_dict.update(\"baz=30\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 30)\n\n # pretend to manually adjust a few things:\n self.driver._protocol._param_dict.update(\"foo=1000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 1000)\n self.driver._protocol._param_dict.update(\"bar=1500\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 1500)\n self.driver._protocol._param_dict.update(\"baz=2000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n\n # pretend to go through the motions of a startup sequence\n self.driver.set_init_params({'foo': 100, \"bar\": 150, \"baz\": 200})\n\n # Now a virtual method in the protocol that asserts when not implemented\n # behavior proven in derived protocol classes\n # self.driver.apply_startup_params()\n\n # check the values on the other end\n # running_config = self.driver._protocol.get_cached_config()\n\n # confirm that the default values were set back appropriately.\n # self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 100)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 150)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bat\"), 40)\n\n ##### Integration tests for startup config in the SBE37 integration suite",
"def start(self, args = []):\n pass",
"def set_start(self, start):\n self._reset(self)\n self.start = start",
"def Params_defaultParams(): # real signature unknown; restored from __doc__\n pass",
"def test_parse_change_start_parameters(self, controller):\n\n start = {\"focus\": 0, \"lense-current\": 0, \"x-tilt\": 0}\n series = {\"variable\": \"focus\", \"start\": 1, \"end\": 10, \"step-width\": 1, \n \"on-each-point\": {\"variable\": \"lense-current\", \"start\": 1, \n \"end\": 3, \"step-width\": 0.1, \n \"on-each-point\": {\"variable\": \"x-tilt\", \n \"start\": 10, \n \"end\": 20, \n \"step-width\": 5}}}\n\n steps = pylo.MeasurementSteps(controller, start, series)\n\n assert steps.start[\"focus\"] == 1\n assert steps.start[\"lense-current\"] == 1\n assert steps.start[\"x-tilt\"] == 10",
"def start(total_param):\n global start_time\n global total\n\n if type(total_param) is list:\n total_param = len(total_param)\n if type(total_param) is not int:\n sys.exit(\"bad total_param. Should be list or int.\")\n\n start_time = time.time()\n total = total_param",
"def _default_params(self) -> Dict[str, Any]:\n normal_params = {\n \"temperature\": self.temperature,\n \"max_tokens\": self.max_tokens,\n \"top_p\": self.top_p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_penalty,\n \"n\": self.n,\n # \"best_of\": self.best_of,\n \"request_timeout\": self.request_timeout,\n \"logit_bias\": self.logit_bias,\n }\n return {**normal_params, **self.model_kwargs}",
"def __setup_params(self, kwargs):\n\n # default args provided by user. Use deepcopy to prevent from changing in place\n user_default_args = deepcopy(kwargs.get(\"default_args\", {}))\n \n # get all the parameters required by cwltool with preset by user defaults\n required_cwl_args = get_default_cwl_args(\n preset_cwl_args=user_default_args.get(\"cwl\", {})\n )\n\n # update default args provided by user with required by cwltool args\n user_default_args.update({\n \"cwl\": required_cwl_args\n })\n\n # default arguments required by CWL-Airflow (no need to put it in a separate function so far)\n required_default_args = {\n \"start_date\": days_ago(180),\n \"email_on_failure\": False,\n \"email_on_retry\": False,\n \"on_failure_callback\": task_on_failure,\n \"on_success_callback\": task_on_success,\n \"on_retry_callback\": task_on_retry\n }\n \n # Updated default arguments required by CWL-Airflow with those that are provided by user for cwltool\n required_default_args.update(user_default_args)\n\n # update kwargs with correct default_args and callbacks if those were not set by user\n kwargs.update(\n {\n \"default_args\": required_default_args,\n \"on_failure_callback\": kwargs.get(\"on_failure_callback\", dag_on_failure),\n \"on_success_callback\": kwargs.get(\"on_success_callback\", dag_on_success),\n \"schedule_interval\": kwargs.get(\"schedule_interval\", None)\n }\n )",
"def _set_start(self, coordinates):\n self._start = coordinates",
"def _SetRunParameters(self, params: Mapping[str, Any]) -> None:\n # Ideally YCSB should be refactored to include a function that just takes\n # commands for a run, but that will be a large refactor.\n FLAGS['ycsb_run_parameters'].unparse()\n FLAGS['ycsb_run_parameters'].parse([f'{k}={v}' for k, v in params.items()])",
"def set_params(self, **params):\n if('threshold' in params.keys()):\n self.threshold = params['threshold']\n if('subsample' in params.keys()):\n self.subsample = params['subsample']\n if('estimator' in params.keys()):\n self.estimator = params['estimator']\n if('n_folds' in params.keys()):\n self.n_folds = params['n_folds']\n if('stratify' in params.keys()):\n self.stratify = params['stratify']\n if('random_state' in params.keys()):\n self.random_state = params['random_state']\n if('n_jobs' in params.keys()):\n self.n_jobs = params['n_jobs']"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the F-test for a joint linear hypothesis. This is a special case of `wald_test` that always uses the F distribution. | def f_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None):
res = self.wald_test(r_matrix, cov_p=cov_p, scale=scale,
invcov=invcov, use_f=True)
return res | [
"def test_variational_expectations(likelihood_setup):\n likelihood = likelihood_setup.likelihood\n F = Datum.F\n Y = likelihood_setup.Y\n r1 = likelihood.log_prob(F, Y)\n r2 = likelihood.variational_expectations(F, tf.zeros_like(F), Y)\n assert_allclose(r1, r2, atol=likelihood_setup.atol, rtol=likelihood_setup.rtol)",
"def test_fwhm(self):\n for i, func in enumerate(self.fwhm_funcs):\n for j, arr1d in enumerate(self.input_arrays):\n res = func(arr1d)\n assert_allclose(res.fwhm, self.answers[i][j], atol=1e-4)",
"def test_basis_fem_result(self):\n testtrimesh = tm.TriMesh(self.testdatatriangles,\n self.testdatavertices)\n sb_fem = sb.SpharaBasis(testtrimesh, mode='fem')\n sb_fem_fun, sb_fem_freq = sb_fem.basis()\n self.assertTrue(\n np.allclose(np.absolute(np.matmul(np.matmul\n (np.transpose(sb_fem_fun),\n self.testdatamassmatrix),\n self.testdataspharabasisfemweight)),\n np.identity(np.size(sb_fem_freq)))\n and\n np.allclose(sb_fem_freq, self.testdataspharanatfreqfemweight)\n )",
"def test_f_divergence(alpha, dist1, dist2):\n def f_alpha(alpha):\n if alpha == 1:\n def f(x):\n return x * np.log2(x)\n elif alpha == -1:\n def f(x):\n return - np.log2(x)\n else:\n def f(x):\n return 4.0 / (1.0 - alpha**2) * (1.0 - np.power(x, (1.0 + alpha) / 2))\n return f\n\n def f_tsallis(alpha):\n if alpha == 1:\n def f(x):\n return -np.log2(x)\n else:\n def f(x):\n return (np.power(x, 1.0 - alpha) - 1.0) / (alpha - 1.0)\n return f\n\n test_functions = [\n (f_alpha(alpha), partial(alpha_divergence, alpha=alpha)),\n (f_tsallis(alpha), partial(tsallis_divergence, alpha=alpha)),\n ]\n\n for f, div_func in test_functions:\n div1 = f_divergence(dist1, dist2, f)\n div2 = div_func(dist1, dist2)\n assert div1 == pytest.approx(div2, abs=1e-1)",
"def testTestFns():\n\n # Check 2D Rosenbrock function, compare to the known global minimum\n test = lh.rosenbrockLnlike([1, 1])\n errMsg = \"2D Rosenbrock function is incorrect\"\n truth = 0\n assert np.allclose(test, truth), errMsg\n\n # Check 5D Rosenbrock function, compare to the known global minimum\n test = lh.rosenbrockLnlike([1, 1, 1, 1, 1])\n errMsg = \"5D Rosenbrock function is incorrect\"\n truth = 0\n assert np.allclose(test, truth), errMsg\n\n # Check sphere function, compare to the known global minimum\n test = lh.sphereLnlike([0, 0])\n errMsg = \"Sphere function is incorrect\"\n truth = 0\n assert np.allclose(test, truth), errMsg\n\n # Check 1D BayesOpt test function, compare to the known global maximum\n test = lh.testBOFn(-0.359)\n errMsg = \"1D test BayesOpt function is incorrect\"\n truth = 0.5003589\n assert np.allclose(test, truth), errMsg",
"def test_f_1_is_equal_to_sklearn_fbeta(self):\n\n y_true = [1, 0, 1, 1, 0, 1]\n y_pred = [0, 0, 1, 1, 1, 1]\n tp = 3\n fp = 1\n fn = 1\n tn = 1\n beta = 1.0\n\n f_beta_custom = Metrics.f_beta(tp=tp, fp=fp, fn=fn, beta=beta)\n f_beta_sklearn = f1_score(y_true=y_true, y_pred=y_pred)\n\n self.assertEqual(f_beta_custom, f_beta_sklearn)",
"def test_derivatives(loss, x0, y_true):\n loss = _LOSSES[loss](sample_weight=None)\n y_true = np.array([y_true], dtype=np.float64)\n x0 = np.array([x0], dtype=np.float64)\n\n def func(x: np.ndarray) -> np.ndarray:\n \"\"\"Compute loss plus constant term.\n\n The constant term is such that the minimum function value is zero,\n which is required by the Newton method.\n \"\"\"\n return loss.loss(\n y_true=y_true, raw_prediction=x\n ) + loss.constant_to_optimal_zero(y_true=y_true)\n\n def fprime(x: np.ndarray) -> np.ndarray:\n return loss.gradient(y_true=y_true, raw_prediction=x)\n\n def fprime2(x: np.ndarray) -> np.ndarray:\n return loss.gradient_hessian(y_true=y_true, raw_prediction=x)[1]\n\n optimum = newton(\n func,\n x0=x0,\n fprime=fprime,\n fprime2=fprime2,\n maxiter=100,\n tol=5e-8,\n )\n\n # Need to ravel arrays because assert_allclose requires matching\n # dimensions.\n y_true = y_true.ravel()\n optimum = optimum.ravel()\n assert_allclose(loss.link.inverse(optimum), y_true)\n assert_allclose(func(optimum), 0, atol=1e-14)\n assert_allclose(loss.gradient(y_true=y_true, raw_prediction=optimum), 0, atol=5e-7)",
"def fisher_exact_test(c1, c2, c3, c4):\n ret_type = TStruct(['pValue', 'oddsRatio', 'ci95Lower', 'ci95Upper'],\n [TFloat64(), TFloat64(), TFloat64(), TFloat64()])\n return _func(\"fet\", ret_type, c1, c2, c3, c4)",
"def fligner_policello_test(X, Y):\n P_i = []\n for x in X:\n count = 0\n for y in Y:\n if y <= x:\n count += 1\n P_i.append(count)\n\n Q_j = []\n for y in Y:\n count = 0\n for x in X:\n if x <= y:\n count += 1\n Q_j.append(count)\n\n P_i = np.array(P_i)\n Q_j = np.array(Q_j)\n P_bar = np.average(P_i)\n Q_bar = np.average(Q_j)\n V1 = sum((P_i - P_bar) ** 2)\n V2 = sum((Q_j - Q_bar) ** 2)\n z = (sum(Q_j) - sum(P_i)) / (2 * np.sqrt(V1 + V2 + P_bar * Q_bar))\n p_value = 2. * norm.sf(abs(z)) # two sided test\n\n return z, p_value",
"def test_partial_derivative_f1(self):\r\n # Verified with Wolfram Alpha.\r\n\r\n # f2 > 0\r\n obs = self.estimator1._partial_derivative_f1(2, 3, 10, 42)\r\n assert_almost_equal(obs, 1.22672908818)\r\n\r\n # f2 == 0\r\n obs = self.estimator1._partial_derivative_f1(2, 0, 10, 42)\r\n assert_almost_equal(obs, 1.272173492918482)\r\n\r\n # f1 == 0, f2 == 0\r\n obs = self.estimator1._partial_derivative_f1(0, 0, 10, 42)\r\n assert_almost_equal(obs, 1.2961664362634027)",
"def test_f_two_sample(self):\r\n\r\n # The expected values in this test are obtained through R.\r\n # In R the F test is var.test(x,y) different alternative hypotheses\r\n # can be specified (two sided, less, or greater).\r\n # The vectors are random samples from a particular normal distribution\r\n #(mean and sd specified).\r\n\r\n # a: 50 elem, mean=0 sd=1\r\n a = [-0.70701689, -1.24788845, -1.65516470, 0.10443876, -0.48526915,\r\n -0.71820656, -1.02603596, 0.03975982, -2.23404324, -0.21509363,\r\n 0.08438468, -0.01970062, -0.67907971, -0.89853667, 1.11137131,\r\n 0.05960496, -1.51172084, -0.79733957, -1.60040659, 0.80530639,\r\n -0.81715836, -0.69233474, 0.95750665, 0.99576429, -1.61340216,\r\n -0.43572590, -1.50862327, 0.92847551, -0.68382338, -1.12523522,\r\n -0.09147488, 0.66756023, -0.87277588, -1.36539039, -0.11748707,\r\n -1.63632578, -0.31343078, -0.28176086, 0.33854483, -0.51785630,\r\n 2.25360559, -0.80761191, 1.18983499, 0.57080342, -1.44601700,\r\n -0.53906955, -0.01975266, -1.37147915, -0.31537616, 0.26877544]\r\n\r\n # b: 50 elem, mean=0, sd=1.2\r\n b = [\r\n 0.081418743, 0.276571612, -\r\n 1.864316504, 0.675213612, -0.769202643,\r\n 0.140372825, -1.426250184, 0.058617884, -\r\n 0.819287409, -0.007701916,\r\n -0.782722020, -\r\n 0.285891593, 0.661980419, 0.383225191, 0.622444946,\r\n -0.192446150, 0.297150571, 0.408896059, -\r\n 0.167359383, -0.552381362,\r\n 0.982168338, 1.439730446, 1.967616101, -\r\n 0.579607307, 1.095590943,\r\n 0.240591302, -1.566937143, -\r\n 0.199091349, -1.232983905, 0.362378169,\r\n 1.166061081, -0.604676222, -\r\n 0.536560206, -0.303117595, 1.519222792,\r\n -0.319146503, 2.206220810, -\r\n 0.566351124, -0.720397392, -0.452001377,\r\n 0.250890097, 0.320685395, -\r\n 1.014632725, -3.010346273, -1.703955054,\r\n 0.592587381, -1.237451255, 0.172243366, -0.452641122, -0.982148581]\r\n\r\n # c: 60 elem, mean=5, sd=1\r\n c = [4.654329, 5.242129, 6.272640, 5.781779, 4.391241, 3.800752,\r\n 4.559463, 4.318922, 3.243020, 5.121280, 4.126385, 5.541131,\r\n 4.777480, 5.646913, 6.972584, 3.817172, 6.128700, 4.731467,\r\n 6.762068, 5.082983, 5.298511, 5.491125, 4.532369, 4.265552,\r\n 5.697317, 5.509730, 2.935704, 4.507456, 3.786794, 5.548383,\r\n 3.674487, 5.536556, 5.297847, 2.439642, 4.759836, 5.114649,\r\n 5.986774, 4.517485, 4.579208, 4.579374, 2.502890, 5.190955,\r\n 5.983194, 6.766645, 4.905079, 4.214273, 3.950364, 6.262393,\r\n 8.122084, 6.330007, 4.767943, 5.194029, 3.503136, 6.039079,\r\n 4.485647, 6.116235, 6.302268, 3.596693, 5.743316, 6.860152]\r\n\r\n # d: 30 elem, mean=0, sd =0.05\r\n d = [\r\n 0.104517366, 0.023039678, 0.005579091, 0.052928250, 0.020724823,\r\n -0.060823243, -0.019000890, -\r\n 0.064133996, -0.016321594, -0.008898334,\r\n -0.027626992, -0.051946186, 0.085269587, -\r\n 0.031190678, 0.065172938,\r\n -0.054628573, 0.019257306, -\r\n 0.032427056, -0.058767356, 0.030927400,\r\n 0.052247357, -\r\n 0.042954937, 0.031842104, 0.094130522, -0.024828465,\r\n 0.011320453, -0.016195062, 0.015631245, -0.050335598, -0.031658335]\r\n\r\n a, b, c, d = map(array, [a, b, c, d])\r\n self.assertEqual(map(len, [a, b, c, d]), [50, 50, 60, 30])\r\n\r\n # allowed error. 
This big, because results from R\r\n # are rounded at 4 decimals\r\n error = 1e-4\r\n\r\n self.assertFloatEqual(f_two_sample(a, a), (49, 49, 1, 1), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b), (49, 49, 0.8575, 0.5925),\r\n eps=error)\r\n self.assertFloatEqual(f_two_sample(b, a), (49, 49, 1.1662, 0.5925),\r\n eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b, tails='low'),\r\n (49, 49, 0.8575, 0.2963), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b, tails='high'),\r\n (49, 49, 0.8575, 0.7037), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, c),\r\n (49, 59, 0.6587, 0.1345), eps=error)\r\n # p value very small, so first check df's and F value\r\n self.assertFloatEqualAbs(f_two_sample(d, a, tails='low')[0:3],\r\n (29, 49, 0.0028), eps=error)\r\n assert f_two_sample(d, a, tails='low')[3] < 2.2e-16 # p value\r",
"def test_basic():\n\n epsilon = 0.00001\n\n Z = np.asarray([\n [73, 71, 52],\n [87, 74, 46],\n [72, 2, 7],\n [80, 89, 71]\n ])\n x = np.asarray([49, 67, 68, 20])\n\n d, res = fnnls(Z, x)\n\n expected_d = ([0.64953844,0,0]) \n\n assert(np.max(np.abs(d - expected_d)) < epsilon)",
"def test_w_f_approx(self):\n x = np.logspace(-3., 3., 100)\n y = np.logspace(-3., 3., 100)\n\n X, Y = np.meshgrid(x, y)\n\n w_f_app = self.gaussian_kappa_ellipse.w_f_approx(X+1j*Y)\n w_f_scipy = wofz(X+1j*Y)\n\n npt.assert_allclose(w_f_app.real, w_f_scipy.real, rtol=4e-5, atol=0)\n npt.assert_allclose(w_f_app.imag, w_f_scipy.imag, rtol=4e-5, atol=0)\n\n # check `derivatives()` method with and without `scipy.special.wofz()`\n x = 1.\n y = 1.\n e1, e2 = 5e-5, 0\n sigma = 1.\n amp = 2.\n\n # with `scipy.special.wofz()`\n gauss_scipy = GaussianEllipseKappa(use_scipy_wofz=True)\n f_x_sp, f_y_sp = gauss_scipy.derivatives(x, y, amp, sigma, e1, e2)\n\n # with `GaussEllipseKappa.w_f_approx()`\n gauss_approx = GaussianEllipseKappa(use_scipy_wofz=False)\n f_x_ap, f_y_ap = gauss_approx.derivatives(x, y, amp, sigma, e1, e2)\n\n npt.assert_almost_equal(f_x_sp, f_x_ap, decimal=4)\n npt.assert_almost_equal(f_y_sp, f_y_ap, decimal=4)",
"def test_partial_derivative_f2(self):\r\n # Verified with Wolfram Alpha.\r\n\r\n # f2 > 0\r\n obs = self.estimator1._partial_derivative_f2(2, 3, 10, 42)\r\n assert_almost_equal(obs, 0.9651585982441183)\r\n\r\n # f2 == 0\r\n obs = self.estimator1._partial_derivative_f2(2, 0, 10, 42)\r\n assert_almost_equal(obs, 0.9208698803111386)\r\n\r\n # f1 ==0, f2 == 0\r\n obs = self.estimator1._partial_derivative_f2(0, 0, 10, 42)\r\n assert_almost_equal(obs, 1.0)",
"def get_fscore(y_pred, y_test, rows, weights):\n unique_rows = np.unique(rows)\n unique_weights = np.array([weights[rows == elem].sum() for elem in unique_rows])\n rows = unique_rows\n weights = unique_weights\n\n # Compute F-Score\n num = ((y_test[rows] == y_pred[rows]) * weights).sum()\n den = weights.sum()\n score = num / den\n\n # Compute standard deviation of estimate\n var = ((((y_test[rows] == y_pred[rows]) - score) ** 2) * (weights ** 2)).mean() / (\n (weights.mean() ** 2) * (1.0 - ((weights ** 2).sum() / (weights.sum() ** 2)))\n )\n n = len(rows)\n trialstd = np.sqrt(var) / np.sqrt(n)\n\n return score, trialstd, den",
"def test_newton_rhapson(testFunctions, tol, printFlag): \n pass",
"def lfprob (dfnum, dfden, F):\r\n p = betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))\r\n return p",
"def errfunc(p,x,y):\n chis = 2*y / fitfunc(p,x) #values relative to current model\n probs = chi2.pdf(chis,2) #probability for each value\n return log(probs).sum() #function goodness",
"def _fexact(diffexp, not_diffexp, background, term, uniprot2entrez_map, EASE=True):\n\n if not diffexp:\n return 1.0\n\n # convert Uniprot ids to Entrez Gene ids\n term_genes = map_uniprot(term['genes'], uniprot2entrez_map)\n\n # convert all to sets\n term_genes = set(term_genes)\n diffexp = set(diffexp)\n not_diffexp = set(not_diffexp)\n background = set(background)\n \n # contingency table for fisher's exact test:\n # | g_e - 1 | g_ne | (e = diff. expressed, ne = not diff. expressed,\n # +–––––-----–+–-–––––+ g = geneset, ng = not geneset)\n # | ng_e | ng_ne |\n\n g_e = len(term_genes.intersection(diffexp))\n g_e = g_e - 1 if EASE else g_e\n\n if g_e < 1:\n return 1.0\n\n g_ne = len(term_genes.intersection(not_diffexp)) \n not_term_genes = set([gene for gene in background if gene not in term_genes])\n ng_e = len(not_term_genes.intersection(diffexp))\n ng_ne = len(not_term_genes.intersection(not_diffexp))\n table = array([[g_e, g_ne], [ng_e, ng_ne]])\n\n odds, pval = stats.fisher_exact(table, alternative='greater')\n return pval"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute a sequence of Wald tests for terms over multiple columns. This computes joint Wald tests for the hypothesis that all coefficients corresponding to a `term` are zero. `Terms` are defined by the underlying formula or by string matching. | def wald_test_terms(self, skip_single=False, extra_constraints=None,
combine_terms=None): # noqa:E501
result = self
if extra_constraints is None:
extra_constraints = []
if combine_terms is None:
combine_terms = []
design_info = getattr(result.model.data, 'design_info', None)
if design_info is None and extra_constraints is None:
raise ValueError('no constraints, nothing to do')
identity = np.eye(len(result.params))
constraints = []
combined = defaultdict(list)
if design_info is not None:
for term in design_info.terms:
cols = design_info.slice(term)
name = term.name()
constraint_matrix = identity[cols]
# check if in combined
for cname in combine_terms:
if cname in name:
combined[cname].append(constraint_matrix)
k_constraint = constraint_matrix.shape[0]
if skip_single:
if k_constraint == 1:
continue
constraints.append((name, constraint_matrix))
combined_constraints = []
for cname in combine_terms:
combined_constraints.append((cname,
np.vstack(combined[cname])))
else:
# check by exog/params names if there is no formula info
for col, name in enumerate(result.model.exog_names):
constraint_matrix = identity[col]
# check if in combined
for cname in combine_terms:
if cname in name:
combined[cname].append(constraint_matrix)
if skip_single:
continue
constraints.append((name, constraint_matrix))
combined_constraints = []
for cname in combine_terms:
combined_constraints.append((cname,
np.vstack(combined[cname])))
use_t = result.use_t
distribution = ['chi2', 'F'][use_t]
res_wald = []
index = []
for pair in constraints + combined_constraints + extra_constraints:
name, constraint = pair
wt = result.wald_test(constraint)
row = [wt.statistic.item(), wt.pvalue, constraint.shape[0]]
if use_t:
row.append(wt.df_denom)
res_wald.append(row)
index.append(name)
# distribution neutral names
col_names = ['statistic', 'pvalue', 'df_constraint']
if use_t:
col_names.append('df_denom')
# TODO: maybe move DataFrame creation to results class
table = pd.DataFrame(res_wald, index=index, columns=col_names)
res = WaldTestResults(None, distribution, None, table=table)
# TODO: remove temp again, added for testing
res.temp = constraints + combined_constraints + extra_constraints
return res | [
"def get_all(any, shard, shard_term_features, qterms):\n tmp = 1\n for t in qterms:\n if t in shard_term_features[shard]:\n cdf = shard_term_features[shard][t].df\n else:\n cdf = 0\n tmp *= cdf/any\n all = tmp * any\n return all",
"def test_alchemy_extraction(self):\n terms = web_extract_terms(self.query, service='alchemy')\n \n self.assert_(set(self.expected_terms) & set(terms))",
"def test_term(self):\n\t\tterm_one = schrodinger.term(0)\n\t\tself.assertEqual(1, term_one(0).numpy())\n\t\tterm_two = schrodinger.term(1)\n\t\tself.assertEqual(0, term_two(0).numpy())",
"def eval_all_combinations(docs, labels, punct_vals,\n feature_fns, min_freqs):\n ###TODO\n #pass\n withpunct = []\n withoutpunct = []\n for doc in docs:\n tokens_1 = tokenize(doc, keep_internal_punct=True)\n withpunct.append(tokens_1)\n #print(withpunct)\n tokens_2 = tokenize(doc, keep_internal_punct=False)\n withoutpunct.append(tokens_2)\n #print(withpunct)\n \n featuresFns = []\n for i in range(len(feature_fns)+1):\n for each in combinations(feature_fns, i):\n #print(each)\n if(set(each)):\n featuresFns.append((each))\n #print(featuresFns)\n \n result = []\n res = defaultdict(lambda:0)\n \n for fn in featuresFns:\n for p in punct_vals:\n for f in min_freqs:\n if p == True:\n tokens = withpunct\n else:\n tokens = withoutpunct\n X,y=vectorize(tokens, fn, min_freq=f)\n accuracy = cross_validation_accuracy(LogisticRegression(),X,labels,5)\n res = {'features':fn, 'punct':p , 'accuracy':accuracy, 'min_freq':f }\n result.append(res)\n \n result = sorted(result, key=lambda x:x['accuracy'], reverse=True)\n return result",
"def test_term_phrase_outcome():\n\n term_or_precise_phrase, no_file, freq_in_all_files, \\\n no_Gold, no_Resections, no_No_Surgery,\\\n no_Gold_absent_term, no_Resections_absent_term, no_No_Surgery_absent_term= \\\n term_phrase_outcome(\n r\"([eE][pP][iI][gG])|([aA][bB][dD][oO])\", \n path_to_folder=\"L:\\\\word_docs\\\\NLP\\\\both_done_copy\\\\\", \n stemmed=False,\n suppress_print_cycle=False,\n positive_files=[])\n \n assert (\n term_or_precise_phrase, no_file, freq_in_all_files, \\\n no_Gold, no_Resections, no_No_Surgery,\\\n no_Gold_absent_term, no_Resections_absent_term, no_No_Surgery_absent_term) ==\\\n ('([eE][pP][iI][gG])|([aA][bB][dD][oO])', 1209, 440,\n 65, 55, 142,\n 122, 262, 562)",
"def filter_or_lookups_terms(queryset, orm_lookups, search_terms):\n conditions = []\n for search_term in search_terms:\n queries = [Q(**{orm_lookup: search_term}) for orm_lookup in orm_lookups]\n conditions.append(reduce(operator.or_, queries))\n return queryset.filter(reduce(operator.or_, conditions))",
"def eval_all_combinations(docs, labels, punct_vals,\n feature_fns, min_freqs):\n# result = []\n\n# for punct_val in punct_vals:\n# tokens = [tokenize(doc, punct_val) for doc in docs]\n# for f in [comb for i in range(len(feature_fns)) for comb in combinations(feature_fns, i+1)]:\n# feature = list(f)\n\n# for min_freq in min_freqs:\n# clf = LogisticRegression()\n# X, vocab = vectorize(tokens, feature, min_freq)\n# accuracy = cross_validation_accuracy(clf, X, labels, 5)\n# result.append(dict(punct= punct_val, features= feature, min_freq= min_freq, accuracy = accuracy))\n\n# return sorted(result, key=lambda x:(-x['accuracy'],-x['min_freq']))\n clf = LogisticRegression()\n result = []\n output = []\n for l in range(1, len(feature_fns)+1):\n for c in combinations(feature_fns,l):\n result.append(c)\n\n for p in punct_vals:\n list_tok = [tokenize(d,p) for d in docs]\n for fl in result:\n for mf in min_freqs:\n dict_output = {}\n matrix,vocab = vectorize(list_tok, fl, mf)\n average_value = cross_validation_accuracy(clf, matrix, labels, 5)\n dict_output['features'] = fl\n dict_output['punct'] = p\n dict_output['accuracy'] = average_value\n dict_output['min_freq'] = mf\n output.append(dict_output)\n\n return sorted(output, key=lambda x: (-x['accuracy'], -x['min_freq']))",
"def test_null_distribution_wald(self, n_cells: int = 2000, n_genes: int = 100):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n sim = Simulator(num_observations=n_cells, num_features=n_genes)\n sim.generate_sample_description(num_batches=0, num_conditions=0)\n sim.generate()\n\n random_sample_description = pd.DataFrame({\n \"pseudotime\": np.random.random(size=sim.nobs)\n })\n\n test = de.test.continuous_1d(\n data=sim.X,\n continuous=\"pseudotime\",\n df=3,\n formula_loc=\"~ 1 + pseudotime\",\n formula_scale=\"~ 1\",\n factor_loc_totest=\"pseudotime\",\n test=\"wald\",\n sample_description=random_sample_description,\n quick_scale=True,\n batch_size=None,\n training_strategy=\"DEFAULT\",\n dtype=\"float64\"\n )\n summary = test.summary()\n\n # Compare p-value distribution under null model against uniform distribution.\n pval_h0 = stats.kstest(test.pval, 'uniform').pvalue\n\n logging.getLogger(\"diffxpy\").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)\n assert pval_h0 > 0.05, \"KS-Test failed: pval_h0 is <= 0.05!\"\n\n return True",
"def vectorize(self, terms):\n features = {}\n\n if self.parameters[LexiconFeature.PARAM_ENABLED] == 'false':\n return features\n\n tones = []\n if (self.terms_used == 'all'):\n tones = [self.get_tone(term) for term in terms]\n elif (self.used_terms == 'hashtags_only'):\n tones = [self.get_tone(term) for term in terms\n if len(term) > 0 and term[0] == '#']\n\n if (len(tones) == 0):\n tones.append(0)\n\n for function_name in self.functions:\n if (function_name == 'sum'):\n value = (sum(tones))\n elif (function_name == 'max'):\n value = max(tones)\n elif (function_name == 'min'):\n value = min(tones)\n else:\n raise ValueError(\n \"unexpected function: '{}'\".format(function_name))\n\n feature_name = \"{}_{}\".format(self.get_name(), function_name)\n features[feature_name] = utils.normalize(value)\n\n #\n # Calculate sum of cluster scores\n #\n # for cluster in self.bag_of_clusters_features:\n # cluster_tones = [self.get_cluster_tone(\n # cluster, cluster.get_cluster_id(word))\n # for word in terms if cluster.contains_word(word)]\n # if len(cluster_tones) == 0:\n # cluster_tones.append(0)\n\n # feature_name = \"{}_score_sum\".format(cluster.get_name())\n # value = sum(cluster_tones)\n # features[feature_name] = utils.normalize(value)\n\n return features",
"def findTerms(self, text, terms, scope=50, includeAll=True):\n\t\tlistOfResults = list()\n\t\tlistOfMatchesMain = list()\n\t\tlistOfMatchesSecondary = list()\n\n\t\tappend = listOfResults.append\n\t\treplace\t= str.replace\n\n\t\tkeywordIndices = self.find(text, terms[0])\n\n\t\t# loop through the indices and check for dependencies if terms list has more than 1 term\n\t\tfor indices in keywordIndices:\n\n\t\t\tleading = text[indices[0]-scope:indices[0]]\n\t\t\ttrailing = text[indices[0]:indices[0]+scope]\n\n\t\t\tleading = replace(replace(leading, '\\n', '_'), '\\t', ' ') \n\t\t\ttrailing = replace(replace(trailing, '\\n', '_'), '\\t', ' ') \n\n\t\t\t# if terms list has more than 1 term (i.e., contextual terms), see if present within scope\n\t\t\tif len(terms) > 1:\n\n\t\t\t\t# loop through the contextual terms and check for presence within scope\n\t\t\t\tfor term in terms[1:]:\n\n\t\t\t\t\t# if term in either leading or trailing\n\t\t\t\t\tif (replace(term, '*', '') in leading.lower()) or (replace(term, '*', '') in trailing.lower()):\n\n\t\t\t\t\t\t# if '*' in term, do not add this context\n\t\t\t\t\t\tif '*' in term:\n\t\t\t\t\t\t\tpass\n\n\t\t\t\t\t\t# if '*' not indicated, add this context\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\texcerpt = leading + trailing\n\n\t\t\t\t\t\t\tif excerpt not in listOfResults:\n\t\t\t\t\t\t\t\tif includeAll==True:\n\t\t\t\t\t\t\t\t\tappend(excerpt+'\\t'+text[indices[0]:indices[1]]+'\\t'+term)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tappend(excerpt)\n\n\t\t\t# if terms list has 1 term, just append the excerpt\n\t\t\telse:\n\n\t\t\t\texcerpt = leading + trailing\n\n\t\t\t\tif excerpt not in listOfResults:\n\t\t\t\t\tif includeAll==True:\n\t\t\t\t\t\tappend(excerpt+'\\t'+text[indices[0]:indices[1]]+'\\t')\n\t\t\t\t\telse:\n\t\t\t\t\t\tappend(excerpt)\n\n\t\treturn listOfResults",
"def compare_css_terms(self, x_inputs, x_samples, full_path):\n \n self.load_model_params(full_path)\n \n data_term = self.get_data_term()\n is_term = self.get_is_term()\n \n diff_var = T.log(is_term - data_term)\n \n self.add_p_tilda()\n \n get_css_diff = theano.function(inputs = [self.x, self.x_tilda],\n outputs = [diff_var, self.p_tilda])\n \n diff_val, p_tilda_vals = get_css_diff(x_inputs, x_samples)\n \n return diff_val, p_tilda_vals",
"def test_similarity_Lin_bad_ids(self):\n self.assertEqual(self.sim.calculateTermSimilarity(\"bad_id\",\"bad_id2\"),0)\n self.assertEqual(self.mat_sim_mf.calculateTermSimilarity(\"bad_id\",\"bad_id2\"),0)",
"def corrected_ttr(n_terms, n_words):\n if n_words == 0:\n return 0\n return n_terms / math.sqrt(2 * n_words)",
"def _score_terms(self, terms_list):\n terms_scores = {}\n total_terms = 0\n\n for terms, coeff, split, ivtidx in terms_list:\n if not terms:\n continue\n # Swap ivtidx name for inverted index definition dict\n ivtidx = self._inverted_indexes[ivtidx]\n if not isinstance(terms, (basestring, list, tuple)):\n raise ValueError, \"Invalid type (%s) for ATTR_INVERTED_INDEX attribute. \" \\\n \"Only sequence, unicode or str allowed.\" % str(type(terms))\n\n if isinstance(terms, (list, tuple)):\n parsed = terms\n else:\n if callable(split):\n parsed = split(terms)\n else:\n parsed = split.split(terms)\n\n for term in parsed:\n if not term or (ivtidx['max'] and len(term) > ivtidx['max']) or \\\n (ivtidx['min'] and len(term) < ivtidx['min']):\n continue\n\n term = str_to_unicode(term)\n lower_term = term.lower()\n\n if ivtidx['ignore'] and lower_term in ivtidx['ignore']:\n continue\n if lower_term not in terms_scores:\n terms_scores[lower_term] = [term, coeff]\n else:\n terms_scores[lower_term][1] += coeff\n total_terms += 1\n\n # Score based on term frequency in document. (Add weight for\n # non-dictionary terms? Or longer terms?)\n for lower_term, score in terms_scores.items():\n terms_scores[lower_term][1] = math.sqrt(terms_scores[lower_term][1] / total_terms)\n return dict(terms_scores.values())",
"def _score_terms(self, terms_list):\n terms_scores = {}\n total_terms = 0\n\n for terms, coeff, split, ivtidx in terms_list:\n if not terms:\n continue\n # Swap ivtidx name for inverted index definition dict\n ivtidx = self._inverted_indexes[ivtidx]\n if not isinstance(terms, (str, list, tuple)):\n raise ValueError(\"Invalid type (%s) for ATTR_INVERTED_INDEX attribute. \" \\\n \"Only sequence, unicode or str allowed.\" % str(type(terms)))\n\n if isinstance(terms, (list, tuple)):\n terms = [tostr(term) for term in terms]\n parsed = terms\n else:\n terms = tostr(terms)\n if callable(split):\n parsed = list(split(terms))\n else:\n parsed = split.split(terms)\n\n for term in parsed:\n if not term or (ivtidx['max'] and len(term) > ivtidx['max']) or \\\n (ivtidx['min'] and len(term) < ivtidx['min']):\n continue\n\n lower_term = term.lower()\n\n if ivtidx['ignore'] and lower_term in ivtidx['ignore']:\n continue\n if lower_term not in terms_scores:\n terms_scores[lower_term] = [term, coeff]\n else:\n terms_scores[lower_term][1] += coeff\n total_terms += 1\n\n # Score based on term frequency in document. (Add weight for\n # non-dictionary terms? Or longer terms?)\n for lower_term, score in terms_scores.items():\n terms_scores[lower_term][1] = math.sqrt(terms_scores[lower_term][1] / total_terms)\n return dict(terms_scores.values())",
"def containsTerm_0(cls, body, term):\n for curTerm in body:\n if curTerm == term:\n return True\n if isinstance(curTerm, (GdlFunction, )):\n if cls.containsTerm((curTerm).getBody(), term):\n return True\n return False",
"def dataframe_filter(df, column, terms):\n return df[column].str.contains(\"|\".join(terms), na=False, case=False)",
"def _apply_rule_homothety(self, operands):\n return sum(\n (\n self._apply_rule_homothety_linear(list(group))\n if islinear\n else list(group)\n for islinear, group in groupby(operands, lambda o: o.flags.linear)\n ),\n [],\n )",
"def test(dfa, words):\n for word in words:\n try:\n dfa.test(word)\n except AssertionError as e:\n logging.error('ERROR: %s\\n' % e.message)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Formats dictated text to camel case. | def camel_case_text(text):
newText = format_camel_case(text)
Text("%(text)s").execute({"text": newText}) | [
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def camelHump( text ):\n # make sure the first letter is upper case\n output = ''.join([ capitalize(word) for word in words(text) ])\n if ( output ):\n output = output[0].lower() + output[1:]\n return output",
"def _camelify(words):\n newText = ''\n for word in words:\n if newText == '':\n newText = word[:1].lower() + word[1:]\n else:\n newText = '%s%s' % (newText, word.capitalize())\n return newText",
"def sentence_case(camel_case_text: str) -> str:\n if camel_case_text == '':\n return camel_case_text\n sentence_case_text = re.sub('([A-Z])', r' \\1', camel_case_text)\n return sentence_case_text[:1].upper() + sentence_case_text[1:].lower()",
"def underscore_to_camelcase(text):\n text = text.replace('_', ' ').title()\n return text.replace(' ', '')",
"def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])",
"def to_camel_case(snake_case_word):\n pascal = to_pascal_case(snake_case_word)\n return pascal[0].lower() + pascal[1:]",
"def capitalize_text(text_input):\n print(text_input.capitalize())",
"def uppercase_text(text):\n newText = format_upper_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def UPPER(text):\n return text.upper()",
"def camelcase(value):\n parts = value.split('_')\n parts = [p.capitalize() for p in parts]\n cameled = ''.join(parts)\n return cameled[0].lower() + cameled[1:]",
"def transform_semantic(self, string):\r\n return string.replace(' ', '_').capitalize()",
"def camelcase_to_sentencecase(name: str) -> str:\n return stringcase.sentencecase(stringcase.snakecase(name)).lower()",
"def convert_to_camel_case(list_of_words):\n upper_case_word = [x.capitalize() for x in list_of_words]\n return ''.join(upper_case_word)",
"def camel(s):\n return s[0].upper() + s[1:]",
"def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()",
"def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))",
"def titlecase(text):\n\n words = re.split('\\s', text)\n line = []\n for word in words:\n if word.startswith('#') or \\\n INLINE_PERIOD.search(word) or \\\n UC_ELSEWHERE.match(word):\n line.append(word)\n continue\n if SMALL_WORDS.match(word):\n line.append(word.lower())\n continue\n line.append(CAPFIRST.sub(lambda m: m.group(0).upper(), word))\n\n line = \" \".join(line)\n\n line = SMALL_FIRST.sub(lambda m: '%s%s' % (\n m.group(1),\n m.group(2).capitalize()\n ), line)\n\n line = SMALL_LAST.sub(lambda m: m.group(0).capitalize(), line)\n\n line = SUBPHRASE.sub(lambda m: '%s%s' % (\n m.group(1),\n m.group(2).capitalize()\n ), line)\n\n return line"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
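The camel_case_text entry above delegates to a format_camel_case helper that is not included in the row, and the Text action comes from the Dragonfly speech-command framework. A rough, framework-free sketch of what such a helper might look like (the splitting rules here are an assumption, not the original implementation):

```python
import re

def format_camel_case(text):
    # Assumed splitting rules: break the dictated phrase on whitespace,
    # dashes and underscores; the original helper is not shown in this entry.
    words = [w for w in re.split(r"[\s_\-]+", text.strip()) if w]
    if not words:
        return ""
    # camelCase: first word lower-cased, each following word capitalised.
    return words[0].lower() + "".join(w.capitalize() for w in words[1:])

print(format_camel_case("format dictated text"))  # formatDictatedText
```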
Formats n words to the left of the cursor to camel case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python. | def camel_case_count(n):
saveText = _get_clipboard_text()
cutText = _select_and_cut_text(n)
if cutText:
endSpace = cutText.endswith(' ')
text = _cleanup_text(cutText)
newText = _camelify(text.split(' '))
if endSpace:
newText = newText + ' '
newText = newText.replace("%", "%%") # Escape any format chars.
Text(newText).execute()
else: # Failed to get text from clipboard.
Key('c-v').execute() # Restore cut out text.
_set_clipboard_text(saveText) | [
"def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def snake_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText.lower())\n newText = '_'.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def lowercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.lower()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def caps_fith_word(self):\r\n try:\r\n for i in range(4,len(self.words),5):\r\n self.words[i]=self.words[i].upper()\r\n logging.info(\"capitalizing every fifth word\")\r\n logging.info(self.words)\r\n except:\r\n logging.error(\"exception occurred : \")",
"def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])",
"def _camelify(words):\n newText = ''\n for word in words:\n if newText == '':\n newText = word[:1].lower() + word[1:]\n else:\n newText = '%s%s' % (newText, word.capitalize())\n return newText",
"def uppercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.upper()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def camelHump( text ):\n # make sure the first letter is upper case\n output = ''.join([ capitalize(word) for word in words(text) ])\n if ( output ):\n output = output[0].lower() + output[1:]\n return output",
"def word(self, maxSize, start=None):\n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n while 1:\n wd = self.gen(maxSize, start)\n if self.wordCheck(wd):\n return wd.capitalize()",
"def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def caps_third_letter(self):\r\n try:\r\n for i in range(len(self.words)):\r\n if len(self.words[i])>=3:\r\n flag=list(self.words[i])\r\n flag[2]=flag[2].upper()\r\n self.words[i]=''.join(flag)\r\n logging.info(\"capitalizing every 3rd letter of a word\")\r\n logging.info(self.words)\r\n except:\r\n logging.error(\"exception occurred : \")",
"def capitalize_word(self, e): # (M-c)\n self.l_buffer.capitalize_word()\n self.finalize()",
"def to_camelcase(s):\n words = re.split(\"[^a-zA-Z0-9]+\", s)\n return \"\".join(\n w.lower() if i is 0 else w.title() for i, w in enumerate(words))",
"def to_camel_case(snake_case_word):\n pascal = to_pascal_case(snake_case_word)\n return pascal[0].lower() + pascal[1:]",
"def abbrev_text(text, n):\n if len(text) < n:\n return text\n # Cut off at a whole word (delimited by space).\n cutoff = text.rfind(' ', 0, n-3)\n return text[:cutoff] + \"...\"",
"def CapWords(self, name):\n # split the filename on underscore\n name = name.split('_')\n\n # capitalize every word\n for i in xrange(len(name)):\n name[i-1] = name[i-1][0].upper() + name[i-1][1:]\n\n # return the space-delineated name\n return ' '.join(name)",
"def to_pascal_case(snake_case_word):\n parts = iter(snake_case_word.split(\"_\"))\n return \"\".join(word.title() for word in parts)",
"def words_capital_letter(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words_capital_letter = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words_capital_letter = number_of_words_capital_letter + sum(list(map(lambda x: x.istitle(), i.text.split())))\n return number_of_words_capital_letter"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
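All of the *_count commands in this collection follow the same pattern: save the clipboard, cut the n words left of the cursor, transform the cut text, and retype it (restoring the clipboard on failure). Only the transform differs between them. A small framework-free sketch of that shared step, assuming the text has already been cut (the Dragonfly Clipboard/Key/Text handling is omitted):

```python
def reformat_cut_text(cut_text, transform):
    """Apply a case transform to text cut from the editor, keeping a single
    trailing space if one was selected along with the words."""
    end_space = cut_text.endswith(' ')
    new_text = transform(cut_text.strip())
    if end_space:
        new_text += ' '
    # Dragonfly's Text action treats '%' as a format character, hence the escape.
    return new_text.replace('%', '%%')

# Each command then reduces to a one-line transform:
snake = reformat_cut_text("My Variable Name ", lambda t: '_'.join(t.lower().split()))
print(repr(snake))  # 'my_variable_name '
```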
Takes a list of words and returns a string formatted to camel case. | def _camelify(words):
newText = ''
for word in words:
if newText == '':
newText = word[:1].lower() + word[1:]
else:
newText = '%s%s' % (newText, word.capitalize())
return newText | [
"def convert_to_camel_case(list_of_words):\n upper_case_word = [x.capitalize() for x in list_of_words]\n return ''.join(upper_case_word)",
"def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])",
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))",
"def camelHump( text ):\n # make sure the first letter is upper case\n output = ''.join([ capitalize(word) for word in words(text) ])\n if ( output ):\n output = output[0].lower() + output[1:]\n return output",
"def to_camelcase(s):\n words = re.split(\"[^a-zA-Z0-9]+\", s)\n return \"\".join(\n w.lower() if i is 0 else w.title() for i, w in enumerate(words))",
"def capitalaze(words):\n words_capitalazed = []\n for word in words:\n words_capitalazed.append(word.capitalize())\n return words_capitalazed",
"def switch_case(str_list):\n string_list = []\n for word in str_list:\n string_list.append(word[0].capitalize() + word[1:])\n return string_list",
"def capitalize_text(text, words_list, marker=''):\n for word in words_list:\n text = capitalize_term_re(text, word, marker)\n return text",
"def to_camel_case(snake_case_word):\n pascal = to_pascal_case(snake_case_word)\n return pascal[0].lower() + pascal[1:]",
"def capitalizeWords( text ):\n return ' '.join([capitalize(word) for word in words(text)])",
"def convert_to_snake_case(list_of_words):\n word_with_underscores = '_'.join(list_of_words)\n return word_with_underscores.lower()",
"def camelcase(value):\n parts = value.split('_')\n parts = [p.capitalize() for p in parts]\n cameled = ''.join(parts)\n return cameled[0].lower() + cameled[1:]",
"def camel(s):\n return s[0].upper() + s[1:]",
"def abbreviate(words):\n pattern = r\"\"\"\n ([A-Z]) # capture the first capital letter\n [A-Z']* # followed by zero or more capital letters or apostrophes\n \"\"\"\n return ''.join(re.findall(pattern, words.upper(), re.VERBOSE))",
"def to_camel_case(snake_str):\n return ''.join(w.title() for w in snake_str.split('_'))",
"def break_camelcase_pythonic(string: str) -> str:\n return \"\".join(f\" {item}\" if item.isupper() else item for item in string)",
"def snake_to_camel_case(snake_str: str) -> str:\n\n words = snake_str.strip(\"_\").split(\"_\")\n return words[0] + \"\".join(word[:1].upper() + word[1:] for word in words[1:])",
"def underscore_to_camelcase(text):\n text = text.replace('_', ' ').title()\n return text.replace(' ', '')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
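Since _camelify is self-contained, its documented behaviour is easy to check directly; the function is copied from the entry above and the expected outputs are inferred from its code:

```python
def _camelify(words):  # copied from the entry above
    newText = ''
    for word in words:
        if newText == '':
            newText = word[:1].lower() + word[1:]
        else:
            newText = '%s%s' % (newText, word.capitalize())
    return newText

assert _camelify(['Format', 'dictated', 'text']) == 'formatDictatedText'
assert _camelify(['x']) == 'x'   # single word: only the first letter is lowered
assert _camelify([]) == ''       # empty input yields an empty string
```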
Formats dictated text to pascal case. | def pascal_case_text(text):
newText = format_pascal_case(text)
Text("%(text)s").execute({"text": newText}) | [
"def to_pascal(text: str) -> str:\n return text.title().replace(' ', '')",
"def to_pascal_case(snake_case_word):\n parts = iter(snake_case_word.split(\"_\"))\n return \"\".join(word.title() for word in parts)",
"def _pascal_case(arg: str):\n # replace _x with X\n tmp = re.sub(\n r\"(?<=[a-zA-Z])_([a-z])\",\n lambda c: c.group(1).upper(),\n arg\n )\n # upper-case first character\n tmp = re.sub(\n r\"^[a-z]\",\n lambda c: c.group(0).upper(),\n tmp\n )\n return tmp",
"def pascalcase(string):\n\n return capitalcase(camelcase(string))",
"def is_pascal_case(input):\n return REGEX_IS_PASCAL_CASE.match(input)",
"def snake_to_pascal(string):\n return string[0].upper() + re.sub('_([a-z])', lambda match: match.group(1).upper(), string[1:])",
"def titlecase(text):\n\n words = re.split('\\s', text)\n line = []\n for word in words:\n if word.startswith('#') or \\\n INLINE_PERIOD.search(word) or \\\n UC_ELSEWHERE.match(word):\n line.append(word)\n continue\n if SMALL_WORDS.match(word):\n line.append(word.lower())\n continue\n line.append(CAPFIRST.sub(lambda m: m.group(0).upper(), word))\n\n line = \" \".join(line)\n\n line = SMALL_FIRST.sub(lambda m: '%s%s' % (\n m.group(1),\n m.group(2).capitalize()\n ), line)\n\n line = SMALL_LAST.sub(lambda m: m.group(0).capitalize(), line)\n\n line = SUBPHRASE.sub(lambda m: '%s%s' % (\n m.group(1),\n m.group(2).capitalize()\n ), line)\n\n return line",
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def to_camel_case(snake_case_word):\n pascal = to_pascal_case(snake_case_word)\n return pascal[0].lower() + pascal[1:]",
"def pascal_case(arg):\n if not is_pascal_case(arg):\n raise argparse.ArgumentTypeError(\"'%s' must be PascalCase\" % arg)\n return arg",
"def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def camelHump( text ):\n # make sure the first letter is upper case\n output = ''.join([ capitalize(word) for word in words(text) ])\n if ( output ):\n output = output[0].lower() + output[1:]\n return output",
"def correct_case(text):\n words = text.split(' ')\n words_formatted = []\n\n for word in words:\n left_paren = False\n right_paren = False\n\n if word.startswith('('):\n left_paren = True\n word = word[1:]\n\n if word.endswith(')'):\n right_paren = True\n word = word[:-1]\n\n if (word == word.upper() and\n word not in ACCEPTABLE_CAPITALIZATIONS):\n word = word.capitalize()\n\n if left_paren:\n word = '(' + word\n if right_paren:\n word = word + ')'\n \n words_formatted.append(word)\n\n return ' '.join(words_formatted)",
"def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()",
"def pretty( text ):\n return ' '.join( [word.capitalize() for word in words(text)] )",
"def capitalize_text(text_input):\n print(text_input.capitalize())",
"def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))",
"def titleize(phrase):\n phrase_lower = phrase.lower().split()\n completed_string = \"\"\n for word in phrase_lower:\n completed_string = completed_string+ str(word).capitalize()+ \" \" \n return completed_string"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
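As with the camel-case entry, the format_pascal_case helper is not part of this row. The pascal_case_count variant further down uses text.title().replace(' ', ''), so a plausible framework-free equivalent (an assumption, not the original code) is:

```python
def format_pascal_case(text):
    # PascalCase: capitalise every word and drop the separators.
    # Note that str.title() also lower-cases interior capitals ("XMLParser" -> "Xmlparser").
    return text.strip().title().replace(' ', '')

print(format_pascal_case("format dictated text"))  # FormatDictatedText
```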
Formats n words to the left of the cursor to pascal case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python. | def pascal_case_count(n):
saveText = _get_clipboard_text()
cutText = _select_and_cut_text(n)
if cutText:
endSpace = cutText.endswith(' ')
text = _cleanup_text(cutText)
newText = text.title().replace(' ', '')
if endSpace:
newText = newText + ' '
newText = newText.replace("%", "%%") # Escape any format chars.
Text(newText).execute()
else: # Failed to get text from clipboard.
Key('c-v').execute() # Restore cut out text.
_set_clipboard_text(saveText) | [
"def to_pascal(text: str) -> str:\n return text.title().replace(' ', '')",
"def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def to_pascal_case(snake_case_word):\n parts = iter(snake_case_word.split(\"_\"))\n return \"\".join(word.title() for word in parts)",
"def snake_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText.lower())\n newText = '_'.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def camel_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = _camelify(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def lowercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.lower()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def caps_fith_word(self):\r\n try:\r\n for i in range(4,len(self.words),5):\r\n self.words[i]=self.words[i].upper()\r\n logging.info(\"capitalizing every fifth word\")\r\n logging.info(self.words)\r\n except:\r\n logging.error(\"exception occurred : \")",
"def word(self, maxSize, start=None):\n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n while 1:\n wd = self.gen(maxSize, start)\n if self.wordCheck(wd):\n return wd.capitalize()",
"def caps_third_letter(self):\r\n try:\r\n for i in range(len(self.words)):\r\n if len(self.words[i])>=3:\r\n flag=list(self.words[i])\r\n flag[2]=flag[2].upper()\r\n self.words[i]=''.join(flag)\r\n logging.info(\"capitalizing every 3rd letter of a word\")\r\n logging.info(self.words)\r\n except:\r\n logging.error(\"exception occurred : \")",
"def words_capital_letter(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words_capital_letter = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words_capital_letter = number_of_words_capital_letter + sum(list(map(lambda x: x.istitle(), i.text.split())))\n return number_of_words_capital_letter",
"def print_words(filename):\r\n counter_list = count_words(filename)\r\n counter_list.sort()\r\n\r\n print('\\n'.join([f'{a} {b}' for a, b in counter_list]))",
"def titlecase(text):\n\n words = re.split('\\s', text)\n line = []\n for word in words:\n if word.startswith('#') or \\\n INLINE_PERIOD.search(word) or \\\n UC_ELSEWHERE.match(word):\n line.append(word)\n continue\n if SMALL_WORDS.match(word):\n line.append(word.lower())\n continue\n line.append(CAPFIRST.sub(lambda m: m.group(0).upper(), word))\n\n line = \" \".join(line)\n\n line = SMALL_FIRST.sub(lambda m: '%s%s' % (\n m.group(1),\n m.group(2).capitalize()\n ), line)\n\n line = SMALL_LAST.sub(lambda m: m.group(0).capitalize(), line)\n\n line = SUBPHRASE.sub(lambda m: '%s%s' % (\n m.group(1),\n m.group(2).capitalize()\n ), line)\n\n return line",
"def _pascal_case(arg: str):\n # replace _x with X\n tmp = re.sub(\n r\"(?<=[a-zA-Z])_([a-z])\",\n lambda c: c.group(1).upper(),\n arg\n )\n # upper-case first character\n tmp = re.sub(\n r\"^[a-z]\",\n lambda c: c.group(0).upper(),\n tmp\n )\n return tmp",
"def pretty( text ):\n return ' '.join( [word.capitalize() for word in words(text)] )",
"def snake_to_pascal(string):\n return string[0].upper() + re.sub('_([a-z])', lambda match: match.group(1).upper(), string[1:])",
"def print_words(words):\n print('Here are the words you entered:')\n for i, word in enumerate(words, start=1):\n print('{}. {}'.format(i, word.title()))",
"def spaces(num: int) -> unicode:\n ...",
"def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret",
"def abbrev_text(text, n):\n if len(text) < n:\n return text\n # Cut off at a whole word (delimited by space).\n cutoff = text.rfind(' ', 0, n-3)\n return text[:cutoff] + \"...\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Formats dictated text to snake case. | def snake_case_text(text):
newText = format_snake_case(text)
Text("%(text)s").execute({"text": newText}) | [
"def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()",
"def snakecase_to_sentencecase(name: str) -> str:\n return stringcase.sentencecase(name).lower()",
"def _snake_case(display_name):\n str_re = re.compile('[{0}]'.format(re.escape(string.punctuation)))\n str = str_re.sub(' ', display_name)\n str = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', str)\n str = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', str).lower()\n return re.sub(' +', '_', str)",
"def convert_to_snake_case(list_of_words):\n word_with_underscores = '_'.join(list_of_words)\n return word_with_underscores.lower()",
"def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def snakecase(self, given_path):\n filename = os.path.basename(given_path)\n filename = first_cap_re.sub(r'\\1_\\2', filename)\n filename = all_cap_re.sub(r'\\1_\\2', filename).lower()\n return given_path.replace(os.path.basename(given_path), filename)",
"def convert_to_snake_case(camel_case_string):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', camel_case_string)\n s2 = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n return s2.replace('__', '_')",
"def to_pascal_case(snake_case_word):\n parts = iter(snake_case_word.split(\"_\"))\n return \"\".join(word.title() for word in parts)",
"def lower_case(self, text):\n return text.lower()",
"def to_snake(value):\r\n return TO_SNAKE_RE.sub(r\"_\\1\", value).lower()",
"def lowercase_text(text):\n newText = format_lower_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def to_snake_case(value: str) -> str:\n string_1: str = first_cap_re.sub(r\"\\1_\\2\", value)\n string_2: str = all_cap_re.sub(r\"\\1_\\2\", string_1).lower()\n return string_2",
"def _snake_to_camel_case(snake: str) -> str:\n components = snake.split(\"_\")\n return components[0] + \"\".join(component.title() for component in components[1:])",
"def to_snake_case(s: str) -> str:\n return re.sub(r\"(?!^)([A-Z]+)\", r\"_\\1\", s).lower()",
"def to_snake_case(str):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', str)\n s2 = re.sub('-', '_', s1)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s2).lower()",
"def to_snake_case(input):\n return re.sub(REGEX_TO_SNAKE_CASE, r\"_\\1\", input).lower()",
"def LOWER(text):\n return text.lower()",
"def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def to_api_case(key):\n return snakecase(key)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
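The snake-case helper is likewise not included in this row; snake_case_count below lower-cases the cut text and joins the words with underscores, so a minimal stand-in might be:

```python
import re

def format_snake_case(text):
    # Lower-case the phrase and join the words with underscores.
    words = re.split(r"\s+", text.strip().lower())
    return '_'.join(w for w in words if w)

print(format_snake_case("Format Dictated Text"))  # format_dictated_text
```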
Formats n words to the left of the cursor to snake case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python. | def snake_case_count(n):
saveText = _get_clipboard_text()
cutText = _select_and_cut_text(n)
if cutText:
endSpace = cutText.endswith(' ')
text = _cleanup_text(cutText.lower())
newText = '_'.join(text.split(' '))
if endSpace:
newText = newText + ' '
newText = newText.replace("%", "%%") # Escape any format chars.
Text(newText).execute()
else: # Failed to get text from clipboard.
Key('c-v').execute() # Restore cut out text.
_set_clipboard_text(saveText) | [
"def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def lowercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.lower()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def camel_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = _camelify(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def caps_fith_word(self):\r\n try:\r\n for i in range(4,len(self.words),5):\r\n self.words[i]=self.words[i].upper()\r\n logging.info(\"capitalizing every fifth word\")\r\n logging.info(self.words)\r\n except:\r\n logging.error(\"exception occurred : \")",
"def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret",
"def render_snake(var_words):\n return '_'.join(var_words)",
"def word(self, maxSize, start=None):\n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n while 1:\n wd = self.gen(maxSize, start)\n if self.wordCheck(wd):\n return wd.capitalize()",
"def convert_to_snake_case(list_of_words):\n word_with_underscores = '_'.join(list_of_words)\n return word_with_underscores.lower()",
"def uppercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.upper()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def _set_number_of_words(self, N):\n self.N_words_to_display = N",
"def abbrev_text(text, n):\n if len(text) < n:\n return text\n # Cut off at a whole word (delimited by space).\n cutoff = text.rfind(' ', 0, n-3)\n return text[:cutoff] + \"...\"",
"def caps_third_letter(self):\r\n try:\r\n for i in range(len(self.words)):\r\n if len(self.words[i])>=3:\r\n flag=list(self.words[i])\r\n flag[2]=flag[2].upper()\r\n self.words[i]=''.join(flag)\r\n logging.info(\"capitalizing every 3rd letter of a word\")\r\n logging.info(self.words)\r\n except:\r\n logging.error(\"exception occurred : \")",
"def print_words(filename):\r\n counter_list = count_words(filename)\r\n counter_list.sort()\r\n\r\n print('\\n'.join([f'{a} {b}' for a, b in counter_list]))",
"def to_pascal_case(snake_case_word):\n parts = iter(snake_case_word.split(\"_\"))\n return \"\".join(word.title() for word in parts)",
"def make_title(words):",
"def generate_text_owc(model: Dict[str, Set[str]], n: int) -> str:\n # ACCUMULATOR: a list of the randomly-generated words so far\n words_so_far = []\n # We've provided this template as a starting point; you may modify it as necessary.\n words_so_far.append(generate_new_word(model))\n for x in range(0, n-1):\n key = words_so_far[x]\n new_word = generate_next_word(model,key)\n if new_word == \".\":\n words_so_far[x] = words_so_far[x]+'.'\n new_word= generate_new_word(model)\n elif new_word == {}:\n new_word = generate_new_word(model)\n words_so_far.append(new_word)\n\n return str.join(' ', words_so_far)",
"def test20():\n\tdef highlight_word(sentence, word):\n\t\treturn(\" \".join(x) for x in sentence.split())\n\n\tprint(highlight_word(\"Have a nice day\", \"nice\"))\n\tprint(highlight_word(\"Shhh, don't be so loud!\", \"loud\"))\n\tprint(highlight_word(\"Automating with Python is fun\", \"fun\"))",
"def titlecase(text):\n\n words = re.split('\\s', text)\n line = []\n for word in words:\n if word.startswith('#') or \\\n INLINE_PERIOD.search(word) or \\\n UC_ELSEWHERE.match(word):\n line.append(word)\n continue\n if SMALL_WORDS.match(word):\n line.append(word.lower())\n continue\n line.append(CAPFIRST.sub(lambda m: m.group(0).upper(), word))\n\n line = \" \".join(line)\n\n line = SMALL_FIRST.sub(lambda m: '%s%s' % (\n m.group(1),\n m.group(2).capitalize()\n ), line)\n\n line = SMALL_LAST.sub(lambda m: m.group(0).capitalize(), line)\n\n line = SUBPHRASE.sub(lambda m: '%s%s' % (\n m.group(1),\n m.group(2).capitalize()\n ), line)\n\n return line"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Formats n words to the left of the cursor to upper case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python. | def uppercase_count(n):
saveText = _get_clipboard_text()
cutText = _select_and_cut_text(n)
if cutText:
newText = cutText.upper()
newText = newText.replace("%", "%%") # Escape any format chars.
Text(newText).execute()
else: # Failed to get text from clipboard.
Key('c-v').execute() # Restore cut out text.
_set_clipboard_text(saveText) | [
"def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def caps_fith_word(self):\r\n try:\r\n for i in range(4,len(self.words),5):\r\n self.words[i]=self.words[i].upper()\r\n logging.info(\"capitalizing every fifth word\")\r\n logging.info(self.words)\r\n except:\r\n logging.error(\"exception occurred : \")",
"def camel_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = _camelify(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def lowercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.lower()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def snake_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText.lower())\n newText = '_'.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def word(self, maxSize, start=None):\n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n while 1:\n wd = self.gen(maxSize, start)\n if self.wordCheck(wd):\n return wd.capitalize()",
"def abbrev_text(text, n):\n if len(text) < n:\n return text\n # Cut off at a whole word (delimited by space).\n cutoff = text.rfind(' ', 0, n-3)\n return text[:cutoff] + \"...\"",
"def caps_third_letter(self):\r\n try:\r\n for i in range(len(self.words)):\r\n if len(self.words[i])>=3:\r\n flag=list(self.words[i])\r\n flag[2]=flag[2].upper()\r\n self.words[i]=''.join(flag)\r\n logging.info(\"capitalizing every 3rd letter of a word\")\r\n logging.info(self.words)\r\n except:\r\n logging.error(\"exception occurred : \")",
"def print_upper_words(words):\r\n\r\n for word in words:\r\n print(word.upper())",
"def uppercase_lowercase_words(s):\n if not s:\n return None\n if not isinstance(s, str) :\n raise TypeError('Not a string')\n else:\n # take out the words\n s = s.split()\n news = \"\"\n # process the words based on indexing and combine them into a new string\n # V1 :\n # for i in range(len(s)):\n # if i % 2 == 0:\n # news += s[i].upper() + \" \"\n # else:\n # news += s[i].lower() + \" \"\n for i in range(len(s)):\n if i % 2 == 0:\n s[i] = s[i].upper()\n else:\n s[i] = s[i].lower()\n return \" \".join(s).strip()",
"def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret",
"def pretty( text ):\n return ' '.join( [word.capitalize() for word in words(text)] )",
"def print_upper_words(words):\n for word in words:\n print(word.upper())",
"def _set_number_of_words(self, N):\n self.N_words_to_display = N",
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def print_upper_words(words):\n \n for word in words:\n print(word.upper())",
"def capitalize_word(self, e): # (M-c)\n self.l_buffer.capitalize_word()\n self.finalize()",
"def capitalize_text(text, words_list, marker=''):\n for word in words_list:\n text = capitalize_term_re(text, word, marker)\n return text",
"def print_words(filename):\r\n counter_list = count_words(filename)\r\n counter_list.sort()\r\n\r\n print('\\n'.join([f'{a} {b}' for a, b in counter_list]))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Formats dictated text to lower case. | def lowercase_text(text):
newText = format_lower_case(text)
Text("%(text)s").execute({"text": newText}) | [
"def lower_case(self, text):\n return text.lower()",
"def LOWER(text):\n return text.lower()",
"def to_lower(self):\n\n print('Converting to lowercase...')\n self.__data['text'] = self.__data['text'].str.lower()",
"def lowercase(text):\n\n lowercase_text = text.lower()\n return lowercase_text",
"def lowerCase(self,phrase):\n if(\"normalizeText\" in self._classes):\n return self._normalize.lowerCase(phrase)",
"def lower_text(text_input):\n print(text_input.lower())",
"def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def tr_upper_to_lower(text):\n out = []\n for ch in text:\n if ch in tr_upper_to_lower_dict:\n out.append(tr_upper_to_lower_dict[ch])\n else:\n out.append(ch.lower())\n \n return \"\".join(out)",
"def to_lower(s):\n return s.lower()",
"def lowercase(self, _str):\n if _str:\n _str = _str.lower()\n\n return _str",
"def make_lowercase(contents):\n return contents.lower()",
"def preprocess_text(self):\n self.text_received = self.text_received.replace(\" \", \"\").lower()",
"def toLowerCase(self, str):\n print str.lower()\n return str.lower()",
"def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def lowercase_name(name):\n return name.lower()",
"def toLowerCase(s):\n return s.lower()",
"def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()",
"def lowercase(payload, **kwargs):\n\n retVal = payload\n\n if payload:\n for match in re.finditer(r\"[A-Za-z_]+\", retVal):\n word = match.group()\n\n if word.upper() in kb.keywords:\n retVal = retVal.replace(word, word.lower())\n\n return retVal"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Formats n words to the left of the cursor to lower case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python. | def lowercase_count(n):
saveText = _get_clipboard_text()
cutText = _select_and_cut_text(n)
if cutText:
newText = cutText.lower()
newText = newText.replace("%", "%%") # Escape any format chars.
Text(newText).execute()
else: # Failed to get text from clipboard.
Key('c-v').execute() # Restore cut out text.
_set_clipboard_text(saveText) | [
"def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def snake_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText.lower())\n newText = '_'.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def camel_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = _camelify(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def caps_fith_word(self):\r\n try:\r\n for i in range(4,len(self.words),5):\r\n self.words[i]=self.words[i].upper()\r\n logging.info(\"capitalizing every fifth word\")\r\n logging.info(self.words)\r\n except:\r\n logging.error(\"exception occurred : \")",
"def word(self, maxSize, start=None):\n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n while 1:\n wd = self.gen(maxSize, start)\n if self.wordCheck(wd):\n return wd.capitalize()",
"def words_lower_case(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words_in_lower_case = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words_in_lower_case = number_of_words_in_lower_case + sum(list(map(lambda x: x.islower(), i.text.split())))\n return number_of_words_in_lower_case",
"def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret",
"def lowercase_text(text):\n newText = format_lower_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def print_words(filename):\r\n counter_list = count_words(filename)\r\n counter_list.sort()\r\n\r\n print('\\n'.join([f'{a} {b}' for a, b in counter_list]))",
"def words_from_tokens(tokens: Sequence[Token], offset: int, n: int) -> str:\n g = (t.value_lower for t in tokens[offset:] if t.type == \"word\")\n return \" \".join(itertools.islice(g, n))",
"def _set_number_of_words(self, N):\n self.N_words_to_display = N",
"def uppercase_lowercase_words(s):\n if not s:\n return None\n if not isinstance(s, str) :\n raise TypeError('Not a string')\n else:\n # take out the words\n s = s.split()\n news = \"\"\n # process the words based on indexing and combine them into a new string\n # V1 :\n # for i in range(len(s)):\n # if i % 2 == 0:\n # news += s[i].upper() + \" \"\n # else:\n # news += s[i].lower() + \" \"\n for i in range(len(s)):\n if i % 2 == 0:\n s[i] = s[i].upper()\n else:\n s[i] = s[i].lower()\n return \" \".join(s).strip()",
"def titlecase(text):\n\n words = re.split('\\s', text)\n line = []\n for word in words:\n if word.startswith('#') or \\\n INLINE_PERIOD.search(word) or \\\n UC_ELSEWHERE.match(word):\n line.append(word)\n continue\n if SMALL_WORDS.match(word):\n line.append(word.lower())\n continue\n line.append(CAPFIRST.sub(lambda m: m.group(0).upper(), word))\n\n line = \" \".join(line)\n\n line = SMALL_FIRST.sub(lambda m: '%s%s' % (\n m.group(1),\n m.group(2).capitalize()\n ), line)\n\n line = SMALL_LAST.sub(lambda m: m.group(0).capitalize(), line)\n\n line = SUBPHRASE.sub(lambda m: '%s%s' % (\n m.group(1),\n m.group(2).capitalize()\n ), line)\n\n return line",
"def caps_third_letter(self):\r\n try:\r\n for i in range(len(self.words)):\r\n if len(self.words[i])>=3:\r\n flag=list(self.words[i])\r\n flag[2]=flag[2].upper()\r\n self.words[i]=''.join(flag)\r\n logging.info(\"capitalizing every 3rd letter of a word\")\r\n logging.info(self.words)\r\n except:\r\n logging.error(\"exception occurred : \")",
"def make_title(words):",
"def pretty( text ):\n return ' '.join( [word.capitalize() for word in words(text)] )",
"def format_words(words):\n return sorted(words, key=str.lower)",
"def ladcased(normal):\r\n\r\n ladified = ''\r\n for i, c in enumerate(normal):\r\n ladified += c.lower() if (i % 2 == 0) else c.upper()\r\n\r\n return ladified",
"def print_words(words):\n print('Here are the words you entered:')\n for i, word in enumerate(words, start=1):\n print('{}. {}'.format(i, word.title()))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cleans up the text before formatting to camel, pascal or snake case. Removes dashes, underscores, single quotes (apostrophes) and replaces them with a space character. Multiple spaces, tabs or new line characters are collapsed to one space character. Returns the result as a string. | def _cleanup_text(text):
prefixChars = ""
suffixChars = ""
if text.startswith("-"):
prefixChars += "-"
if text.startswith("_"):
prefixChars += "_"
if text.endswith("-"):
suffixChars += "-"
if text.endswith("_"):
suffixChars += "_"
text = text.strip()
text = text.replace('-', ' ')
text = text.replace('_', ' ')
text = text.replace("'", ' ')
text = re.sub('[ \t\r\n]+', ' ', text) # Any whitespaces to one space.
text = prefixChars + text + suffixChars
return text | [
"def clean_up_text(text):\n text = text.lower() # to lower case\n text = re.sub(r'[^a-z]', ' ', text) # replace other characters than a-z with ' '\n return text",
"def sanitize(text):\n text = str(text).strip().replace(' ', '_')\n return re.sub(r'(?u)[^-\\w.\\/]', '', text)",
"def clean_review(self, text):\n text = text.lower() # lowercase capital letters\n\n if self.remove_stopwords:\n text = self.remove_stopwords_f(text, keep_neg_words=True)\n\n text = re.sub('[^a-zA-Z]+', ' ', text) # select only alphabet characters (letters only)\n # text = re.sub('[^a-zA-Z0-9]+', ' ', text) # select only alphanumeric characters (letters & numbers)\n # text = re.sub(r'\\W+', ' ', text) # Select only alphanumeric characters (including greek & underscore)\n\n text = re.sub(' +', ' ', text) # remove extra spaces\n\n if self.apply_normalization:\n text = self.normalize_text(text)\n\n return text",
"def detokenize(self, text):\n text = ' ' + text + ' '\n text = self._dash_fixes.sub(r' \\1-\\2 ', text)\n text = self._dash_fixes2.sub(r' \\1-\\2 ', text)\n text = self._currency_or_init_punct.sub(r' \\1', text)\n text = self._noprespace_punct.sub(r'\\1 ', text)\n text = self._contract.sub(r\" \\1'\\2\", text)\n text = self._contractions.sub(r\"\\1\", text)\n text = self._esses.sub(r\"s \", text)\n text = self.moses_detokenizer.detokenize(text.split())\n text = text.strip()\n # capitalize\n if not text:\n return ''\n return text",
"def text_cleaning(self, text):\n # *** Remove this if you are using capitalization as feature ex. NER **\n text = text.lower()\n\n text = re.sub(r\"\\. \\. \\.\", \"\\.\", text)\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`\\.]\", \" \", text)\n text = re.sub(r'[0-9]+', '', text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \"\", text)\n text = re.sub(r\"!\", \"\", text)\n text = re.sub(r\"\\(\", \"\", text)\n text = re.sub(r\"\\)\", \"\", text)\n text = re.sub(r\"\\?\", \"\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n text = re.sub(r\"<br />\", \" \", text)\n text = re.sub(r'[^\\w\\s]', '', text)\n text = text.split(\" \")\n # text = [w for w in text if w not in self.stops]\n return text",
"def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text",
"def detokenize(self, text):\n text = ' ' + text + ' '\n text = self._dash_fixes.sub(r' \\1-\\2 ', text)\n text = self._dash_fixes2.sub(r' \\1-\\2 ', text)\n text = self._dash_fixes3.sub(r' \\1-\\2 ', text)\n text = self._dash_fixes4.sub(r' \\1-\\2 ', text)\n text = self._currency_or_init_punct.sub(r' \\1', text)\n text = self._noprespace_punct.sub(r'\\1 ', text)\n text = self._contract.sub(r\" \\1'\\2\", text)\n text = text.strip()\n if not text:\n return ''\n # capitalize (we assume no abbreviations since there are none in the data)\n # this ensures proper sentence splitting\n text = text[0].upper() + text[1:]\n text = re.sub('(\\p{Alpha})\\. (\\p{Ll})', lambda m: m.group(1) + '. ' + m.group(2).upper(), text)\n return text",
"def clean_text(text):\n text = text.lower()\n text = text.translate(str.maketrans('', '', string.punctuation + '“”’—'))\n text = text.translate(str.maketrans('', '', string.digits))\n\n return text.strip()",
"def _clean_text(text):\n rrb = re.compile(\"-RRB-\")\n lrb = re.compile(\"-LRB-\")\n new_text = re.sub(rrb, \" \", text)\n new_text = re.sub(lrb, \" \", new_text)\n\n punct = re.compile(r'[_?!.,]')\n new_text = re.sub(punct, \" \", new_text)\n\n new_text = str(new_text).lower()\n return new_text",
"def space_out_camel_case(s):\n return re.sub('((?=[A-Z][a-z])|(?<=[a-z])(?=[A-Z]))', ' ', s).strip()",
"def remove_all_caps(text):\n return re.sub(r\"(\\b(?:[A-Z]+[a-z]?[A-Z]*|[A-Z]*[a-z]?[A-Z]+)\\b(?:\\s+(?:[A-Z]+[a-z]?[A-Z]*|[A-Z]*[a-z]?[A-Z]+)\\b)*)\",\n ' ', text)",
"def standardize(text):\n # FIXME regex restricts us to only ascii\n # FIXME move regex compilation outside\n p = re.compile('[^a-zA-Z]')\n retval = p.sub('', text)\n retval = retval.lower()\n return retval",
"def sanitize_string(input_string, space=True):\n output_string = input_string.strip().lower().replace('-', '_').replace('\\n', '')\n if not space:\n output_string = output_string.replace(' ', '_')\n return output_string",
"def format_title(text):\n text = text.strip()\n # if empty string, return \"\"\n if len(text) == 0:\n return text\n else:\n text = text.lower() # lower all char\n\n # Change to in single space format\n words = [word for word in text.strip().split(\" \") if len(word) >= 1]\n\n # Capitalize all words except function word\n words_new = list()\n for word in words:\n if word not in FUNCTION_WORD:\n word = word[0].upper() + word[1:]\n words_new.append(word)\n\n # Make sure first word always be capitalized\n words_new[0] = words_new[0][0].upper() + words_new[0][1:]\n\n return \" \".join(words_new)",
"def _lowercase_despace_depunctuate(some_str=None):\n some_str = some_str.replace(\" \", \"\")\n some_str = some_str.replace(\"_\", \"\")\n some_str = some_str.replace(\"-\", \"\")\n some_str = some_str.lower()\n return some_str",
"def clean_text(text):\n rrb = re.compile(\"-RRB-\")\n lrb = re.compile(\"-LRB-\")\n new_text = re.sub(rrb, \" \", text)\n new_text = re.sub(lrb, \" \", new_text)\n\n punct = re.compile(r'[_?!.,]')\n new_text = re.sub(punct, \" \", new_text)\n\n new_text = str(new_text).lower()\n return new_text",
"def removeTitleHelper(self, input_text):\n\t\tpatterns = '(\".*\")'\n\t\tinput_text = re.sub(patterns, '', input_text)\n\t\treturn input_text",
"def clean_key(text):\n if not isinstance(text, (str, unicode)):\n return\n text = clean(text)\n return text.replace(' ', '_').replace('/', '_').lower()",
"def clean(text: str) -> str:\n\n clean_str = ''\n for char in text:\n if char.isalnum() or char == UNDERSCORE:\n clean_str = clean_str + char\n elif char == HASHTAG_SYMBOL or char == MENTION_SYMBOL:\n clean_str = clean_str + SPACE + char\n else:\n clean_str = clean_str + SPACE\n return clean_str"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
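A quick check of _cleanup_text against its description; the function is copied verbatim from the entry above and the expected outputs follow directly from it:

```python
import re

def _cleanup_text(text):  # copied from the entry above
    prefixChars = ""
    suffixChars = ""
    if text.startswith("-"):
        prefixChars += "-"
    if text.startswith("_"):
        prefixChars += "_"
    if text.endswith("-"):
        suffixChars += "-"
    if text.endswith("_"):
        suffixChars += "_"
    text = text.strip()
    text = text.replace('-', ' ')
    text = text.replace('_', ' ')
    text = text.replace("'", ' ')
    text = re.sub('[ \t\r\n]+', ' ', text)  # collapse runs of whitespace
    text = prefixChars + text + suffixChars
    return text

# Dashes, underscores and apostrophes become spaces; whitespace runs collapse.
assert _cleanup_text("some-mixed_up  text") == "some mixed up text"
assert _cleanup_text("hello\t  world\n") == "hello world"
```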
Returns the text contents of the system clipboard. | def _get_clipboard_text():
clipboard = Clipboard()
return clipboard.get_system_text() | [
"def display_text(self):\n return self.display_screen.text()",
"def getTextFromClipboard(self) -> str:\n cb = self.qtApp.clipboard()\n if cb:\n QtWidgets.QApplication.processEvents()\n return cb.text()\n g.trace('no clipboard!')\n return ''",
"def text(self):\n return win32gui.GetWindowText(self.hwnd)",
"def read_all_screen(self):\n full_text = \"\"\n for ypos in range(self.model_dimensions[\"rows\"]):\n full_text += self.string_get(ypos + 1, 1, self.model_dimensions[\"columns\"])\n return full_text",
"def win32_clipboard_get():\n try:\n import win32clipboard\n except ImportError:\n message = (\"Getting text from the clipboard requires the pywin32 \"\n \"extensions: http://sourceforge.net/projects/pywin32/\")\n raise Exception(message)\n win32clipboard.OpenClipboard()\n text = win32clipboard.GetClipboardData(win32clipboard.CF_TEXT)\n # FIXME: convert \\r\\n to \\n?\n win32clipboard.CloseClipboard()\n return text",
"def _get_pad_content(self):\n self.ensure_one()\n return self.pad_get_content(self.description_pad)",
"def text(self):\n text = ''\n for run in self.runs:\n text += run.text\n return text",
"def get_clipboard_contents(self):\r\n return self.clipboard",
"def get_text():",
"def text_output(self):\n print(self.board)\n print()",
"def get_content(paste):\n return paste.content",
"def getText(self):\n return _libsbml.TextGlyph_getText(self)",
"def osx_clipboard_get():\n p = subprocess.Popen(['pbpaste', '-Prefer', 'ascii'],\n stdout=subprocess.PIPE)\n text, stderr = p.communicate()\n # Text comes in with old Mac \\r line endings. Change them to \\n.\n text = text.replace('\\r', '\\n')\n return text",
"def copy_text(self):\r\n try:\r\n current_item = self.treeWidget.currentItem()#.text(0)\r\n except:\r\n return\r\n\r\n # Lowest level of tree\r\n if current_item.childCount() == 0:\r\n text = current_item.text(0)\r\n # Second level of tree.\r\n elif current_item.parent():\r\n text = '{} - {}'.format(current_item.parent().text(0),\r\n current_item.text(0))\r\n for n in range(current_item.childCount()):\r\n text += '\\n\\t{}'.format(current_item.child(n).text(0))\r\n else:\r\n text = current_item.text(0)\r\n for n in range(current_item.childCount()):\r\n text += '\\n{}'.format(current_item.child(n).text(0))\r\n for n1 in range(current_item.child(n).childCount()):\r\n text += '\\n\\t{}'.format(\r\n current_item.child(n).child(n1).text(0)) \r\n\r\n pyperclip.copy(text)",
"def get_text(self):\n return self.output.getvalue()",
"def tkinter_clipboard_get():\n try:\n import Tkinter\n except ImportError:\n message = (\"Getting text from the clipboard on this platform \"\n \"requires Tkinter.\")\n raise Exception(message)\n root = Tkinter.Tk()\n root.withdraw()\n text = root.clipboard_get()\n root.destroy()\n return text",
"def get_visible_text(self):\n return self.browser.find_element_by_xpath(\"//body\").text",
"def get_content(self):\r\n view = self.window.active_view()\r\n selection = \"\"\r\n for region in view.sel():\r\n # If no selection, use the entire file as the selection\r\n if region.empty():\r\n selection = sublime.Region(0, view.size())\r\n else:\r\n selection = region\r\n return view.substr(selection)",
"async def getDisplayText(self):\n display_text = await self.director.getItemVariableValue(\n self.item_id, \"DISPLAY_TEXT\"\n )\n return display_text"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Selects wordCount number of words to the left of the cursor and cuts them out of the text. Returns the text from the system clipboard. | def _select_and_cut_text(wordCount):
clipboard = Clipboard()
clipboard.set_system_text('')
Key('cs-left/3:%s/10, c-x/10' % wordCount).execute()
return clipboard.get_system_text() | [
"def moveCursorWordLeft(self):\r\n self.SendScintilla(QsciScintilla.SCI_WORDLEFT)",
"def moveLeftOneWordPart(self):\r\n self.SendScintilla(QsciScintilla.SCI_WORDPARTLEFT)",
"def extendSelectionLeftOneWord(self):\r\n self.SendScintilla(QsciScintilla.SCI_WORDLEFTEXTEND)",
"def extendSelectionLeftOneWordPart(self):\r\n self.SendScintilla(QsciScintilla.SCI_WORDPARTLEFTEXTEND)",
"def clip(text, max_len=80):\n end = None\n if len(text) > max_len:\n space_before = text.find(\" \", 0, max_len)\n if space_before >= 0:\n end = space_before\n else:\n space_after = text.find(\" \", max_len)\n if space_after >= 0:\n end = space_after\n \n if end is None: # no spaces\n end = len(text)\n return text[:end].rstrip()",
"def get_current_word(self):\r\n line, index = self.getCursorPosition()\r\n text = self.text(line)\r\n wc = self.wordCharacters()\r\n if wc is None:\r\n regexp = QtCore.QRegExp('[^\\w_]')\r\n else:\r\n regexp = QtCore.QRegExp('[^{0}]'.format(re.escape(wc)))\r\n start = regexp.lastIndexIn(text, index) + 1\r\n end = regexp.indexIn(text, index)\r\n if start == end + 1 and index > 0:\r\n # we are on a word boundary, try again\r\n start = regexp.lastIndexIn(text, index - 1) + 1\r\n if start == -1:\r\n start = 0\r\n if end == -1:\r\n end = len(text)\r\n if end > start:\r\n word = text[start:end]\r\n else:\r\n word = ''\r\n return word",
"def WordLeft(*args, **kwargs):\n return _richtext.RichTextCtrl_WordLeft(*args, **kwargs)",
"def clip(text, max_len=80):\n end = None\n if len(text) > max_len:\n space_before = text.rfind(' ', 0, max_len)\n if space_before >= 0:\n end = space_before\n else:\n space_after = text.rfind(' ', max_len)\n if space_after >= 0:\n end = space_after\n if end is None: # no spaces were found\n end = len(text)\n return text[:end].rstrip()",
"def word_under_cursor(self):\n textCursor = self.editor.textCursor()\n textCursor.select(QtGui.QTextCursor.LineUnderCursor)\n #textCursor.select(QtGui.QTextCursor.WordUnderCursor)\n word = textCursor.selection().toPlainText()\n return word",
"def truncate(text, words=25):\n return ' '.join((text).split()[:words])",
"def getMarked(self):\n if not self.selection.isSelection():\n return u\"\"\n sm1, sm2 = self.selection.order(self.selection.selectionMark,\n self.selection.selectionMark2)\n w1 = sm1[0]\n w2 = sm2[0]\n cx1 = sm1[1]\n cx2 = sm2[1]\n if (w1 == w2):\n return w1.string[cx1:cx2]\n # Get the word fragments at the beginning and end of the selection\n snip1 = w1.string[cx1:]\n snip2 = w2.string[:cx2]\n tl1 = w1.tline\n wx1 = tl1.twords.index(w1)\n tl2 = w2.tline\n wx2 = tl2.twords.index(w2)\n # Start the text string with the format of the first line\n text = tl1.para.getFormat() + snip1\n # then get all intervening words\n if (tl1 == tl2): # only 1 line is involved\n # get words from wx1+1 to wx2-1 (incl.)\n for w in tl1.twords[wx1+1:wx2]:\n text += u\" \" + w.string\n ch = u\" \"\n\n else: # deletion block covers >1 line\n # get words from wx1+1 to end of paragraph\n for w in tl1.twords[wx1+1:]:\n text += u\" \" + w.string\n # get all the intervening lines\n while True:\n para = tl1.para\n tl1 = self.rsubject.nextLine(tl1)\n if (tl1.para == para):\n text += u\" \"\n else:\n text += u\"\\n\" + tl1.para.getFormat()\n if (tl1 == tl2): break\n text += tl1.getText()\n\n ch = u\"\"\n # Add the remaining words in tl2 up to w2-1\n for w in tl2.twords[:wx2]:\n text += ch + w.string\n ch = u\" \"\n\n # Add the fragment of the last marked word\n return text + ch + snip2",
"def Left(text, number):\n return text[:number]",
"def clip(text:str, max_len:'int > 0'=80) -> str:\n end=None\n if len(text)>max_len:\n space_before = text.rfind(' ',0,max_len)\n if space_before >= 0:\n end=space_before\n else:\n space_after = text.rfind(' ',max_len)\n if space_after >=0:\n end =space_after\n if end is None:\n end = len(text)\n return text[:end].rstrip()",
"def truncate_text(text, number_of_words):\n return \" \".join(text.split()[:number_of_words])",
"def deleteWordToLeft(self):\r\n self.SendScintilla(QsciScintilla.SCI_DELWORDLEFT)",
"def textUnderCursor(self):\n tc = self.textCursor()\n tc.select(QtGui.QTextCursor.WordUnderCursor)\n return tc.selectedText()",
"def _(event):\n pos = line.document.find_start_of_previous_word(count=event.arg)\n if pos:\n deleted = line.delete_before_cursor(count=-pos)\n line.set_clipboard(ClipboardData(deleted))",
"def paste(self):\n res = \"\"\n try:\n os.environ['DISPLAY']\n except KeyError:\n return\n for cmd in (['xclip', '-o', '-selection', 'primary'],\n ['xsel', '-o', '--primary']):\n try:\n res = Popen(cmd, stdout=PIPE,\n universal_newlines=True).communicate()[0]\n except IOError:\n continue\n else:\n break\n para_idx, line_idx, char_idx = self.paragraph\n if not res:\n return\n if sys.version_info.major < 3:\n enc = locale.getpreferredencoding() or 'utf-8'\n res = str(res, encoding=enc)\n res = res.splitlines()\n if len(res) == 1:\n line = list(self.line)\n cur_line_paste = list(res[0])\n line[char_idx:char_idx] = cur_line_paste\n char_idx += len(cur_line_paste)\n self.line = \"\".join(line)\n else:\n line = list(self.line)\n beg_line = line[:char_idx]\n end_line = \"\".join(line[char_idx:])\n first_line_paste = list(res[0])\n beg_line.extend(first_line_paste)\n self.line = \"\".join(beg_line)\n ins = [self._text_wrap(i) for i in res[1:]]\n self.text[para_idx + 1:para_idx + 1] = ins\n para_idx += len(res[1:])\n self.text[para_idx].append(end_line)\n self.text[para_idx] = self._text_wrap(self.text[para_idx])\n char_idx = sum(len(i) for i in self.text[para_idx])\n self._char_index_to_yx(para_idx, char_idx)",
"def extendSelectionLeftOneCharacter(self):\r\n self.SendScintilla(QsciScintilla.SCI_CHARLEFTEXTEND)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates a plot of the specified data file and sets the ThumbnailPanel's bitmap accordingly | def plot_thumb(self, data_fname):
thumbnail = self.controller.plot_thumb(data_fname, self.bitmap_width, self.bitmap_height)
if thumbnail is not None:
self.figure_bmp.SetBitmap(thumbnail)
else:
self.plot_blank() | [
"def filePlot(self):\n if len(self.filename) == 0:\n self.filename = QtGui.QFileDialog.getOpenFileName(None, \\\n \"Open Data File (csv)\", \".\", \"*.csv\")\n \n self._readCSV(self.filename)\n \n self.main_widget = QtGui.QWidget(self)\n \n #fig = Figure(figsize=(5,4), dpi=100)\n self.resize(500, 500)\n\n self.fig = Figure()\n self.ax = self.fig.add_subplot(111)\n for i in range(0, len(self.y_values)):\n self.ax.plot(self.x_values, self.y_values[i], linewidth=self.list_line_width[i])\n self.ax.set_xlabel(self.x_label)\n self.ax.set_ylabel(self.y_label)\n self.ax.set_title(self.title)\n\n self.canvas = FigureCanvas(self.fig) # A Qt Drawing area\n \n l = QtGui.QVBoxLayout(self.main_widget)\n l.addWidget(self.canvas)\n \n self.main_widget.setFocus()\n self.setCentralWidget(self.main_widget)\n self.filename = \"\"",
"def plot(self, job):\n # fill PlotJob with needed data if it doesn't exist\n # Plotter will look for the files it needs relative to the work directory\n # If this fails it will fall back to a baseline location if one was \n # Provided to cmake at the time this file was generated\n if job.dataPath == None :\n job.dataPath = \"Scenarios/\" + job.verificationDirectory + \"/baselines/\"\n \n if job.dataFile == None:\n job.dataFile = job.name + \"Results.zip\"\n \n if job.outputFilename==None:\n job.outputFilename=job.titleOverride+\".jpg\"\n \n if len(job.outputFilename.split(\".\"))==1:\n job.outputFilename+=\".jpg\"\n \n if job.imageWidth==None and job.imageHeight==None:\n job.imageWidth=1600\n job.imageHeight=800\n \n if not os.path.exists(job.dataPath):\n job.dataPath = os.path.join(job.basedir,job.dataPath)\n \n if not os.path.isfile(os.path.join(job.dataPath,job.dataFile)):\n job.dataPath = os.path.join(job.basedir,job.dataPath)\n \n if not job.fontSize:\n job.fontSize=22\n \n if not os.path.exists(os.path.dirname(job.outputDir)):\n os.mkdir(os.path.dirname(job.outputDir))\n \n self.drawgraph(job,os.path.join(job.dataPath,job.dataFile),os.path.join(job.outputDir,job.outputFilename))",
"def plot_chosen_data(main, dataPath):\n error = \"Error \"+errorPath+\"plot_chosen_data: Must choose data of proper format (tiff, jpeg, etc.)\"\n try:\n if dataPath == '':\n main.msg('thinks it has nothing')\n main.msg(error)\n return\n data = mpimg.imread(dataPath)\n imgObj = Img.Img(data, title = os.path.basename(dataPath), filePath = dataPath)\n main.imgObjList.append(imgObj)\n main.horizontalSlider.setMaximum(len(main.imgObjList)-1)\n main.horizontalSlider.setValue(main.horizontalSlider.maximum())\n func.plot_img_obj(main, imgObj)\n except:\n main.msg(error)",
"def plotify(title,xLabel,yLabel,minY,maxY,minX,maxX):\n plt.title(title)\n plt.xlabel(xLabel)\n plt.ylabel(yLabel)\n plt.legend()\n plt.xlim((minX,maxX))\n plt.ylim((minY, maxY))\n image = BytesIO()\n plt.savefig(image, format=\"png\")\n image.seek(0)\n return image",
"def make_image(self, frame, filename, **kwds):\n p = plot.plot(frame, **kwds)\n p.save_image(filename)",
"def plotfile(self):\r\n filename = self.locatefile()\r\n if filename == \"\":\r\n print \"\\nNo file was chosen, exiting ...\\n\"\r\n return\r\n else:\r\n print \"\\nXYZ Data file:\\n\" + filename\r\n \r\n print \"\\nReading XYZ data file....\"\r\n xyz = XYZImporter(filename)\r\n geodata = xyz.genericdata\r\n print \"FINISHED reading XYZ data file\"\r\n\r\n # Note PNG is only 8 bit, and so PDF has greater colour\r\n # depth \r\n print \"\\nAbout to render plot ...\"\r\n gp = GridPlotterCustom()\r\n gp.shownulls = False\r\n title = \"Plot of XYZ data file: \" + filename\r\n outfname = (filename.replace('.', '_') +\r\n '_PLOT_custom.pdf')\r\n gp.plotgeodata(geodata, title, outfname)\r\n print \"FINISHED rendering plot to:\\n\" + outfname\r\n print \"\\n\\n\"",
"def plot_data(self):",
"def main():\r\n nColSize, nRowSize, lstData, lstGraphLabels = pipe.retrieveData()\r\n aaData = _processData(nColSize, nRowSize, lstData) \r\n _saveImage(aaData, lstGraphLabels)\r\n pipe.updateStatus()",
"def generatePlot(data, title, save, show):\n data.sort()\n ax = plt.subplot()\n plt.plot(data)\n plt.title(title)\n anchoredText = AnchoredText(f'max: {max(data)} (sec)\\nmean: {statistics.mean(data)} (sec)\\nmin: {min(data)} (sec)', loc=2)\n ax.add_artist(anchoredText)\n ax.set_ylabel('Time (s)')\n ax.set_xlabel('Trial (sorted by time)')\n if show == True:\n plt.show()\n if save == True:\n plt.savefig(f'{title}.png')\n ax.remove()",
"def save_plot_as_image(self):\r\n plt.savefig(ROOT_DIR + '/presentation/images/' + self.folder + '/' + self.generated_image_name + '.png',\r\n bbox_inches='tight')",
"def create_image_gallery(file_name, wf_uuid_list, wf_parent_uuid_list, uuid_image_map, wf_uuid_label_map, isDax):\n\twf_uuid_parent_ref = None\n\ttry:\n\t\tfh = open(file_name, \"w\")\n\t\tcontent = \"\"\"\n<html>\n<head>\n<style>\n.imgbox\n{\nfloat:left;\ntext-align:center;\nwidth:450px;\nheight:450px;\nmargin:4px;\nmargin-bottom:8px;\npadding:0px;\n}\n.thumbnail\n{\nwidth:300px;\nheight:300px;\nmargin:3px;\n}\n.box\n{\nwidth:450px;\npadding:0px;\n}\n.workflow\n{\nclear:both;\n}\n</style>\n</head>\n<body>\n\"\"\" + plot_utils.create_home_button() + \"\"\"\n<center>\n\"\"\"\n\t\tif isDax:\n\t\t\tcontent += \"<h3>DAX Graph </h3>\" + NEW_LINE_STR\n\t\telse:\n\t\t\tcontent += \"<h3>DAG Graph </h3>\"+ NEW_LINE_STR\n\t\tfor index in range(len(wf_uuid_list)):\n\t\t\tuuid = wf_uuid_list[index]\n\t\t\timage = uuid_image_map[index]\n\t\t\tlabel = wf_uuid_label_map[index]\n\t\t\tparent_uuid =wf_parent_uuid_list[index]\n\t\t\tif parent_uuid is None:\n\t\t\t\tcontent += \"<h3 class= 'workflow'> Top level workflow (\" + uuid + \")</h3>\"\n\t\t\telse:\n\t\t\t\tif parent_uuid != wf_uuid_parent_ref:\n\t\t\t\t\twf_uuid_parent_ref = parent_uuid \n\t\t\t\t\tcontent += \"<h3 class= 'workflow'> Sub workflow's of workflow (\" + parent_uuid + \")</h3>\"\n\t\t\tcontent += \"<div class ='imgbox' >\"\n\t\t\tif image is None:\n\t\t\t\tcontent += \"<a class= 'thumbnail' href ='#'>\\n\"\n\t\t\t\tcontent +=\"<img src ='images/not_available.jpg' height='300px' width='300px'>\\n\"\n\t\t\t\tcontent +=\"</img>\\n</a>\"\n\t\t\t\tcontent +=\"<div class ='box'>\\n\"\n\t\t\t\tcontent += \"wf_uuid :\" + uuid +\"<br/>\"\n\t\t\t\tif isDax:\n\t\t\t\t\tcontent+= \"dax label :\" + label\n\t\t\t\telse:\n\t\t\t\t\tif image is not None:\n\t\t\t\t\t\tcontent += \"dag label :\" + image\n\t\t\t\tcontent +=\"</div>\"\t\t\t\t\t\n\t\t\telse:\n\t\t\t\tcontent +=\"<a class= 'thumbnail' href ='\" + image + \"'>\"\n\t\t\t\tcontent +=\"<img src ='\" + image + \"' height='300px' width='300px'>\"\n\t\t\t\tcontent +=\"</img>\\n</a>\\n\"\n\t\t\t\tcontent +=\"<div class ='box'>\\n\"\n\t\t\t\tcontent += \"wf_uuid :\" + uuid +\"<br/>\"\n\t\t\t\tif isDax:\n\t\t\t\t\tcontent += \"dax label :\" + label\n\t\t\t\telse:\n\t\t\t\t\tif label is not None:\n\t\t\t\t\t\tcontent += \"dag label :\" + label\n\t\t\t\tcontent += \"</div>\"\n\t\t\tcontent += \"</div>\"\n\t\tcontent += \"\"\"\n</center>\n</body>\n</html>\n\"\"\"\n\t\tfh.write( content)\n\texcept IOError:\n\t\tlogger.error(\"Unable to write to file \" + data_file)\n\t\tsys.exit(1)\n\telse:\n\t\tfh.close()",
"def image(Data, SH={}, maxval=-1):\n import matplotlib.pylab as plt\n\n if (maxval<=0):\n Dmax = np.max(Data)\n maxval = -1*maxval*Dmax\n\n if 'time' in SH:\n t = SH['time']\n ntraces = SH['ntraces']\n ns = SH['ns']\n else:\n ns = Data.shape[0]\n t = np.arange(ns)\n ntraces = Data.shape[1]\n x = np.arange(ntraces)+1\n\n print(maxval)\n plt.pcolor(x, t, Data, vmin=-1*maxval, vmax=maxval)\n plt.colorbar()\n plt.axis('normal')\n plt.xlabel('Trace number')\n if 'time' in SH:\n plt.ylabel('Time (ms)')\n else:\n plt.ylabel('Sample number')\n if 'filename' in SH:\n plt.title(SH['filename'])\n plt.gca().invert_yaxis()\n\n #plt.grid(True)\n plt.show()",
"def recreate_single_figure(\n plot: VelociraptorPlot,\n line_data: Dict[str, Dict],\n output_directory: str,\n file_type: str,\n) -> None:\n\n try:\n first_line_metadata = line_data[list(line_data.keys())[0]][\"metadata\"]\n fake_catalogue = FakeCatalogue(\n z=first_line_metadata[\"redshift\"], a=first_line_metadata[\"scale_factor\"]\n )\n except KeyError:\n fake_catalogue = FakeCatalogue()\n\n fig, ax = plt.subplots()\n\n # Add simulation data\n for line_type in valid_line_types:\n line = getattr(plot, f\"{line_type}_line\", None)\n if line is not None:\n for name, data in line_data.items():\n ax.set_xlabel(data[plot.filename].get(\"x_label\"))\n ax.set_ylabel(data[plot.filename].get(\"y_label\"))\n\n this_line_dict = data[plot.filename][\"lines\"][line_type]\n centers = unyt.unyt_array(this_line_dict[\"centers\"], units=plot.x_units)\n heights = unyt.unyt_array(this_line_dict[\"values\"], units=plot.y_units)\n errors = unyt.unyt_array(this_line_dict[\"scatter\"], units=plot.y_units)\n\n # Data points from the bins with too few data points\n additional_x = unyt.unyt_array(this_line_dict.get(\"additional_points_x\", []),\n units=plot.x_units)\n additional_y = unyt.unyt_array(this_line_dict.get(\"additional_points_y\", []),\n units=plot.y_units)\n\n if line.scatter == \"errorbar\":\n (mpl_line, _, _) = ax.errorbar(centers, heights, yerr=errors, label=name)\n elif line.scatter == \"shaded\":\n (mpl_line,) = ax.plot(centers, heights, label=name)\n\n # Deal with different + and -ve errors\n if errors.shape[0]:\n if errors.ndim > 1:\n down, up = errors\n else:\n up = errors\n down = errors\n else:\n up = 0\n down = 0\n\n ax.fill_between(\n centers,\n heights - down,\n heights + up,\n color=mpl_line.get_color(),\n alpha=0.3,\n linewidth=0.0,\n )\n\n # line.scatter == \"none\":\n else:\n (mpl_line,) = ax.plot(centers, heights, label=name)\n\n ax.scatter(additional_x, additional_y, c=mpl_line.get_color())\n\n\n # Add observational data second to allow for colour precedence\n # to go to runs\n for data in plot.observational_data:\n data.plot_on_axes(ax, errorbar_kwargs=dict(zorder=-10))\n\n # Finally set up metadata\n if plot.x_log:\n ax.set_xscale(\"log\")\n if plot.y_log:\n ax.set_yscale(\"log\")\n\n try:\n ax.set_xlim(*unyt.unyt_array(plot.x_lim, units=plot.x_units))\n except AttributeError:\n pass\n\n try:\n ax.set_ylim(*unyt.unyt_array(plot.y_lim, units=plot.y_units))\n except AttributeError:\n pass\n\n decorate_axes(\n ax,\n catalogue=fake_catalogue,\n comment=plot.comment,\n legend_loc=plot.legend_loc,\n redshift_loc=plot.redshift_loc,\n comment_loc=plot.comment_loc,\n )\n\n fig.savefig(f\"{output_directory}/{plot.filename}.{file_type}\")\n plt.close(fig)",
"def test_plot_save_figure(self):\n pname = os.path.join(\n self.datadir,\n 'monol_testA_E3-50_pds_rebin1.03' + HEN_FILE_EXTENSION)\n hen.plot.main([pname, '--noplot', '--figname',\n os.path.join(self.datadir,\n 'monol_testA_E3-50_pds_rebin1.03.png'),\n '-o', 'dummy.qdp'])",
"def plot_dataset(dfile, savefolder=None, **kwargs):\n if savefolder is None:\n savefolder = dfile.split('.')[0] + '_images'\n X, y = read_dataset_csv(dfile, to_array=True, **kwargs)\n if not os.path.exists(savefolder):\n os.makedirs(savefolder)\n plt.ioff()\n uids = np.unique(y)\n if len(uids) != len(y): # multiple samples per class\n for uid in uids:\n X_uid = np.atleast_2d(X[np.where(y==uid)[0]])\n create_figure(X_uid.T, savefolder, uid)\n else:\n for xx, yy in zip(X, y):\n create_figure(xx, savefolder, yy)",
"def plotgraphics(ID,filename,color): \n if ID == 2:\n \n X,Y = np.loadtxt(filename, unpack = True)\n\n pl.figure()\n pl.plot(X,Y, c = color)\n pl.draw()\n\n if ID == 1:\n \n X = np.loadtxt(filename, unpack = True)\n\n pl.figure()\n pl.plot(X, c = color)\n pl.draw()\n \n return",
"def plot_and_save_2d(file_name, path_name, raw_data_file, show=False):\n print '-'*23+'PLOT (2d)'+'-'*24\n \n print 'Loading data...',\n data = load_file(path_name+file_name)\n t = data['t']\n \n pic_path = path_name+'pics/'\n if not os.path.exists(pic_path):\n os.makedirs(pic_path)\n print 'done'\n print 'Creating and saving plots...', \n\n # Moment.\n plt.figure(1)\n plt.plot(t, data['dyn']['M'], t, data['static']['M'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('M')\n plt.title('Moment')\n plt.grid()\n plt.savefig('%sM.png' %pic_path)\n\n # Axial force.\n plt.figure(2)\n plt.plot(t, data['dyn']['FY'], t, data['static']['FY'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fa')\n plt.title('Fa')\n plt.grid()\n plt.savefig('%sFa.png' %pic_path)\n\n # Transverse force.\n plt.figure(3)\n plt.plot(t, data['dyn']['FZ'], t, data['static']['FZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Ft')\n plt.title('Ft')\n plt.grid()\n plt.savefig('%sFt.png' %pic_path)\n\n # Resultant force.\n plt.figure(4)\n plt.plot(t, np.sqrt(data['dyn']['FY']**2+data['dyn']['FZ']**2),\n t, np.sqrt(data['static']['FY']**2+data['static']['FZ']**2))\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fr')\n plt.title('Fr')\n plt.grid()\n plt.savefig('%sFr.png' %pic_path)\n print 'done'\n\n if show:\n plt.show()",
"def save_fig(ax_data, file_name):\n with open(file_name,'wb') as fid:\n pickle.dump(ax_data, fid)",
"def pop_up_plot_window(path):\n window = tk.Toplevel()\n window.wm_title(\"Plot\")\n window.configure(background='grey')\n\n img = ImageTk.PhotoImage(Image.open(path))\n image_label = Label(window, image=img)\n image_label.photo = img\n image_label.pack()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method to invoke Disable command on SDP Master. | def do(self):
this_server = TangoServerHelper.get_instance()
try:
sdp_master_ln_fqdn = ""
property_val = this_server.read_property("SdpMasterFQDN")[0]
sdp_master_ln_fqdn = sdp_master_ln_fqdn.join(property_val)
sdp_mln_client_obj = TangoClient(sdp_master_ln_fqdn)
sdp_mln_client_obj.send_command_async(
const.CMD_Disable, None, self.disable_cmd_ended_cb
)
self.logger.debug(const.STR_DISABLE_CMS_SUCCESS)
this_server.write_attr(
"activityMessage", const.STR_DISABLE_CMS_SUCCESS, False
)
except DevFailed as dev_failed:
self.logger.exception(dev_failed)
log_msg = f"{const.ERR_DISABLE_CMD_FAIL}{dev_failed}"
tango.Except.re_throw_exception(
dev_failed,
const.ERR_INVOKING_CMD,
log_msg,
"SdpMasterLeafNode.DisableCommand()",
tango.ErrSeverity.ERR,
) | [
"def disable(self):\n\n return self.conn.request(\"POST\", \"/%s/disable\" % self.uuid)",
"def disable_mems(self):\n off = 'D001'\n self.send_command(off)\n print('MEMS DISABLE')",
"async def disable(self, ctx, *, command: str):\r\n\t\tcommand = command.lower()\r\n\t\tif command in ('enable', 'disable'):\r\n\t\t\treturn await ctx.send('Cannot disable that command.')\r\n\t\tif command not in [i.name for i in list(self.bot.commands)]:\r\n\t\t\treturn await ctx.send('Unrecognised command name.')\r\n\t\tif \"disabled\" in self.bot.config[f\"{ctx.guild.id}\"]:\r\n\t\t\tself.bot.config[f\"{ctx.guild.id}\"][\"disabled\"].append(command)\r\n\t\telse:\r\n\t\t\tself.bot.config[f\"{ctx.guild.id}\"][\"disabled\"] = [command]\r\n\t\tawait self._save()\r\n\t\tawait ctx.send(f'The \"{command}\" command has been disabled for this server.')",
"def disable(self):\n self.post(\"disable\")\n return self",
"def do_disable(self, line):\n\n syntax = \"*** Syntax: disable <rip> <port>\"\n\n commands = line.split()\n if len(commands) > 2 or len(commands) == 0:\n print syntax\n elif len(commands) <= 2:\n host = commands[0]\n if len(commands) == 1:\n port = ''\n elif len(commands) == 2:\n port = commands[1]\n else:\n print syntax\n return\n # ask for an optional reason for disabling\n reason = raw_input(\"Reason for disabling [default = None]: \")\n if not self.director.disable(host, port, reason=reason):\n logger.error(\"Could not disable %s\" % host)\n else:\n print syntax",
"def __neg__(self):\n _this_module.txt_command('disable {0}'.format(self.fullname))",
"def disable(self, sid):\n return",
"def disable(self):\n logging.debug(\"Disabling switch %s\" % self.name)\n self.disabled = True",
"def _disable(self):\n self.enabled = False",
"def on_disable(self) -> None:\n self._cancel_automation()",
"def command_disable(update: Update, context: CallbackContext) -> None:\n logger.debug(f'Command /disable from {update.effective_chat.id} chat.')\n chat = update.effective_chat\n\n with db_session_from_context(context) as db_session:\n rg = ReceiverGroup.get_by_chat_id(\n chat_id=chat.id,\n session=db_session,\n )\n rg: ReceiverGroup\n if not rg:\n reply_msg = 'Use command /start first.'\n else:\n if rg.is_disabled:\n reply_msg = 'Broadcasting to this group chat already disabled.'\n else:\n rg.disable()\n db_session.add(rg)\n reply_msg = 'Broadcasting to this group chat successfully disabled.'\n\n # update chat data\n if rg.update_title(title=chat.title):\n db_session.add(rg)\n\n update.effective_message.reply_text(reply_msg)",
"def disable(self) -> None:\n self._channel.disable()",
"async def disable(self, ctx):\n\n server = ctx.message.server\n\n settings = self.bot.dota_ticker_settings.get(server.id)\n\n if settings is not None:\n settings['enabled'] = False\n await self.bot.dota_ticker_settings.put(server.id, settings)\n\n await self.bot.say('The match ticker has been disabled on {0.name}.'.format(server))",
"def disable(self):\n try:\n self.bus.open(self.BUS_NUMBER)\n self.write(AntennaDeployerCommand.DISARM_ANTS, 0x00)\n self.bus.close()\n return True\n except:\n return False",
"def do_monitor_mss_check_disable(client, args):\n item = client.msscheck.perform_action(args.id, 'disable')\n utils.print_dict(item)",
"def disable(self):\r\n self.enabled = False",
"def disableSensor(self):\n conf = self.read(self.VEML6040_CONF)\n conf = conf & 0x00FE\n conf = conf | 0x000\n self.write(self.VEML6040_CONF, conf)\n time.sleep(.001)",
"def disable(self, **kw):\n kw_copy = deepcopy(kw)\n pool_spec = kw_copy.pop(\"pool-spec\", \"\")\n cmd = f\"{self.base_cmd} disable {pool_spec} {build_cmd_from_args(**kw_copy)}\"\n\n return self.execute_as_sudo(cmd=cmd)",
"def do_monitor_redis_check_disable(client, args):\n item = client.redischeck.perform_action(args.id, 'disable')\n utils.print_dict(item)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns whether the current instance is an edge server in cross-silo FL. | def is_edge_server() -> bool:
return Config().args.port is not None | [
"def is_edge_site(self) -> bool:\n return self.config.edge",
"def is_cross(self):\n\n # Cross-zone edge has two neighbour faces from different zones.\n\n faces_count = len(self.Faces)\n\n if faces_count == 1:\n return False\n elif faces_count == 2:\n return self.Faces[0].Zone != self.Faces[1].Zone\n else:\n raise Exception('Edge cannot has {0} neighbours faces.'.format(faces_count))",
"def is_inner(self):\n\n # Inner edge has two faces from one zone.\n\n faces_count = len(self.Faces)\n\n if faces_count == 1:\n return False\n elif faces_count == 2:\n return self.Faces[0].Zone == self.Faces[1].Zone\n else:\n raise Exception('Edge cannot has {0} neighbours faces.'.format(faces_count))",
"def has_edge(self, e):\r\n return e in self.edges",
"def is_connected_to(self, receiver: SkupperSite) -> bool:\n return receiver in self.connected_sites",
"def isEdge(self,x,y):\n\t\treturn y in self._dictOut[x]",
"def isEdge(self, x, y):\n return y in self._dictOut[x]",
"def is_dedicated_node(self):\n return self.is_node() and not self.is_master()",
"def is_border(self):\n\n # Border edge has only one neighbour face.\n return len(self.Faces) == 1",
"def has_edge(self, v1, v2):\n\n return v1 in self.get_reachables(v2[0], v2[1])",
"def is_edge(self):\n if self._row == 0 or self._row == 9 or self._column == 0 or self._column == 9:\n # check that the edge is not actually a corner square\n if not self.is_corner():\n # If not a corner and in a border row return True\n return True\n\n return False",
"def is_peered_with(self, other: SkupperSite) -> bool:\n if not self.cluster.peering:\n return False\n\n for c in self.cluster.peering.connections:\n if (\n isinstance(\n c,\n (\n ClusterPeeringConnectionClusterRequesterV1,\n ClusterPeeringConnectionClusterAccepterV1,\n ),\n )\n ) and c.cluster.name == other.cluster.name:\n return True\n return False",
"def is_edge(self, nid1, nid2):\n return self._G.IsEdge(nid1, nid2)",
"def is_server(self):\n return self.device_type == \"server\" or self.phy.device_type == \"server\"",
"def is_external_edge(self, eid):\n \n for fid in self.mesh.regions(1,eid) :\n if self.mesh.nb_regions(2,fid) == 1 :\n return True\n return False",
"def isClientHost(self):\n return self.serverThread is not None",
"def has_conn_edge(self, n0, n1):\n if self.has_edge(n0, n1):\n return any(\n data[\"type\"] == \"connectivity\"\n for data in self.get_edge_data(n0, n1).values()\n )\n return False",
"def is_adjacent(self, remote_host_name):\n # Check if a topology is defined, otherwise use fully connected\n if self.topology is None:\n return True\n\n if self.name in self.topology:\n if remote_host_name in self.topology[self.name]:\n return True\n else:\n return False\n else:\n logging.warning(\n \"Node {} is not in the specified topology and is therefore \"\n \"assumed to have no neighbors\".format(self.name)\n )\n return False",
"def check_edge(self):\n if self.rect.right >= self.screen.get_rect().right or self.rect.left <= 0:\n return True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns whether the current instance is a central server in cross-silo FL. | def is_central_server() -> bool:
return hasattr(Config().algorithm,
'cross_silo') and Config().args.port is None | [
"def is_server(self):\n return self._role_maker._is_server()",
"def isClientHost(self):\n return self.serverThread is not None",
"def server(self) -> bool:\n return pulumi.get(self, \"server\")",
"def is_server(self):\n return self.device_type == \"server\" or self.phy.device_type == \"server\"",
"def isServer(self):",
"def IsManagedHost():\n try:\n SmartConnect()\n return False\n except Exception as e:\n # connect to local server will be refused when host managed by vCenter\n return True",
"def __is_active_master():\n import params\n return params.hostname == common.get_local_hawq_site_property(\"hawq_master_address_host\")",
"def is_site_local(self):\n return self in self._constants._sitelocal_network",
"def is_connected(self):\n if self.server: return True\n return False",
"def isCluster(self):\n if(self.nb.virtual_chassis):\n return True\n else:\n return False",
"def is_server(self, location):\n return location in self.servers",
"def is_local_client(self):\n return self.msg.is_local_client",
"def isInCluster(self):\n logger.debug(\"Checking if %s is a part of cluster\" % self)\n role = self.getClusterRole()\n return role is not None and role != \"DISABLED\"",
"def is_remote(self):\n pass",
"def is_origin_remote():\n return sync_mode in (SyncMode.RECEIVER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)",
"def _checkTorcsServer(self):\n isRunning = False\n if self.torcsServerProcess is not None:\n if self.torcsServerProcess.poll() is None:\n isRunning = True\n return isRunning",
"def is_master(instance):\n if instance.get('mwn') and \\\n instance.get('wbn') == instance.get('mwn'):\n return True\n return False",
"def local(self):\n return self.hostname == \"localhost\" and self.user is None and self.ssh_args is None",
"def isClientMultiplexingInterface(self):\n adaptation = self.getServerAdaptationFunction()\n if adaptation == None:\n return False # no adaptatation underneath\n else:\n clientcount = adaptation.getClientCount() # max. number of clients; None means unlimited\n return (clientcount != 1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the device to be used for training. | def device() -> str:
import torch
if torch.cuda.is_available() and torch.cuda.device_count() > 0:
if hasattr(Config().trainer,
'parallelized') and Config().trainer.parallelized:
device = 'cuda'
else:
device = 'cuda:' + str(
random.randint(0,
torch.cuda.device_count() - 1))
else:
device = 'cpu'
return device | [
"def device(self) -> torch.device:\n for param in self.parameters():\n return param.device\n return get_device(\"cpu\")",
"def device(self) -> th.device:\n for param in self.parameters():\n return param.device\n return get_device(\"cpu\")",
"def get_device():\n import torch\n\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')",
"def get_device():\n is_device_available = {\n 'cuda': torch.cuda.is_available(),\n 'mlu': is_mlu_available()\n }\n device_list = [k for k, v in is_device_available.items() if v]\n return device_list[0] if len(device_list) == 1 else 'cpu'",
"def device(self):\n return next(self.parameters()).device",
"def get_default_device():\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n return device",
"def device(self) -> torch.device:\n return self._device",
"def get_device(self):\n return self.DEVICE",
"def device(self):\n return self._vars[0].device",
"def device(self)->torch.device:\n return next(iter(self.variables.values())).device",
"def _create_device(self):\n return torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")",
"def device(self):\n return self._tensor.device",
"def device(self):\n\n\t\treturn self._device",
"def get_device(x, device: Optional[Device]=None) ->torch.device:\n if device is not None:\n return make_device(device)\n if torch.is_tensor(x):\n return x.device\n return torch.device('cpu')",
"def get_device(l):\n if not l.device:\n l.device = find_device()\n setup_device(l.device)\n return l.device",
"def finddevice():\n\n return next((device for device in [\"xpu\"] if hasattr(torch, device) and getattr(torch, device).is_available()), None)",
"def get_default_device():\n global _default_device\n\n if _default_device is None:\n import wgpu.backends.rs # noqa\n\n adapter = wgpu.request_adapter(canvas=None, power_preference=\"high-performance\")\n _default_device = adapter.request_device()\n return _default_device",
"def device_from_config(cfg):\n if \"gpu\" in cfg and cfg['gpu'] and torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')",
"def get_default_device():\n return MXNET_DEFAULT_DEVICE"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if the hardware and OS support data parallelism. | def is_parallel() -> bool:
import torch
return hasattr(Config().trainer, 'parallelized') and Config(
).trainer.parallelized and torch.cuda.is_available(
) and torch.distributed.is_available(
) and torch.cuda.device_count() > 1 | [
"def is_available():\n return torch._C.has_openmp",
"def model_parallel_is_initialized():\n if _MODEL_PARALLEL_GROUP is None or _DATA_PARALLEL_GROUP is None:\n return False\n return True",
"def has_external_pooling():\n\n\treturn False",
"def is_multiprocess_available(self) -> bool:\n return self._is_multiprocess_available",
"def can_multitask(self) -> bool:\n return self.__multitask",
"def is_multiprocessing_problematic():\n # Handling numpy linked against accelerate.\n config_info = str([value for key, value in\n np.__config__.__dict__.items()\n if key.endswith(\"_info\")]).lower()\n\n if \"accelerate\" in config_info or \"veclib\" in config_info:\n return True\n elif \"openblas\" in config_info:\n # Most openBLAS can only operate with one thread...\n os.environ[\"OPENBLAS_NUM_THREADS\"] = \"1\"\n else:\n return False",
"def can_use_omp_threads(self, omp_threads):\n return self.cores_per_node >= omp_threads",
"def use_openmp():\n return False\n if qset.has_openmp and os.environ['QUTIP_IN_PARALLEL'] != 'TRUE':\n return True\n else:\n return False",
"def DataAvailable(self) -> bool:",
"def is_system_ready_for_benchmarking():\n\n # check if scaling_governor is set to 'performance' for all cpu cores\n cpu_governors = glob.glob('/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor')\n if not cpu_governors:\n logger.error('no scaling_governor found. Do you run on a Linux System?')\n return False\n for governor in sorted(cpu_governors):\n with open(governor, 'r') as f:\n line = f.read().splitlines()[0]\n logger.debug('%s is set to \\\"%s\\\"', governor, line)\n if line != 'performance':\n logger.warning('please set all scaling_governor to \\\"performance\\\" (using \"sudo ./ondemand.sh start\")')\n return False\n\n return True",
"def is_available():",
"def has_pooling():\n\n\treturn True",
"def check_multiprocessing():\n\n try:\n import multiprocessing\n except ImportError:\n return False\n return True",
"def _workers_available(self) -> bool:\n total_compute_power = sum(self.client.nthreads().values())\n if len(self.futures) < total_compute_power:\n return True\n return False",
"def check_multiprocessing():\n try:\n multiprocessing.Lock()\n except Exception as e:\n raise Exception(\"multiprocessing not functional (shm misconfigured?): {e}\".format(e=e))",
"def hyperthreading(self) -> bool:\n return pulumi.get(self, \"hyperthreading\")",
"def is_xpu_available():\n xpu_count = int(os.getenv(\"FLAGS_selected_xpus\", \"-1\"))\n if xpu_count < 0:\n return False\n\n if _HAS_FLUID:\n from paddle import fluid\n if not fluid.is_compiled_with_xpu():\n logger.warning(\"Found non-empty XPU_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with XPU, which may cause issues. \\\n Thus PARL will not use XPU.\")\n return False\n if _HAS_PADDLE:\n import paddle\n if not paddle.is_compiled_with_xpu():\n logger.warning(\"Found non-empty XPU_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with XPU, which may cause issues. \\\n Thus PARL will not use XPU.\")\n return False\n return True",
"def _zero_param_parallel_is_initialized():\n ###TODO: assert that MPU is not set\n if _ZERO_PARAM_INTRA_PARALLEL_GROUP is None and _DATA_PARALLEL_GROUP is None:\n return False",
"def parallel(self):\n return self._n_workers if self._parallel else False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if number_str can be truncated from both left and right and always remain prime, e.g. 3797 | def is_left_right_truncatable(number_str, prime_str_set):
l = len(number_str)
#left truncatable?
for i in range(l):
if number_str[i:] not in prime_str_set or number_str[:l-i] not in prime_str_set:
return False
return True | [
"def is_truncatable(nb):\n nb = str(nb)\n if is_prime(int(nb)):\n for i in range(1, len(nb)):\n if not is_prime(int(nb[i:])) or not is_prime(int(nb[:len(nb)-i])):\n return False\n return True\n else:\n return False",
"def substring_divisible(number):\n string = str(number)\n for offset in xrange(1, len(string)-2):\n substring = string[offset:offset+3]\n # print '%s / %d' % (substring, PRIMES[offset-1])\n if int(substring) % PRIMES[offset-1]:\n return False\n return True",
"def is_circular_prime(number):\n num_string = str(number)\n for i in range(len(num_string)):\n if not util_euler.is_prime(int(num_string)):\n return False\n num_string = rotate(num_string)\n return True",
"def is_prime(digit):\n if digit < 2:\n return False\n i = 2\n while i <= digit / 2:\n if digit % i == 0:\n return False\n else:\n i += 1\n return True",
"def is_number_palindrome(number, digits, start):\n number = str((number // 10**start) % 10**digits).rjust(digits, \"0\")\n return is_palindrome(number)",
"def check_social_number_key(number: str)-> bool:\n cleaned_number = ''.join(filter(lambda x: x.isdigit(), number))\n if len(cleaned_number) != 15:\n return False\n nir = int(cleaned_number[0:13])\n key = int(cleaned_number[-2:])\n\n return 97 - (nir % 97) == key",
"def is_number_palindrome(number, digits, start):\n number = str((number // 10**start) % 10**digits).zfill(digits)\n return is_palindrome(number)",
"def is_mostl_numeric(token):\n a = len(token)\n for i in range(0, 10):\n token = token.replace(str(i), \"\")\n if len(token) < 0.5*a and len(token) != a:\n return True\n else:\n return False",
"def is_prime(num: int) -> bool:\n pass",
"def palindromic_prime(number):\n return prime(number) and palindrome(number)",
"def is_prime(number: int):\n\n for index in range(2, (number//2) + 1):\n if number%index == 0:\n return False\n return True",
"def check_number(number):\n digits = str(number)\n if len(digits) != 6:\n return False\n\n double = False\n last = '0'\n for digit in digits:\n if digit < last:\n return False\n\n if digit == last:\n double = True\n\n last = digit\n\n return double",
"def _check_prime_number(num):\n res = -1\n max_num = num - 1\n for i in range(2, max_num):\n if num % i == 0:\n res = i\n if i > 32:\n break\n\n return res",
"def undulating(number):\n if number < 100:\n return False\n number = str(number)\n for idx in range(len(number)-2):\n if number[idx] != number[idx+2]:\n return False\n\n return True",
"def is_prime_number(number_):\n flag = 0\n for values in range(2, number_//2):\n if number_ % values == 0:\n flag += 1\n if flag == 1:\n return True\n else:\n return False",
"def is_sciper(sciper):\n if str(sciper).isnumeric():\n sciper = int(sciper)\n if 100000 < sciper < 999999:\n return True\n return False",
"def checkPerfectNumber(self, num: int) -> bool:\n if num <= 0:\n return False\n s = 0\n for i in range(1, int(math.sqrt(num) + 1)):\n if i != num:\n res = num % i\n if res == 0:\n s += i\n divisor = num // i\n if divisor != num:\n s += divisor\n if s > num:\n return False\n return s == num",
"def substringdiv(n):\r\n # Creates holding variable\r\n a = 0\r\n # Loops through substrings start index\r\n for i in range(1, 8):\r\n # Creates substring number\r\n a = int(''.join([n[i], n[i + 1], n[i + 2]]))\r\n # Checks if it divides evenly\r\n if a % PRIMES[i] == 0:\r\n continue\r\n else:\r\n return False\r\n \r\n # Returns true if it passes all tests\r\n return True",
"def is_circular_prime(number):\n if number not in (2, 5):\n if '5' in str(number):\n return False\n for digit in str(number):\n if int(digit) % 2 == 0:\n return False\n # the previous 6 lines make the execution time go from 65 sec to 3 sec\n return all(is_prime(i) for i in rotation(number))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine fixed modifications in case the reference shift is at zero. Does not need localization. | def determine_fixed_mods_zero(aastat_result, data, params_dict):
fix_mod_zero_thresh = params_dict['fix_mod_zero_thresh']
min_fix_mod_pep_count_factor = params_dict['min_fix_mod_pep_count_factor']
fix_mod_dict = {}
reference = utils.mass_format(0)
aa_rel = aastat_result[reference][2]
utils.internal('aa_rel:\n%s', aa_rel)
candidates = aa_rel[aa_rel < fix_mod_zero_thresh].index
logger.debug('Fixed mod candidates: %s', candidates)
for i in candidates:
candidate_label = get_fixed_mod_raw(i, data, params_dict)
if candidate_label != reference:
# number of peptides with `i` at shift `candidate label` must be higher than ...
count_cand = data.peptides(candidate_label).str.contains(i).sum()
# number of peptides with `i` at shift `reference` by a factor of `min_fix_mod_pep_count_factor`
count_ref = data.peptides(reference).str.contains(i).sum()
# peptide count at candidate shift over # of peptides at reference
est_ratio = count_cand / data.ms_stats()[reference][1]
logger.debug('Peptides with %s: ~%d at %s, ~%d at %s. Estimated pct: %f',
i, count_ref, reference, count_cand, candidate_label, est_ratio)
if aastat_result[candidate_label][2][i] > fix_mod_zero_thresh and (
est_ratio * 100 > fix_mod_zero_thresh * min_fix_mod_pep_count_factor):
fix_mod_dict[i] = candidate_label
else:
logger.debug('Could not find %s anywhere. Can\'t fix.', i)
else:
logger.debug('Reference shift is the best for %s.', i)
return fix_mod_dict | [
"def _only_fixed(o, d):\n if d[\"fixed\"]:\n return (\"value\", \"fixed\")\n else:\n return (\"fixed\",)",
"def _fixed_indicies(self):\n fixed_inds = self.constraints == 'fixed'\n return fixed_inds",
"def is_fixed(self):\n return False",
"def shift_detection_conv(signals, marker, range_):\n marker = marker.copy()\n shift_detected = True\n shift_log = []\n while shift_detected:\n error_frames, replacements = error_frame_matches(signals, marker, range_)\n\n all_shifts = np.zeros(len(marker))\n all_shifts[error_frames] = replacements-error_frames\n all_shifts_conv = np.convolve(all_shifts, [1/20]*20, mode=\"same\") #Averaging the shifts to find consistant shifts\n\n shift_detected = np.any(np.abs(all_shifts_conv)>.5)\n if shift_detected: #iF the -.5 threshold is crossed, we insert a \"fake\" frame in the reference and we repeat the operation\n change_idx = np.argmax(np.abs(all_shifts_conv)>.5)\n if all_shifts_conv[change_idx]>.5:#Need to delete frame in reference\n #Need to refine index to make sure we delete a useless frame\n start,stop = max(0,change_idx-2), min(len(marker),change_idx+2)\n for i in range(start,stop):\n if marker[i] not in signals[start:stop]:\n change_idx = i\n break\n shift_log.append([int(change_idx), \"del\"])\n marker = np.concatenate((marker[:change_idx], marker[change_idx+1:], [0]))\n else:#Need to insert frame in reference\n shift_log.append([int(change_idx), \"ins\"])\n #inserting a frame and excluding the last frame to keep the references the same length\n marker = np.insert(marker, change_idx, marker[change_idx], axis=0)[:-1]\n return shift_log",
"def mod_mask(self):\n # Check the *_masq values\n self.__log.debug(\"Checking the *_masq arrays\")\n # Retrieve the kid boxes\n masq_names = np.unique([\"{}_masq\".format(item[1]) for item in self.list_detector])\n self.__check_attributes(masq_names, read_missing=False)\n # Check that they are all the same\n warnings.warn(\"Temporary fix to int8\")\n masqs = [getattr(self, masq).astype(np.int8) for masq in masq_names]\n\n if np.any(np.std(masqs, axis=0) != 0):\n self.__log.error(\"*_masq is varying -- Please check : {}\".format(pprint_list(masq_names, \"_masq\")))\n\n # AB private comm) main_flag should be the bitwise_or of all boxes\n # Well not exactly....\n # cast into 8 bit, is more than enough, only 3 bits used anyway...\n masq = np.bitwise_or.reduce(masqs, axis=0).astype(np.int8)\n\n # AB (#CONCERTO_DAQ January 11 13:02)\n # _flag_balayage_en_cours & _flag_blanking_synthe\n # Ainsi on aura la modulation en bit0 et 1 et le flag blanking en bit\n # AB (#CONCERTO_DAQ February 11 11:07)\n # bit 1 & 2 code the modulation as a signed integer -1 0 1 : 11 00 01 ie 3 0 1\n # bit 3 is a blanking bit, which does not exist for KISS, but should not be taken into account for CONCERTO\n\n # Thus as a temporary fix, let's clear the 3rd bit, actually a bad idea...\n # self.__log.warning(\"Temporary fix : clearing the 3rd bit of masq\")\n # masq = masq & ~(1 << 2)\n\n return masq",
"def correct_difc_to_default(idf_difc_vec, cal_difc_vec, cal_table, row_shift, difc_tol, difc_col_index, mask_ws):\n # difference between IDF and calibrated DIFC\n difc_diff_vec = idf_difc_vec - cal_difc_vec\n\n # go over all the DIFCs\n num_corrected = 0\n message = ''\n for index in range(len(difc_diff_vec)):\n if abs(difc_diff_vec[index]) > difc_tol:\n cal_table.setCell(index + row_shift, difc_col_index, idf_difc_vec[index])\n if mask_ws.readY(index + row_shift)[0] < 0.5:\n mask_sig = 'No Mask'\n num_corrected += 1\n else:\n mask_sig = 'Masked'\n message += '{0}: ws-index = {1}, diff = {2}... {3}\\n' \\\n ''.format(index, index + row_shift, difc_diff_vec[index], mask_sig)\n # END-IF\n # END-FOR\n print(message)\n print('Number of corrected DIFC = {0}'.format(num_corrected))\n\n return",
"def test_correct_backward_order1(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 1, \"backward\")\r\n assert np.allclose(coeffs, [1, -1])\r\n assert np.allclose(shifts, [0, -1])",
"def _set_fixed(o, d):\n if d:\n o.fix()\n else:\n o.unfix()",
"def _computeDeltaShifts(sdict,ref_file): \n # Determine pointing for reference image\n # Open reference image\n fref = pyfits.open(ref_file)\n # Get pointing position from SCI extension header\n ref_hdr = fref['SCI'].header\n ref_pos = (ref_hdr['CRVAL1'],ref_hdr['CRVAL2'])\n ref_cd = (ref_hdr['CD1_1'],ref_hdr['CD1_2'])\n # Close and delete reference image object\n fref.close()\n del fref\n pscale = pow((pow(float(ref_cd[0]),2)+pow(float(ref_cd[1]),2)),0.5) * 3600.\n\n for img in sdict.keys():\n # Open science image\n fimg = pyfits.open(img)\n scihdr = fimg['SCI'].header\n # Extract commanded position from image header \n img_pos = (scihdr['CRVAL1'],scihdr['CRVAL2'])\n # Done with image: close and delete FITS object\n fimg.close()\n del fimg\n\n # Compute commanded shift here: image minus reference\n # This has to be in units of pixels\n pos = (Numeric.array(img_pos) - Numeric.array(ref_pos))/pscale\n pos = pos * 3600. * Numeric.cos(ref_pos[1]) * Numeric.array([-1.0,1.0])\n\n # Compute delta offset: total minus commanded\n delta_pos = Numeric.array(sdict[img]) - pos\n \n # Replace total shift with delta in shift dictionary \n sdict[img] = tuple(delta_pos)",
"def test_explicit_fixed_effects_without_mask(tmp_path):\n shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3\n _, fmri_data, design_matrices =\\\n write_fake_fmri_data_and_design(shapes, rk, file_path=tmp_path)\n contrast = np.eye(rk)[1]\n\n # session 1\n multi_session_model = FirstLevelModel().fit(\n fmri_data[0], design_matrices=design_matrices[:1])\n dic1 = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n # session 2\n multi_session_model.fit(\n fmri_data[1], design_matrices=design_matrices[1:])\n dic2 = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n # fixed effects model\n multi_session_model.fit(\n fmri_data, design_matrices=design_matrices)\n fixed_fx_dic = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n contrasts = [dic1['effect_size'], dic2['effect_size']]\n variance = [dic1['effect_variance'], dic2['effect_variance']]\n\n # test without mask variable\n (\n fixed_fx_contrast,\n fixed_fx_variance,\n fixed_fx_stat,\n ) = compute_fixed_effects(contrasts, variance)\n assert_almost_equal(\n get_data(fixed_fx_contrast),\n get_data(fixed_fx_dic['effect_size']))\n assert_almost_equal(\n get_data(fixed_fx_variance),\n get_data(fixed_fx_dic['effect_variance']))\n assert_almost_equal(\n get_data(fixed_fx_stat), get_data(fixed_fx_dic['stat']))",
"def fixed_pattern_correction(image, black_reference):\n corrected_image = image - black_reference\n # correctedImage[correctedImage < 0] = 0\n # This has been removed because I was improperly enforcing a cutoff value for noise.\n # Per Winfried, negative values are acceptable in background corrected images\n return corrected_image",
"def test_fix_mask(self):\n fixable_mask = mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_fixable_mask.map'))\n self.assertFalse(fixable_mask.is_mask)\n fixable_mask.fix_mask()\n self.assertTrue(fixable_mask.is_mask)",
"def test_correct_center_order2(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 2, \"center\")\r\n assert np.allclose(coeffs, [-0.5, 0.5])\r\n assert np.allclose(shifts, [-1, 1])",
"def check_fixedblock(self):\n print('This will read the fixed block then display changes as they')\n print('occur. Typically the most common change is the incrementing')\n print('of the data pointer, which happens whenever readings are saved')\n print('to the station memory. For example, if the logging interval')\n print('is set to 5 minutes, the fixed block should change at least')\n print('every 5 minutes.')\n raw_fixed = self.station.get_raw_fixed_block()\n while True:\n new_fixed = self.station.get_raw_fixed_block(unbuffered=True)\n for ptr in range(len(new_fixed)):\n if new_fixed[ptr] != raw_fixed[ptr]:\n print(datetime.datetime.now().strftime('%H:%M:%S'), end=' ')\n print(' %04x (%d) %02x -> %02x' % (\n ptr, ptr, raw_fixed[ptr], new_fixed[ptr]))\n raw_fixed = new_fixed\n time.sleep(0.5)",
"def test_fixed_ref_behind(qisrc_action, git_server, record_messages):\n git_server.create_repo(\"foo.git\")\n git_server.push_file(\"foo.git\", \"a.txt\", \"a\")\n git_server.push_tag(\"foo.git\", \"v0.1\")\n git_server.set_fixed_ref(\"foo.git\", \"v0.1\")\n qisrc_action(\"init\", git_server.manifest_url)\n git_worktree = TestGitWorkTree()\n foo_proj = git_worktree.get_git_project(\"foo\")\n git = qisrc.git.Git(foo_proj.path)\n git.call(\"reset\", \"--hard\", \"HEAD~1\")\n qisrc_action(\"status\")\n assert record_messages.find(\"fixed ref v0.1 -1\")",
"def test_shift_ruptures_no_shift(midday):\n shift_mask, shift_amounts = time.shifts_ruptures(\n midday, midday\n )\n assert not shift_mask.any()\n assert_series_equal(\n shift_amounts,\n pd.Series(0, index=midday.index, dtype='int64'),\n check_names=False\n )",
"def fixed(self):\n return self._level <= MASS_LEVEL_LABEL",
"def check_and_fix(self, changed_i, changed_j, modify_list, current_score):\r\n\r\n # check and fix author limits\r\n changed_positions = set()\r\n for i in range(len(self.working_point)):\r\n if self.author_buffer[i] <= self.author_limit_list[i]:\r\n continue\r\n for j in range(len(self.working_point[i])):\r\n if self.working_point[i][j] and ((i != changed_i or j != changed_j) or j == len(self.working_point[i])-1):\r\n\r\n # print(f\"---------- Fixing point ({i}, {j})\") # Debug\r\n\r\n if modify_list:\r\n self.working_point[i][j] = False\r\n else:\r\n changed_positions.add((i, j))\r\n current_score -= self.entry_matrix[i][j].score\r\n self.author_buffer[i] -= self.entry_matrix[i][j].contribution\r\n self.university_buffer -= self.entry_matrix[i][j].contribution\r\n if self.author_buffer[i] <= self.author_limit_list[i]:\r\n break\r\n\r\n # check and fix university limit\r\n if self.university_buffer <= self.university_limit:\r\n return current_score\r\n\r\n current_lookups = [0] * len(self.working_point)\r\n while self.university_buffer > self.university_limit:\r\n lowest_gain = None\r\n index_to_remove = None\r\n\r\n # find current lowest unit gain to remove\r\n for i in range(len(current_lookups)):\r\n # search for the first valid True value until the end of list\r\n valid = False\r\n while not valid:\r\n valid = True\r\n # reached end of list\r\n if current_lookups[i] == -1:\r\n break\r\n # entry is false\r\n if not self.working_point[i][current_lookups[i]]:\r\n valid = False\r\n self.increment_lookup_index(current_lookups, i)\r\n # avoid fixing changed (i, j) point\r\n elif changed_i == i and changed_j == current_lookups[i]:\r\n valid = False\r\n self.increment_lookup_index(current_lookups, i)\r\n # if this is a simulation check if entry has already been changed, if so - skip it\r\n elif not modify_list and (i, current_lookups[i]) in changed_positions:\r\n valid = False\r\n self.increment_lookup_index(current_lookups, i)\r\n\r\n # check if lookup index i reached end of list\r\n if current_lookups[i] == -1:\r\n continue\r\n if lowest_gain is None or lowest_gain > self.entry_matrix[i][current_lookups[i]].unit_gain:\r\n lowest_gain = self.entry_matrix[i][current_lookups[i]].unit_gain\r\n index_to_remove = i\r\n\r\n # remove current lowest unit gain\r\n if modify_list:\r\n self.working_point[index_to_remove][current_lookups[index_to_remove]] = False\r\n current_score -= self.entry_matrix[index_to_remove][current_lookups[index_to_remove]].score\r\n self.author_buffer[index_to_remove] -= self.entry_matrix[index_to_remove][current_lookups[index_to_remove]].contribution\r\n self.university_buffer -= self.entry_matrix[index_to_remove][current_lookups[index_to_remove]].contribution\r\n self.increment_lookup_index(current_lookups, index_to_remove)\r\n\r\n return current_score",
"def is_fixed(self):\n\t\tif self.current_coordinate.latitude != 0.0 or self.current_coordinate.longitude != 0.0:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r"""Compute the Einstein radius for a given isotropic velocity dispersion assuming a singular isothermal sphere (SIS) mass profile | def approximate_theta_E_for_SIS(vel_disp_iso, z_lens, z_src, cosmo):
lens_cosmo = LensCosmo(z_lens, z_src, cosmo=cosmo)
theta_E_SIS = lens_cosmo.sis_sigma_v2theta_E(vel_disp_iso)
return theta_E_SIS | [
"def Wigner_Seitz_radius(n: u.m**-3) -> u.m:\n return (3 / (4 * np.pi * n)) ** (1 / 3)",
"def _calculate_residual_sphere(parameters, x_values, y_values, z_values):\n #extract the parameters\n x_centre, y_centre, z_centre, radius = parameters\n\n #use numpy's sqrt function here, which works by element on arrays\n distance_from_centre = numpy.sqrt((x_values - x_centre)**2 +\n (y_values - y_centre)**2 +\n (z_values - z_centre)**2)\n\n return distance_from_centre - radius",
"def sphere_inertia(mass, radius):\n inertia = (2.0 / 5.0) * (radius ** 2) * mass * np.eye(3)\n return inertia",
"def virial_radius(particles):\n if len(particles) < 2:\n raise exceptions.AmuseException(\"Cannot calculate virial radius for a particles set with fewer than 2 particles.\")\n partial_sum = zero\n\n mass = particles.mass\n x_vector = particles.x\n y_vector = particles.y\n z_vector = particles.z\n\n for i in range(len(particles) - 1):\n x = x_vector[i]\n y = y_vector[i]\n z = z_vector[i]\n dx = x - x_vector[i+1:]\n dy = y - y_vector[i+1:]\n dz = z - z_vector[i+1:]\n dr_squared = (dx * dx) + (dy * dy) + (dz * dz)\n dr = (dr_squared).sqrt()\n m_m = mass[i] * mass[i+1:]\n partial_sum += (m_m / dr).sum()\n return (mass.sum()**2) / (2 * partial_sum)",
"def getSphereRadius(self):\n return 1.5",
"def herrmann_radius(self, e_kin):\n v = electron_velocity(e_kin)\n s1 = M_E * self._cur / (PI * EPS_0 * Q_E * v * self._b_d**2)\n s2 = 8 * K_B * self._t_c * M_E * self._r_c**2 / (Q_E**2 * self._b_d**2)\n s3 = self._b_c**2 * self._r_c**4 / (self._b_d**2)\n return np.sqrt(s1 + np.sqrt(s1**2 + s2 + s3))",
"def ellipse_ellipticity(S):\n return 1/2 * np.arcsin(S[..., 3]/S[..., 0])",
"def prime_vertical_radius(self, sinlat):\n return self.semimajor_axis / np.sqrt(\n 1 - self.first_eccentricity**2 * sinlat**2\n )",
"def asphericity(Rnm_eg):\n num = (Rnm_eg[0] - Rnm_eg[2])**2 + (Rnm_eg[1] - Rnm_eg[2])**2 + (Rnm_eg[0] - Rnm_eg[1])**2\n dem = 2*(Rnm_eg[0] + Rnm_eg[1] + Rnm_eg[2])**2\n Asphere = num/dem\n return Asphere",
"def velocity_dispersion_from(\r\n self, redshift_0: float, redshift_1: float, einstein_radius: float\r\n ) -> float:\r\n const = constants.c.to(\"kpc / s\")\r\n\r\n angular_diameter_distance_to_redshift_0_kpc = (\r\n self.angular_diameter_distance_to_earth_in_kpc_from(redshift=redshift_1)\r\n )\r\n\r\n angular_diameter_distance_to_redshift_1_kpc = (\r\n self.angular_diameter_distance_to_earth_in_kpc_from(redshift=redshift_1)\r\n )\r\n\r\n angular_diameter_distance_between_redshifts_kpc = (\r\n self.angular_diameter_distance_between_redshifts_in_kpc_from(\r\n redshift_0=redshift_0, redshift_1=redshift_1\r\n )\r\n )\r\n\r\n kpc_per_arcsec = self.kpc_per_arcsec_from(redshift=redshift_0)\r\n\r\n einstein_radius_kpc = einstein_radius * kpc_per_arcsec\r\n\r\n velocity_dispersion_kpc = const * np.sqrt(\r\n (einstein_radius_kpc * angular_diameter_distance_to_redshift_1_kpc)\r\n / (\r\n 4\r\n * np.pi\r\n * angular_diameter_distance_to_redshift_0_kpc\r\n * angular_diameter_distance_between_redshifts_kpc\r\n )\r\n )\r\n\r\n return velocity_dispersion_kpc.to(\"km/s\").value",
"def _update_inhibition_radius(self):\r\n\t\t\r\n\t\tself.inhibition_radius = max(bn.nansum(self.syn_dist * self.syn_c) /\r\n\t\t\tmax(bn.nansum(self.syn_c), 1), 1)",
"def neighborhood_influence_radius(self, in_iteration_cycle):\r\n neighbor_influence_radius = self._initial_neighborhood_influence_radius * np.exp(-in_iteration_cycle/self._neighborhood_radius_scale)\r\n return neighbor_influence_radius",
"def rirjsisj(self):\n \n flattened_spin_arr = self.lattice.flatten()\n sisj_arr = np.outer(flattened_spin_arr, flattened_spin_arr)\n rirjsisj_arr = self.r2s * sisj_arr\n rirjsisj_sum = np.sum(rirjsisj_arr)\n \n return rirjsisj_sum",
"def _r_soma_calc(xmid, ymid, zmid, x, y, z):\n r_soma = np.sqrt((x - xmid)**2 + (y - ymid)**2 + (z - zmid)**2)\n return r_soma",
"def calc_length_distortion_on_ellipsoid(self, lon, lat):\n\n # get the subgrid\n sg, _, _ = self.lonlat2xy(lon, lat)\n\n lon0 = self.subgrids[str(sg)].core.projection.osr_spref.GetProjParm('central_meridian')\n lat0 = self.subgrids[str(sg)].core.projection.osr_spref.GetProjParm('latitude_of_origin')\n\n # get spherical distance and azimuth between projection centre and point of interest\n geod = Geodesic.WGS84\n gi = geod.Inverse(lat0, lon0, lat, lon)\n c1 = gi['s12']\n az1 = gi['azi1']\n\n # apply equation for distortion in direction perpendicular to the radius, k:\n # k = c/geod.a / np.sin(c/geod.a)\n k = c1 / geod.a / np.sin(c1 / geod.a)\n\n return k",
"def electron_velocity(e_kin):\n return C_L * np.sqrt(1 - (M_E_EV / (M_E_EV + e_kin))**2)",
"def earth_moon_orbit(vxei,vyei,xei,yei,vxmi,vymi,xmi,ymi, time,steps):\n #position and velocity of earth\n vxe = [0]*(steps+1)\n vye = [0]*(steps+1)\n xe = [0]*(steps+1)\n ye = [0]*(steps+1)\n vxe[0] = vxei\n vye[0] = vyei\n xe[0] = xei\n ye[0] = yei\n re = [0]*(steps+1)\n ve = [0]*(steps+1)\n\n #position and velocity of moon\n vxm = [0]*(steps+1)\n vym = [0]*(steps+1)\n xm = [0]*(steps+1)\n ym = [0]*(steps+1)\n vxm[0] = vxmi\n vym[0] = vymi\n xm[0] = xmi\n ym[0] = ymi\n rm = [0]*(steps+1)\n vm = [0]*(steps+1)\n \n tstep=(time/steps)\n rme=[0]*(steps+1)\n\n #Euler-Cromer Method of integration\n for i in range(steps):\n #distance of earth to sun, moon to sun, and earth to moon at each timestep\n re[i] = sqrt(xe[i]**2+ye[i]**2)\n rm[i] = sqrt(xm[i]**2+ym[i]**2)\n rme[i] = sqrt((xe[i]-xm[i])**2+(ye[i]-ym[i])**2)\n\n #total velocity of Earth and Moon\n ve[i] = sqrt(vxe[i]**2+vye[i]**2)\n vm[i] = sqrt(vye[i]**2+vye[i]**2)\n\n #components of velocity and postion of earth at each timestep\n vxe[i+1] = vxe[i]+Aex(xe[i],xm[i],re[i],rme[i])*tstep\n xe[i+1] = xe[i]+vxe[i+1]*tstep\n vye[i+1] = vye[i]+Aey(ye[i],ym[i],re[i],rme[i])*tstep\n ye[i+1] = ye[i]+vye[i+1]*tstep\n \n #velocity and position of moon\n vxm[i+1] = vxm[i]+Amx(xe[i],xm[i],rm[i],rme[i])*tstep\n xm[i+1] = xm[i]+vxm[i+1]*tstep\n vym[i+1] = vym[i]+Amy(ye[i],ym[i],rm[i],rme[i])*tstep\n ym[i+1] = ym[i]+vym[i+1]*tstep\n \n if re[i]<(1/214):\n #the the radius is smaller than the radius of the sun\n return \"The Earth crashed into the Sun!\"\n\n #Eccentricities of Earth around the Sun and Moon around the Earth\n em=(max(rme)-min(rme[:-1]))/(max(rme)+min(rme[:-1]))\n ee=(max(re)-min(re[:-1]))/(max(re)+min(re[:-1]))\n print(\"Moon\",em,max(rme),min(rme[:-1]))\n print(\"Earth\",ee,max(re),min(re[:-1]))\n\n #Path of Earth and Moon around the Sun\n plot(xe,ye,\"b\")\n plot(xm,ym,\"r\")\n xlabel(\"x\")\n ylabel(\"y\")\n title(\"Earth and Moon path around the Sun\")\n legend((\"Earth\",\"Moon\"))\n xlim(.5,.7)\n ylim(.7,.9)\n show()\n\n #position of Earth and Moon around the Sun at each time interval\n Xe = [0]*(steps+1)\n Ye = [0]*(steps+1)\n Xm = [0]*(steps+1)\n Ym = [0]*(steps+1)\n for i in range(steps+1): #1/20 of calculate position values are plotted\n if i%20==0:\n Xe[i] = xe[i]\n Ye[i] = ye[i]\n Xm[i] = xm[i]\n Ym[i] = ym[i] \n plot(Xe,Ye,\"bo\")\n plot(Xm,Ym,\"ro\")\n xlabel(\"x\")\n ylabel(\"y\")\n xlim(.95,1.025)\n ylim(0,.3)\n legend((\"Earth\",\"Moon\"))\n title(\"Position of Earth and Moon around Sun\")\n show()\n \n \n #distance between the earth and moon as a function of time\n t=linspace(0,1,steps+1)\n plot(t,rme)\n ylim(.002,.003)\n xlabel(\"Time (yrs)\")\n ylabel(\"Distance (AU)\")\n title(\"Distance between the earth and moon as a function of time\")\n show()\n \n #orbit distance of earth and moon around the sun as a function of time\n plot(t,re,\"b\")\n plot(t,rm,\"r\")\n ylim(.97,1.03)\n xlabel(\"Time (yrs)\")\n ylabel(\"Distance (AU)\")\n legend((\"Earth\",\"Moon\"))\n title(\"Orbit distance of Earth and Moon around the Sun as a function of time\")\n show()",
"def normal_gravity(self, latitude, height, si_units=False):\n # Warn if height is negative\n if np.any(height < 0):\n warn(\n \"Formulas used are valid for points outside the ellipsoid.\"\n \"Height must be greater than or equal to zero.\"\n )\n\n # Pre-compute to avoid repeated calculations\n sinlat = np.sin(np.radians(latitude))\n coslat = np.sqrt(1 - sinlat**2)\n\n # The terms below follow the variable names from Li and Goetze (2001).\n # The prime terms (*_p) refer to quantities on an ellipsoid passing\n # through the computation point.\n\n # The reduced latitude of the projection of the point on the ellipsoid\n beta = np.arctan2(self.semiminor_axis * sinlat, self.semimajor_axis * coslat)\n sinbeta = np.sin(beta)\n cosbeta = np.sqrt(1 - sinbeta**2)\n\n # Distance between the computation point and the equatorial plane\n z_p2 = (self.semiminor_axis * sinbeta + height * sinlat) ** 2\n # Distance between the computation point and the spin axis\n r_p2 = (self.semimajor_axis * cosbeta + height * coslat) ** 2\n\n # Auxiliary variables\n big_d = (r_p2 - z_p2) / self.linear_eccentricity**2\n big_r = (r_p2 + z_p2) / self.linear_eccentricity**2\n\n # Reduced latitude of the computation point\n cosbeta_p2 = 0.5 + big_r / 2 - np.sqrt(0.25 + big_r**2 / 4 - big_d / 2)\n sinbeta_p2 = 1 - cosbeta_p2\n\n # Auxiliary variables\n b_p = np.sqrt(r_p2 + z_p2 - self.linear_eccentricity**2 * cosbeta_p2)\n q_0 = 0.5 * (\n (1 + 3 * (self.semiminor_axis / self.linear_eccentricity) ** 2)\n * np.arctan2(self.linear_eccentricity, self.semiminor_axis)\n - 3 * self.semiminor_axis / self.linear_eccentricity\n )\n q_p = (\n 3\n * (1 + (b_p / self.linear_eccentricity) ** 2)\n * (\n 1\n - b_p\n / self.linear_eccentricity\n * np.arctan2(self.linear_eccentricity, b_p)\n )\n - 1\n )\n big_w = np.sqrt(\n (b_p**2 + self.linear_eccentricity**2 * sinbeta_p2)\n / (b_p**2 + self.linear_eccentricity**2)\n )\n\n # Put together gamma using 3 separate terms\n term1 = self.geocentric_grav_const / (b_p**2 + self.linear_eccentricity**2)\n term2 = (0.5 * sinbeta_p2 - 1 / 6) * (\n self.semimajor_axis**2\n * self.linear_eccentricity\n * q_p\n * self.angular_velocity**2\n / ((b_p**2 + self.linear_eccentricity**2) * q_0)\n )\n term3 = -cosbeta_p2 * b_p * self.angular_velocity**2\n gamma = (term1 + term2 + term3) / big_w\n\n # Convert gamma from SI to mGal\n if not si_units:\n gamma *= 1e5\n\n return gamma",
"def radius(xpix,ypix,xcenter,ycenter):\n rad = np.sqrt((xpix-xcenter)**2+(ypix-ycenter)**2)\n \n return rad"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the slope and intercept values fit on a sample of ETGs Note The slope and intercept were read off from Fig 7 of [1]_. Values binned by magnitudes are available in [2]_. References | def _define_ETG_fit_params(self):
self.slope = 2.0
self.intercept = 5.8 | [
"def setSlopeAndIntercept(self, slope, intercept):\n\t\tself.setSlope(slope)\n\t\tself.setIntercept(intercept)\n\t\tself.updatePreview()",
"def set_fit_intercept(self, new_fit_intercept=True):\n self.fit_intercept = new_fit_intercept",
"def setSlope(self, slope):\n\t\tself.slope = slope",
"def fit_slopes_intercepts(slopes, intercepts, stds, waves, norm):\n # define a mask for the good data\n mask = ~np.isnan(slopes)\n short_wave_mask = waves < 4.1\n\n # fit the intercepts with a power law\n fit_lev = fitting.LevMarLSQFitter()\n powerlaw = models.PowerLaw1D(fixed={\"x_0\": True})\n fit_intercepts = fit_lev(powerlaw, waves[mask], intercepts[mask])\n\n # define the anchor points for the spline interpolation\n # divide the data into 25 bins with the same number of data points in every bin\n alloc, bin_edges = pd.qcut(waves[mask * short_wave_mask], q=25, retbins=True)\n # calculate the median wavelength, slope and standard deviation in every bin\n meds, edges, indices = stats.binned_statistic(\n waves[mask * short_wave_mask],\n (\n waves[mask * short_wave_mask],\n slopes[mask * short_wave_mask],\n stds[mask * short_wave_mask],\n ),\n statistic=\"median\",\n bins=bin_edges,\n )\n\n # use the median values as the anchor points for the spline interpolation\n spline_wave = meds[0][~np.isnan(meds[0])]\n spline_slope = meds[1][~np.isnan(meds[1])]\n spline_std = meds[2][~np.isnan(meds[2])]\n\n # interpolate the slopes with a spline function\n fit_slopes = interpolate.splrep(spline_wave, spline_slope)\n\n # interpolate the standard deviations with a spline function\n fit_stds = interpolate.splrep(spline_wave, spline_std)\n\n # create tables with the fitting results at certain wavelengths\n table_waves = np.arange(0.8, 4.05, 0.05)\n table_inv_rv_dep(\n table_path, table_waves, fit_slopes, fit_intercepts, fit_stds, norm\n )\n\n # create a table with the anchor points of the spline interpolation\n table_spline(table_path, spline_wave, spline_slope, spline_std, norm)\n\n return spline_wave, spline_slope, spline_std, fit_slopes, fit_intercepts, fit_stds",
"def hf_ch4_slope_fit(yrs, a, b, c, t0):\n return a * np.exp(b*(yrs - t0)) + c",
"def test_slope(slope,div = 10):\n plt.figure(figsize=(20,10))\n step, lz = np.loadtxt(slope, float, unpack = True)\n step_len = len(step)\n print(step_len)\n plt.plot(step,lz)\n inv = int(step_len/div)\n max = -1\n min = 0\n prev = 0.1\n found_start = False\n for i in range(len(step)):\n\n if i%inv == 0:\n tr_step = step[i:i+inv]\n tr_lz = lz[i:i+inv]\n slope, intc, r, p, err = linregress(tr_step, tr_lz)\n plt.plot(tr_step,tr_step*slope + intc)\n if not found_start:\n if slope < 0:\n min = i - inv\n found_start = True\n\n if abs(slope/prev) < 0.3 and i > 500:\n plt.text(tr_step[-1],tr_lz[-1],slope/prev)\n print(step[i],i)\n max = i - inv\n #break\n\n prev = slope\n print(max)\n print(min)\n\n slope, intcpt, r, p, std_srr = linregress(step[min:max], lz[min:max])\n plt.plot(step[min:max], step[min:max] * slope + intcpt)\n\n plt.show()",
"def fit_line_int(model,scaling,intercept):\n\treturn scaling*model + intercept",
"def fitting_function(wet_bulb_temps):\n return linregress(\n heights[start_point:end_point],\n wet_bulb_temps[start_point:end_point])",
"def set_slope(self, slope: float) -> None:\r\n self.slope = slope",
"def _linear_regression(self):\n self.slope, self.intercept, _, _, _ = stats.linregress(\n self.data_array, range(len(self.data_array)))",
"def Fit(self,Source,X=None,function='line',Guess=numpy.ones(10),Show_Residuals=True,StartP=None,EndP=None,StartX=None,EndX=None,Show_Guess=False,Figure=None,Rendering=True,RAW_Data=True,Color='r',Show_Parameters=True,Number_of_points=1000.,Bounds=[[None,None]]*10,Model=False):\n \n def find_indexes(Source,Min,Max):\n \tc=abs(numpy.array(Source)-Min)\n \tMin_index=list(c).index(min(c))\n \tc=abs(numpy.array(Source)-Max)\n \tMax_index=list(c).index(min(c))\n \treturn Min_index,Max_index \n\n begin=X[0] #original begin and end points are stored here, so the final plot will be displayed in the original workspace\n end=X[-1] \n if (StartP == None) and (StartX == None) :\n StartP=0\n StartX=0\n if (EndP == None) and (EndX == None) == None:\n EndP=len(Source)-1 \n EndX=len(Source)-1 \n \n if (StartP == None) or (EndP == None) :\n StartP,EndP=find_indexes(X,StartX,EndX) \n else:\n StartX=X[StartP]\n EndX=X[EndP]\n\n\n Source=numpy.array(Source[StartP:EndP]) #if not using the whole range for the fit, truncated wave is created here\n X=numpy.array(X[StartP:EndP]) #if not using the whole range for the fit, truncated timescale is created here\n\n\n \t# Data and Xscale\n Source = numpy.array(Source)\n if X==None or (len(Source) != len(X)):\n X = numpy.array(range(len(Source)))\n # create a set of Parameters\n\n Function=eval(\"self.\"+function) #the fitting function\n Formula=eval(\"self.\"+function.capitalize())[0] #the fitting formula\n Parameters_Names=eval(\"self.\"+function.capitalize())[1] ##list of the fitting parameters \n \n\n\n\n for i,j in enumerate(Bounds):\n if Bounds[i][0] != None:\n Bounds[i][0]=numpy.float32(Bounds[i][0])\n if Bounds[i][1] != None:\n Bounds[i][1]=numpy.float32(Bounds[i][1])\n \n #print Bounds\n \n p0 = Parameters()\n\n \n # do fit, here with leastsq model\n if Model == False:\n \n for i,j in enumerate(Parameters_Names):\n #For each paramters, you can set value, min & max. i.e. 
p0.add('omega', value= 0.0, min=-numpy.pi/2., max=numpy.pi/2)\n if j != '0':\n if Bounds[i][0] != None and Bounds[i][1]!=None:\n p0.add(j,value=Guess[i],min=Bounds[i][0],max=Bounds[i][1])\n elif Bounds[i][0] !=None and Bounds[i][1]==None:\n p0.add(j,value=Guess[i],min=Bounds[i][0]) \n elif Bounds[i][0] == None and Bounds[i][1] != None:\n p0.add(j,value=Guess[i],max=Bounds[i][1])\n else:\n p0.add(j,value=Guess[i]) \n print 'Fitting in process ...'\n try:\n result = minimize(Function, p0, args=(X, Source))\n RenderingX=numpy.linspace(float(X[0]),float(X[-1]),num=Number_of_points)\n fit = Function(result.params,RenderingX) #Rendering, with Popt, the best fitting parameters\n except:\n print 'Fitting failed, try other parameters or constraints'\n return\n print 'Fitting performed between points ',StartP,' and ',EndP, ' (',len(X),' points)'\n print 'in units between: ',StartX,' and ',EndX\n print '######### FITTING RESULTS ############'\n print 'Parameters are :' \n res=[]\n for i in list(result.params):\n print i, result.params[i].value \n res.append(result.params[i].value)\n \n elif Model== True:\n for i,j in enumerate(Parameters_Names):\n if j != '0':\n p0.add(j,value=Guess[i]) \n RenderingX=numpy.linspace(float(X[0]),float(X[-1]),num=Number_of_points)\n fit = Function(p0,RenderingX) #Rendering, with Popt, the best fitting parameters \n\n# if Show_Parameters == True:\n# for i in range(len(popt)):\n# if List[i][0] != '0':\n# try:\n# pyplot.text(begin+((end-begin)/10), max(fit)-(i+1)*abs((max(fit)-min(fit))/(10)), r\"%s = {%s} +/- {%s}\" % ((str(List[i][0])),str(float(popt[i])),str(float((pcov[i][i])**0.5))))# .format(popt[i], (pcov[i][i])**0.5))\n# except:\n# pyplot.text(begin+((end-begin)/10), max(fit)-(i+1)*abs((max(fit)-min(fit))/(10)),'test')\n\n #if Show_Error_Bars == True: to do\n #pyplot.errorbar(X, Source, yerr = y_sigma, fmt = 'o') \n\n \n# if Show_Guess == True:\n# guess=Function(X,*p0)\n# G,=pyplot.plot(X, guess, label='Test data',marker='o',color='g') \n# if RAW_Data == True:\n# pyplot.legend([S, G, F], [\"Data\", \"initial guess\", \"Fit\"], loc='best',fancybox=True)\n# else:\n# pyplot.legend([G, F], [\"initial guess\", \"Fit\"], loc='best',fancybox=True)\n# else:\n# if RAW_Data == True:\n# pyplot.legend([S, F], [\"Data\", \"Fit\"], loc='best',fancybox=True)\n# else:\n# pyplot.legend([F], [\"Fit\"], loc='best',fancybox=True)\n\n if Rendering == True:\n pyplot.rc('axes',fc='white')\n if Figure == None: #Creation of the figure. If Figure is not None, the fit is integrated in pre-existing figure\n fig=pyplot.figure(facecolor='white')\n else:\n fig = Figure \n \n pyplot.title('Fitting completed') \n if RAW_Data == True:\n S,=pyplot.plot(X, Source,color='b')\n \n try:\n if Show_Residuals == True:\n final = Source + result.residual\n pyplot.plot(X, final, 'r')\n except UnboundLocalError: #Should be only in the Plugin, at first launch.\n print 'No results'\n \n F,=pyplot.plot(RenderingX, fit, linestyle='--',color=Color)\n \n pyplot.xlim([begin,end])\n pyplot.grid(True) \n pyplot.show()\n return fit,res,fig\n else:\n return fit",
"def test_fit_intercept_and_copy(coefs, intercept, model):\n X, y = _create_dataset(coefs, intercept, noise=2.0)\n regressor = model(fit_intercept=False, copy_X=False)\n regressor.fit(X, y)\n\n assert regressor.intercept_ == 0.0",
"def demo2():\n\n x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])\n y1 = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])\n y2 = np.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])\n y3 = np.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])\n x4 = np.array([8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8])\n y4 = np.array([6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89])\n\n\n def fit(x):\n return 3 + 0.5*x\n\n\n xfit = np.array([np.amin(x), np.amax(x)])\n\n plt.subplot(221)\n plt.plot(x, y1, 'ks', xfit, fit(xfit), 'r-', lw=2)\n plt.axis([2, 20, 2, 14])\n plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))\n plt.text(3, 12, 'I', fontsize=20)\n\n plt.subplot(222)\n plt.plot(x, y2, 'ks', xfit, fit(xfit), 'r-', lw=2)\n plt.axis([2, 20, 2, 14])\n plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), yticklabels=[], xticks=(0, 10, 20))\n plt.text(3, 12, 'II', fontsize=20)\n\n plt.subplot(223)\n plt.plot(x, y3, 'ks', xfit, fit(xfit), 'r-', lw=2)\n plt.axis([2, 20, 2, 14])\n plt.text(3, 12, 'III', fontsize=20)\n plt.setp(plt.gca(), yticks=(4, 8, 12), xticks=(0, 10, 20))\n\n plt.subplot(224)\n\n xfit = np.array([np.amin(x4), np.amax(x4)])\n plt.plot(x4, y4, 'ks', xfit, fit(xfit), 'r-', lw=2)\n plt.axis([2, 20, 2, 14])\n plt.setp(plt.gca(), yticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))\n plt.text(3, 12, 'IV', fontsize=20)\n\n # verify the stats\n pairs = (x, y1), (x, y2), (x, y3), (x4, y4)\n for x, y in pairs:\n print('mean=%1.2f, std=%1.2f, r=%1.2f' % (np.mean(y), np.std(y), np.corrcoef(x, y)[0][1]))\n\n plt.show()",
"def plot_inv_RV_lit(outpath, fit_slopes, fit_intercepts, fit_stds):\n waves = np.arange(0.8, 4.01, 0.001)\n fig, ax = plt.subplots(figsize=(10, 9))\n\n for i, RV in enumerate([2.5, 3.1, 5.5]):\n # plot the extinction curve from this work\n offset = 0.1 * i\n slopes = interpolate.splev(waves, fit_slopes)\n alav = fit_intercepts(waves) + slopes * (1 / RV - 1 / 3.1)\n (line,) = ax.plot(waves, alav + offset, lw=1.5, label=r\"$R(V) = $\" + str(RV))\n stddev = interpolate.splev(waves, fit_stds)\n color = line.get_color()\n ax.fill_between(\n waves,\n alav + offset - stddev,\n alav + offset + stddev,\n color=color,\n alpha=0.2,\n edgecolor=None,\n )\n\n # plot the literature curves\n styles = [\"--\", \":\"]\n for i, cmodel in enumerate([CCM89, F19]):\n ext_model = cmodel(Rv=RV)\n (indxs,) = np.where(\n np.logical_and(\n 1 / waves >= ext_model.x_range[0], 1 / waves <= ext_model.x_range[1]\n )\n )\n yvals = ext_model(waves[indxs] * u.micron)\n ax.plot(\n waves[indxs],\n yvals + offset,\n lw=1.5,\n color=color,\n ls=styles[i],\n alpha=0.8,\n )\n\n # add text\n ax.text(3.45, 0.03, r\"$R(V) = 2.5$\", fontsize=0.8 * fs, color=\"tab:blue\")\n ax.text(3.45, 0.15, r\"$R(V) = 3.1$\", fontsize=0.8 * fs, color=\"tab:orange\")\n ax.text(3.45, 0.305, r\"$R(V) = 5.5$\", fontsize=0.8 * fs, color=\"tab:green\")\n\n # finalize and save the plot\n ax.set_xlabel(r\"$\\lambda\\ [\\mu m$]\", fontsize=1.2 * fs)\n ax.set_ylabel(r\"$A(\\lambda)/A(V)$ + offset\", fontsize=1.2 * fs)\n ax.set_xlim(0.75, 4.05)\n ax.set_ylim(-0.03, 0.98)\n handles = [\n Line2D([0], [0], color=\"k\", lw=1.5),\n Line2D([0], [0], color=\"k\", lw=1.5, ls=\"--\"),\n Line2D([0], [0], color=\"k\", lw=1.5, ls=\":\"),\n ]\n labels = [\n \"this work\",\n \"Cardelli et al. (1989)\",\n \"Fitzpatrick et al. (2019)\",\n ]\n plt.legend(handles, labels, fontsize=fs)\n plt.savefig(outpath + \"inv_RV_lit.pdf\", bbox_inches=\"tight\")\n\n # also save the plot in log scale\n plt.ylim(0.01, 1)\n plt.yscale(\"log\")\n plt.tight_layout()\n plt.savefig(outpath + \"inv_RV_lit_log.pdf\")",
"def fit(self, X, Y):\n ...",
"def line_fit(x, y, e):\n\n suma = 0\n sumx = 0\n sumy = 0\n sumx2 = 0\n sumy2 = 0\n sumxy = 0\n\n dlen = len(x)\n if dlen < 3:\n return [0, 0, 0, 0]\n\n for k in range(0, dlen):\n try:\n weight = 1.0 / e[k]**2\n except:\n weight = 1.0\n suma += weight\n sumx += weight * x[k]\n sumy += weight * y[k]\n sumx2 += weight * x[k] * x[k]\n sumy2 += weight * y[k] * y[k]\n sumxy += weight * x[k] * y[k]\n\n delta = suma * sumx2 - sumx* sumx\n a = (sumx2 * sumy - sumx * sumxy) / delta\n b = (sumxy * suma - sumx * sumy ) / delta\n if dlen <= 2:\n siga = 0\n sigb = 0\n else: \n var = (sumy2 + a * a * suma + b * b * sumx2 - 2.0 *(a * sumy + b * sumxy - a * b * sumx)) / (len(x) -2)\n siga = math.sqrt(var * sumx2 / delta)\n sigb = math.sqrt(var * suma / delta)\n\n return [a, b, siga, sigb]",
"def fit_and_plot(self):\n try:\n if not hasattr(self, \"file\"):\n self.ui.Result_textBrowser.setText(\"You need to load a data file.\")\n else:\n if self.opened_from_flim:\n x, y = self.hist_data_from_flim\n else:\n x,y = self.acquire_settings() #get data\n y_norm = y/np.max(y) #normalized y\n\n # find the max intensity in the array and start things from there\n find_max_int = np.nonzero(y_norm == 1)\n y = y[np.asscalar(find_max_int[0]):]\n x = x[np.asscalar(find_max_int[0]):]\n\n t = x\n time_fit = t\n TRPL_interp = np.interp(time_fit, t, y)\n \n fit_func = self.ui.FittingFunc_comboBox.currentText()\n self.ui.plot.plot(t, y, clear=self.ui.clear_plot_checkBox.isChecked(), pen=pg.mkPen(self.plot_color))\n \n if fit_func == \"Stretched Exponential\": #stretch exponential tab\n tc, beta, a, avg_tau, PL_fit, noise = stretch_exp_fit(TRPL_interp, t)\n self.out = np.empty((len(t), 3))\n self.out[:,0] = t #time\n self.out[:,1] = TRPL_interp #Raw PL \n self.out[:,2] = PL_fit # PL fit\n self.ui.plot.plot(t, PL_fit, clear=self.ui.clear_plot_checkBox.isChecked(), pen='k')\n self.ui.Result_textBrowser.setText(\"Fit Results:\\n\\nFit Function: Stretched Exponential\"\n \"\\nFit Method: \" + \"diff_ev\" + #TODO : change when diff_ev and fmin_tnc implemented for non-irf\n \"\\nAverage Lifetime = \" + str(avg_tau)+ \" ns\"\n \"\\nCharacteristic Tau = \" + str(tc)+\" ns\"\n \"\\nBeta = \"+str(beta)+\n \"\\nNoise = \"+ str(noise))\n self.ui.average_lifetime_spinBox.setValue(avg_tau)\n \n elif fit_func == \"Double Exponential\": #double exponential tab\n tau1, a1, tau2, a2, avg_tau, PL_fit, noise = double_exp_fit(TRPL_interp, t)\n self.out = np.empty((len(t), 3))\n self.out[:,0] = t #time\n self.out[:,1] = TRPL_interp #Raw PL \n self.out[:,2] = PL_fit # PL fit\n self.ui.plot.plot(t, PL_fit, clear=self.ui.clear_plot_checkBox.isChecked(), pen='k')\n self.ui.Result_textBrowser.setText(\"Fit Results:\\n\\nFit Function: Double Exponential\"\n \"\\nFit Method: \" + \"diff_ev\" +\n \"\\nAverage Lifetime = \" + str(avg_tau)+ \" ns\"\n \"\\nTau 1 = \" + str(tau1)+\" ns\"\n \"\\nA 1 = \" + str(a1)+\n \"\\nTau 2 = \" + str(tau2)+\" ns\"\n \"\\nA 2 = \" + str(a2)+\n \"\\nNoise = \"+ str(noise))\n #TODO - once tau_avg implemented, set average lifetime spinbox to tau_avg value\n \n elif fit_func == \"Single Exponential\": #single exponential tab\n tau, a, PL_fit, noise = single_exp_fit(TRPL_interp, t)\n self.out = np.empty((len(t), 3))\n self.out[:,0] = t #time\n self.out[:,1] = TRPL_interp #Raw PL \n self.out[:,2] = PL_fit # PL fit\n self.ui.plot.plot(t, PL_fit, clear=self.ui.clear_plot_checkBox.isChecked(), pen='k')\n self.ui.Result_textBrowser.setText(\"Fit Results:\\n\\nFit Function: Single Exponential\"\n \"\\nFit Method: \" + \"diff_ev\" +\n \"\\nLifetime = \" + str(tau)+ \" ns\"\n \"\\nA = \" + str(a)+\n \"\\nNoise = \"+ str(noise))\n self.ui.average_lifetime_spinBox.setValue(tau)\n \n #add fit params to data_list\n self.data_list.append(\"Data Channel: \" + str(self.ui.Data_channel_spinBox.value()) + \"\\n\" + self.ui.Result_textBrowser.toPlainText())\n self.fit_lifetime_called_wo_irf = True\n self.fit_lifetime_called_w_irf = False\n\n self.ui.plot.setLabel('left', 'Intensity', units='a.u.')\n self.ui.plot.setLabel('bottom', 'Time (ns)')\n return self.out\n \n except Exception as e:\n self.ui.Result_textBrowser.append(format(e))",
"def test_fit(self):\n self.ax.hold(True)\n self.plot_clicked()\n #get the variables\n self.x_data = self.filter_ignore(self.data[self.x]).get_values()\n fit_type = self.fitting_select.currentText()\n mod = self.fit_models[fit_type]\n #update paramter values\n for i in mod.param_names:\n self.params_objects[fit_type][i].value = self.fit_param_dict[fit_type][i].value()\n #make a smooth interpolation of data\n x = np.linspace(self.x_data[0],self.x_data[-1],1000)\n self.ax.plot(x,mod.eval(params=self.params_objects[fit_type],x = x),\n linestyle = '--')\n\n self.canvas.draw()",
"def test_fit_intercept_and_copy(coefs, intercept):\n X, y = _create_dataset(coefs, intercept, noise=2.0)\n imb = LADRegression(fit_intercept=False, copy_X=False)\n imb.fit(X, y)\n\n assert imb.intercept_ == 0.0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Evaluate the Vband luminosity L_V expected from the FJ relation for a given velocity dispersion | def get_luminosity(self, vel_disp):
log_L_V = self.slope*np.log10(vel_disp) + self.intercept
return log_L_V | [
"def fwd_voltage(self):\r\n\t\treturn float(self.query(\"LAS:LDV?\"))",
"def estimate_velocity_vring_collider(sl, sv):\n print('... estimate_velocity_vring_collider: Only applicable to the piston setting1 (~10cm blob creation)')\n # get module location\n mod_loc = os.path.abspath(__file__)\n pdir, filename = os.path.split(mod_loc)\n ringDataDir = os.path.join(os.path.join(pdir, 'reference_data'), 'vortex_ring_data_setting_medium')\n f_vrT = read_pickle(os.path.join(ringDataDir, 'f_vrT.pkl')) # velocity of a top ring\n f_vrB = read_pickle(os.path.join(ringDataDir, 'f_vrB.pkl')) # velocity of a bottom ring\n dp, do, norfices = 160, 25.6, 8. # chamber parameters\n # compute L/D and veff\n ld = compute_form_no(sl, orifice_d=do, piston_d=dp, num_orifices=norfices)\n veff = estimate_veff(sl, sv)\n vrT, vrB = f_vrT(ld, veff), f_vrB(ld, veff)\n return vrT, vrB",
"def P_vapor(self) -> float:\n chemicals = self.vle_chemicals\n F_l = eq.LiquidFugacities(chemicals, self.thermo)\n IDs = tuple([i.ID for i in chemicals])\n x = self.get_molar_fraction(IDs)\n if x.sum() < 1e-12: return 0\n return F_l(x, self.T).sum()",
"def Luminosity(self, z, f=1., dnu=1000.):\n ld = self.Luminosity_Distance(z)\n ld2 = ld * ld\n lum = f * self.Jy2CGS * dnu * self.MHz2Hz * 4 * np.pi * ld2\n return lum",
"def Luminosity(self):\n try:\n L = (self.E*self.Weight).sum()\n N = self.E.count()\n except:\n L = self.E.sum()\n N = self.E.count()\n return L, L/np.sqrt(N)",
"def cie_luv(self):\n K = Fraction(29, 3) ** 3\n e = Fraction(6, 29) ** 3\n XYZ = self.cie_xyz\n yr = XYZ[1] / D65[1]\n L = 116 * yr ** Fraction(1, 3) - 16 if yr > e else K * yr\n u = 13 * L * (U(*XYZ) - U(*D65))\n v = 13 * L * (V(*XYZ) - V(*D65))\n return (L, u, v)",
"def velocity(current_data):\n from numpy import ma\n drytol = current_data.user.get('dry_tolerance', drytol_default)\n q = current_data.q\n h = q[0,:]\n h_wet = ma.masked_where(h<=drytol, h)\n u_wet = q[1,:] / h_wet\n return u_wet",
"def mvir(radius, vdisp_1d, prefactor_eta=3.39):\n return prefactor_eta * ((3 * vdisp_1d**2) * radius / constants.G).to(u.M_sun)",
"def test_lfc_inversion():\n levels = np.array([963., 789., 782.3, 754.8, 728.1, 727., 700.,\n 571., 450., 300., 248.]) * units.mbar\n temperatures = np.array([25.4, 18.4, 17.8, 15.4, 12.9, 12.8,\n 10., -3.9, -16.3, -41.1, -51.5]) * units.celsius\n dewpoints = np.array([20.4, 0.4, -0.5, -4.3, -8., -8.2, -9.,\n -23.9, -33.3, -54.1, -63.5]) * units.celsius\n lfc_pressure, lfc_temp = lfc(levels, temperatures, dewpoints)\n assert_almost_equal(lfc_pressure, 705.8806 * units.mbar, 2)\n assert_almost_equal(lfc_temp, 10.6232 * units.celsius, 2)",
"def get_fermi_velocities():\n\n vr = Vasprun('vasprun.xml')\n # eigenvalues = vr.eigenvalues\n bs = vr.get_band_structure()\n bands = bs.bands\n kpoints = bs.kpoints\n efermi = bs.efermi\n h_bar = 6.582e-16 # eV*s\n\n fermi_bands = []\n for spin in bands:\n for i in range(len(bands[spin])):\n if max(bands[spin][i]) > efermi > min(bands[spin][i]):\n fermi_bands.append(bands[spin][i])\n\n fermi_velocities = []\n for band in fermi_bands:\n for i in range(len(band)-1):\n if (band[i] < efermi < band[i+1]) or (band[i] > efermi > band[i+1]):\n dk = np.sqrt((kpoints[i+1].cart_coords[0]\n - kpoints[i].cart_coords[0])**2\n + (kpoints[i+1].cart_coords[1]\n - kpoints[i].cart_coords[1])**2)\n v_f = abs((band[i+1] - band[i]) / (h_bar * dk))\n fermi_velocities.append(v_f)\n\n return fermi_velocities # Values are in Angst./s",
"def test_filt_vegamag(self):\n sun = Sun.from_builtin('E490_2014')\n V = get_bandpass('johnson v')\n wave, fluxd = sun.filt(V, unit=JMmag)\n assert np.isclose(fluxd.value, -26.75, atol=0.006)",
"def test_luminosity(query_derived, query_atnf):\n\n edot = query_derived.get_pulsar('TEST1')['EDOT'][0]\n edotatnf = query_atnf.get_pulsar('TEST1')['EDOT'][0]\n\n assert abs(edot - edotatnf) < sf_scale(edotatnf)\n\n edotd2 = query_derived.get_pulsar('TEST1')['EDOTD2'][0]\n edotd2atnf = query_atnf.get_pulsar('TEST1')['EDOTD2'][0]\n\n assert abs(edotd2 - edotd2atnf) < sf_scale(edotd2atnf)",
"def luminosity(self, ldist):\n\t\ta = np.pi*3e14/2.*self.amplitude*(self.fwhm/self.x_0)/self.x_0/10**23\n\t\tb = 4*np.pi*(ldist*10**6*3.086e18)**2\n\t\treturn a*b",
"def calc_lumin(self):\r\n return -1./self.tau*self.c",
"def w_dispersion(q,v=1):\r\n # parameters for two-fluid hydrodynamic model from [1]\r\n Vol = np.sqrt(3)/2 * 4.63**2; # unit cell volume in graphene\r\n wr1= 4.08 / HARTREE; # Pi-electrons [eV]\r\n n1 = 2/Vol;\r\n wr2= 13.06 / HARTREE; # Sigma-electrons [eV]\r\n n2 = 6/Vol;\r\n \r\n # resonance frequencies\r\n w12 = wr1**2; # we neglect the acoustic velocity s=0\r\n w22 = wr2**2;\r\n\r\n # generalized plasma frequencies\r\n Q12 = 2*np.pi*n1*q * v ; # effective Omega_nu^2\r\n Q22 = 2*np.pi*n2*q * v ;\r\n\r\n # dispersion formula (17) in [1]\r\n A = 0.5*(w12 + Q12 + w22 + Q22);\r\n B = np.sqrt( 0.25*( w12 + Q12 - w22 - Q22 )**2 + Q12 * Q22 );\r\n\r\n return np.asarray([np.sqrt(A-B), np.sqrt(A+B)]);",
"def getVoltageResistance(self):\n return self.getTheveninEquiv()",
"def calcLorentzGammaFromVelocity(self,direction):\n if direction not in self.v.order: \n raise CoordinateVector(\"The direction, \"+str(direction)+ \" needs to be one of \" +\",\".join(self.x.order) + \" to calculated the lorentz gamma.\")\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n return math.sqrt(1 /(1 - (getattr(self.v,direction)/speed_light)**2))",
"def voltage(self) -> float:\n pass",
"def V_lopass(V, R_S, C, L, R_L, f):\n # current in circuit\n I = V/(R_S + Z_lopass(C, L, R_L, f))\n # voltage across circuit\n V_out = V - I*R_S\n I_C = V_out/Xcap(C, f)\n I_L = V_out/Z_low(L, R_L, f)\n V_L = I_L*R_L\n return V_L"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the parameters fit on SDSS DR4 Note The values of slope and intercept are taken from the rband orthogonal fit on SDSS DR4. See Table 2 of [1]_. References .. [1] Hyde, Joseph B., and Mariangela Bernardi. "The luminosity and stellar mass Fundamental Plane of earlytype galaxies." | def _define_SDSS_fit_params(self):
self.a = 1.4335
self.b = 0.3150
self.c = -8.8979
self.intrinsic_scatter = 0.0578
#self.delta_a = 0.02
#self.delta_b = 0.01 | [
"def _define_SDSS_fit_params(self):\n\t\tself.a = 5.7*1.e-4\n\t\tself.b = 0.38\n\t\tself.lower = 0.2",
"def _define_SLACS_fit_params(self):\n\t\t# Fit params from R_eff\n\t\tself.a = -0.41\n\t\tself.b = 0.39\n\t\t#self.delta_a = 0.12\n\t\t#self.delta_b = 0.10\n\t\tself.intrinsic_scatter = 0.14\n\t\t# Fit params from vel_disp\n\t\tself.a_v = 0.07\n\t\tself.b_v = -0.12\n\t\tself.int_v = 0.17",
"def _define_ETG_fit_params(self):\n\t\tself.slope = 2.0\n\t\tself.intercept = 5.8",
"def _define_combined_fit_params(self):\n\t\tself.z_bins = np.array([0.40, 0.60, 0.80, 1.00, 1.20,\n\t\t 1.40, 1.60, 1.80, 2.20, 2.40, \n\t\t 2.50, 2.60, 2.70, 2.80, 2.90,\n\t\t 3.00, 3.10, 3.20, 3.30, 3.40,\n\t\t 3.50, 4.10, 4.70, 5.50, np.inf])\n\t\tself.alphas = -np.array([2.74, 3.49, 3.55, 3.69, 4.24,\n\t\t 4.02, 4.35, 3.94, 4.26, 3.34,\n\t\t 3.61, 3.31, 3.13, 3.78, 3.61, \n\t\t 5.01, 4.72, 4.39, 4.39, 4.76, \n\t\t 3.72, 4.84, 4.19, 4.55, 5.00])\n\t\tself.betas = -np.array([1.07, 1.55, 1.89, 1.88, 1.84, \n\t\t 1.88, 1.87, 1.69, 1.98, 1.61, \n\t\t 1.60, 1.38, 1.05, 1.34, 1.46, \n\t\t 1.71, 1.70, 1.96, 1.93, 2.08, \n\t\t 1.25, 2.07, 2.20, 2.31, 2.40])\n\t\tself.M_stars = -np.array([21.30, 23.38, 24.21, 24.60, 25.24,\n\t\t 25.41, 25.77, 25.56, 26.35, 25.50,\n\t\t 25.86, 25.33, 25.16, 25.94, 26.22,\n\t\t 26.52, 26.48, 27.10, 27.19, 27.39,\n\t\t 26.65, 27.26, 27.37, 27.89, 29.19])",
"def set_parameters(self, params):\r\n PIDController.set_parameters(self,params)\r\n\r\n self.sensor_poses = params.sensor_poses\r\n \r\n # Week 4 assigment\r\n # Set the weigths here\r\n self.weights = [1]*len(self.sensor_poses)",
"def fit_parameters(yhere): \n\n cfit = sol_1d[1]\n spec_here = np.ma.median(data[int(yhere)-2:int(yhere)+2, :], axis=0)\n shift = Fit.xcor_peak(spec_here, spec0, lags)\n ll_here = CV.chebval(pix - shift, cfit)\n [xs, sxs, sigmas] = find_known_lines(linelist,\n ll_here, spec_here, options)\n\n if data2 is not None:\n cfit2 = sol_1d2[1]\n spec_here2 = np.ma.median(data2[yhere-2:yhere+2, :], axis=0)\n shift2 = Fit.xcor_peak(spec_here2, spec2, lags)\n ll_here2 = CV.chebval(pix - shift2, cfit2)\n\n [xs2, sxs2, sigmas2] = find_known_lines(linelist2,\n ll_here2, spec_here2, options)\n\n \"\"\" Fit a chebyshev to the measured positions \"\"\"\n if data2 is not None:\n \"fit both line lists\"\n \"\"\"combine the line lists\"\"\"\n clinelist= np.concatenate([linelist,linelist2])\n cxs = np.concatenate([xs, xs2])\n csxs = np.concatenate([sxs, sxs2])\n \n \"\"\"combine the measured xs and sxs arrays that have the measured\n line positions\"\"\"\n [delt, cfit, lines] = fit_chebyshev_to_lines(cxs, csxs,\n clinelist, options)\n else:\n [delt, cfit, lines] = fit_chebyshev_to_lines(xs, sxs,\n linelist, options)\n\n #if np.std(delt) < .01: pdb.set_trace()\n debug(\"resid ang S%2.2i @ p%4.0i: %1.2f rms %1.2f mad [shift%2.0f]\" % \\\n (slitno+1, yhere, np.std(delt), np.median(np.abs(delt)),\n shift))\n\n return cfit, delt",
"def plot_fit_params(self, wsp, pdfname, snapname=''):\n if snapname:\n wsp.loadSnapshot(snapname)\n frame = get_var(wsp, self.mname).frame(rf.Title('Fit Results'))\n full_pdf = wsp.pdf(self.full_model)\n\n full_pdf.paramOn(frame, rf.Layout(0.1, 0.9, 0.9),\n rf.Format('NEU', rf.AutoPrecision(2)))\n\n can = r.TCanvas(create_random_str(32), 'rcan', 600, 600)\n can.cd()\n frame.findObject('{}_paramBox'.format(full_pdf.GetName())).Draw()\n can.SaveAs(pdfname)",
"def create_design_params(self):\n self.design_params = np.array([self.r1, self.r2, self.d1, self.d2, self.Ixx, self.Iyy, self.Izz])",
"def dline_dSFR(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results(sim_run=p.sim_run,nGal=p.nGal)\n \n marker = 'o'\n if p.sim_run == p.sim_runs[0]: marker = '^'\n\n L_line = getattr(GR,'L_'+p.line+'_sun')#[380:400]#[0:100]\n SFR = getattr(GR,'SFR')#[380:400]#[0:100]\n M_star = getattr(GR,'M_star')#[380:400]#[0:100]\n Zsfr = getattr(GR,'Zsfr')#[380:400]#[0:100]\n R_gas = getattr(GR,'R2_gas')#[380:400]#[0:100]\n M_H2 = getattr(GR,'M_H2_R2_gas')#[380:400]#[0:100]\n\n SFR = SFR[L_line > 0]\n M_star = M_star[L_line > 0]\n Zsfr = Zsfr[L_line > 0]\n R_gas = R_gas[L_line > 0]\n M_H2 = M_H2[L_line > 0]\n L_line = L_line[L_line > 0]\n print('%i data points ' % (len(L_line)))\n\n # Distance from MS\n dlSFR = aux.distance_from_salim18(GR.M_star,GR.SFR)\n\n if p.add:\n ax = p.ax\n else:\n fig,ax = plt.subplots(figsize=(8,6))\n\n # Distance from observed relation\n L_obs,SFR_obs,fit,std = add_line_SFR_obs(p.line,[1e6,1e6],ax,plot=False,select=p.select)\n ldL_line = np.log10(L_line) - fit.predict(np.log10(SFR.reshape(-1, 1))).flatten()\n\n labs = {'_M10':'Mach=10 power-law',\\\n '_arepoPDF_ext':'AREPO parametric PDF with extinction',\\\n '_arepoPDF':'SIGAME v3',\\\n '_arepoPDF_CMZ':'SIGAME v3',\\\n '_arepoPDF_M51':'SIGAME v3'}\n lab = labs[p.table_ext]\n\n\n ax.text(0.05,0.9,p.line,transform=ax.transAxes,fontsize=13)\n ax.set_xlabel('log SFR - log SFR$_{MS,Salim+18}$')\n ax.set_ylabel('log L - log L$_{obs}$(SFR)')\n if not p.xlim: p.xlim = np.array([-3,3])\n if not p.ylim: \n p.ylim = [np.median(ldL_line) - 4,np.median(ldL_line) + 3]\n # if p.line == '[OI]63': p.ylim = [np.median(ldL_line) - 5,np.median(ldL_line) + 4]\n # if 'CO' in p.line: p.ylim = [np.median(ldL_line) - 4,np.median(ldL_line) + 4]\n\n ax.set_xlim(p.xlim)\n ax.set_ylim(p.ylim)\n ax.plot([0,0],ax.get_ylim(),'--k',lw=1)\n ax.plot(ax.get_xlim(),[0,0],'--k',lw=1)\n\n if p.select == 'Sigma_M_H2':\n Sigma_M_H2 = M_H2/(np.pi*R_gas**2)/1e6 # per pc^-2\n m = ax.scatter(dlSFR[np.argsort(Sigma_M_H2)],ldL_line[np.argsort(Sigma_M_H2)],marker=marker,s=14,\\\n c=np.log10(Sigma_M_H2[np.argsort(Sigma_M_H2)]),vmin=-2.5,vmax=2.2,label=lab,alpha=0.5,zorder=10)\n if p.cb:\n cbar = plt.colorbar(m,ax=ax)\n cbar.set_label(label=r'log $\\Sigma_{H2}$ [M$_{\\odot}$/pc$^2$]',size=15)",
"def _fd_opt(self, \n param0, \n bounds, \n directory='Data/', \n param_file='fd_parameters.pkl', \n data_file='induction_slopes_all.mat', \n plot_file='fd_fitted_traces.png',\n method='differential_evolution'):\n \n # load data\n #`````````````````````````\n self.data = scipy.io.loadmat(directory+data_file)\n \n t_data_list = [self.data['t_tbs'], self.data['t_tet'][:,-1]]\n y_data_list = [self.data['x_tbs'], self.data['x_tet'][:,-1]]\n \n # run optimization\n #```````````````````````````````\n if method == 'differential_evolution':\n param_opt_obj = scipy.optimize.differential_evolution(self._fd_err_mult, bounds=bounds, args=(t_data_list, y_data_list))\n \n else:\n param_opt_obj = scipy.optimize.minimize(self._fd_err_mult, param0, (t_data_list, y_data_list), bounds=bounds)\n\n # save optimization parameters\n with open(directory+param_file, 'wb') as output:\n pickle.dump(param_opt_obj, output,protocol=pickle.HIGHEST_PROTOCOL)\n print 'saved optimized parameters'\n \n self._plot_fd_fromfile(directory=directory, param_file=param_file, plot_file=plot_file)\n \n return param_opt_obj",
"def set_params_doh(self):\n u.printf(\"Loading params for DoH processing...\")\n self.params.min_sigma = 15\n self.params.max_sigma = 60\n self.params.num_sigma = 15\n self.params.threshold = 0.01\n self.params.overlap = 0.5\n self.params.log_Scale = True",
"def fit(self, skydip):\n parameter_order = ['tau', 'offset', 'kelvin', 'tsky']\n self.parameters = {}\n self.errors = {}\n self.p_opt = None\n self.p_cov = None\n self.fitted_values = None\n self.data = None\n self.sigma = None\n self.elevation = None\n\n log.debug(\"Initial skydip values:\")\n log.debug(f\" Tsky = {self.initial_guess['tsky']}\")\n log.debug(f\" offset = {self.initial_guess['offset']}\")\n log.debug(f\" kelvin = {self.initial_guess['kelvin']}\")\n log.debug(f\" tau = {self.initial_guess['tau']}\")\n\n if self.el_range is not None:\n from_bin = max(0, skydip.get_bin(self.el_range.min))\n to_bin = min(skydip.data.size, skydip.get_bin(self.el_range.max))\n else:\n from_bin = 0\n to_bin = skydip.data.size\n\n self.init_parameters(skydip)\n\n data = skydip.data[from_bin:to_bin]\n weight = skydip.weight[from_bin:to_bin]\n valid = weight > 0\n data = data[valid]\n weight = weight[valid]\n\n if self.uniform_weights:\n sigma = None\n else:\n sigma = 1 / weight\n\n elevation = skydip.get_elevation(\n np.nonzero(valid)[0]).to('radian').value\n\n self.use_points = data.size\n\n p0 = []\n lower_bounds = np.zeros(4, dtype=float)\n upper_bounds = np.zeros(4, dtype=float)\n\n for i, parameter in enumerate(parameter_order):\n value = self.initial_guess[parameter]\n p0.append(value)\n if parameter in self.fit_for:\n lower_bounds[i] = self.bounds[parameter][0]\n upper_bounds[i] = self.bounds[parameter][1]\n else: # An attempt to fix parameters with curve_fit\n eps = abs(value - np.nextafter(value, 1))\n lower_bounds[i] = value - eps\n upper_bounds[i] = value + eps\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', OptimizeWarning)\n p_opt, p_cov = curve_fit(self.value_at, elevation, data,\n p0=p0, sigma=sigma,\n bounds=(lower_bounds, upper_bounds))\n self.p_opt = p_opt\n self.p_cov = p_cov\n self.data = data\n self.elevation = elevation\n self.sigma = sigma\n\n self.has_converged = np.isfinite(p_opt).all()\n if not self.has_converged: # pragma: no cover\n log.warning(\"Skydip fit did not converge!\")\n errors = np.sqrt(np.diag(p_cov))\n\n for i, parameter in enumerate(parameter_order):\n self.parameters[parameter] = p_opt[i]\n self.errors[parameter] = errors[i]\n\n self.fitted_values = self.fit_elevation(elevation)\n fit_weights = None if sigma is None else weight ** 2\n\n t_obs_rms = np.sqrt(np.average((data - self.fitted_values) ** 2,\n weights=fit_weights))\n self.rms = t_obs_rms / self.parameters['kelvin']",
"def set_fit_params(self):\n\n self.p0 = np.array([self.A_arr, self.T_a])\n # initial guess at A_arr and T_a\n\n self.popt, self.pcov = curve_fit(\n self.get_eta_fit, self.T_exp, self.eta_exp, p0=self.p0\n )\n\n self.A_arr = self.popt[0]\n self.T_a = self.popt[1]\n\n self.T_array = self.T_model",
"def test_linear_fit_fixed_parameter(self):\n init_model = models.Polynomial1D(degree=2, c1=1)\n init_model.c1.fixed = True\n\n x = np.arange(10)\n y = 2 + x + 0.5 * x * x\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y)\n assert_allclose(fitted_model.parameters, [2.0, 1.0, 0.5], atol=1e-14)",
"def Fit(self,Source,X=None,function='line',Guess=numpy.ones(10),Show_Residuals=True,StartP=None,EndP=None,StartX=None,EndX=None,Show_Guess=False,Figure=None,Rendering=True,RAW_Data=True,Color='r',Show_Parameters=True,Number_of_points=1000.,Bounds=[[None,None]]*10,Model=False):\n \n def find_indexes(Source,Min,Max):\n \tc=abs(numpy.array(Source)-Min)\n \tMin_index=list(c).index(min(c))\n \tc=abs(numpy.array(Source)-Max)\n \tMax_index=list(c).index(min(c))\n \treturn Min_index,Max_index \n\n begin=X[0] #original begin and end points are stored here, so the final plot will be displayed in the original workspace\n end=X[-1] \n if (StartP == None) and (StartX == None) :\n StartP=0\n StartX=0\n if (EndP == None) and (EndX == None) == None:\n EndP=len(Source)-1 \n EndX=len(Source)-1 \n \n if (StartP == None) or (EndP == None) :\n StartP,EndP=find_indexes(X,StartX,EndX) \n else:\n StartX=X[StartP]\n EndX=X[EndP]\n\n\n Source=numpy.array(Source[StartP:EndP]) #if not using the whole range for the fit, truncated wave is created here\n X=numpy.array(X[StartP:EndP]) #if not using the whole range for the fit, truncated timescale is created here\n\n\n \t# Data and Xscale\n Source = numpy.array(Source)\n if X==None or (len(Source) != len(X)):\n X = numpy.array(range(len(Source)))\n # create a set of Parameters\n\n Function=eval(\"self.\"+function) #the fitting function\n Formula=eval(\"self.\"+function.capitalize())[0] #the fitting formula\n Parameters_Names=eval(\"self.\"+function.capitalize())[1] ##list of the fitting parameters \n \n\n\n\n for i,j in enumerate(Bounds):\n if Bounds[i][0] != None:\n Bounds[i][0]=numpy.float32(Bounds[i][0])\n if Bounds[i][1] != None:\n Bounds[i][1]=numpy.float32(Bounds[i][1])\n \n #print Bounds\n \n p0 = Parameters()\n\n \n # do fit, here with leastsq model\n if Model == False:\n \n for i,j in enumerate(Parameters_Names):\n #For each paramters, you can set value, min & max. i.e. 
p0.add('omega', value= 0.0, min=-numpy.pi/2., max=numpy.pi/2)\n if j != '0':\n if Bounds[i][0] != None and Bounds[i][1]!=None:\n p0.add(j,value=Guess[i],min=Bounds[i][0],max=Bounds[i][1])\n elif Bounds[i][0] !=None and Bounds[i][1]==None:\n p0.add(j,value=Guess[i],min=Bounds[i][0]) \n elif Bounds[i][0] == None and Bounds[i][1] != None:\n p0.add(j,value=Guess[i],max=Bounds[i][1])\n else:\n p0.add(j,value=Guess[i]) \n print 'Fitting in process ...'\n try:\n result = minimize(Function, p0, args=(X, Source))\n RenderingX=numpy.linspace(float(X[0]),float(X[-1]),num=Number_of_points)\n fit = Function(result.params,RenderingX) #Rendering, with Popt, the best fitting parameters\n except:\n print 'Fitting failed, try other parameters or constraints'\n return\n print 'Fitting performed between points ',StartP,' and ',EndP, ' (',len(X),' points)'\n print 'in units between: ',StartX,' and ',EndX\n print '######### FITTING RESULTS ############'\n print 'Parameters are :' \n res=[]\n for i in list(result.params):\n print i, result.params[i].value \n res.append(result.params[i].value)\n \n elif Model== True:\n for i,j in enumerate(Parameters_Names):\n if j != '0':\n p0.add(j,value=Guess[i]) \n RenderingX=numpy.linspace(float(X[0]),float(X[-1]),num=Number_of_points)\n fit = Function(p0,RenderingX) #Rendering, with Popt, the best fitting parameters \n\n# if Show_Parameters == True:\n# for i in range(len(popt)):\n# if List[i][0] != '0':\n# try:\n# pyplot.text(begin+((end-begin)/10), max(fit)-(i+1)*abs((max(fit)-min(fit))/(10)), r\"%s = {%s} +/- {%s}\" % ((str(List[i][0])),str(float(popt[i])),str(float((pcov[i][i])**0.5))))# .format(popt[i], (pcov[i][i])**0.5))\n# except:\n# pyplot.text(begin+((end-begin)/10), max(fit)-(i+1)*abs((max(fit)-min(fit))/(10)),'test')\n\n #if Show_Error_Bars == True: to do\n #pyplot.errorbar(X, Source, yerr = y_sigma, fmt = 'o') \n\n \n# if Show_Guess == True:\n# guess=Function(X,*p0)\n# G,=pyplot.plot(X, guess, label='Test data',marker='o',color='g') \n# if RAW_Data == True:\n# pyplot.legend([S, G, F], [\"Data\", \"initial guess\", \"Fit\"], loc='best',fancybox=True)\n# else:\n# pyplot.legend([G, F], [\"initial guess\", \"Fit\"], loc='best',fancybox=True)\n# else:\n# if RAW_Data == True:\n# pyplot.legend([S, F], [\"Data\", \"Fit\"], loc='best',fancybox=True)\n# else:\n# pyplot.legend([F], [\"Fit\"], loc='best',fancybox=True)\n\n if Rendering == True:\n pyplot.rc('axes',fc='white')\n if Figure == None: #Creation of the figure. If Figure is not None, the fit is integrated in pre-existing figure\n fig=pyplot.figure(facecolor='white')\n else:\n fig = Figure \n \n pyplot.title('Fitting completed') \n if RAW_Data == True:\n S,=pyplot.plot(X, Source,color='b')\n \n try:\n if Show_Residuals == True:\n final = Source + result.residual\n pyplot.plot(X, final, 'r')\n except UnboundLocalError: #Should be only in the Plugin, at first launch.\n print 'No results'\n \n F,=pyplot.plot(RenderingX, fit, linestyle='--',color=Color)\n \n pyplot.xlim([begin,end])\n pyplot.grid(True) \n pyplot.show()\n return fit,res,fig\n else:\n return fit",
"def fit(self):\n\n coeff, std, k, offset, nuse, ntot = fit_wavelength(\n identlist = self.identlist,\n npixel = self.param['npixel'],\n xorder = self.param['xorder'],\n yorder = self.param['yorder'],\n maxiter = self.param['maxiter'],\n clipping = self.param['clipping'],\n fit_filter = self.param['fit_filter'],\n )\n\n self.param['coeff'] = coeff\n self.param['std'] = std\n self.param['k'] = k\n self.param['offset'] = offset\n self.param['nuse'] = nuse\n self.param['ntot'] = ntot\n\n message = 'Wavelength fitted. std={:.6f}, utot={}, nuse={}'.format(\n std, ntot, nuse)\n print(message)\n\n self.plot_wavelength()\n\n # udpdate the order/aperture string\n aperture = self.param['aperture']\n order = k*aperture + offset\n text = 'Order {} (Aperture {})'.format(order, aperture)\n self.info_frame.order_label.config(text=text)\n\n self.update_fit_buttons()",
"def __SetSFParams(self):\n\n # If radial structure functions are in output\n if self.__containsRadial:\n # Defines radial attributes\n self.__nc_RSoft_O.radial_error_tolerance = self.etol_radial\n\n # Defines radial dimensions\n self.__nc_RSoft_O.createDimension('radial_structure_functions',\\\n len(self.mus))\n\n # Defines radial variables\n mus_var_id_O = self.__nc_RSoft_O.createVariable('mus', \\\n 'f4', ('radial_structure_functions'))\n Ls_var_id_O = self.__nc_RSoft_O.createVariable('Ls', \\\n 'f4', ('radial_structure_functions'))\n radial_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Xs', 'i4', ('radial_structure_functions'))\n radial_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Ys', 'i4', ('radial_structure_functions'))\n\n # Sets radial structure function variables\n mus_var_id_O[:] = self.mus\n Ls_var_id_O[:] = self.Ls\n radial_Xs_var_id_O[:] = self.radial_Xs\n radial_Ys_var_id_O[:] = self.radial_Ys\n\n # If angular structure functions are in output\n if self.__containsAngular:\n # Defines angular attributes\n self.__nc_RSoft_O.angular_error_tolerance = self.etol_angular\n\n # Defines angular dimensions\n self.__nc_RSoft_O.createDimension('angular_structure_functions',\\\n len(self.xis))\n\n # Defines angular variables\n xis_var_id_O = self.__nc_RSoft_O.createVariable('xis', \\\n 'f4', ('angular_structure_functions'))\n zetas_var_id_O = self.__nc_RSoft_O.createVariable('zetas', \\\n 'i4', ('angular_structure_functions'))\n lambdas_var_id_O = self.__nc_RSoft_O.createVariable('lambdas', \\\n 'i4', ('angular_structure_functions'))\n angular_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Xs', 'i4', ('angular_structure_functions'))\n angular_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Ys', 'i4', ('angular_structure_functions'))\n angular_Zs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Zs', 'i4', ('angular_structure_functions')) \n\n # Sets angular structure function variables\n xis_var_id_O[:] = self.xis\n zetas_var_id_O[:] = self.zetas\n lambdas_var_id_O[:] = self.lambdas\n angular_Xs_var_id_O[:] = self.angular_Xs\n angular_Ys_var_id_O[:] = self.angular_Ys\n angular_Zs_var_id_O[:] = self.angular_Zs",
"def test_linear_fit_2d_model_set_fixed_parameters(self):\n init_model = models.Polynomial2D(\n degree=2,\n c1_0=[1, 2],\n c0_1=[-0.5, 1],\n n_models=2,\n fixed={\"c1_0\": True, \"c0_1\": True},\n )\n\n x, y = np.mgrid[0:5, 0:5]\n zz = np.array([1 + x - 0.5 * y + 0.1 * x * x, 2 * x + y - 0.2 * y * y])\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y, zz)\n\n assert_allclose(fitted_model(x, y, model_set_axis=False), zz, atol=1e-14)",
"def set_parameters(self, L, r):\n self.L = L\n self.r = r"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the parameters fit on the Sloan Lens Arcs Survey (SLACS) sample of 73 ETGs Note See Table 4 of [1]_ for the fit values, taken from the empirical correlation derived from the SLACS lens galaxy sample. References | def _define_SLACS_fit_params(self):
# Fit params from R_eff
self.a = -0.41
self.b = 0.39
#self.delta_a = 0.12
#self.delta_b = 0.10
self.intrinsic_scatter = 0.14
# Fit params from vel_disp
self.a_v = 0.07
self.b_v = -0.12
self.int_v = 0.17 | [
"def _define_SDSS_fit_params(self):\n\t\tself.a = 1.4335\n\t\tself.b = 0.3150 \n\t\tself.c = -8.8979\n\t\tself.intrinsic_scatter = 0.0578\n\t\t#self.delta_a = 0.02\n\t\t#self.delta_b = 0.01",
"def _define_SDSS_fit_params(self):\n\t\tself.a = 5.7*1.e-4\n\t\tself.b = 0.38\n\t\tself.lower = 0.2",
"def set_fit_params(self):\n\n self.p0 = np.array([self.A_arr, self.T_a])\n # initial guess at A_arr and T_a\n\n self.popt, self.pcov = curve_fit(\n self.get_eta_fit, self.T_exp, self.eta_exp, p0=self.p0\n )\n\n self.A_arr = self.popt[0]\n self.T_a = self.popt[1]\n\n self.T_array = self.T_model",
"def _define_ETG_fit_params(self):\n\t\tself.slope = 2.0\n\t\tself.intercept = 5.8",
"def use_so_corr(params):\n # vary the log10 of f_so\n #params.valid_params.append(\"log10_fso\")\n #params.log10_fso = AttrDict(vary=True, fiducial=-1.5, prior='uniform', lower=-5, upper=-1)\n \n # fit params\n params.sigma_so.update(vary=True, fiducial=5.)\n params.f_so.update(vary=True, fiducial=0.04)\n #params.f_so.update(vary=False, expr=\"10**log10_fso\")\n params.options.append('socorr') \n \n # model params\n params.model.use_so_correction = True",
"def fit_parameters(yhere): \n\n cfit = sol_1d[1]\n spec_here = np.ma.median(data[int(yhere)-2:int(yhere)+2, :], axis=0)\n shift = Fit.xcor_peak(spec_here, spec0, lags)\n ll_here = CV.chebval(pix - shift, cfit)\n [xs, sxs, sigmas] = find_known_lines(linelist,\n ll_here, spec_here, options)\n\n if data2 is not None:\n cfit2 = sol_1d2[1]\n spec_here2 = np.ma.median(data2[yhere-2:yhere+2, :], axis=0)\n shift2 = Fit.xcor_peak(spec_here2, spec2, lags)\n ll_here2 = CV.chebval(pix - shift2, cfit2)\n\n [xs2, sxs2, sigmas2] = find_known_lines(linelist2,\n ll_here2, spec_here2, options)\n\n \"\"\" Fit a chebyshev to the measured positions \"\"\"\n if data2 is not None:\n \"fit both line lists\"\n \"\"\"combine the line lists\"\"\"\n clinelist= np.concatenate([linelist,linelist2])\n cxs = np.concatenate([xs, xs2])\n csxs = np.concatenate([sxs, sxs2])\n \n \"\"\"combine the measured xs and sxs arrays that have the measured\n line positions\"\"\"\n [delt, cfit, lines] = fit_chebyshev_to_lines(cxs, csxs,\n clinelist, options)\n else:\n [delt, cfit, lines] = fit_chebyshev_to_lines(xs, sxs,\n linelist, options)\n\n #if np.std(delt) < .01: pdb.set_trace()\n debug(\"resid ang S%2.2i @ p%4.0i: %1.2f rms %1.2f mad [shift%2.0f]\" % \\\n (slitno+1, yhere, np.std(delt), np.median(np.abs(delt)),\n shift))\n\n return cfit, delt",
"def fit(self, num_stoch_iters, num_switch_iters, max_s_itters=np.inf, randomness=10E-9, ant_randomness=0.5e-9, \n min_num_iters=10, max_num_iters=1000, xtol=1e-16, ftol=1e-16, gtol=1e-16):\n\n num_switch_iters_isMinimum = False\n if num_switch_iters < 0:\n num_switch_iters = np.abs(num_switch_iters)\n num_switch_iters_isMinimum = True\n max_s_itters = max(max_s_itters, num_switch_iters)\n\n initial_RMS = self.fitter.RMS( self.current_solution, self.num_DOF )\n print(\"initial RMS:\", initial_RMS)\n \n print('fitting:')\n best_sol = np.array( self.current_solution )\n best_RMS = initial_RMS\n number_LS_runs = 0\n num_LS_runs_needMoreItters = 0\n\n fitterSQ= GSL_LeastSquares( self.fitter.get_num_parameters(), self.fitter.get_num_measurments(), self.fitter.objective_fun_sq, jacobian=self.fitter.objective_fun_sq_jacobian )\n fitter = GSL_LeastSquares( self.fitter.get_num_parameters(), self.fitter.get_num_measurments(), self.fitter.objective_fun, jacobian=self.fitter.objective_fun_jacobian )\n\n for i in range( num_stoch_iters ):\n \n current_solution = np.array( best_sol )\n current_solution[ : self.num_delays] += np.random.normal(scale=randomness, size=self.num_delays )\n current_solution[ self.num_delays : self.num_delays+self.num_RecalAnts] += np.random.normal(scale=ant_randomness, size=self.num_RecalAnts )\n current_solution[self.num_delays+self.num_RecalAnts:] += np.random.normal(scale=randomness, size=len(current_solution)-(self.num_delays+self.num_RecalAnts) )\n\n print(i)\n\n switch_itter = 0\n previous_RMS = np.inf\n RMS_had_increased = False\n while num_switch_iters_isMinimum or switch_itter<num_switch_iters:\n # fit_res = least_squares( self.fitter.objective_fun_sq, current_solution, jac='2-point', method='lm', xtol=3.0E-16, ftol=3.0E-16, gtol=3.0E-16, x_scale='jac', max_nfev=max_nfev)\n # fit_res = least_squares( self.fitter.objective_fun_sq, current_solution, jac=self.fitter.objective_fun_sq_jacobian, method='lm', xtol=3.0E-16, ftol=3.0E-16, gtol=3.0E-16, x_scale='jac', max_nfev=max_nfev)\n \n fitterSQ.reset( current_solution )\n code, text = fitterSQ.run(min_num_iters, max_itters=max_num_iters, xtol=xtol, gtol=gtol, ftol=ftol)\n current_solution = fitterSQ.get_X()\n # num_runs = fitterSQ.get_num_iters()\n\n if code == 4:\n num_LS_runs_needMoreItters += 1\n \n ARMS = self.fitter.RMS( current_solution, self.num_DOF )\n \n # fit_res = least_squares( self.fitter.objective_fun, current_solution, jac='2-point', method='lm', xtol=3.0E-16, ftol=3.0E-16, gtol=3.0E-16, x_scale='jac', max_nfev=max_nfev)\n # fit_res = least_squares( self.fitter.objective_fun, current_solution, jac=self.fitter.objective_fun_sq_jacobian, method='lm', xtol=3.0E-16, ftol=3.0E-16, gtol=3.0E-16, x_scale='jac', max_nfev=max_nfev)\n # fit_res = least_squares( self.fitter.objective_fun, current_solution, jac=self.fitter.objective_fun_jacobian, method='lm', xtol=3.0E-16, ftol=3.0E-16, gtol=3.0E-16, x_scale='jac', max_nfev=max_num_iters)\n \n fitter.reset( current_solution )\n code, text = fitter.run(min_num_iters, max_itters=max_num_iters, xtol=xtol, gtol=gtol, ftol=ftol)\n current_solution = fitter.get_X()\n num_runs = fitter.get_num_iters()\n\n if code == 4:\n num_LS_runs_needMoreItters += 1\n\n number_LS_runs += 2\n RMS = self.fitter.RMS( current_solution, self.num_DOF )\n \n print(\" \", i, switch_itter, \":\", RMS, '(', ARMS, ')', text, 'N:', num_runs)\n\n if RMS>previous_RMS:\n RMS_had_increased = True\n \n if RMS < best_RMS:\n best_RMS = RMS\n best_sol = np.array( current_solution )\n print('IMPROVEMENT!')\n 
else:\n if num_switch_iters_isMinimum and (switch_itter>=num_switch_iters) and RMS_had_increased:\n break\n if switch_itter>=max_s_itters:\n break\n\n previous_RMS = RMS\n switch_itter += 1\n\n self.current_solution = best_sol\n self.GSL_covariance_matrix = fitter.get_covariance_matrix()\n print('frac. runs need more itters:', num_LS_runs_needMoreItters/number_LS_runs)\n print()\n print()\n \n print('initial RMS:', initial_RMS)\n print(\"best RMS:\", best_RMS)\n \n \n print('fits:')\n \n stations_to_print = self.station_order\n PSE_RMS_data = [ self.fitter.event_SSqE( source_i, best_sol ) for source_i in range(len(self.current_sources)) ]\n while len(stations_to_print) > 0:\n stations_this_run = stations_to_print[:self.num_stat_per_table]\n stations_to_print = stations_to_print[len(stations_this_run):]\n \n fit_table = PrettyTable()\n fit_table.field_names = ['id'] + stations_this_run + ['total']\n fit_table.float_format = '.2E'\n \n for source_i,PSE in enumerate(self.current_sources):\n new_row = ['']*len(fit_table.field_names)\n new_row[0] = PSE.ID\n \n# PSE_SSqE, total_ants = self.fitter.event_SSqE( source_i, best_sol )\n new_row[-1] = np.sqrt( PSE_RMS_data[source_i][0] / (PSE_RMS_data[source_i][1]-4) )\n \n for i,sname in enumerate( stations_this_run ):\n stat_ant_range = self.station_to_antenna_index_dict[sname]\n SSqE, numants = self.fitter.event_SSqE( source_i, best_sol, stat_ant_range )\n if numants > 0:\n new_row[i+1] = np.sqrt( SSqE/numants )\n else:\n new_row[i+1] = ''\n \n fit_table.add_row( new_row )\n \n print( fit_table )\n print()\n \n print()\n print()\n \n station_delays = best_sol[:self.num_delays]\n antenna_delays = best_sol[ self.num_delays:self.num_delays+self.num_RecalAnts ]\n source_locs = best_sol[ self.num_delays+self.num_RecalAnts: ]\n \n print(\"stat delays:\")\n for sname, delay, original in zip( self.station_order, station_delays, self.original_delays ):\n print(\"'\"+sname+\"' :\", delay, \", ## diff to guess:\", delay-original)\n \n print()\n print()\n \n print(\"ant delays:\")\n for ant_i, delay in zip(self.ant_recalibrate_order, antenna_delays):\n ant_name = self.sorted_antenna_names[ant_i]\n print(\"'\"+ant_name+\"' : \", delay, ', #', SId_to_Sname[ int(ant_name[:3]) ])\n \n print()\n print()\n \n print(\"locations:\")\n offset = 0\n for i,source in enumerate(self.current_sources):\n \n print(source.ID,':[', source_locs[offset+0], ',', source_locs[offset+1], ',', np.abs(source_locs[offset+2]), ',', source_locs[offset+3], end=' ')# '],')\n \n offset += 4\n if source.polarization == 2:\n print( ',', source_locs[offset], '],')\n offset += 1\n elif source.polarization == 3:\n print(',')\n print(\" \", source_locs[offset+0], ',', source_locs[offset+1], ',', np.abs(source_locs[offset+2]), ',', source_locs[offset+3], '],' )\n offset += 4\n else: ## end pol = 0 or 1\n print('],')\n \n print()\n print()\n \n print(\"REL LOCS\")\n refX, refY, refZ, refT = source_locs[:4]\n offset = 0\n for i,source in enumerate(self.current_sources):\n X,Y,Z,T = source_locs[offset:offset+4]\n print(source.ID,':[', X-refX, ',', Y-refY, ',', np.abs(Z)-np.abs(refZ), ',', T-refT, '],')\n offset +=4\n if source.polarization == 2:\n offset += 1\n elif source.polarization == 3:\n offset += 4",
"def tholefit( namefile, calpol, jmtype, screenradius, **kwargs):\n\t\"\"\" kwargs specify desired atomic polarisabilities, will change polarisability of each atom of this type, eg. C=3.0 \"\"\"\n\t\"\"\" kwargs also specify costslope (universal, set to 0 if no cost desired) and cost centre (for each atom type), specified as eg. C_costcentre=3.5 \"\"\"\n \t'''Fit parameter here is difference in squared differences of each component of polarisability tensor normalised by square of calibrated value'''\n\tReadMoleculeType(namefile)\n\tmol = GetMolecule(namefile)\n \n\n\tprint \"keywords understood:\"\n\tfor key in kwargs:\n \t\tprint \"%s: %s\" % (key, kwargs[key])\n\tprint 'screenradius ', screenradius\n\n\t\"\"\" For cost function, will scale fitting parameter by costslope*(pol-costcentre)\"\"\"\n\tcostslope = kwargs.get('costslope',0.0)\n\n\t\"\"\" ModifyPolarizability \"\"\"\n\n\tprint namefile\n\n\tfor atom in mol():\n \t\tfor key in kwargs:\n\t\t\tif (atom()._elname.upper()==key.upper()):\n\t\t\t\tif atom()._pol[0,0]==atom()._pol[1,1] and atom()._pol[0,0]==atom()._pol[2,2]:\n\t\t\t\t\t#If isotropic, then make iso to this value\n\t\t\t\t\tatom().setpol(3*kwargs[key]*atom()._pol/np.trace(atom()._pol))\n\t\t\t\t\t#Will conserve np.trace at 3 * iso value\n\t\t\t\telse:\n\t\t\t\t\tatom().setpol(kwargs[key]*atom()._pol/np.trace(atom()._pol))\n\n\tcostfactor=1.0\n \tfor costkey in kwargs:\n\t\tfor atomkey in kwargs:\n\t\t\tif (costkey==atomkey,'_costcentre'):\n\t\t\t\tcostfactor = costfactor * (1.0 + (costslope*((kwargs[costkey]-kwargs[atomkey]) ** 2.0)))\n\n\tprint 'costfactor', costfactor\n\t\n\t'''pypol will give polarisability in python model, etamat will give goodness of fit value for each component of the polarisability tensor '''\n\tpypol=np.matrix([[0.,0.,0.],[0.,0.,0.],[0.,0.,0.]])\n\tetamat=np.matrix([[0.,0.,0.],[0.,0.,0.],[0.,0.,0.]]) \n\n\tjm=JMatrix(jmtype=jmtype, screenradius=screenradius)\n\n\n\tjm_I=jm._m.I\n\n\talpha_group=np.matrix(np.zeros((3,3)))\n\n\tfor i in range(0,len(jm_I)/3,1):\n\t\tfor j in range(0,len(jm_I)/3,1):\n\t\t\tfor m in range(0,3,1):\n\t\t\t\tfor n in range(0,3,1):\n\t\t\t\t\talpha_group[m,n]=alpha_group[m,n]+jm_I[3*i+m,3*j+n]\n\n\tprint 'Alpha group via invertd J Matrix:'\n\tprint alpha_group\n\n\n\tfor i in np.arange(0. ,2.1 ,1. ):\n\t\tE0 = np.matrix([0.,0.,0.])\n\t\tE0[0,i]=1.\n\t\td = get_dipoles(E0=E0,jm=jm._m)\n\t\tsplit_d = split_dipoles_onto_atoms(d)\n\t\ttot = np.matrix([0.,0.,0.])\n \t\tfor dd in split_d:\n \t\ttot += dd\n \t\tpypol.T[i] = tot\n\t\t#print 'E0 = ', E0\n\t\t#print 'Total dipole moment:'\n\t\t#print tot\n\t\t#print 'dipoles:'\n\t\t#print split_d\n\n\teta=(pypol[0,0]/calpol[0,0] - 1)**2 + (pypol[1,1]/calpol[1,1] - 1)**2 + (pypol[2,2]/calpol[2,2] - 1)**2\n\teta_w_cost = costfactor*eta\n\n\tprint 'eta_w_cost', eta_w_cost\n\n\tprint 'calpol:'\n\tprint calpol\n\tprint 'pypol:'\n\tprint pypol\n\tpy_over_cal=pypol/calpol\n\tprint 'py_over_cal'\n\tprint py_over_cal\n\tprint 'py over cal three linear compnents:'\n\tprint namefile, py_over_cal[0,0], py_over_cal[1,1], py_over_cal[2,2], \n\tprint 'eta:'\n\tprint eta\n\n\tstdout=sys.stdout\n\tsys.stdout = open('./polarisabilities.csv', 'a')\n\tprint namefile, '\\t', ((pypol[0,0]+pypol[1,1]+pypol[2,2])/3), '\\n'\n\tsys.stdout = stdout\n\n\tmol().__del__\n\treturn eta_w_cost",
"def setCSEParameters(csi:str, ri:str, rn:str) -> None:\n\t\t\tCSE.cseCsi = csi\n\t\t\tConfiguration.set('cse.csi', csi)\n\t\t\tCSE.cseRi = ri\n\t\t\tConfiguration.set('cse.ri', ri)\n\t\t\tCSE.cseRn = rn\n\t\t\tConfiguration.set('cse.rn', rn)",
"def fits(self):\n return sncosmo.fit_lc(self.SNCosmoLC(scattered=True,\n nightlyCoadd=True, seed=0), model=self.sncosmoModel,\n vparam_names=['t0', 'x0', 'x1', 'c'], minsnr=0.01)",
"def setup_lls_fit_analy(spec_fil, zlls, lls_windows, NHI_mnx, nNHI=100, spec_keys=None):\n # Init\n if spec_keys is None:\n spec_keys = dict(sig='ERROR', flux='FLUX', wave='WAVE')\n # Load up spectrum (Table and xspec)\n spec = Table.read(spec_fil)\n # Deal with NANs\n sig = spec[spec_keys['sig']].data.flatten()\n sig[np.isnan(sig)] = 0.\n xspec = XSpectrum1D.from_tuple((np.array(spec[spec_keys['wave']].data.flatten()),\n np.array(spec[spec_keys['flux']].data.flatten()),\n sig), masking='none')\n\n # Analysis pixels\n pixels = []\n for window in lls_windows:\n gdwv = np.where((xspec.wavelength >= window[0]*u.AA) &\n (xspec.wavelength <= window[1]*u.AA))[0]\n pixels.append(gdwv)\n gdwv = np.concatenate(pixels)\n\n # NHI\n NHI = np.linspace(NHI_mnx[0], NHI_mnx[1], num=nNHI)\n wv_rest = xspec.wavelength[gdwv] / (zlls+1)\n energy = wv_rest.to(u.eV, equivalencies=u.spectral())\n # Get photo_cross and calculate tau\n tau0 = (10.**NHI[0] / u.cm**2) * ltaa.photo_cross(1, 1, energy)\n\n # Return\n return spec, xspec, gdwv, NHI, tau0",
"def doParametersOfInterest(self):\n\n print 'XsecCorrelation creating POI'\n # --- Signal Strength and BSM contribution as POI --- \n self.modelBuilder.doVar(\"mu_8TeV[1,0,3]\")\n self.modelBuilder.doVar(\"mu_7TeV[1,0,3]\")\n #self.modelBuilder.doVar(\"epsBSM[0,-1,1]\")\n #self.modelBuilder.doVar('expr:mu_7TeV(\"@0*(@1+1)\",mu_8TeV,epsBSM)')\n\n if self.floatMass:\n if self.modelBuilder.out.var(\"MH\"):\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n else:\n self.modelBuilder.doVar(\"MH[%s,%s]\" % (self.mHRange[0],self.mHRange[1])) \n self.modelBuilder.doSet(\"POI\",'mu_8TeV,mu_7TeV,MH')\n else:\n if self.modelBuilder.out.var(\"MH\"):\n self.modelBuilder.out.var(\"MH\").setVal(self.options.mass)\n self.modelBuilder.out.var(\"MH\").setConstant(True)\n else:\n self.modelBuilder.doVar(\"MH[%g]\" % self.options.mass) \n self.modelBuilder.doSet(\"POI\",'mu_8TeV,mu_7TeV')\n return 0",
"def _fit_point_lens(self):\n\n def chi2_fun(theta, event, parameters_to_fit):\n \"\"\"\n for a given event set attributes from parameters_to_fit\n (list of str) to values from theta list\n \"\"\"\n for (key, val) in enumerate(parameters_to_fit):\n setattr(event.model.parameters, val, theta[key])\n chi2 = event.get_chi2()\n if chi2 < chi2_fun.best_chi2:\n chi2_fun.best_chi2 = chi2\n return chi2\n chi2_fun.best_chi2 = 1.e10\n\n def jacobian(theta, event, parameters_to_fit):\n \"\"\"\n Calculate chi^2 gradient (also called Jacobian).\n \"\"\"\n for (key, val) in enumerate(parameters_to_fit):\n setattr(event.model.parameters, val, theta[key])\n return event.chi2_gradient(parameters_to_fit)\n\n if self._event_PSPL is None:\n self._set_event_PSPL()\n\n parameters_to_fit = [\"t_0\", \"u_0\", \"t_E\"]\n initial_guess = [self._parameters[p] for p in parameters_to_fit]\n\n failed = False\n try:\n result = op.minimize(\n chi2_fun, x0=initial_guess,\n args=(self._event_PSPL, parameters_to_fit),\n method='Newton-CG', jac=jacobian, tol=3.e-4)\n except:\n failed = True\n\n if failed:\n try:\n result = op.minimize(\n chi2_fun, x0=initial_guess,\n args=(self._event_PSPL, parameters_to_fit),\n method='Newton-CG', jac=jacobian, tol=3.e-4)\n except:\n pass\n# XXX what if fit failed (i.e., .success is False)?\n\n self._LSST_PSPL_chi2 = chi2_fun.best_chi2",
"def fit(self, samples, values, nopt=None, corr_model_params=None):\n from scipy.linalg import cholesky\n\n if self.verbose:\n print('UQpy: Running Kriging.fit')\n\n def log_likelihood(p0, cm, s, f, y):\n # Return the log-likelihood function and it's gradient. Gradient is calculate using Central Difference\n m = s.shape[0]\n n = s.shape[1]\n r__, dr_ = cm(x=s, s=s, params=p0, dt=True)\n try:\n cc = cholesky(r__ + 2 ** (-52) * np.eye(m), lower=True)\n except np.linalg.LinAlgError:\n return np.inf, np.zeros(n)\n\n # Product of diagonal terms is negligible sometimes, even when cc exists.\n if np.prod(np.diagonal(cc)) == 0:\n return np.inf, np.zeros(n)\n\n cc_inv = np.linalg.inv(cc)\n r_inv = np.matmul(cc_inv.T, cc_inv)\n f__ = cc_inv.dot(f)\n y__ = cc_inv.dot(y)\n\n q__, g__ = np.linalg.qr(f__) # Eq: 3.11, DACE\n\n # Check if F is a full rank matrix\n if np.linalg.matrix_rank(g__) != min(np.size(f__, 0), np.size(f__, 1)):\n raise NotImplementedError(\"Chosen regression functions are not sufficiently linearly independent\")\n\n # Design parameters\n beta_ = np.linalg.solve(g__, np.matmul(np.transpose(q__), y__))\n\n # Computing the process variance (Eq: 3.13, DACE)\n sigma_ = np.zeros(y.shape[1])\n\n ll = 0\n for out_dim in range(y.shape[1]):\n sigma_[out_dim] = (1 / m) * (np.linalg.norm(y__[:, out_dim] - np.matmul(f__, beta_[:, out_dim])) ** 2)\n # Objective function:= log(det(sigma**2 * R)) + constant\n ll = ll + (np.log(np.linalg.det(sigma_[out_dim] * r__)) + m * (np.log(2 * np.pi) + 1)) / 2\n\n # Gradient of loglikelihood\n # Reference: C. E. Rasmussen & C. K. I. Williams, Gaussian Processes for Machine Learning, the MIT Press,\n # 2006, ISBN 026218253X. (Page 114, Eq.(5.9))\n residual = y - np.matmul(f, beta_)\n gamma = np.matmul(r_inv, residual)\n grad_mle = np.zeros(n)\n for in_dim in range(n):\n r_inv_derivative = np.matmul(r_inv, np.matmul(dr_[:, :, in_dim], r_inv))\n tmp = np.matmul(residual.T, np.matmul(r_inv_derivative, residual))\n for out_dim in range(y.shape[1]):\n alpha = gamma / sigma_[out_dim]\n tmp1 = np.matmul(alpha, alpha.T) - r_inv / sigma_[out_dim]\n cov_der = sigma_[out_dim] * dr_[:, :, in_dim] + tmp * r__ / m\n grad_mle[in_dim] = grad_mle[in_dim] - 0.5 * np.trace(np.matmul(tmp1, cov_der))\n\n return ll, grad_mle\n\n if nopt is not None:\n self.nopt = nopt\n if corr_model_params is not None:\n self.corr_model_params = corr_model_params\n self.samples = np.array(samples)\n\n # Number of samples and dimensions of samples and values\n nsamples, input_dim = self.samples.shape\n output_dim = int(np.size(values) / nsamples)\n\n self.values = np.array(values).reshape(nsamples, output_dim)\n\n # Normalizing the data\n if self.normalize:\n self.sample_mean, self.sample_std = np.mean(self.samples, 0), np.std(self.samples, 0)\n self.value_mean, self.value_std = np.mean(self.values, 0), np.std(self.values, 0)\n s_ = (self.samples - self.sample_mean) / self.sample_std\n y_ = (self.values - self.value_mean) / self.value_std\n else:\n s_ = self.samples\n y_ = self.values\n\n self.F, jf_ = self.reg_model(s_)\n\n # Maximum Likelihood Estimation : Solving optimization problem to calculate hyperparameters\n if self.op:\n starting_point = self.corr_model_params\n minimizer, fun_value = np.zeros([self.nopt, input_dim]), np.zeros([self.nopt, 1])\n for i__ in range(self.nopt):\n p_ = self.optimizer(log_likelihood, starting_point, args=(self.corr_model, s_, self.F, y_),\n **self.kwargs_optimizer)\n minimizer[i__, :] = p_[0]\n fun_value[i__, 0] = p_[1]\n # Generating new starting points using 
log-uniform distribution\n if i__ != self.nopt - 1:\n starting_point = stats.reciprocal.rvs([j[0] for j in self.bounds], [j[1] for j in self.bounds], 1,\n random_state=self.random_state)\n if min(fun_value) == np.inf:\n raise NotImplementedError(\"Maximum likelihood estimator failed: Choose different starting point or \"\n \"increase nopt\")\n t = np.argmin(fun_value)\n self.corr_model_params = minimizer[t, :]\n\n # Updated Correlation matrix corresponding to MLE estimates of hyperparameters\n self.R = self.corr_model(x=s_, s=s_, params=self.corr_model_params)\n # Compute the regression coefficient (solving this linear equation: F * beta = Y)\n c = np.linalg.cholesky(self.R) # Eq: 3.8, DACE\n c_inv = np.linalg.inv(c)\n f_dash = np.linalg.solve(c, self.F)\n y_dash = np.linalg.solve(c, y_)\n q_, g_ = np.linalg.qr(f_dash) # Eq: 3.11, DACE\n # Check if F is a full rank matrix\n if np.linalg.matrix_rank(g_) != min(np.size(self.F, 0), np.size(self.F, 1)):\n raise NotImplementedError(\"Chosen regression functions are not sufficiently linearly independent\")\n # Design parameters (beta: regression coefficient)\n self.beta = np.linalg.solve(g_, np.matmul(np.transpose(q_), y_dash))\n\n # Design parameter (R * gamma = Y - F * beta = residual)\n self.gamma = np.linalg.solve(c.T, (y_dash - np.matmul(f_dash, self.beta)))\n\n # Computing the process variance (Eq: 3.13, DACE)\n self.err_var = np.zeros(output_dim)\n for i in range(output_dim):\n self.err_var[i] = (1 / nsamples) * (np.linalg.norm(y_dash[:, i] - np.matmul(f_dash, self.beta[:, i])) ** 2)\n\n self.F_dash, self.C_inv, self.G = f_dash, c_inv, g_\n\n if self.verbose:\n print('UQpy: Kriging fit complete.')",
"def _define_combined_fit_params(self):\n\t\tself.z_bins = np.array([0.40, 0.60, 0.80, 1.00, 1.20,\n\t\t 1.40, 1.60, 1.80, 2.20, 2.40, \n\t\t 2.50, 2.60, 2.70, 2.80, 2.90,\n\t\t 3.00, 3.10, 3.20, 3.30, 3.40,\n\t\t 3.50, 4.10, 4.70, 5.50, np.inf])\n\t\tself.alphas = -np.array([2.74, 3.49, 3.55, 3.69, 4.24,\n\t\t 4.02, 4.35, 3.94, 4.26, 3.34,\n\t\t 3.61, 3.31, 3.13, 3.78, 3.61, \n\t\t 5.01, 4.72, 4.39, 4.39, 4.76, \n\t\t 3.72, 4.84, 4.19, 4.55, 5.00])\n\t\tself.betas = -np.array([1.07, 1.55, 1.89, 1.88, 1.84, \n\t\t 1.88, 1.87, 1.69, 1.98, 1.61, \n\t\t 1.60, 1.38, 1.05, 1.34, 1.46, \n\t\t 1.71, 1.70, 1.96, 1.93, 2.08, \n\t\t 1.25, 2.07, 2.20, 2.31, 2.40])\n\t\tself.M_stars = -np.array([21.30, 23.38, 24.21, 24.60, 25.24,\n\t\t 25.41, 25.77, 25.56, 26.35, 25.50,\n\t\t 25.86, 25.33, 25.16, 25.94, 26.22,\n\t\t 26.52, 26.48, 27.10, 27.19, 27.39,\n\t\t 26.65, 27.26, 27.37, 27.89, 29.19])",
"def __SetSFParams(self):\n\n # If radial structure functions are in output\n if self.__containsRadial:\n # Defines radial attributes\n self.__nc_RSoft_O.radial_error_tolerance = self.etol_radial\n\n # Defines radial dimensions\n self.__nc_RSoft_O.createDimension('radial_structure_functions',\\\n len(self.mus))\n\n # Defines radial variables\n mus_var_id_O = self.__nc_RSoft_O.createVariable('mus', \\\n 'f4', ('radial_structure_functions'))\n Ls_var_id_O = self.__nc_RSoft_O.createVariable('Ls', \\\n 'f4', ('radial_structure_functions'))\n radial_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Xs', 'i4', ('radial_structure_functions'))\n radial_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Ys', 'i4', ('radial_structure_functions'))\n\n # Sets radial structure function variables\n mus_var_id_O[:] = self.mus\n Ls_var_id_O[:] = self.Ls\n radial_Xs_var_id_O[:] = self.radial_Xs\n radial_Ys_var_id_O[:] = self.radial_Ys\n\n # If angular structure functions are in output\n if self.__containsAngular:\n # Defines angular attributes\n self.__nc_RSoft_O.angular_error_tolerance = self.etol_angular\n\n # Defines angular dimensions\n self.__nc_RSoft_O.createDimension('angular_structure_functions',\\\n len(self.xis))\n\n # Defines angular variables\n xis_var_id_O = self.__nc_RSoft_O.createVariable('xis', \\\n 'f4', ('angular_structure_functions'))\n zetas_var_id_O = self.__nc_RSoft_O.createVariable('zetas', \\\n 'i4', ('angular_structure_functions'))\n lambdas_var_id_O = self.__nc_RSoft_O.createVariable('lambdas', \\\n 'i4', ('angular_structure_functions'))\n angular_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Xs', 'i4', ('angular_structure_functions'))\n angular_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Ys', 'i4', ('angular_structure_functions'))\n angular_Zs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Zs', 'i4', ('angular_structure_functions')) \n\n # Sets angular structure function variables\n xis_var_id_O[:] = self.xis\n zetas_var_id_O[:] = self.zetas\n lambdas_var_id_O[:] = self.lambdas\n angular_Xs_var_id_O[:] = self.angular_Xs\n angular_Ys_var_id_O[:] = self.angular_Ys\n angular_Zs_var_id_O[:] = self.angular_Zs",
"def fit(self, resonance_file, experiment, out_paths):\n # Set up temporary file names #\n inp = temp_file_gen('Sammy_fit','inp')\n par = temp_file_gen('Sammy_fit','par')\n cov = temp_file_gen('Sammy_fit','cov')\n ndf = temp_file_gen('Sammy_fit','ndf')\n parout = temp_file_gen('Sammy_fit','out.par')\n covout = temp_file_gen('Sammy_fit','out.cov')\n #\n # Construct SAMMY input using resonance_file and information about the #\n # 'experiment' #\n self.endf2inp_par_ndf(resonance_file, [inp, par, ndf], \n experiment[1], flag_all = True)\n #\n # Change from MLBW formalism if this was in original file. #\n # Reich-Moore will be used instead, which is recommended. #\n self.modify_inp(inp, keyremove = ['mlbw formalism is wanted'])\n #\n # Fit to total cross section data without prior #\n message = self.g_least_squares(inp, par, experiment['total'],\n parout, covout)\n shutil.move(parout, par)\n shutil.move(covout, cov)\n #\n # Check if convergence was reached. Otherwise, something is bad. #\n if message[:len('Did not converge')] == 'Did not converge':\n raise RuntimeError(message)\n #\n # Perform a Beyesian update using capture data\n self.bayesian([inp, par, cov], experiment['capture'], [parout, covout])\n #\n # Construct ENDF formatted files from output #\n self.inp_par_ndf_cov2endfs([inp, parout, ndf, covout], out_paths)\n #\n # Include ENDF file paths in ResonanceFile instance to return\n resonance_file_out = ResonanceFile(out_paths[0], resonance_file.nuclide)\n resonance_file_out.cov = ResonanceCovFile(out_paths[1])\n #\n # Clean up\n if self.cleanup:\n for p in [inp, par, cov, ndf, parout, covout]: os.remove(p)\n #\n return resonance_file_out",
"def ISRSfitfunction(x, y_acf, sensdict, simparams, Niratios, y_err=None ):\n npts = simparams['numpoints']\n specs = simparams['species']\n amb_dict = simparams['amb_dict']\n numtype = simparams['dtype']\n if 'FitType' in simparams.keys():\n fitspec = simparams['FitType']\n else:\n fitspec = 'Spectrum'\n nspecs = len(specs)\n if not 'fitmode' in simparams.keys():\n (Ti, Ne, Te, v_i) = x\n elif simparams['fitmode'] == 0:\n (Ti, Ne, Te, v_i) = x\n elif simparams['fitmode'] == 1:\n (Ti, Ne, TeoTi, v_i) = x\n Te = TeoTi*Ti\n elif simparams['fitmode'] == 2:\n (Ti, acfnorm, TeoTi, v_i) = x\n Te = TeoTi*Ti\n Ne = acfnorm*(1+TeoTi)\n\n datablock = np.zeros((nspecs, 2), dtype=x.dtype)\n datablock[:-1, 0] = Ne*Niratios\n datablock[:-1, 1] = Ti\n datablock[-1, 0] = Ne\n datablock[-1, 1] = Te\n\n # determine if you've gone beyond the bounds\n # penalty for being less then zero\n grt0 = np.exp(-datablock)\n pentsum = np.zeros(grt0.size+1)\n pentsum[:-1] = grt0.flatten()\n\n\n specobj = ISRSpectrum(centerFrequency=sensdict['fc'], nspec=npts, sampfreq=sensdict['fs'])\n (omeg, cur_spec, rcs) = specobj.getspecsep(datablock, specs, v_i, rcsflag=True)\n cur_spec.astype(numtype)\n # Create spectrum guess\n (_, acf) = spect2acf(omeg, cur_spec)\n \n if amb_dict['WttMatrix'].shape[-1] != acf.shape[0]:\n pdb.set_trace()\n guess_acf = np.dot(amb_dict['WttMatrix'], acf)\n # apply ambiguity function\n\n guess_acf = guess_acf*rcs/guess_acf[0].real\n if fitspec.lower() == 'spectrum':\n # fit to spectrums\n spec_interm = scfft.fft(guess_acf, n=len(cur_spec))\n spec_final = spec_interm.real\n y_interm = scfft.fft(y_acf, n=len(spec_final))\n y_spec = y_interm.real\n yout = y_spec-spec_final\n elif fitspec.lower() == 'acf':\n yout = y_acf-guess_acf\n\n if y_err is not None:\n yout = yout*1./y_err\n # Cannot make the output a complex array! To avoid this problem simply double\n # the size of the array and place the real and imaginary parts in alternating spots.\n if np.iscomplexobj(yout):\n youttmp = yout.copy()\n yout = np.zeros(2*len(youttmp)).astype(youttmp.real.dtype)\n yout[::2] = youttmp.real\n yout[1::2] = youttmp.imag\n\n penadd = np.sqrt(np.power(np.absolute(yout), 2).sum())*pentsum.sum()\n\n return yout+penadd",
"def fit(self):\n\n coeff, std, k, offset, nuse, ntot = fit_wavelength(\n identlist = self.identlist,\n npixel = self.param['npixel'],\n xorder = self.param['xorder'],\n yorder = self.param['yorder'],\n maxiter = self.param['maxiter'],\n clipping = self.param['clipping'],\n fit_filter = self.param['fit_filter'],\n )\n\n self.param['coeff'] = coeff\n self.param['std'] = std\n self.param['k'] = k\n self.param['offset'] = offset\n self.param['nuse'] = nuse\n self.param['ntot'] = ntot\n\n message = 'Wavelength fitted. std={:.6f}, utot={}, nuse={}'.format(\n std, ntot, nuse)\n print(message)\n\n self.plot_wavelength()\n\n # udpdate the order/aperture string\n aperture = self.param['aperture']\n order = k*aperture + offset\n text = 'Order {} (Aperture {})'.format(order, aperture)\n self.info_frame.order_label.config(text=text)\n\n self.update_fit_buttons()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
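Illustrative sketch for the SLACS record above: it stores only the fitted coefficients, so the snippet below attaches them to a stand-in class and draws one scattered value from an assumed linear relation (a*x + b plus Gaussian intrinsic scatter). The functional form, the predictor x, and the class and method names added here are assumptions for illustration only; the actual relation lives in the parent class, which is not shown in this record.

import numpy as np

class SLACSFitDemo:
    """Stand-in holder for the SLACS fit coefficients from the record above."""

    def _define_SLACS_fit_params(self):
        # Fit params from R_eff
        self.a = -0.41
        self.b = 0.39
        self.intrinsic_scatter = 0.14
        # Fit params from vel_disp
        self.a_v = 0.07
        self.b_v = -0.12
        self.int_v = 0.17

    def sample_scattered_value(self, x, rng=None):
        # Assumed form: linear relation plus Gaussian intrinsic scatter.
        rng = rng or np.random.default_rng()
        return self.a * x + self.b + rng.normal(scale=self.intrinsic_scatter)

demo = SLACSFitDemo()
demo._define_SLACS_fit_params()
print(demo.sample_scattered_value(x=0.3))  # x is a hypothetical predictor value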
Set the parameters fit on the SDSS data. Note: the shape of the distribution arises because more massive galaxies are closer to spherical than less massive ones. The truncation excludes highly flattened profiles. The default fit values have been derived by [1]_ from the SDSS data. References | def _define_SDSS_fit_params(self):
self.a = 5.7*1.e-4
self.b = 0.38
self.lower = 0.2 | [
"def _define_SDSS_fit_params(self):\n\t\tself.a = 1.4335\n\t\tself.b = 0.3150 \n\t\tself.c = -8.8979\n\t\tself.intrinsic_scatter = 0.0578\n\t\t#self.delta_a = 0.02\n\t\t#self.delta_b = 0.01",
"def _define_SLACS_fit_params(self):\n\t\t# Fit params from R_eff\n\t\tself.a = -0.41\n\t\tself.b = 0.39\n\t\t#self.delta_a = 0.12\n\t\t#self.delta_b = 0.10\n\t\tself.intrinsic_scatter = 0.14\n\t\t# Fit params from vel_disp\n\t\tself.a_v = 0.07\n\t\tself.b_v = -0.12\n\t\tself.int_v = 0.17",
"def _define_combined_fit_params(self):\n\t\tself.z_bins = np.array([0.40, 0.60, 0.80, 1.00, 1.20,\n\t\t 1.40, 1.60, 1.80, 2.20, 2.40, \n\t\t 2.50, 2.60, 2.70, 2.80, 2.90,\n\t\t 3.00, 3.10, 3.20, 3.30, 3.40,\n\t\t 3.50, 4.10, 4.70, 5.50, np.inf])\n\t\tself.alphas = -np.array([2.74, 3.49, 3.55, 3.69, 4.24,\n\t\t 4.02, 4.35, 3.94, 4.26, 3.34,\n\t\t 3.61, 3.31, 3.13, 3.78, 3.61, \n\t\t 5.01, 4.72, 4.39, 4.39, 4.76, \n\t\t 3.72, 4.84, 4.19, 4.55, 5.00])\n\t\tself.betas = -np.array([1.07, 1.55, 1.89, 1.88, 1.84, \n\t\t 1.88, 1.87, 1.69, 1.98, 1.61, \n\t\t 1.60, 1.38, 1.05, 1.34, 1.46, \n\t\t 1.71, 1.70, 1.96, 1.93, 2.08, \n\t\t 1.25, 2.07, 2.20, 2.31, 2.40])\n\t\tself.M_stars = -np.array([21.30, 23.38, 24.21, 24.60, 25.24,\n\t\t 25.41, 25.77, 25.56, 26.35, 25.50,\n\t\t 25.86, 25.33, 25.16, 25.94, 26.22,\n\t\t 26.52, 26.48, 27.10, 27.19, 27.39,\n\t\t 26.65, 27.26, 27.37, 27.89, 29.19])",
"def plot_fit_params(self, wsp, pdfname, snapname=''):\n if snapname:\n wsp.loadSnapshot(snapname)\n frame = get_var(wsp, self.mname).frame(rf.Title('Fit Results'))\n full_pdf = wsp.pdf(self.full_model)\n\n full_pdf.paramOn(frame, rf.Layout(0.1, 0.9, 0.9),\n rf.Format('NEU', rf.AutoPrecision(2)))\n\n can = r.TCanvas(create_random_str(32), 'rcan', 600, 600)\n can.cd()\n frame.findObject('{}_paramBox'.format(full_pdf.GetName())).Draw()\n can.SaveAs(pdfname)",
"def setup_fit(self,\n names=None,\n model='hydro',\n lp_filter=False,\n lp_fixed=False,\n aliasing=False,\n f_sample=None,\n N_alias=9,\n debias=True,\n f3dB=None,\n alpha=None,\n **fit_psd_kws):\n if names and not isinstance(names, list):\n names = [names]\n\n for name in names if names else self.names:\n kws = fit_psd_kws\n kws['model'] = model.lower()\n kws['lp_filter'] = lp_filter\n kws['lp_fixed'] = lp_fixed\n kws['aliasing'] = aliasing\n if aliasing and f_sample is None:\n try:\n f_sample = self.psdm.psds[name].get_f_sample(unit='Hz')\n except:\n pass\n kws['f_sample'] = f_sample\n kws['N_alias'] = N_alias\n kws['debias'] = debias\n if f3dB:\n kws['f3dB'] = f3dB\n if alpha:\n kws['alpha'] = alpha\n self.fit_kwargs[name] = kws",
"def _define_ETG_fit_params(self):\n\t\tself.slope = 2.0\n\t\tself.intercept = 5.8",
"def set_fit_params(self):\n\n self.p0 = np.array([self.A_arr, self.T_a])\n # initial guess at A_arr and T_a\n\n self.popt, self.pcov = curve_fit(\n self.get_eta_fit, self.T_exp, self.eta_exp, p0=self.p0\n )\n\n self.A_arr = self.popt[0]\n self.T_a = self.popt[1]\n\n self.T_array = self.T_model",
"def SetParams(ss, sheet, setMsg):\n if sheet == \"\":\n ss.Params.ValidateSheets(go.Slice_string([\"Network\", \"Sim\"]))\n ss.SetParamsSet(\"Base\", sheet, setMsg)\n if ss.ParamSet != \"\" and ss.ParamSet != \"Base\":\n sps = ss.ParamSet.split()\n for ps in sps:\n ss.SetParamsSet(ps, sheet, setMsg)\n if ss.Learn == LearnType.Hebbian:\n ss.SetParamsSet(\"Hebbian\", sheet, setMsg)\n elif ss.Learn == LearnType.ErrorDriven:\n ss.SetParamsSet(\"ErrorDriven\", sheet, setMsg)",
"def fit_synaptic_parameter_distribution(rec_filename, sec_type, syn_type, param_name):\n with h5py.File(data_dir+rec_filename+'.hdf5', 'r') as f:\n if f.attrs['syn_type'] == syn_type and sec_type in f and param_name in f[sec_type]:\n dataset = f[sec_type][param_name]\n distances = f[sec_type]['distances']\n indexes = range(len(distances))\n indexes.sort(key=distances.__getitem__)\n sorted_distances = np.array(map(distances.__getitem__, indexes))\n sorted_dataset = np.array(map(dataset.__getitem__, indexes))\n interp_distances = np.arange(0, sorted_distances[-1], 1.)\n\n y0 = sorted_dataset[0]\n A, tau = fit_exp_linear(sorted_distances, sorted_dataset, y0)\n y1 = y0-A\n A, tau = fit_exp_linear(sorted_distances, sorted_dataset, y1)\n fit = (y0 - A) + A * np.exp(interp_distances/tau)\n\n plt.scatter(sorted_distances, sorted_dataset, label=syn_type+': '+param_name)\n plt.plot(interp_distances, fit, label='fit')\n plt.xlabel('Distance from Soma (um)')\n plt.ylabel('Peak Conductance (uS)')\n plt.title('Mechanism Parameter Distribution')\n plt.legend(loc=\"best\", scatterpoints=1, frameon=False, framealpha=0.5)\n plt.show()\n else:\n raise Exception('rec_filename is not formatted correctly or does not contain the specified data.')\n return [y0, A, tau]",
"def test_density_fit(self):\n \t\t\n\t\tprint(\"----test_density_fit-----\")\n\t\tdetails = self.watcher.analyze(layers = [25], ww2x=True, randomize=False, plot=False, mp_fit=True)\n\t\tprint(details.columns)\n\t\tprint(\"num spikes\", details.num_spikes)\n\t\tprint(\"sigma mp\", details.sigma_mp)\n\t\tprint(\"softrank\", details.mp_softrank)\n\n\t\t#self.assertAlmostEqual(details.num_spikes, 13) #numofSig\n\t\t#self.assertAlmostEqual(details.sigma_mp, 1.064648437) #sigma_mp\n\t\t#self.assertAlmostEqual(details.np_softrank, 0.203082, places = 6) ",
"def set_samples(self, parameters, summaries, distances=None,\n replace=False):\n if distances is None:\n distances = jax.vmap(\n lambda target, F: self.distance_measure(\n summaries, target, F))(self.target_summaries, self.F)\n if (self.parameters.all is None) or (replace):\n self.parameters.all = parameters\n self.summaries.all = summaries\n self.distances.all = distances\n else:\n self.parameters.all = np.concatenate(\n [self.parameters.all, parameters], axis=1)\n self.summaries.all = np.concatenate(\n [self.summaries.all, summaries], axis=1)\n self.distances.all = np.concatenate(\n [self.distances.all, distances], axis=1)\n self.parameters.size = self.parameters.all.shape[0]\n self.summaries.size = self.summaries.all.shape[0]\n self.distances.size = self.distances.all.shape[-1]",
"def fit(self):\n\n coeff, std, k, offset, nuse, ntot = fit_wavelength(\n identlist = self.identlist,\n npixel = self.param['npixel'],\n xorder = self.param['xorder'],\n yorder = self.param['yorder'],\n maxiter = self.param['maxiter'],\n clipping = self.param['clipping'],\n fit_filter = self.param['fit_filter'],\n )\n\n self.param['coeff'] = coeff\n self.param['std'] = std\n self.param['k'] = k\n self.param['offset'] = offset\n self.param['nuse'] = nuse\n self.param['ntot'] = ntot\n\n message = 'Wavelength fitted. std={:.6f}, utot={}, nuse={}'.format(\n std, ntot, nuse)\n print(message)\n\n self.plot_wavelength()\n\n # udpdate the order/aperture string\n aperture = self.param['aperture']\n order = k*aperture + offset\n text = 'Order {} (Aperture {})'.format(order, aperture)\n self.info_frame.order_label.config(text=text)\n\n self.update_fit_buttons()",
"def reset_parameters(self):\n logger.info('===== Initialize %s with normal distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_like_transformer_xl(n, p, std=0.02)",
"def fit_parameters(yhere): \n\n cfit = sol_1d[1]\n spec_here = np.ma.median(data[int(yhere)-2:int(yhere)+2, :], axis=0)\n shift = Fit.xcor_peak(spec_here, spec0, lags)\n ll_here = CV.chebval(pix - shift, cfit)\n [xs, sxs, sigmas] = find_known_lines(linelist,\n ll_here, spec_here, options)\n\n if data2 is not None:\n cfit2 = sol_1d2[1]\n spec_here2 = np.ma.median(data2[yhere-2:yhere+2, :], axis=0)\n shift2 = Fit.xcor_peak(spec_here2, spec2, lags)\n ll_here2 = CV.chebval(pix - shift2, cfit2)\n\n [xs2, sxs2, sigmas2] = find_known_lines(linelist2,\n ll_here2, spec_here2, options)\n\n \"\"\" Fit a chebyshev to the measured positions \"\"\"\n if data2 is not None:\n \"fit both line lists\"\n \"\"\"combine the line lists\"\"\"\n clinelist= np.concatenate([linelist,linelist2])\n cxs = np.concatenate([xs, xs2])\n csxs = np.concatenate([sxs, sxs2])\n \n \"\"\"combine the measured xs and sxs arrays that have the measured\n line positions\"\"\"\n [delt, cfit, lines] = fit_chebyshev_to_lines(cxs, csxs,\n clinelist, options)\n else:\n [delt, cfit, lines] = fit_chebyshev_to_lines(xs, sxs,\n linelist, options)\n\n #if np.std(delt) < .01: pdb.set_trace()\n debug(\"resid ang S%2.2i @ p%4.0i: %1.2f rms %1.2f mad [shift%2.0f]\" % \\\n (slitno+1, yhere, np.std(delt), np.median(np.abs(delt)),\n shift))\n\n return cfit, delt",
"def fit(self, num_stoch_iters, num_switch_iters, max_s_itters=np.inf, randomness=10E-9, ant_randomness=0.5e-9, \n min_num_iters=10, max_num_iters=1000, xtol=1e-16, ftol=1e-16, gtol=1e-16):\n\n num_switch_iters_isMinimum = False\n if num_switch_iters < 0:\n num_switch_iters = np.abs(num_switch_iters)\n num_switch_iters_isMinimum = True\n max_s_itters = max(max_s_itters, num_switch_iters)\n\n initial_RMS = self.fitter.RMS( self.current_solution, self.num_DOF )\n print(\"initial RMS:\", initial_RMS)\n \n print('fitting:')\n best_sol = np.array( self.current_solution )\n best_RMS = initial_RMS\n number_LS_runs = 0\n num_LS_runs_needMoreItters = 0\n\n fitterSQ= GSL_LeastSquares( self.fitter.get_num_parameters(), self.fitter.get_num_measurments(), self.fitter.objective_fun_sq, jacobian=self.fitter.objective_fun_sq_jacobian )\n fitter = GSL_LeastSquares( self.fitter.get_num_parameters(), self.fitter.get_num_measurments(), self.fitter.objective_fun, jacobian=self.fitter.objective_fun_jacobian )\n\n for i in range( num_stoch_iters ):\n \n current_solution = np.array( best_sol )\n current_solution[ : self.num_delays] += np.random.normal(scale=randomness, size=self.num_delays )\n current_solution[ self.num_delays : self.num_delays+self.num_RecalAnts] += np.random.normal(scale=ant_randomness, size=self.num_RecalAnts )\n current_solution[self.num_delays+self.num_RecalAnts:] += np.random.normal(scale=randomness, size=len(current_solution)-(self.num_delays+self.num_RecalAnts) )\n\n print(i)\n\n switch_itter = 0\n previous_RMS = np.inf\n RMS_had_increased = False\n while num_switch_iters_isMinimum or switch_itter<num_switch_iters:\n # fit_res = least_squares( self.fitter.objective_fun_sq, current_solution, jac='2-point', method='lm', xtol=3.0E-16, ftol=3.0E-16, gtol=3.0E-16, x_scale='jac', max_nfev=max_nfev)\n # fit_res = least_squares( self.fitter.objective_fun_sq, current_solution, jac=self.fitter.objective_fun_sq_jacobian, method='lm', xtol=3.0E-16, ftol=3.0E-16, gtol=3.0E-16, x_scale='jac', max_nfev=max_nfev)\n \n fitterSQ.reset( current_solution )\n code, text = fitterSQ.run(min_num_iters, max_itters=max_num_iters, xtol=xtol, gtol=gtol, ftol=ftol)\n current_solution = fitterSQ.get_X()\n # num_runs = fitterSQ.get_num_iters()\n\n if code == 4:\n num_LS_runs_needMoreItters += 1\n \n ARMS = self.fitter.RMS( current_solution, self.num_DOF )\n \n # fit_res = least_squares( self.fitter.objective_fun, current_solution, jac='2-point', method='lm', xtol=3.0E-16, ftol=3.0E-16, gtol=3.0E-16, x_scale='jac', max_nfev=max_nfev)\n # fit_res = least_squares( self.fitter.objective_fun, current_solution, jac=self.fitter.objective_fun_sq_jacobian, method='lm', xtol=3.0E-16, ftol=3.0E-16, gtol=3.0E-16, x_scale='jac', max_nfev=max_nfev)\n # fit_res = least_squares( self.fitter.objective_fun, current_solution, jac=self.fitter.objective_fun_jacobian, method='lm', xtol=3.0E-16, ftol=3.0E-16, gtol=3.0E-16, x_scale='jac', max_nfev=max_num_iters)\n \n fitter.reset( current_solution )\n code, text = fitter.run(min_num_iters, max_itters=max_num_iters, xtol=xtol, gtol=gtol, ftol=ftol)\n current_solution = fitter.get_X()\n num_runs = fitter.get_num_iters()\n\n if code == 4:\n num_LS_runs_needMoreItters += 1\n\n number_LS_runs += 2\n RMS = self.fitter.RMS( current_solution, self.num_DOF )\n \n print(\" \", i, switch_itter, \":\", RMS, '(', ARMS, ')', text, 'N:', num_runs)\n\n if RMS>previous_RMS:\n RMS_had_increased = True\n \n if RMS < best_RMS:\n best_RMS = RMS\n best_sol = np.array( current_solution )\n print('IMPROVEMENT!')\n 
else:\n if num_switch_iters_isMinimum and (switch_itter>=num_switch_iters) and RMS_had_increased:\n break\n if switch_itter>=max_s_itters:\n break\n\n previous_RMS = RMS\n switch_itter += 1\n\n self.current_solution = best_sol\n self.GSL_covariance_matrix = fitter.get_covariance_matrix()\n print('frac. runs need more itters:', num_LS_runs_needMoreItters/number_LS_runs)\n print()\n print()\n \n print('initial RMS:', initial_RMS)\n print(\"best RMS:\", best_RMS)\n \n \n print('fits:')\n \n stations_to_print = self.station_order\n PSE_RMS_data = [ self.fitter.event_SSqE( source_i, best_sol ) for source_i in range(len(self.current_sources)) ]\n while len(stations_to_print) > 0:\n stations_this_run = stations_to_print[:self.num_stat_per_table]\n stations_to_print = stations_to_print[len(stations_this_run):]\n \n fit_table = PrettyTable()\n fit_table.field_names = ['id'] + stations_this_run + ['total']\n fit_table.float_format = '.2E'\n \n for source_i,PSE in enumerate(self.current_sources):\n new_row = ['']*len(fit_table.field_names)\n new_row[0] = PSE.ID\n \n# PSE_SSqE, total_ants = self.fitter.event_SSqE( source_i, best_sol )\n new_row[-1] = np.sqrt( PSE_RMS_data[source_i][0] / (PSE_RMS_data[source_i][1]-4) )\n \n for i,sname in enumerate( stations_this_run ):\n stat_ant_range = self.station_to_antenna_index_dict[sname]\n SSqE, numants = self.fitter.event_SSqE( source_i, best_sol, stat_ant_range )\n if numants > 0:\n new_row[i+1] = np.sqrt( SSqE/numants )\n else:\n new_row[i+1] = ''\n \n fit_table.add_row( new_row )\n \n print( fit_table )\n print()\n \n print()\n print()\n \n station_delays = best_sol[:self.num_delays]\n antenna_delays = best_sol[ self.num_delays:self.num_delays+self.num_RecalAnts ]\n source_locs = best_sol[ self.num_delays+self.num_RecalAnts: ]\n \n print(\"stat delays:\")\n for sname, delay, original in zip( self.station_order, station_delays, self.original_delays ):\n print(\"'\"+sname+\"' :\", delay, \", ## diff to guess:\", delay-original)\n \n print()\n print()\n \n print(\"ant delays:\")\n for ant_i, delay in zip(self.ant_recalibrate_order, antenna_delays):\n ant_name = self.sorted_antenna_names[ant_i]\n print(\"'\"+ant_name+\"' : \", delay, ', #', SId_to_Sname[ int(ant_name[:3]) ])\n \n print()\n print()\n \n print(\"locations:\")\n offset = 0\n for i,source in enumerate(self.current_sources):\n \n print(source.ID,':[', source_locs[offset+0], ',', source_locs[offset+1], ',', np.abs(source_locs[offset+2]), ',', source_locs[offset+3], end=' ')# '],')\n \n offset += 4\n if source.polarization == 2:\n print( ',', source_locs[offset], '],')\n offset += 1\n elif source.polarization == 3:\n print(',')\n print(\" \", source_locs[offset+0], ',', source_locs[offset+1], ',', np.abs(source_locs[offset+2]), ',', source_locs[offset+3], '],' )\n offset += 4\n else: ## end pol = 0 or 1\n print('],')\n \n print()\n print()\n \n print(\"REL LOCS\")\n refX, refY, refZ, refT = source_locs[:4]\n offset = 0\n for i,source in enumerate(self.current_sources):\n X,Y,Z,T = source_locs[offset:offset+4]\n print(source.ID,':[', X-refX, ',', Y-refY, ',', np.abs(Z)-np.abs(refZ), ',', T-refT, '],')\n offset +=4\n if source.polarization == 2:\n offset += 1\n elif source.polarization == 3:\n offset += 4",
"def test_linear_fit_fixed_parameter(self):\n init_model = models.Polynomial1D(degree=2, c1=1)\n init_model.c1.fixed = True\n\n x = np.arange(10)\n y = 2 + x + 0.5 * x * x\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y)\n assert_allclose(fitted_model.parameters, [2.0, 1.0, 0.5], atol=1e-14)",
"def _fit(self, fit_data: np.array):\n bw = 1 if self.bw is None else self.bw # bandwidth\n fit_kws = {\n \"bandwidth\": bw,\n \"algorithm\": \"auto\", # kdtree or ball_tree\n \"kernel\": self.kernel_type,\n \"metric\": \"euclidean\", # default\n \"atol\": 1e-4, # tolerance for convergence\n \"rtol\": 0, #\n \"breadth_first\": True, #\n \"leaf_size\": 40,\n \"metric_params\": None,\n } # define the kernel density estimator parameters\n kde = KernelDensity(**fit_kws) # initiate the estimator\n if self.grid_search and not self.bw:\n # GridSearchCV maximizes the total log probability density under the model.\n # The data X will be divided into train-test splits based on folds defined in cv param\n # For each combination of parameters that you specified in param_grid, the model\n # will be trained on the train part from the step above and then scoring will be used on test part.\n # The scores for each parameter combination will be combined for all the folds and averaged.\n # Highest performing parameter will be selected.\n\n grid = GridSearchCV(\n kde, {\"bandwidth\": self.bandwidth_space}\n ) # Grid search on bandwidth\n grid.fit(fit_data) # Fit the grid search\n self.bw = grid.best_params_[\n \"bandwidth\"\n ] # Set the bandwidth to the best bandwidth\n fit_kws[\"bandwidth\"] = self.bw # Update the bandwidth in the fit_kws\n kde.set_params(\n **{\"bandwidth\": self.bw}\n ) # Update the bandwidth in the scikit-learn model\n\n kde.fit(fit_data) # Fit the KDE\n\n return kde",
"def plot_fit(self,**kwargs):\n import matplotlib.pyplot as plt\n import seaborn as sns\n\n figsize = kwargs.get('figsize',(10,7))\n\n if self.latent_variables.estimated is False:\n raise Exception(\"No latent variables estimated!\")\n else:\n date_index = self.index[self.lags:self.data[0].shape[0]]\n mu, Y = self._model(self.latent_variables.get_z_values())\n for series in range(0,Y.shape[0]):\n plt.figure(figsize=figsize)\n plt.plot(date_index,Y[series],label='Data ' + str(series))\n plt.plot(date_index,mu[series],label='Filter' + str(series),c='black') \n plt.title(self.data_name[series])\n plt.legend(loc=2) \n plt.show()",
"def _fit_cleaner(*args, **kwargs):\n res = psy_fit(*args, **kwargs)\n pars = res.x\n\n if 'fixed' in kwargs.keys():\n fixed = kwargs['fixed']\n else:\n fixed = None\n\n m, w, lam, gam = _parameter_assignment(pars, fixed)\n return pd.Series({'m': m,\n 'w': w,\n 'lam': lam,\n 'gam': gam})"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
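For context on the SDSS record above: the next record's get_axis_ratio uses these coefficients as the scale of a Rayleigh distribution, scale = a*vel_disp + b, with lower acting as a truncation floor on the axis ratio. A small arithmetic sketch of how that scale, and the implied mean axis ratio before truncation, varies with velocity dispersion (the velocity-dispersion values below are illustrative inputs, not part of the record):

import numpy as np

a, b, lower = 5.7e-4, 0.38, 0.2  # SDSS fit values from the record above

for vel_disp in (150.0, 200.0, 250.0, 300.0):  # km/s, illustrative inputs
    scale = a * vel_disp + b  # Rayleigh scale used by get_axis_ratio
    mean_q = 1.0 - scale * np.sqrt(np.pi / 2.0)  # Rayleigh mean is scale*sqrt(pi/2)
    print(f"vel_disp={vel_disp:.0f}  scale={scale:.3f}  mean q before truncation={mean_q:.3f}")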
Sample the axis ratio of the lens galaxy: one minus the axis ratio is drawn from a Rayleigh distribution whose scale depends on the velocity dispersion | def get_axis_ratio(self, vel_disp):
scale = self.a*vel_disp + self.b
q = 0.0
while q < self.lower:
q = 1.0 - np.random.rayleigh(scale, size=None)
return q | [
"def scale(self):\n return Vector([self.axis.mag(), self.height, self.width]) * 0.5",
"def level_mags(slide):\n return [highest_mag(slide)/downsample for downsample in slide.level_downsamples]",
"def downsample_ratio(self):\n return self.resolution / self.mip_resolution(0)",
"def sphere_volume(r) :\n return (4 / 3) * np.pi * r ** 3",
"def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)",
"def getScale(self):\n return self.factor**self.turnOn",
"def vscale(self):\r\n return self.onefun.vscale",
"def GetScale(self) -> \"double\":\n return _itkShotNoiseImageFilterPython.itkShotNoiseImageFilterIUS2IUS2_GetScale(self)",
"def GetScale(self):\n ...",
"def rho_spaxel_scale(spaxel_scale=4.0, wavelength=1.0):\n\n scale_rad = spaxel_scale / MILIARCSECS_IN_A_RAD\n rho = scale_rad * ELT_DIAM / (wavelength * 1e-6)\n return rho",
"def _mean_plus_r_var(data: torch.Tensor, ratio: float = 0, **kwargs) -> float:\n return max(data.min().item(), data.mean().item() + ratio * data.std().item() + 1e-8)",
"def rho_spaxel_scale(spaxel_scale=4.0, wavelength=1.5):\n\n scale_rad = spaxel_scale / MILIARCSECS_IN_A_RAD\n rho = scale_rad * ELT_DIAM / (wavelength * 1e-6)\n return rho",
"def scale_to_vol(self, vol):\n\n f = (vol / self.vol) ** (1.0 / self.n) # linear factor\n self.expand *= f\n self.radius *= f\n self.vol_ball = vol",
"def find_scale(self):\n\n width = self.face['width'] * 2\n\n print \"hat size, \", hat.size\n print \"width: \", width\n print \"scale factor: \", width / hat.size[0]\n return map(int, (width, hat.size[1] * (width / hat.size[0])))",
"def calculateRatio(levelDims):\n highestReso = np.asarray(levelDims[0])\n lowestReso = np.asarray(levelDims[-1])\n Xratio, Yratio = highestReso/lowestReso\n return (Xratio, Yratio)",
"def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw",
"def scale(self):",
"def sphere_volume(r):\n\treturn 4/3. * math.pi * r ** 3",
"def mixing_ratio(vp, p) :\r\n return EPSILON * (vp / (p - vp))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
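A self-contained sketch of the sampling scheme in the record above, rewritten as a standalone function so the truncation at lower can be checked over many draws. The default coefficient values mirror the SDSS record; the function and argument names are chosen here for illustration.

import numpy as np

def sample_axis_ratio(vel_disp, a=5.7e-4, b=0.38, lower=0.2, rng=None):
    """Draw q = 1 - Rayleigh(scale) with scale = a*vel_disp + b, resampling until q >= lower."""
    rng = rng or np.random.default_rng()
    scale = a * vel_disp + b
    q = 0.0
    while q < lower:
        q = 1.0 - rng.rayleigh(scale)
    return q

samples = np.array([sample_axis_ratio(250.0) for _ in range(10_000)])
assert samples.min() >= 0.2 and samples.max() <= 1.0  # truncation floor respected
print(f"mean axis ratio at vel_disp=250: {samples.mean():.3f}")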
r"""Set the parameters fit on the combined sample of more than >80,000 colorselected AGN from ~14 datasets Note | def _define_combined_fit_params(self):
self.z_bins = np.array([0.40, 0.60, 0.80, 1.00, 1.20,
1.40, 1.60, 1.80, 2.20, 2.40,
2.50, 2.60, 2.70, 2.80, 2.90,
3.00, 3.10, 3.20, 3.30, 3.40,
3.50, 4.10, 4.70, 5.50, np.inf])
self.alphas = -np.array([2.74, 3.49, 3.55, 3.69, 4.24,
4.02, 4.35, 3.94, 4.26, 3.34,
3.61, 3.31, 3.13, 3.78, 3.61,
5.01, 4.72, 4.39, 4.39, 4.76,
3.72, 4.84, 4.19, 4.55, 5.00])
self.betas = -np.array([1.07, 1.55, 1.89, 1.88, 1.84,
1.88, 1.87, 1.69, 1.98, 1.61,
1.60, 1.38, 1.05, 1.34, 1.46,
1.71, 1.70, 1.96, 1.93, 2.08,
1.25, 2.07, 2.20, 2.31, 2.40])
self.M_stars = -np.array([21.30, 23.38, 24.21, 24.60, 25.24,
25.41, 25.77, 25.56, 26.35, 25.50,
25.86, 25.33, 25.16, 25.94, 26.22,
26.52, 26.48, 27.10, 27.19, 27.39,
26.65, 27.26, 27.37, 27.89, 29.19]) | [
"def fit_bma(self):\n if len(self.star.filter_names[self.star.filter_mask]) <= 5:\n print(colored('\\t\\t\\tNOT ENOUGH POINTS TO MAKE THE FIT! !', 'red'))\n return\n thr = self._threads if self._sequential else len(self._interpolators)\n # display('Bayesian Model Averaging', self.star, self._nlive,\n # self._dlogz, self.ndim, self._bound, self._sample,\n # thr, self._dynamic)\n if not self._sequential:\n jobs = []\n n_threads = len(self._interpolators)\n for intp, gr in zip(self._interpolators, self._grids):\n p = Process(target=self._bma_dynesty, args=([intp, gr]))\n jobs.append(p)\n p.start()\n for p in jobs:\n p.join()\n else:\n global interpolator\n for intp, gr in zip(self._interpolators, self._grids):\n interpolator = intp\n self.grid = gr\n out_file = self.out_folder + '/' + gr + '_out.pkl'\n print('\\t\\t\\tFITTING MODEL : ' + gr)\n try:\n self.fit_dynesty(out_file=out_file)\n except ValueError as e:\n dump_out = self.out_folder + '/' + gr + '_DUMP.pkl'\n pickle.dump(self.sampler.results, open(dump_out, 'wb'))\n DynestyError(dump_out, gr, e).__raise__()\n continue\n\n # Now that the fitting finished, read the outputs and average\n # the posteriors\n outs = []\n for g in self._grids:\n in_folder = f'{self.out_folder}/{g}_out.pkl'\n outs.append(in_folder)\n # with open(in_folder, 'rb') as out:\n # outs.append(pickle.load(out))\n c = np.random.choice(self.colors)\n avgd = self.bayesian_model_average(outs, self._grids, self._norm,\n self.n_samples, c)\n self.save_bma(avgd)\n\n elapsed_time = execution_time(self.start)\n end(self.coordinator, elapsed_time, self.out_folder,\n 'Bayesian Model Averaging', self.norm)\n pass",
"def _define_SDSS_fit_params(self):\n\t\tself.a = 1.4335\n\t\tself.b = 0.3150 \n\t\tself.c = -8.8979\n\t\tself.intrinsic_scatter = 0.0578\n\t\t#self.delta_a = 0.02\n\t\t#self.delta_b = 0.01",
"def _define_ETG_fit_params(self):\n\t\tself.slope = 2.0\n\t\tself.intercept = 5.8",
"def reset_parameters(self):\n self.lin.reset_parameters()\n self.att.reset_parameters()\n self.gnn_score.reset_parameters()\n if self.gnn_intra_cluster is not None:\n self.gnn_intra_cluster.reset_parameters()\n self.select.reset_parameters()",
"def gaussian_fit(self):\r\n\r\n self.df5 = pd.DataFrame(columns=['Slit Number', 'Centre', 'Centre_err', 'Sigma', 'Sigma_err', 'FWHM', 'FWHM_err', 'Height', 'Height_err'])\r\n QDot_slits = self.QDot_detection()\r\n\r\n if len(QDot_slits) > 0: \r\n self.plot_data = pd.DataFrame(columns=[f\"{QDot_slits[0]}\"], index=self.energies)\r\n else:\r\n self.plot_data = pd.DataFrame(index=self.energies)\r\n\r\n for slit_number in QDot_slits:\r\n sel = self.df4[f'{slit_number}']\r\n self.plot_data[f'{slit_number}'] = sel\r\n \r\n # Makes a good first guess for the fit values of the gaussian\r\n max_intensity = max(sel)\r\n central_energy = sel[sel==max_intensity].index.values\r\n central_energy = central_energy[0]\r\n\r\n # Fits a gaussian model to the selected data and shows the output\r\n gauss = models.GaussianModel()\r\n fit = gauss.fit(sel, x=self.energies, weights=1 / np.sqrt(sel), center = central_energy, amplitude = max_intensity, sigma = 1, nan_policy= 'omit')\r\n \r\n self.plot_data[f'{slit_number} best fit'] = fit.best_fit\r\n\r\n # Appends the fit data for the variables to a new dataframe and shows the fit results with errors\r\n fit_variables = [slit_number]\r\n for key in fit.params:\r\n if key in ['center', 'sigma', 'fwhm', 'height']:\r\n fit_variables.append(fit.params[key].value)\r\n fit_variables.append(fit.params[key].stderr)\r\n \r\n self.df5 = self.df5.append({'Slit Number': fit_variables[0], 'Centre': fit_variables[1], 'Centre_err': fit_variables[2], 'Sigma': fit_variables[3], 'Sigma_err': fit_variables[4], 'FWHM': fit_variables[5], 'FWHM_err': fit_variables[6], 'Height': fit_variables[7], 'Height_err': fit_variables[8]}, ignore_index=True)\r\n \r\n return self.plot_data, self.df5",
"def set_fit_params(self):\n\n self.p0 = np.array([self.A_arr, self.T_a])\n # initial guess at A_arr and T_a\n\n self.popt, self.pcov = curve_fit(\n self.get_eta_fit, self.T_exp, self.eta_exp, p0=self.p0\n )\n\n self.A_arr = self.popt[0]\n self.T_a = self.popt[1]\n\n self.T_array = self.T_model",
"def set_best_parameters(self):\n self.b1 = self.best_parameters[0]\n self.b2 = self.best_parameters[1]\n self.w1 = self.best_parameters[2]\n self.w2 = self.best_parameters[3]",
"def fit_all(self,x,MODEL=['M1','M2','M3','M4','M5','M6','M7','M8'],th=[0.0001,0.0005,0.001,0.005,0.01,0.05,0.1,0.2,0.3],C = [1,2,3,4,5,6,7,8],VERBOSE=False,random_state=0,criteria='icl'):\n nmod,nC,nt = len(MODEL),len(C),len(th)\n \n CRIT = sp.zeros((nmod,nC,nt))\n param = {'init':'user','tol':0.00001,'random_state':random_state}\n for i,c_ in enumerate(C):\n param['C']=c_\n # Kmeans initialization\n yi = KMeans(n_clusters=param['C'],n_init=10,n_jobs=-1,random_state=param['random_state']).fit_predict(x)\n # Check for minimal size of cluster\n nc = sp.asarray([len(sp.where(yi==i_)[0]) for i_ in xrange(param['C'])])\n if sp.any(nc<2):\n CRIT [:,i,:] = MIN\n else:\n yi+=1\n for m,model_ in enumerate(MODEL): # Loop over the models\n for j,th_ in enumerate(th): # Loop over the threshold\n param['th']=th_\n model = HDGMM(model=model_)\n model.fit(x,param=param,yi=yi)\n if criteria == 'bic':\n CRIT [m,i,j]=model.bic\n elif criteria == 'icl':\n CRIT [m,i,j]=model.icl # model.bic\n\n if VERBOSE:\n print(\"Models \\t C \\t th \\t {0} \".format(criteria))\n for m in xrange(len(MODEL)):\n t = sp.where(CRIT [m,:,:]==CRIT [m,:,:].max())\n print MODEL[m] + \" \\t \" + str(C[t[0][0]]) + \" \\t \" + str(th[t[1][0]]) + \" \\t \" + str(CRIT [m,:,:].max())\n\n t = sp.where(CRIT ==CRIT .max())\n print (\"\\nBest model is {}\".format(MODEL[t[0][0]]))\n else:\n t = sp.where(CRIT ==CRIT .max())\n\n ## Return the best model\n param['init']='kmeans'\n param['C']=C[t[1][0]]\n param['th']=th[t[2][0]]\n self.model = MODEL[t[0][0]]\n self.fit(x,param=param)",
"def set_reg_param(training_data, cv_data, alpha_min, alpha_max):\r\n \r\n f = open('alpha.txt', 'w')\r\n \r\n alph = alpha_min\r\n min_alpha = alpha_min # This is the value of alpha in our range that gives minimum for cv data\r\n alpha_largest = alpha_min # Learning is not generally done at alpha_min, this tracks larget alpha\r\n while alph < alpha_max:\r\n \"\"\" Learn for this parameter \"\"\"\r\n clf = linear_model.Ridge (alpha=alph, fit_intercept=False)\r\n clf.fit(training_data.X, training_data.y)\r\n \r\n \"\"\" Get prediction for this parameter \"\"\"\r\n predict_data = clf.predict(training_data.X)\r\n predict_cv = clf.predict(cv_data.X)\r\n \r\n \"\"\" Caculate the differences from actual data for training and cv data\"\"\"\r\n diff_training = (1.0/training_data.m) * np.linalg.norm(predict_data - training_data.y)\r\n diff_cv = (1.0/cv_data.m) * np.linalg.norm(predict_cv - cv_data.y)\r\n \r\n \"\"\" Write out the values for plotting. Do appropriate work to determine min_val_alpha \"\"\"\r\n f.write(str(alph) + \" \" + str(diff_training) + \" \" + str(diff_cv) + \"\\n\")\r\n if alph == alpha_min:\r\n min_diff = diff_cv # Just setting default value for first value of alph \r\n min_alpha = alpha_min\r\n if diff_cv < min_diff:\r\n \"\"\" We have a new minimum so value and alph must be recored \"\"\"\r\n min_diff = diff_cv\r\n min_alpha = alph\r\n alpha_largest = alph # Keep track of largest alpha used\r\n alph = alph * 1.5 # increment alph\r\n f.close()\r\n \r\n \"\"\" Loop is now complete. If min_value_alpha is not alpha_min or alpha_max, return flag of 0\r\n else return -1 or 1 so min or max can be adjusted and loop completed again \"\"\"\r\n if abs(min_alpha - alpha_min) < alpha_min/10.0:\r\n flag = -1 # Local minimum is less than alpha_min so return -1 \r\n elif abs(min_alpha - alpha_largest) < alpha_min/10.0:\r\n flag = 1 # Local minimum is greater than alpha_max so return 1 \r\n else:\r\n flag = 0 # Local minimum is in range so return 0 \r\n \r\n return min_alpha, flag",
"def GetParameters(ParamsFile, QualityFile, Bands, NumberOfParameters, RelativeUncert, ScaleFactor, ProcessSnow = 0):\n\n FillValue = 32767\n NumberOfBands = Bands.shape[0]\n\n # Get dimensions\n rows, cols = GetDimSubDataset( ParamsFile )\n\n Parameters = np.zeros((rows, cols, NumberOfBands, NumberOfParameters), np.float32)\n Uncertainties = np.zeros((rows, cols, NumberOfBands), np.float32)\n\n # Get Snow\n # 1 Snow albedo retrieved\n # 0 Snow-free albedo retrieved\n # 255 Fill Value\n print \"Reading Snow QA:\", QualityFile\n SubDatasetName = 'HDF4_EOS:EOS_GRID:\"' + QualityFile + '\":MOD_Grid_BRDF:Snow_BRDF_Albedo'\n SubDataset = gdal.Open(SubDatasetName, GA_ReadOnly)\n SnowQA = SubDataset.GetRasterBand(1).ReadAsArray()\n if ProcessSnow == 0:\n SnowQA = np.where( SnowQA == 0, 1, 0)\n else:\n SnowQA = np.where( SnowQA == 1, 1, 0)\n\n # Load BRDF parameters\n print \"Reading BRDF parameters...\"\n for Band in range( Bands.shape[0] ):\n SubDatasetName = 'HDF4_EOS:EOS_GRID:\"' + ParamsFile + '\":MOD_Grid_BRDF:BRDF_Albedo_Parameters_Band' + str( Bands[Band] )\n print SubDatasetName \n SubDataset = gdal.Open(SubDatasetName, GA_ReadOnly)\n\n for Parameter in range(NumberOfParameters):\n print \"Getting BRDF parameter\", Parameter\n Parameters[:,:,Band,Parameter] = SubDataset.GetRasterBand( Parameter + 1 ).ReadAsArray()\n\n # Snow mask\n Parameters[:,:,Band,Parameter] = Parameters[:,:,Band,Parameter] * SnowQA\n\n # Filter out fill values\n Parameters[:,:,Band,Parameter] = np.where(Parameters[:,:,Band,Parameter] == FillValue, 0.,\n Parameters[:,:,Band,Parameter] * ScaleFactor )\n\n # Get QA\n print \"Reading QA:\", QualityFile\n for Band in range( Bands.shape[0] ):\n SubDatasetName = 'HDF4_EOS:EOS_GRID:\"' + QualityFile + '\":MOD_Grid_BRDF:BRDF_Albedo_Band_Quality_Band' + str( Bands[Band] )\n SubDataset = gdal.Open(SubDatasetName, GA_ReadOnly)\n QA = SubDataset.GetRasterBand(1).ReadAsArray()\n\n # https://ladsweb.nascom.nasa.gov/api/v1/filespec/collection=6&product=MCD43A2\n # BRDF_Albedo_Band_Quality_BandN ( N is 1 to 7 )> \n # 0 = best quality, full inversion (WoDs, RMSE majority good)\n # 1 = good quality, full inversion (also including the cases that no clear sky\n # observations over the day of interest or the Solar Zenith Angle is too \n # large even WoDs, RMSE majority good)\n # 2 = Magnitude inversion (numobs >=7)\n # 3 = Magnitude inversion (numobs >=2&<7)\n # 4 = Fill value\n\n QA_flags = np.array( [ 0,1,2,3 ] )\n\n for i, QA_flag in enumerate( QA_flags ) :\n indices = np.where( QA == QA_flag )\n Uncertainties[ indices[0], indices[1], Band ] = RelativeUncert[ i ]\n\n Uncertainties[:,:,Band] = Uncertainties[:,:,Band] * SnowQA \n\n SubDataset = None\n return Parameters, Uncertainties",
"def reduce_fit_params(fit_params, par_list):\n red_params = fit_params\n\n for i in par_list:\n red_params[i] = np.full(reader.chip_dim, np.mean(red_params[i]))\n\n return red_params",
"def _define_SDSS_fit_params(self):\n\t\tself.a = 5.7*1.e-4\n\t\tself.b = 0.38\n\t\tself.lower = 0.2",
"def fit_gauss_dif_constrained_allpeaks(df, treatment, mu_1, sigma_1, amplitude_1, gamma_1, mu_2, sigma_2, amplitude_2, mu_3, sigma_3, amplitude_3, gamma_3):\n filt_df = df[df['treatment_name'] == treatment]\n bins = np.arange(-0.21, 1.1, 0.025) \n inds = np.digitize(filt_df['FRET'].astype(float), bins)\n xdata, ydata = np.unique(inds, return_counts=True)\n ydata = ydata[1:-1] #### trim off outside range bins at the end\n xdata = [np.mean(bins[x : x + 2]) for x in range(len(bins)- 1)] ##### convert bin edges to bin centres, therefore end up with one less bin\n sns.lineplot(xdata, ydata)\n\n model_1 = models.SkewedGaussianModel(prefix='m1_')\n model_2 = models.GaussianModel(prefix='m2_')\n model_3 = models.SkewedGaussianModel(prefix='m3_')\n model = model_1 + model_2 + model_3 \n\n model_1.set_param_hint('m1_gamma', vary=False)\n model_1.set_param_hint('m1_sigma', vary=False)\n model_1.set_param_hint('m1_center', vary=False)\n\n model_2.set_param_hint('m2_sigma', vary=False)\n model_2.set_param_hint('m2_center', vary=False)\n model_3.set_param_hint('m3_gamma', vary=False)\n model_3.set_param_hint('m3_sigma', vary=False)\n\n\n params_1 = model_1.make_params(center = mu_1, sigma = sigma_1, amplitude = amplitude_1, gamma = gamma_1, min = 0)\n params_2 = model_2.make_params(center = mu_2, sigma = sigma_2, amplitude = amplitude_2, min = 0)\n params_3 = model_3.make_params(center = mu_3, sigma = sigma_3, amplitude = amplitude_3, gamma = gamma_3, min = 0)\n params = params_1.update(params_2)\n params = params.update(params_3)\n\n output = model.fit((ydata/np.max(ydata)), params, x=xdata)\n fig = sns.set_style('darkgrid')\n fig = output.plot(data_kws={'markersize': 3})\n\n paramaters = {name:output.params[name].value for name in output.params.keys()}\n fitx = np.arange(-0.2, 1.2, 0.025)\n\n fit1 = model_1.eval(x = fitx, center = paramaters['m1_center'], amplitude = abs(paramaters['m1_amplitude']), sigma = paramaters['m1_sigma'], gamma = paramaters['m1_gamma'])\n fit2 = model_2.eval(x = fitx, center = paramaters['m2_center'], amplitude = abs(paramaters['m2_amplitude']), sigma = paramaters['m2_sigma'], fwhm = paramaters['m2_fwhm'])\n fit3 = model_3.eval(x = fitx, center = paramaters['m3_center'], amplitude = abs(paramaters['m3_amplitude']), sigma = paramaters['m3_sigma'], gamma = paramaters['m3_gamma'])\n\n sns.lineplot(fitx, fit1)\n sns.lineplot(fitx, fit2)\n sns.lineplot(fitx, fit3)\n fig.savefig(f'{output_folder}/{treatment}_gaussfit.svg', dpi = 600)\n plt.show()\n\n # Calculate area under the curve for each gaussian\n aoc_m1 = paramaters['m1_amplitude']\n aoc_m2 = paramaters['m2_amplitude']\n aoc_m3 = paramaters['m3_amplitude']\n\n # aoc_m1 = (paramaters['m1_amplitude']*paramaters['m1_sigma'])/0.3989\n # aoc_m2 = (paramaters['m2_amplitude']*paramaters['m2_sigma'])/0.3989\n # aoc_m3 = (paramaters['m3_amplitude']*paramaters['m3_sigma'])/0.3989\n\n sum_aoc = aoc_m1 + aoc_m2 + aoc_m3 \n\n aoc_m1_percent_of_total = (aoc_m1/sum_aoc)*100\n aoc_m2_percent_of_total = (aoc_m2/sum_aoc)*100\n aoc_m3_percent_of_total = (aoc_m3/sum_aoc)*100\n list_of_gaus_proportion = [aoc_m1_percent_of_total, aoc_m2_percent_of_total, aoc_m3_percent_of_total]\n labels_of_gaus_proportion = ['m1', 'm2', 'm3']\n proportion_df = pd.DataFrame([labels_of_gaus_proportion, list_of_gaus_proportion])\n proportion_df.columns = proportion_df.iloc[0]\n proportion_df = proportion_df.drop(0)\n proportion_df['treatment'] = treatment\n proportion_df.to_csv(f'{output_folder}/gaussian_proportions_for_{treatment}.csv')\n return proportion_df",
"def _fit_all(self):\n\n t0=time.time()\n\n self.res={'flags':0}\n\n # for checkpointing\n self.data['processed'][self.dindex]=1\n self.data['nimage_use'][self.dindex] = 1\n self.data['number'][self.dindex] = self.meds['number'][self.mindex]\n\n assert self.data['number'][self.dindex]==self.truth['id'][self.mindex],\"ids must match\"\n\n self._fit_psf()\n\n if self.res['flags'] != 0:\n print('not fitting object due to psf failure')\n return\n\n self._fit_galaxy()\n\n self._copy_to_output()\n\n self.data['time'][self.dindex] = time.time()-t0",
"def make_asimov_fit_parameter_plots(self, combined=False):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n \n if combined:\n outdir = os.path.join(self.outdir, 'CombinedBestFits')\n else:\n outdir = os.path.join(self.outdir, 'IndividualBestFits')\n mkdir(outdir)\n \n maintitle = self.make_main_title(\n end='Asimov Analysis'\n )\n\n hrange = self.inj_param_vals[-1]-self.inj_param_vals[0]\n xlims = [self.inj_param_vals[0]-0.1*hrange,\n self.inj_param_vals[-1]+0.1*hrange]\n\n th = self.labels[self.labels.keys()[0]].dict[\n '%s_name'%self.th_to_wh[0]['params']['bestfit']]\n wh = self.labels[self.labels.keys()[0]].dict[\n '%s_name'%self.th_to_wh[0]['params']['altfit']]\n\n th_to_wh_label = \"%s fit to %s fiducial\"%(\n self.tex_axis_label(th),\n self.tex_axis_label(wh)\n )\n wh_to_th_label = \"%s fit to %s fiducial\"%(\n self.tex_axis_label(wh),\n self.tex_axis_label(th)\n )\n fitlabels = [th_to_wh_label, wh_to_th_label]\n\n subtitle = \"True %s Best Fit Parameters\\end{center}\"%(self.tex_axis_label(th))\n\n # Set up multi-plot if needed\n if combined:\n num_rows = self.get_num_rows(\n data=self.th_to_wh[0]['params'],\n omit_metric=False\n )\n plt.figure(figsize=(20, 5*num_rows+2))\n subplotnum = 1\n else:\n subplotnum = None\n\n for param in self.th_to_wh[0]['params'].keys():\n if param not in ['bestfit', 'altfit']:\n ymax = None\n ymin = None\n for fit, fitname, fitlabel in zip(\n [self.th_to_wh, self.wh_to_th],\n ['th_to_wh', 'wh_to_th'],\n fitlabels):\n vals = []\n for param_val in fit[0]['params'][param]:\n val, units = self.parse_pint_string(\n pint_string=param_val\n )\n if param == 'deltam31':\n vals.append(np.abs(float(val)))\n else:\n vals.append(float(val))\n # Specify the subplot, if necessary\n if combined:\n plt.subplot(num_rows, 4, subplotnum)\n self.make_1d_graph(\n xvals=self.inj_param_vals,\n yvals=vals,\n xlabel=self.inj_param_name,\n xunits=self.inj_param_units,\n ylabel=param,\n yunits=units,\n marker=self.marker_style(fitname),\n color=self.plot_colour(fitname),\n plotlabel=fitlabel,\n xlims=xlims\n )\n\n if ymax is None:\n ymax = max(vals)\n else:\n ymax = max(ymax, max(vals))\n if ymin is None:\n ymin = min(vals)\n else:\n ymin = min(ymin, min(vals))\n\n yrange = ymax - ymin\n plt.ylim(ymin-0.1*yrange, ymax+0.2*yrange)\n plt.legend(loc='upper left')\n # Advance the subplot number, if necessary\n if combined:\n subplotnum += 1\n # Else, save/close this plot\n else:\n plt.title(r'%s \\\\ %s'%(maintitle,subtitle))\n plt.tight_layout()\n save_end = \"%s_%s_best_fit_values\"%(self.inj_param_name,\n param)\n self.save_plot(outdir=outdir, end=save_end, truth=th)\n plt.close()\n # Save the whole canvas, if necessary\n if combined:\n plt.suptitle(r'%s \\\\ %s'%(maintitle,subtitle), fontsize=36)\n plt.tight_layout()\n plt.subplots_adjust(top=0.9)\n save_end = \"%s_all_best_fit_values\"%(self.inj_param_name)\n self.save_plot(outdir=outdir, end=save_end, truth=th)\n plt.close()",
"def fit_gp(self):\n self.gpf_core.fit_gp_for_gp_bandit(num_samples=self.options.hp_samples)",
"def test_fit4(self):\r\n self.test.set_range(qmin=-1, qmax=10)\r\n self.test.reset_value()\r\n self.test.test_map_fit(n=3)",
"def fit(self, n_iter, n_processes, alpha):\n return",
"def fit(self, data):\n self.seed = random.choice(range(100))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sample the AGN luminosity from the redshift-binned luminosity function | def sample_agn_luminosity(self, z):
# Assign redshift bin
is_less_than_right_edge = (z < self.z_bins)
alpha = self.alphas[is_less_than_right_edge][0]
beta = self.betas[is_less_than_right_edge][0]
M_star = self.M_stars[is_less_than_right_edge][0]
# Evaluate function
pmf = self.get_double_power_law(alpha, beta, M_star)
# Sample luminosity
sampled_M = np.random.choice(self.M_grid, None, replace=True, p=pmf)
return sampled_M | [
"def compute_luminosity(red, green, blue):\r\n return (0.299 * red) + (0.587 * green) + (0.114 * blue)",
"def compute_radiocore_luminosity(MBH, L_AGN):\n\tL_X = bolcorr_hardX(L_AGN)\n\tm = log10(MBH / u.Msun)\n\t# Merloni, Heinz & Di Matteo (2003)\n\tlogLR = 0.6 * log10(L_X/(u.erg/u.s)) + 0.78 * m + 7.33\n\treturn 10**logLR * u.erg/u.s",
"def read_luminance(self):\r\n ADC_adjust = self._read_adc()\r\n numerator = (float(ADC_adjust) / 524288) * self._V_ref * 1.e-6\r\n denominator = self._R_feed * self._K_cal * 1.e-15\r\n return max(0.0, numerator / denominator)",
"def luminosity(self, ldist):\n\t\ta = np.pi*3e14/2.*self.amplitude*(self.fwhm/self.x_0)/self.x_0/10**23\n\t\tb = 4*np.pi*(ldist*10**6*3.086e18)**2\n\t\treturn a*b",
"def average_luminosity(self, delta=1e-10):\n cumsum = 0.0\n for pix in self.pixels:\n cumsum += math.log10(delta + pix.luminosity())\n\n return math.pow(10, cumsum / len(self.pixels))",
"def LG_01_Intensity(r,w):\r\n \r\n #r = np.sqrt(x**2 + y**2)\r\n \r\n return 2 / np.pi * (1 / w ** 2) * (2 *r**2 / (w ** 2)) * np.exp(- 2 * r**2 / w**2)",
"def pixelLuminance (r, g, b):\n assert (type(r) == int and type(g) == int and type(b) == int)\n assert (0<=r<=255 and 0<=g<=255 and 0<=b<=255)\n return roundHalfUp((.2126*r)+(.7152*g)+(.0722*b))",
"def Luminosity(self):\n try:\n L = (self.E*self.Weight).sum()\n N = self.E.count()\n except:\n L = self.E.sum()\n N = self.E.count()\n return L, L/np.sqrt(N)",
"def get_luminance(color):\n return (0.2126*color[0]) + (0.7152*color[1]) + (0.0722*color[2])",
"def loadLuminosityFunction(self):\n\n tab = np.genfromtxt(self.fname[0], skip_header=self.skip_header)\n if not self.evolve:\n self.luminosity_function = np.zeros((tab.shape[0], self.nbands, self.nzbins))\n\n else:\n self.luminosity_function = np.zeros((tab.shape[0], self.nbands, 1))\n\n if self.ecol is not None:\n self.ye = np.zeros(self.luminosity_function.shape)\n imult = 1\n else:\n self.ye = None\n imult = 2\n\n self.magmean = tab[:,self.xcol]\n\n if self.nzbins==1:\n for i in range(self.nzbins):\n for j in range(self.nbands):\n self.luminosity_function[:,j,i] = tab[:,self.ycol]\n if self.ecol is not None:\n self.ye[:,j,i] = tab[:,self.ecol]\n else:\n if not self.evolve:\n assert((tab.shape[1]-1)==self.nzbins)\n for i in range(self.nzbins):\n for j in range(self.nbands):\n self.luminosity_function[:,j,i] = tab[:,i*imult+self.ycol]\n if self.ecol is not None:\n self.ye[:,j,i] = tab[:,i*imult+self.ecol]\n else:\n for j in range(self.nbands):\n self.luminosity_function[:,j,0] = tab[:,self.ycol]\n if self.ecol is not None:\n self.ye[:,j,0] = tab[:,self.ecol]\n\n self.xmean = self.magmean\n self.y = self.luminosity_function",
"def luminocity(orig_colors: ColorList, _config: ConfigParser) -> ColorList:\n return [Color.randcolor(lum=i.luminocity()) for i in orig_colors]",
"def GetLuminance(self):\n return _itkRGBAPixelPython.itkRGBAPixelUS_GetLuminance(self)",
"def luminance(self, color):\n return 0.2426 * color[2] + 0.7152 * color[1] + 0.0722 * color[0]",
"def test_luminosity(query_derived, query_atnf):\n\n edot = query_derived.get_pulsar('TEST1')['EDOT'][0]\n edotatnf = query_atnf.get_pulsar('TEST1')['EDOT'][0]\n\n assert abs(edot - edotatnf) < sf_scale(edotatnf)\n\n edotd2 = query_derived.get_pulsar('TEST1')['EDOTD2'][0]\n edotd2atnf = query_atnf.get_pulsar('TEST1')['EDOTD2'][0]\n\n assert abs(edotd2 - edotd2atnf) < sf_scale(edotd2atnf)",
"def sRGBGrayscale(x):\n rellum=sRGBLuminance(x)\n return [rellum,rellum,rellum]",
"def GetLuminance(self):\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetLuminance(self)",
"def brightness():\n return random.randint(5, 100)",
"def get_uthreshold(img):\n import noiselevel\n # sigma=Table.read('noiselevel.csv',format='csv')['sigma'][0]\n sigma = noiselevel.getnoiselevel(img,ranges=(-30,30),toplot=False)\n \n thres = sigma*np.sqrt(2*np.log(img.size))\n return thres, sigma",
"def _value_as_luminance(self):\n return round(float(self._value), 1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Expects 2 arrays of shape (3, N); rigid transform algorithm from | def rigid_transform_3d(xs,ys):
assert xs.shape == ys.shape
assert xs.shape[0] == 3, 'The points must be of dimmensionality 3'
# find centroids and H
x_centroid = np.mean(xs, axis=1)[:, np.newaxis]
y_centroid = np.mean(ys, axis=1)[:, np.newaxis]
H = (xs - x_centroid)@(ys - y_centroid).T
# find rotation
U, S, Vt = np.linalg.svd(H)
rotation = Vt.T@U.T
# handling reflection
if np.linalg.det(rotation) < 0:
Vt[2, :] *= -1
rotation = np.dot(Vt.T, U.T)
# find translation
translation = y_centroid - rotation@x_centroid
return translation, rotation | [
"def transform(tvec1, rvec1, tvec2, rvec2):\n op = localToGlobal(np.squeeze(tvec2), np.squeeze(rvec2))\n tvec3 = []\n for tvec in tvec1:\n #tvec = tvec.squeeze()\n tvec3.append(np.matmul(op, tvec))\n tvec3 = np.array(tvec3)\n return tvec3",
"def get_transformation(k: np.ndarray, r: np.ndarray, t: np.ndarray) -> np.ndarray:\n ... # TODO",
"def two_bs2x4_transform_opt(t1, r1, t2, r2, input_state):\n size = len(input_state)\n out = np.zeros((size,) * 4, dtype=complex)\n\n def coef(k1, k2, k3, k4):\n return t1 ** k2 * (1j * r1) ** k1 * t2 ** k4 * (1j * r2) ** k3 / (factorial(k1) * factorial(k2) * factorial(k3) * factorial(k4))\n\n # index 'i' = (m,n,k,l)\n for i in np.ndindex(size, size, size, size):\n if i[2] <= i[0] and i[3] <= i[1] and i[0] + i[1] < size:\n out[i[2], i[0] - i[2], i[3], i[1] - i[3]] = coef(i[2], i[0] - i[2], i[3], i[1] - i[3]) * input_state[i[0], i[1]] * factorial(i[0]) * factorial(i[1])\n\n return out",
"def compute_transform(tri1, tri2):\n tri2 = np.hstack((tri2, np.ones((3, 1))))\n return np.vstack((tri1.T @ np.linalg.inv(tri2.T), np.array([0, 0, 1])))",
"def compose_transforms(*transforms):\n from functools import reduce\n\n for transform in transforms:\n vg.shape.check(locals(), \"transform\", (4, 4))\n\n if len(transforms) == 0:\n return np.eye(4)\n\n return reduce(np.dot, reversed(transforms))",
"def two_bs2x4_transform(t1, r1, t2, r2, input_state):\n size = len(input_state)\n output_state = np.zeros((size,) * 4, dtype=complex)\n for m in range(size):\n for n in range(size):\n\n for k in range(m + 1):\n for l in range(n + 1):\n # channels indexes\n ind1 = k\n ind2 = m - k\n ind3 = l\n ind4 = n - l\n coeff = input_state[m, n] * t1**(m - k) * (1j*r1)**k * t2**(n - l) * (1j*r2)**l * factorial(m) * factorial(n) / (factorial(k) * factorial(m - k) * factorial(l) * factorial(n - l))\n output_state[ind1, ind2, ind3, ind4] = output_state[ind1, ind2, ind3, ind4] + coeff\n\n return output_state",
"def transformation_matrix(self, s1, s2, s3, t1, t2, t3):\n\n s1 = np.array(s1)\n s2 = np.array(s2)\n s3 = np.array(s3)\n t1 = np.array(t1)\n t2 = np.array(t2)\n t3 = np.array(t3)\n\n Q = np.array(\n [\n [t2[0] - t1[0], t2[1] - t1[1], t2[2] - t1[2]],\n [t3[0] - t1[0], t3[1] - t1[1], t3[2] - t1[2]],\n ]\n )\n\n P = np.array([[s2[0] - s1[0], s2[1] - s1[1]], [s3[0] - s1[0], s3[1] - s1[1]]])\n\n try:\n # Invert the P matrix\n Pinv = inv(P)\n\n # Build the dot product\n T = np.dot(Pinv, Q)\n\n # Offset\n V0 = np.subtract(t2, np.transpose(s2[0:2]).dot(T))\n except Exception as e:\n self.log.error(\"An error occured during the transformation.\", exc_info=True)\n return -1, -1\n\n return T, V0",
"def pose_pair_construct(p1,n1,p2,n2):\n v1 = p2-p1; v1 /= np.linalg.norm(v1)\n R1 = tf_construct(n1,v1)\n return RigidTransform.from_Rt(R1, p1)",
"def test_direct_shape():\n\n n = 21\n x = np.ones((n, n))\n\n recon = abel.direct.direct_transform(x, direction='forward')\n assert recon.shape == (n, n) \n\n recon = abel.direct.direct_transform(x, direction='inverse')\n assert recon.shape == (n, n)",
"def transform_pc3d(pcl_c3d, Ts, seq_n, K_cur, batch_n):\n\n ## need to transform: flat.uvb, flat.feature['xyz'], flat.feature['normal']\n ## no need to transform grid features\n \n assert batch_n % seq_n == 0 # mode==0\n n_group = batch_n // seq_n\n\n ## get relative pose\n T, R, t, target_id = relative_T(Ts, seq_n, batch_n)\n\n ## get accumulative length\n nb = pcl_c3d.flat.nb\n acc_b = []\n acc = 0\n acc_b.append( acc )\n for ib in range(batch_n):\n acc = acc + nb[ib]\n acc_b.append( acc )\n\n ## process flat features\n flat_xyz = pcl_c3d.flat.feature['xyz'] # 1*C*NB\n flat_normal = pcl_c3d.flat.feature['normal']\n trans_normal_list = []\n trans_xyz_list = []\n uvb_list = []\n new_nb = []\n for ib in range(batch_n):\n ## xyz\n trans_xyz = torch.matmul(R[ib], flat_xyz[:, :, acc_b[ib]:acc_b[ib+1]]) + t[ib]\n mask_positive = trans_xyz[0, 2, :] > 0\n trans_xyz = trans_xyz[:, :, mask_positive]\n trans_xyz_list.append(trans_xyz)\n new_nb.append(trans_xyz.shape[2])\n\n ## normal\n trans_normal = torch.matmul(R[ib], flat_normal[:, :, acc_b[ib]:acc_b[ib+1]])\n trans_normal = trans_normal[:, :, mask_positive]\n trans_normal_list.append(trans_normal)\n\n ## project to uv, add b\n uvb = torch.matmul(K_cur[ib], trans_xyz)\n uvb[:, :2] = uvb[:, :2] / uvb[:, [2]] #- 1 , commented because in dataset_read.py there is a K_mat2py() function converting K from matlab to python coordinate\n uvb[:, 2, :] = target_id[ib]\n uvb_list.append(uvb)\n\n ## construct the new object\n tr_pcl_c3d = PCL_C3D_Flat()\n tr_pcl_c3d.feature['xyz'] = torch.cat(trans_xyz_list, dim=2)\n tr_pcl_c3d.feature['normal'] = torch.cat(trans_normal_list, dim=2)\n tr_pcl_c3d.uvb = torch.cat(uvb_list, dim=2)\n tr_pcl_c3d.nb = new_nb\n\n for feat_key in pcl_c3d.flat.feature:\n if feat_key not in ['xyz', 'normal']:\n tr_pcl_c3d.feature[feat_key] = pcl_c3d.flat.feature[feat_key]\n\n return tr_pcl_c3d",
"def apply_transformation(self, points):\n assert (points.shape[0] == 3)\n n = points.shape[1]\n points_ = np.vstack((points, np.ones((1, n))))\n points_trans_ = np.matmul(self.pose_mat, points_)\n points_transformed = np.true_divide(points_trans_[:3, :], points_trans_[[-1], :])\n return points_transformed",
"def transformFromRotation3D(*args):\n return _almathswig.transformFromRotation3D(*args)",
"def transform_affine(self, fixed):\n if len(fixed) == 2:\n label0 = fixed[0][0]\n label1 = fixed[1][0]\n\n p1 = self.points[label0]\n p2 = self.points[label1]\n p3 = np.array(fixed[0][1:3])\n p4 = np.array(fixed[1][1:3])\n\n theta = angle_between(p1, p2, p3, p4) * np.pi / 180\n\n scale = dist(p3, p4) / dist(p1, p2)\n s = np.sin(theta)\n c = np.cos(theta)\n rot = np.array([[c, -s],\n [s, c]]) * scale\n\n labels = self.points.keys()\n for label in labels:\n xy = self.points[label]\n xy2 = np.dot(rot, xy - p1) + p3\n self.points[label] = xy2\n\n elif len(fixed) > 2:\n mat = np.zeros((2*len(fixed), 6))\n vec = np.zeros(2*len(fixed))\n for i, f in enumerate(fixed):\n label = f[0]\n mat[2*i,0] = self.points[label][0]\n mat[2*i,1] = self.points[label][1] \n mat[2*i+1,2] = self.points[label][0] \n mat[2*i+1,3] = self.points[label][1] \n mat[2*i,4] = 1 \n mat[2*i+1,5] = 1 \n\n vec[2*i] = f[1]\n vec[2*i+1] = f[2]\n\n coeff, resid, rank, s = np.linalg.lstsq(mat, vec)\n a, b, c, d, e, f = tuple(coeff)\n\n labels = self.points.keys()\n for label in labels:\n x = self.points[label][0]\n y = self.points[label][1]\n\n x2 = a * x + b * y + e\n y2 = c * x + d * y + f\n self.points[label][0] = x2\n self.points[label][1] = y2",
"def transformFromPose2D(*args):\n return _almathswig.transformFromPose2D(*args)",
"def __call__(self, image: Any, target: Any) -> Any:\n for t in self.transforms:\n image, target = t(image, target)\n return image, target",
"def rigid3d_proc(point_clouds, rgb_images, depth_images, np_kps_pre_img, cv_kps_pre_img, cv_des_pre_img,\n save_intermediate=False, out_folder=None, image_set_name=None, poisson=True, plot=True):\n pcds = make_pcds(point_clouds)\n kps_3d = make_3d_kps_depth_img(depth_images, np_kps_pre_img)\n all_results = []\n\n # perform global registration between every 2 consecutive images\n for i in range(1, len(pcds)):\n img1, kp1, des1 = rgb_images[i], cv_kps_pre_img[i], cv_des_pre_img[i]\n img2, kp2, des2 = rgb_images[i - 1], cv_kps_pre_img[i - 1], cv_des_pre_img[i - 1]\n\n bf_matches = q8.mathching_skimage(img1, kp1, des1, img2, kp2, des2, plot)\n H_matrix, matchs = q9.ransac_loop(img1, img2, kp1, kp2, bf_matches)\n\n m_kps1_3d = []\n m_kps2_3d = []\n\n for m in matchs:\n m_kps1_3d.append(kps_3d[i][m[0]])\n m_kps2_3d.append(kps_3d[i - 1][m[1]])\n\n R, t = r3d.rigid_transform_3D(np.array(m_kps1_3d).T, np.array(m_kps2_3d).T)\n Hmatrix = np.pad(R, ((0, 1), (0, 1)))\n Hmatrix[3, 3] = 1\n Hmatrix[0, 3] = t[0, 0]\n Hmatrix[1, 3] = t[1, 0]\n Hmatrix[2, 3] = t[2, 0]\n\n print(t)\n if plot:\n o3d_utils.visualize_transformation(pcds[i], pcds[i - 1], Hmatrix)\n\n print(Hmatrix)\n all_results.append(Hmatrix)\n\n # chain all point clouds together with computed transformation\n chain_transformation(pcds, all_results, save_intermediate, out_folder, image_set_name, poisson, plot)",
"def test_transform_3d(transform, alpha = 1):\r\n points = 20*[None]\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n z = random.randrange(-40, 41)\r\n points[i] = vec3(x, y, z)\r\n tr_x = random.randrange(-40, 41)\r\n tr_y = random.randrange(-40, 41)\r\n tr_z = random.randrange(-40, 41)\r\n mapping = [(p, vec3(p.x + tr_x, p.y + tr_y, p.z + tr_z)) for p in points]\r\n print(\"Translation\")\r\n print(\"Input\".ljust(30), \"Translation\".ljust(30), \"Transformation\".ljust(30))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n z = random.randrange(-40, 41)\r\n v_in = vec3(x, y, z)\r\n v_translate = vec3(x + tr_x, y + tr_y, z + tr_z)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(30), str(v_translate.str_repr(4)).ljust(30), str(v_transform.str_repr(4)).ljust(30))\r\n print()\r\n th_x = 2*math.pi*random.random()\r\n th_y = 2*math.pi*random.random()\r\n th_z = 2*math.pi*random.random()\r\n points_rot = [vec3(p.x, p.y*math.cos(th_x) - p.z*math.sin(th_x), p.y*math.sin(th_x) + p.z*math.cos(th_x)) for p in points]\r\n points_rot = [vec3(p.z*math.sin(th_y) + p.x*math.cos(th_y), p.y, p.z*math.cos(th_y) - p.x*math.sin(th_y)) for p in points_rot]\r\n points_rot = [vec3(p.x*math.cos(th_z) - p.y*math.sin(th_z), p.x*math.sin(th_z) + p.y*math.cos(th_z), p.z) for p in points_rot]\r\n mapping = [(points[i], points_rot[i]) for i in range(len(points))]\r\n print(\"Rotation\")\r\n print(\"Input\".ljust(30), \"Rotation\".ljust(30), \"Transformation\".ljust(30))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n z = random.randrange(-40, 41)\r\n v_in = vec3(x, y, z)\r\n v_rotate = vec3(v_in.x, v_in.y*math.cos(th_x) - v_in.z*math.sin(th_x), v_in.y*math.sin(th_x) + v_in.z*math.cos(th_x))\r\n v_rotate = vec3(v_rotate.z*math.sin(th_y) + v_rotate.x*math.cos(th_y), v_rotate.y, v_rotate.z*math.cos(th_y) - v_rotate.x*math.sin(th_y))\r\n v_rotate = vec3(v_rotate.x*math.cos(th_z) - v_rotate.y*math.sin(th_z), v_rotate.x*math.sin(th_z) + v_rotate.y*math.cos(th_z), v_rotate.z)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(30), str(v_rotate.str_repr(4)).ljust(30), str(v_transform.str_repr(4)).ljust(30))\r\n print()\r\n k = math.exp(2*random.random() - 1)\r\n mapping = [(p, vec3(k*p.x, k*p.y, k*p.z)) for p in points]\r\n print(\"Uniform scaling\")\r\n print(\"Input\".ljust(30), \"Scaling\".ljust(30), \"Transformation\".ljust(30))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n z = random.randrange(-40, 41)\r\n v_in = vec3(x, y, z)\r\n v_scale = vec3(k*x, k*y, k*z)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(30), str(v_scale.str_repr(4)).ljust(30), str(v_transform.str_repr(4)).ljust(30))\r\n print()\r\n k_x = math.exp(2*random.random() - 1)\r\n k_y = 3*random.random() + 1\r\n k_z = 3*random.random() + 1\r\n if (k_x >= k_y + math.exp(-1)): k_y = k_x - k_y\r\n else: k_y = k_x + k_y\r\n if ((k_x + k_y)/2 >= k_z + math.exp(-1)): k_z = (k_x + k_y)/2 - k_z\r\n else: k_z = (k_x + k_y)/2 + k_z\r\n mapping = [(p, vec3(k_x*p.x, k_y*p.y, k_z*p.z)) for p in points]\r\n print(\"Non-uniform scaling\")\r\n print(\"Input\".ljust(30), \"Scaling\".ljust(30), \"Transformation\".ljust(30))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n z = random.randrange(-40, 41)\r\n v_in = vec3(x, y, z)\r\n v_scale = vec3(k_x*x, k_y*y, 
k_z*z)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(30), str(v_scale.str_repr(4)).ljust(30), str(v_transform.str_repr(4)).ljust(30))\r\n print()",
"def transformFromRotationPosition3D(*args):\n return _almathswig.transformFromRotationPosition3D(*args)",
"def similarity_transform(self):\n \n p1=self.match3d \n p2=self.match2d\n \n #Translation (translate both sets of vectors to their centroid)\n m1=p1.mean(axis=0)\n m2=p2.mean(axis=0)\n p1=p1-m1\n p2=p2-m2\n #plt.figure(1)\n #plt.plot(p1[:,0], p1[:,1],'ro', p2[:,0], p2[:,1], 'go')\n \n #Scale p1 to p2 (Frobenius norm)\n p1s=np.sqrt(sum(sum(p1**2)))\n p2s=np.sqrt(sum(sum(p2**2)))\n self.s=p2s/p1s;\n p1=p1*self.s;\n #plt.figure(2)\n #plt.plot(p1[:,0], p1[:,1],'ro', p2[:,0], p2[:,1], 'go')\n \n #Rotation (SVD of correlation to find optimal rotatin of p1)\n corr1=np.dot(p1.transpose(), p2)\n u, ss, v=np.linalg.svd(corr1)\n self.R=np.dot(v, u.transpose())\n p1=np.dot(self.R, p1.transpose())\n p1=p1.transpose() \n #plt.figure(3)\n #plt.plot(p1[:,0], p1[:,1],'ro', p2[:,0], p2[:,1], 'go')\n \n #Calculate transform T\n m1prime=self.s * np.dot(self.R, m1.transpose())\n m1prime=m1prime.transpose()\n self.T=m2-m1prime\n \n #p1orig2=s*(np.dot(R, self.match3d.transpose())).transpose() + T\n #plt.figure(14)\n #plt.plot(p1orig2[:,0], p1orig2[:,1],'ro', self.match2d[:,0], self.match2d[:,1], 'go')\n #plt.show()\n \n #print \"Just testing without rotation\"\n #self.R=np.eye(2)\n #rotating the match3D using these newly calculated transforms\n \n self.match3d_align=self.s*(np.dot(self.R, self.match3d.transpose())).transpose() + self.T\n \n Er1=np.sqrt(((self.match2d-self.match3d_align)**2).sum())\n return Er1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Synchronize this instance's data with that of its parent | def _syncDataWithParent(self):
parent = self.parent()
if parent is None:
data, range_ = None, None
else:
data = parent.getData(copy=False)
range_ = parent.getDataRange()
self._updateData(data, range_) | [
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(copy=False)\n self._updateScenePrimitive()",
"def _synchronize(self, obj, child, associationrow, clearkeys):\n raise NotImplementedError()",
"def doSync (self) :\r\n \r\n self.factory.getSyncFor(self)",
"def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()",
"def sync(self):\n return self._sync",
"def SynchronizingObject(self) -> _n_1_t_3:",
"def _post_sync(self):",
"def _reload(self):\n if self._ancestorModelSourceCreated:\n self._parent._reload()\n else:\n # beware this breaks parent/child links such as current selection / hierarchical groups\n dictSave = self.serialize()\n tmpRegion = self._createBlankCopy()\n tmpRegion.deserialize(dictSave)\n self._assign(tmpRegion)\n self._informRegionChange(True)",
"def lock(self):\n raise NotImplementedError",
"def SyncRoot(self) -> object:",
"def update_data(self):\n pass",
"def sync(self, sync):\n self._sync = sync",
"def sync(self):\n self._sync_contacts()\n self._sync_monitors()",
"def sync(self) -> None:\n for parameter in self.data_to_sync:\n assert hasattr(self, parameter), \\\n \"Parameter: %s does not exist in: %s\" % (parameter, self)\n self.publish(self.key_gen(parameter), getattr(self, parameter))",
"def sync_tree_cache(self) -> None:\n self.sync_tree_with_data(self.tree_cache, self.data_cache)",
"def syncContents(self):\n self._contents.setState_TRY(self.temperature(),\n self.density(),\n self.massFractions())",
"def on_parent_changed(self):\n pass",
"def lock (self):\n self.locked = True\n self._changed = False",
"def _parent_changed(self):\n raise NotImplementedError(\"shouldnt happen, Parentable objects need to be able to change their parent\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle data change in the parent this plane belongs to | def _parentChanged(self, event):
if event == ItemChangedType.DATA:
self._syncDataWithParent() | [
"def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexCutPlane, self)._updated(event)",
"def _parentChanged(self, event):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexIsosurface, self)._parentChanged(event)",
"def _parent_changed(self):\n raise NotImplementedError(\"shouldnt happen, Parentable objects need to be able to change their parent\")",
"def data_changed(self):\n return",
"def on_parent_changed(self):\n pass",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(copy=False)\n self._updateScenePrimitive()",
"def data_changed(self):\n self.data_changed_signal.emit(self)",
"def _notify_parent_change(self):\n for p in self.parameters:\n p._parent_changed(self)",
"def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True",
"def update_data(self):\n pass",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n data = parent.getData(copy=False)\n range_ = parent.getDataRange()\n self._updateData(data, range_)",
"def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)",
"def XPLMDataChanged_f(inRefcon):",
"def update_data():\n pass",
"def levelChanged(self):\r\n pass",
"def change_data(self):\n\n if self.changed is not True:\n self.changed = True\n print('True')",
"def __onChildBorn(self, evt):\n\t\tpass",
"def parameter_tree_changed(self, param, changes):\n for param, change, data in changes:\n path = self.settings.childPath(param)\n if path is not None:\n childName = '.'.join(path)\n else:\n childName = param.name()\n if change == 'childAdded':pass\n\n elif change == 'value':\n\n if param.name() == 'Detectors':\n self.update_plot_det_items(param)\n\n elif param.name() == 'scan_average':\n self.show_average_dock(param.value() > 1)\n elif change == 'parent':pass",
"def _levelChanged(self, event):\n if event == items.Item3DChangedType.ISO_LEVEL:\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return whether values <= colormap min are displayed or not. | def getDisplayValuesBelowMin(self):
return self._getPlane().colormap.displayValuesBelowMin | [
"def lowresdisplay():\n w, h = getscreengeom()\n return w < 1400 or h < 700",
"def inHorizontalWindow(self, pc):\n return pc.miny() < self.maxy() and self.miny() < pc.maxy()",
"def setDisplayValuesBelowMin(self, display):\n display = bool(display)\n if display != self.getDisplayValuesBelowMin():\n self._getPlane().colormap.displayValuesBelowMin = display\n self._updated(ItemChangedType.ALPHA)",
"def visible(self):\n return -PipePair.WIDTH < self.x < WIN_WIDTH",
"def _are_breakpoint_values_within_range(self, breakpoints):\n min_allowed_bp = np.quantile(self.xx, self.min_distance_to_edge)\n max_allowed_bp = np.quantile(self.xx, 1 - self.min_distance_to_edge)\n\n for bp in breakpoints:\n if bp <= min_allowed_bp or bp >= max_allowed_bp:\n return False\n return True",
"def is_lower_limit(self):\n is_lower = self.get_raw_status() & self.STATUS_LLIM\n return bool(is_lower)",
"def usesAlpha(self):\n max = 1.0 if self.color.dtype.kind == 'f' else 255\n return np.any(self.color[:,3] != max)",
"def can_stand(self):\n return min(self.values) < 22",
"def is_lower(self):\n M = self.rep\n for i in range(self.rows):\n for j in range(i + 1, self.cols):\n if M[i, j]:\n return False\n return True",
"def _get_display_range(image): # pragma: no cover\n ip = _get_image_properties(image)\n immin, immax = np.min(image), np.max(image)\n if ip.signed:\n magnitude = max(abs(immin), abs(immax))\n lo, hi = -magnitude, magnitude\n cmap = _diverging_colormap\n elif any(ip):\n _raise_warnings(ip)\n lo, hi = immin, immax\n cmap = _nonstandard_colormap\n else:\n lo = 0\n imtype = image.dtype.type\n hi = dtype_range[imtype][1]\n cmap = _default_colormap\n return lo, hi, cmap",
"def isPositiveMap(self):\n x0, y0 = self.skyToPix(self.ra0_deg, self.dec0_deg)\n x1, y1 = self.skyToPix(self.ra0_deg + 1/3600., self.dec0_deg)\n\n if x1 > x0:\n return True\n return False",
"def inVerticalWindow(self, pc):\n return pc.minx() < self.maxx() and self.minx() < pc.maxx()",
"def highlight_min_max(s, min_color=\"#5fba7d\", max_color=\"#e67575\"):\n is_max = s == s.max()\n is_min = s == s.min()\n max_mapping = [f'background-color: {max_color}' if v else '' for v in is_max]\n min_mapping = [f'background-color: {min_color}' if v else '' for v in is_min]\n return [min_mapping[i] if min_mapping[i] != '' else max_mapping[i] for i in range(len(min_mapping))]",
"def has_range(self):\n return self.has_minimum and self.has_maximum",
"def testLinearMinMax(self):\n\n rgbImage = rgb.LinearMapping(image=self.images[R]).makeRgbImage()\n\n if display:\n rgb.displayRGB(rgbImage)",
"def fixed(self):\n return self._level <= MASS_LEVEL_LABEL",
"def is_lower(self):\n return all(i >= j for i, row in self.items() for j in row)",
"def _are_breakpoint_values_far_apart(self, breakpoints):\n min_distance = np.diff(np.sort(breakpoints))\n\n # numpy ptp gives the range of the data, closeness relative to that\n min_distance_allowed = self.min_distance_between_breakpoints * \\\n np.ptp(self.xx)\n\n if (min_distance <= min_distance_allowed).any():\n return False\n return True",
"def f_has_range(self):\n return len(self._explored_range) > 0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set whether to display values <= colormap min. | def setDisplayValuesBelowMin(self, display):
display = bool(display)
if display != self.getDisplayValuesBelowMin():
self._getPlane().colormap.displayValuesBelowMin = display
self._updated(ItemChangedType.ALPHA) | [
"def set_colormap_full_range(self):\n if(self.plot.image is None):\n return\n \n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n data_min = numpy.min(self.plot.image)\n data_max = numpy.max(self.plot.image)\n cmin.setText(str(data_min))\n cmax.setText(str(data_max))\n self.set_colormap_range()",
"def getDisplayValuesBelowMin(self):\n return self._getPlane().colormap.displayValuesBelowMin",
"def setCmapMinMax(self, minimum, maximum):\n\t\tself.cmapmin = minimum\n\t\tself.cmapmax = maximum\n\t\tself.isorender.set_cmap_minmax(minimum, maximum)",
"def _setBound(self, value):\n if self._colormap is not None:\n if self._index == 0:\n min_ = value\n max_ = self._colormap.getVMax()\n else: # self._index == 1\n min_ = self._colormap.getVMin()\n max_ = value\n\n if max_ is not None and min_ is not None and min_ > max_:\n min_, max_ = max_, min_\n self._colormap.setVRange(min_, max_)",
"def test_change_min_max(self):\n\n datarange = self.colormap.range\n\n # Perform a dummy mapping.\n a = ArrayDataSource(array([0.0, 0.5, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n\n # Update the min_value.\n datarange.low = -1.0\n\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, 0.0, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. Got %s\" % (expected, b[:,:1]))\n\n # Update the max_value.\n datarange.high = 0.0\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, -0.5, 0.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. Got %s\" % (expected, b[:,:1]))\n\n\n return",
"def set_colormap_range(self):\n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n region = self.plot.getHistogramWidget().region\n\n if(self.sender() == region):\n cmin.setText(str(region.getRegion()[0]))\n cmax.setText(str(region.getRegion()[1]))\n return\n\n # Sometimes the values in the lineEdits are\n # not proper floats so we get ValueErrors\n try:\n # If necessary swap min and max\n if(float(cmin.text()) > float(cmax.text())):\n _tmp = cmin.text()\n cmin.setText(cmax.text())\n cmax.setText(_tmp)\n\n region = [float(cmin.text()), float(cmax.text())]\n self.plot.getHistogramWidget().region.setRegion(region)\n except ValueError:\n return",
"def set_norm_min(self, min_value):\n self._norm_min = min_value",
"def set_limits_minmax(self, zmin, zmax):\n self._color_mapper.update(low=zmin, high=zmax)\n self.update()",
"def set_min_value(self, minval):\n self.__minval = minval",
"def set_low_high_value(self):\n # do not apply scaler norm on not scalable data\n self.range_dict.clear()\n\n for data_name in self.dict_to_plot.keys():\n if self.quantitative_normalization:\n # Quantitative normalization\n data_arr, _ = self.img_model_adv.param_quant_analysis.apply_quantitative_normalization(\n data_in=self.dict_to_plot[data_name],\n scaler_dict=self.scaler_norm_dict,\n scaler_name_default=self.get_selected_scaler_name(),\n data_name=data_name,\n ref_name=self.quantitative_ref_eline,\n name_not_scalable=self.name_not_scalable,\n )\n else:\n # Normalize by the selected scaler in a regular way\n data_arr = normalize_data_by_scaler(\n data_in=self.dict_to_plot[data_name],\n scaler=self.scaler_data,\n data_name=data_name,\n name_not_scalable=self.name_not_scalable,\n )\n\n lowv, highv = np.min(data_arr), np.max(data_arr)\n # Create some 'artificially' small range in case the array is constant\n if lowv == highv:\n lowv -= 0.005\n highv += 0.005\n self.range_dict[data_name] = {\"low\": lowv, \"low_default\": lowv, \"high\": highv, \"high_default\": highv}",
"def set_min(self, min):\n self.set_val((min, self.val[1]))",
"def setLow2Min(self):\n\tself.__setArrayValue(self.__LOW, 0)",
"def vmin(self):\n return self._vmin",
"def _set_vmin(self, vmin):\n self._update_frame(vmin=float(vmin))",
"def _rescale_cmap(self, cort, tomin=0., tomax=1., val=0):\n # Find non-zero values :\n non_val = cort != val\n\n # Rescale colormap :\n cort[non_val] = normalize(cort[non_val], tomin=tomin, tomax=tomax)\n\n return cort, non_val",
"def setContactMinThresholdValues(self,contactMinThreshold):\n contactMinThreshold.setLabel(\"Surface Detect threshold\")\n contactMinThreshold.setDefaultValue(60)\n contactMinThreshold.setMin(0)\n contactMinThreshold.setMax(65535)\n contactMinThreshold.setDescription(\"The minimum contact size measurement for persistent contact tracking. Contact size is the sum of neighbouring keys' touch deltas forming the touch contact.\")",
"def setCh1LowerThreshold(self, threshold):\n\t\tself.lower1 = threshold\n\t\tself.paintPreview()",
"def _adjust_scale(self, value):\n if self._min_val <= value <= self._max_val:\n self._scale_var.set(value)\n self.update_label_text()",
"def set_min(self, min_value):\n self._min = min_value"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Synchronize this instance's data with that of its parent | def _syncDataWithParent(self):
parent = self.parent()
if parent is None:
self._data = None
else:
self._data = parent.getData(copy=False)
self._updateScenePrimitive() | [
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n data = parent.getData(copy=False)\n range_ = parent.getDataRange()\n self._updateData(data, range_)",
"def _synchronize(self, obj, child, associationrow, clearkeys):\n raise NotImplementedError()",
"def doSync (self) :\r\n \r\n self.factory.getSyncFor(self)",
"def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()",
"def sync(self):\n return self._sync",
"def SynchronizingObject(self) -> _n_1_t_3:",
"def _post_sync(self):",
"def _reload(self):\n if self._ancestorModelSourceCreated:\n self._parent._reload()\n else:\n # beware this breaks parent/child links such as current selection / hierarchical groups\n dictSave = self.serialize()\n tmpRegion = self._createBlankCopy()\n tmpRegion.deserialize(dictSave)\n self._assign(tmpRegion)\n self._informRegionChange(True)",
"def lock(self):\n raise NotImplementedError",
"def SyncRoot(self) -> object:",
"def update_data(self):\n pass",
"def sync(self, sync):\n self._sync = sync",
"def sync(self):\n self._sync_contacts()\n self._sync_monitors()",
"def sync(self) -> None:\n for parameter in self.data_to_sync:\n assert hasattr(self, parameter), \\\n \"Parameter: %s does not exist in: %s\" % (parameter, self)\n self.publish(self.key_gen(parameter), getattr(self, parameter))",
"def sync_tree_cache(self) -> None:\n self.sync_tree_with_data(self.tree_cache, self.data_cache)",
"def syncContents(self):\n self._contents.setState_TRY(self.temperature(),\n self.density(),\n self.massFractions())",
"def on_parent_changed(self):\n pass",
"def lock (self):\n self.locked = True\n self._changed = False",
"def _parent_changed(self):\n raise NotImplementedError(\"shouldnt happen, Parentable objects need to be able to change their parent\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the level of this isosurface (float) | def getLevel(self):
return self._level | [
"def get_level(self):\r\n \r\n return self.level",
"def get_level(self):\r\n \r\n return self.level",
"def getLevel(self):\n return _libsbml.SBasePlugin_getLevel(self)",
"def get_level(self):\n return self.playerLevel",
"def getLevel(self):\n return _libsbml.SBase_getLevel(self)",
"def _get_isis_level(self):\n return self.__isis_level",
"def volume_level(self):\n return self._volume",
"def _do_get_level(self):\n logging.info(__name__ + ' : Read level of channel 1')\n result = self._execute('R1')\n return float(result.replace(\"R\", \"\")) / 10",
"def volume_level(self):\n return self._volume_entity.volume_level",
"def get_level(cls, curve_value):\n return curve_value & (2 ** cls.level_bits - 1)",
"def volume_level(self) -> float:\n return int(self._state.get(\"playback_volume\", 0)) / 100",
"def getLevel(self, *args):\n return _libsbml.FbcExtension_getLevel(self, *args)",
"def volume_level(self):\n if 'mixer volume' in self._status:\n return int(self._status['mixer volume']) / 100.0",
"def volume_level(self):\n return self._client.volume / 100",
"def level(self) -> str:\r\n return self.data.get(\"level\")",
"def get_level(self):\r\n \r\n return self.levels[self.current].get_level()",
"def getLevel(self, *args):\n return _libsbml.SBMLExtension_getLevel(self, *args)",
"def percentage_of_current_level(self) -> int:\n return math.floor(max(self.current_xp - 327680000, 0) / 5000)",
"def get_level(self, level):\n return"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the value at which to build the isosurface. Setting this value resets the autolevel function | def setLevel(self, level):
self._autoLevelFunction = None
level = float(level)
if level != self._level:
self._level = level
self._updateScenePrimitive()
self._updated(Item3DChangedType.ISO_LEVEL) | [
"def set(self, value):\n\n if value == self.Value.kOff:\n hal.setSolenoid(self.forwardHandle, False)\n hal.setSolenoid(self.reverseHandle, False)\n elif value == self.Value.kForward:\n hal.setSolenoid(self.reverseHandle, False)\n hal.setSolenoid(self.forwardHandle, True)\n elif value == self.Value.kReverse:\n hal.setSolenoid(self.forwardHandle, False)\n hal.setSolenoid(self.reverseHandle, True)\n else:\n raise ValueError(\"Invalid argument '%s'\" % value)",
"def set_level(self, value):\n value = max(0, min(254, value))\n self._brightness = value\n self.async_schedule_update_ha_state()",
"def SetLevelSetValue(self, _arg: 'double const') -> \"void\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetLevelSetValue(self, _arg)",
"def SetLevelSetValue(self, _arg: 'double const') -> \"void\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetLevelSetValue(self, _arg)",
"def __iso(self):\n self.logger.debug(\"running\")\n self.tab.set_config_val(\"ISO\")\n self.__set_device_upper_isi(\"5000\")\n self.__set_device_lower_isi(\"3000\")\n self.__set_device_stim_intensity(100)\n self.__set_device_stim_duration(\"1000\")\n self.__set_upload_button(False)\n self.logger.debug(\"done\")",
"def set_setpoint(self, value):\n value = value * self.conf['PSICONV']\n log.debug(\"Set pressure regulator %d to %f\", self.id_, value)\n self.synth.cbox.set_dac(self.id_, value)",
"def set_initial(self, value):\n # TODO: Make an Initial Stock Adjust here\n pass",
"def fset(self, sky_type):\r\n arg_str = p2e._util._convert_args_to_string(\"set.radiance.sky\", \r\n sky_type)\r\n p2e._app.Exec(arg_str)",
"def SetValue(self, value):\n self.gaugePanel.SetValue(value)",
"def set_light_intensity(self, value):\n self.matrixwritecommand([0x99, value])",
"def current_floor(self, new_value):\r\n self._current_floor = new_value",
"def set_level(self,level):\r\n \r\n self.level = level",
"def setFlatImage(self, value=1.0):\n self.fimage = None\n self.image = numpy.zeros((self.ny, self.nx), 'float') + value\n return",
"def changeRingSetting(self):\n #Input code to accommodate function of Ring setting",
"def setFixed(self):\n\t\tprint \"Setting camera mode to fixed\"",
"def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()",
"def setSolenoidCurrent(self, current):\n self.calc_level = min(self.calc_level, CALC_B_MAP - 1)\n self.solenoid.setSolenoidCurrent(current) # to reset solenoid calc",
"def change_current_level_value(v):\n game_vars.current_window = game_vars.current_window + v",
"def set_front_value(self, value):\n self.frt.set_value(value)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the function computing the isolevel (callable or None) | def getAutoLevelFunction(self):
return self._autoLevelFunction | [
"def _get_isis_level(self):\n return self.__isis_level",
"def elevation_level():\n return F.udf(lambda x: str(int(x/1000)*1000))",
"def _DEFAULT_FUNC_(t):\n\treturn 9.1",
"def _computeIsosurface(self):\n data = self.getData(copy=False)\n\n if data is None:\n if self.isAutoLevel():\n self._level = float('nan')\n\n else:\n if self.isAutoLevel():\n st = time.time()\n try:\n level = float(self.getAutoLevelFunction()(data))\n\n except Exception:\n module_ = self.getAutoLevelFunction().__module__\n name = self.getAutoLevelFunction().__name__\n _logger.error(\n \"Error while executing iso level function %s.%s\",\n module_,\n name,\n exc_info=True)\n level = float('nan')\n\n else:\n _logger.info(\n 'Computed iso-level in %f s.', time.time() - st)\n\n if level != self._level:\n self._level = level\n self._updated(Item3DChangedType.ISO_LEVEL)\n\n if numpy.isfinite(self._level):\n st = time.time()\n vertices, normals, indices = MarchingCubes(\n data,\n isolevel=self._level)\n _logger.info('Computed iso-surface in %f s.', time.time() - st)\n\n if len(vertices) != 0:\n return vertices, normals, indices\n\n return None, None, None",
"def _determine_func(self):\n for condition in self._conditions:\n if condition.check():\n return condition.func\n return self.func",
"def getFunc(self):\n\t\treturn self.initFunc()(self.x)",
"def func ( self ) :\n return self.__func",
"def ufunc(self):\n if self.field.ufunc_mode == \"python-calculate\":\n return self.python_calculate\n if self.field.ufunc_mode == \"jit-lookup\" and not self.always_calculate:\n return self.jit_lookup\n return self.jit_calculate",
"def get_function(self):\n return self.func",
"def get_function(self, domain: str, operator: str, opset: int) -> Callable:\n ...",
"def get_func(op):\n if op == \"-e\":\n return func\n elif op == \"-d\":\n return unfunc",
"def get_country_iso(country, alpha=3):\n try:\n country_found = pycountry.countries.search_fuzzy(country)[0]\n if alpha == 3:\n return country_found.alpha_3\n else:\n return country_found.alpha_2\n except LookupError:\n print(\"Can't find country: {}. Check again\".format(country))\n return None",
"def function(self) -> Callable:\n return self.env.function",
"def _PS1Func(self):\n return self.PS1",
"def lookup_identity_threshold(self, function=None, rank=None):\n result = self.default_identity_threshold\n if function == '':\n return result\n try:\n if function is not None and rank is not None:\n result = self.functions_dict[function][rank]\n elif function is not None:\n result = self.functions_dict[function]['function_cutoff']\n elif rank is not None:\n result = self.default_ranks_thresholds[rank]\n except KeyError:\n pass\n return result",
"def calc_Uiso(self):\n if self.temp_factor is None:\n return None\n return numpy.identity(3, float) * (self.temp_factor * Constants.B2U)",
"def _isNullFunc():\n try:\n return vd.sheet.isNullFunc()\n except AttributeError:\n import visidata\n\n return visidata.isNullFunc()",
"def optimal_liquidation_level(self):\r\n\r\n # If the liquidation level wasn't calculated before, setting it\r\n if self.liquidation_level[0] is None:\r\n\r\n equation = lambda price: (self._F(price, self.r[0]) - (price - self.c[0])\r\n * self._F_derivative(price, self.r[0]))\r\n\r\n bracket = [self.theta - 6 * np.sqrt(self.sigma_square),\r\n self.theta + 6 * np.sqrt(self.sigma_square)]\r\n\r\n sol = root_scalar(equation, bracket=bracket)\r\n\r\n output = sol.root\r\n\r\n self.liquidation_level[0] = output\r\n\r\n # If was pre-calculated, using it\r\n else:\r\n\r\n output = self.liquidation_level[0]\r\n\r\n return output",
"def get_function(self, name):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the color of this isosurface (QColor) | def getColor(self):
return qt.QColor.fromRgbF(*self._color) | [
"def get_color(self):\n return(self.pen_color)",
"def get_color(self):\n return self.color",
"def color(self):\n return idc.get_color(self.ea, idc.CIC_ITEM)",
"def _get_color(self): # pragma: no cover\n return QColorDialog(self).getColor(initial=QColor(\"black\"))",
"def color(self):\n return self.SUITS[self.suit].get(\"color\")",
"def get_color(self):\n return COLOR_DICT[self.element]",
"def getColor(self):\n return self.colorComboBox.currentText()",
"def get_colour(self):\n return self.colour",
"def conseguir_color(self):\n return self.pluma.conseguir_color()",
"def color(self):\n red = self.main.energy * 255\n green = xmath.int(self.main.gravity * 108)\n blue = xmath.int(self.main.gravity * 108)\n return (red, green, blue)",
"def get_color(self) -> tuple:\n return self._color",
"def get_color(self):\n return self._io.last_state['color']['front-center']",
"def color(self):\n if self.partido and self.partido.color:\n return self.partido.color\n return '#FFFFFF'",
"def v_color(ob: BaseGeometry) -> str:\n return COLORS[ob.is_simple + 33]",
"def GetDrawColor(self):\n ...",
"def get_material_color(self):\n return self._material['color']",
"def getColor(self):\n retval = self.pixels[self.x, self.y]\n return Color(retval)",
"def get_shade(self):\n return(self.pen_shade)",
"def mesh_color(self):\n return self._mesh_color"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute isosurface for current state. | def _computeIsosurface(self):
data = self.getData(copy=False)
if data is None:
if self.isAutoLevel():
self._level = float('nan')
else:
if self.isAutoLevel():
st = time.time()
try:
level = float(self.getAutoLevelFunction()(data))
except Exception:
module_ = self.getAutoLevelFunction().__module__
name = self.getAutoLevelFunction().__name__
_logger.error(
"Error while executing iso level function %s.%s",
module_,
name,
exc_info=True)
level = float('nan')
else:
_logger.info(
'Computed iso-level in %f s.', time.time() - st)
if level != self._level:
self._level = level
self._updated(Item3DChangedType.ISO_LEVEL)
if numpy.isfinite(self._level):
st = time.time()
vertices, normals, indices = MarchingCubes(
data,
isolevel=self._level)
_logger.info('Computed iso-surface in %f s.', time.time() - st)
if len(vertices) != 0:
return vertices, normals, indices
return None, None, None | [
"def isosurface(self):\n return self._isosurface()",
"def isosurface(self, value=None, flying_edges=True):\n scrange = self._data.GetScalarRange()\n\n if flying_edges:\n cf = vtk.vtkFlyingEdges3D()\n cf.InterpolateAttributesOn()\n else:\n cf = vtk.vtkContourFilter()\n cf.UseScalarTreeOn()\n\n cf.SetInputData(self._data)\n cf.ComputeNormalsOn()\n\n if utils.is_sequence(value):\n cf.SetNumberOfContours(len(value))\n for i, t in enumerate(value):\n cf.SetValue(i, t)\n else:\n if value is None:\n value = (2 * scrange[0] + scrange[1]) / 3.0\n # print(\"automatic isosurface value =\", value)\n cf.SetValue(0, value)\n\n cf.Update()\n poly = cf.GetOutput()\n\n out = vedo.mesh.Mesh(poly, c=None).phong()\n out.mapper().SetScalarRange(scrange[0], scrange[1])\n\n out.pipeline = utils.OperationNode(\n \"isosurface\",\n parents=[self],\n comment=f\"#pts {out.inputdata().GetNumberOfPoints()}\",\n c=\"#4cc9f0:#e9c46a\",\n )\n return out",
"def compute_isosurfaces(self, bs):\n total_data = []\n iso_surface = []\n n_bands = 0\n\n for spin in bs.bands.keys():\n\n ebands = bs.bands[spin]\n ebands -= bs.efermi\n emax = ebands.max(axis=1)\n emin = ebands.min(axis=1)\n\n\n for band in ebands:\n n_bands += 1\n i, j, k = 0, 0, 0\n data_min = 0\n data_max = 0\n data = np.zeros((self._k_dim[0], self._k_dim[1], self._k_dim[2]))\n for energy in band:\n \n data[i][j][k] = energy\n if energy<data_min or data_min ==0:\n data_min = energy\n if energy>data_max or data_max==0:\n data_max = energy\n \n if j == (self._k_dim[1] - 1) and k == (self._k_dim[2] - 1):\n j = 0\n k = 0\n i += 1\n \n elif k == (self._k_dim[2] - 1):\n j += 1\n k = 0\n \n else:\n k += 1\n\n if 0 > data_min and 0 < data_max:\n total_data.append(data)\n\n rlattvec = bs.lattice_rec._matrix\n\n\n for band_index, band_data in enumerate(total_data):\n\n verts, faces, normals, values = measure.marching_cubes_lewiner(band_data, 0,\n self._spacing)\n iso_surface.append([verts, faces, normals, values])\n\n\n self._iso_surface = iso_surface",
"def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()",
"def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]",
"def _get_surfaces(idf):\n surfaces = idf.getsurfaces() + idf.getshadingsurfaces() + idf.getsubsurfaces()\n return surfaces",
"def isosurface(X, Y, Z, V, show=False):\n\n def t(a):\n \"\"\"Transpose to coerce np.meshgrid output match np.mgrid output. a must\n be a 3d-array.\"\"\"\n return np.transpose(a, [1, 0, 2])\n\n fig = mlab.contour3d(t(X), t(Y), t(Z), t(V))\n if show:\n mlab.show()\n return fig",
"def isosurface(self, volumes=None, level_1=0.0, level_2=None):\n if level_2 is None:\n level_2 = self.DEFAULT_ISOLEVEL\n if volumes is None:\n volumes = self.volumes\n for volume in volumes:\n volume.surface_levels = level_1, level_2",
"def removeIsosurface(self, isosurface):\n if isosurface not in self.getIsosurfaces():\n _logger.warning(\n \"Try to remove isosurface that is not in the list: %s\",\n str(isosurface))\n else:\n isosurface.sigItemChanged.disconnect(self._isosurfaceItemChanged)\n self._isosurfaces.remove(isosurface)\n self._updateIsosurfaces()\n self.sigIsosurfaceRemoved.emit(isosurface)",
"def toggle_surface(self):",
"def getState(self):\n return _fiasco_numpy.KalmanFilter_getState(self)",
"def closed_v(self):\n sa = ShapeAnalysis_Surface(self.surface())\n return sa.IsVClosed()",
"def island_integrals(state):\n vs = state.variables\n\n uloc = allocate(state.dimensions, (\"xt\", \"yt\", \"isle\"))\n vloc = allocate(state.dimensions, (\"xt\", \"yt\", \"isle\"))\n\n uloc = update(\n uloc,\n at[1:, 1:, :],\n -(vs.psin[1:, 1:, :] - vs.psin[1:, :-1, :])\n * vs.maskU[1:, 1:, -1, npx.newaxis]\n / vs.dyt[npx.newaxis, 1:, npx.newaxis]\n * vs.hur[1:, 1:, npx.newaxis],\n )\n\n vloc = update(\n vloc,\n at[1:, 1:, ...],\n (vs.psin[1:, 1:, :] - vs.psin[:-1, 1:, :])\n * vs.maskV[1:, 1:, -1, npx.newaxis]\n / (vs.cosu[npx.newaxis, 1:, npx.newaxis] * vs.dxt[1:, npx.newaxis, npx.newaxis])\n * vs.hvr[1:, 1:, npx.newaxis],\n )\n\n vs.line_psin = line_integrals.line_integrals(state, uloc=uloc, vloc=vloc, kind=\"full\")\n return KernelOutput(line_psin=vs.line_psin)",
"def compute_wf(self, state): \n\n # For the coarse Wigner function the dimension is that of the \n # underlying affine plane.\n W = np.zeros((self.coarse_field.dim, self.coarse_field.dim)) \n\n # Turn kets into density operators if need be.\n if state.shape[0] == 1: \n state = np.outer(state, np.conj(state)) \n \n # A sorted copy of the subfield for plotting\n sorted_els = sorted(self.coarse_field.elements)\n\n # The coarse Wigner function is indexed by the subfield, so use this.\n for alpha in self.subfield: \n for beta in self.subfield: \n coarse_point = (self.subfield_map[alpha], self.subfield_map[beta])\n mat = np.trace(np.dot(state, self.coarse_kernel[coarse_point]))\n \n # Determine where in the Wigner matrix to put this value\n a = sorted_els.index(coarse_point[0])\n b = sorted_els.index(coarse_point[1])\n\n W[a][b] = (1.0 / self.field.dim) * mat \n\n return W",
"def _calc_ffinv(self, it):\n assert self.PBSRANK == 0, 'NO MPI METHOD'\n if it < 0: return\n fname_dx, fname_dy = self._getfnames_f(it)\n\n if not os.path.exists(fname_dx) or not os.path.exists(fname_dy):\n # FIXME : does this from plm\n assert self.is_previous_iter_done(it)\n Phi_est_WF = self.get_Phimap(it)\n Om_est_WF = self.get_Ommap(it)\n\n assert self.cov.lib_skyalm.shape == Phi_est_WF.shape\n assert self.cov.lib_skyalm.shape == self.lib_qlm.shape\n assert self.cov.lib_skyalm.lsides == self.lib_qlm.lsides\n rmin = np.array(self.cov.lib_skyalm.lsides) / np.array(self.cov.lib_skyalm.shape)\n print('rank %s caching displacement comp. for it. %s' % (self.PBSRANK, it))\n dx = PDP(Phi_est_WF, axis=1, h=rmin[1])\n dy = PDP(Phi_est_WF, axis=0, h=rmin[0])\n dx += -PDP(Om_est_WF, axis=0, h=rmin[0])\n dy += +PDP(Om_est_WF, axis=1, h=rmin[1])\n if self.PBSRANK == 0:\n np.save(fname_dx, dx)\n np.save(fname_dy, dy)\n del dx, dy\n lib_dir = os.path.join(self.lib_dir, 'f_%04d_libdir' % it)\n if not os.path.exists(lib_dir): os.makedirs(lib_dir)\n fname_invdx, fname_invdy = self._getfnames_finv(it)\n if not os.path.exists(fname_invdx) or not os.path.exists(fname_invdy):\n f = self._load_f(it)\n print('rank %s inverting displacement it. %s' % (self.PBSRANK, it))\n f_inv = f.get_inverse(use_Pool=self.use_Pool_inverse)\n np.save(fname_invdx, f_inv.get_dx())\n np.save(fname_invdy, f_inv.get_dy())\n lib_dir = os.path.join(self.lib_dir, 'finv_%04d_libdir' % it)\n if not os.path.exists(lib_dir): os.makedirs(lib_dir)\n assert os.path.exists(fname_invdx), fname_invdx\n assert os.path.exists(fname_invdy), fname_invdy\n return",
"def addIsosurface(self, level, color):\n isosurface = self._Isosurface(parent=self)\n isosurface.setColor(color)\n if callable(level):\n isosurface.setAutoLevelFunction(level)\n else:\n isosurface.setLevel(level)\n isosurface.sigItemChanged.connect(self._isosurfaceItemChanged)\n\n self._isosurfaces.append(isosurface)\n\n self._updateIsosurfaces()\n\n self.sigIsosurfaceAdded.emit(isosurface)\n return isosurface",
"def drfl_dsurface(self, x_surface, geom):\n\n return np.zeros((self.n_wl, 1))",
"def Innovation(state_vectors_names,obs_file,tmp_DA_path,sosie_path,name_sosie_output,name_sosie_map,n_ens,obsop): \n \n state_proj_name=ObsOperator(obsop,state_vectors_names,obs_file,tmp_DA_path,sosie_path,name_sosie_output,name_sosie_map,n_ens)\n \n \n fid_deg1 = nc.Dataset(obs_file)\n lon2d=np.array(fid_deg1.variables[\"lon\"][:,:]) \n lat2d=np.array(fid_deg1.variables[\"lat\"][:,:]) \n obs=np.array(fid_deg1.variables[\"ssh_model\"][:,:]) \n \n n_obs_var=1 # To be moved to Exp1_params.py\n name_obs_var=[\"ssh\"] # To be moved to Exp1_params.py\n \n fid_deg = nc.Dataset(state_proj_name[0])\n n_tot_obs=0\n for i_var in range(n_obs_var):\n n_tot_obs=n_tot_obs+np.shape(fid_deg.variables[name_obs_var[i_var]])[1]*np.shape(fid_deg.variables[name_obs_var[i_var]])[2]\n innov=np.zeros([n_ens,n_tot_obs],) \n \n for i_ens in range(n_ens):\n fid_deg = nc.Dataset(state_proj_name[i_ens])\n lon2d=np.array(fid_deg.variables[\"lon\"][:,:]) \n lat2d=np.array(fid_deg.variables[\"lat\"][:,:]) \n i_innov=0\n for i_var in range(n_obs_var): \n for i_lon in range(np.shape(fid_deg.variables[\"lon\"][:,:])[0]):\n for j_lat in range(np.shape(fid_deg.variables[\"lat\"][:,:])[1]):\n innov[i_ens,i_innov]=np.array(fid_deg.variables[name_obs_var[i_var]][0,i_lon,j_lat]) - obs[i_lon,j_lat]\n i_innov=i_innov+1\n \n innov[(innov<=-9900)]=float('Inf') \n \n \n return innov",
"def plot_fft_isosurfaces(description: str, omega: np.ndarray, \n ut: np.ndarray, filename: str) -> None:\n\n print(f'Plotting fft isosurfaces: {description}...')\n\n (omega_x_grid, omega_y_grid, omega_z_grid) = np.meshgrid(omega, omega, \n omega, indexing='ij')\n n = len(omega)\n\n num_slices = ut.shape[0]\n # We only want to plot the first, middle, and last time slices.\n slices = [0, num_slices//2, num_slices-1]\n\n titles = [f'{description}: slice {slice}' for slice in slices]\n\n num_rows = 1\n num_cols = len(slices)\n fig = make_subplots(\n rows=num_rows, \n cols=num_cols,\n specs=[\n [{'is_3d': True}]*num_cols,\n ]*num_rows,\n subplot_titles=titles,\n )\n for s in range(len(slices)):\n ut_slice = np.reshape(ut[slices[s],:], (n, n, n))\n fig.add_trace(\n go.Isosurface(\n x=omega_x_grid.flatten(), \n y=omega_y_grid.flatten(), \n z=omega_z_grid.flatten(), \n value=normalize(ut_slice).flatten(),\n opacity=0.5,\n isomin=0.6,\n isomax=0.9,\n surface_count=3,\n colorscale=\"Viridis\",\n ),\n row=1,\n col=s+1\n )\n fig.update_layout(\n scene_xaxis_title_text=\"omega_x\",\n scene_yaxis_title_text=\"omega_y\",\n scene_zaxis_title_text=\"omega_z\",\n scene2_xaxis_title_text=\"omega_x\",\n scene2_yaxis_title_text=\"omega_y\",\n scene2_zaxis_title_text=\"omega_z\",\n scene3_xaxis_title_text=\"omega_x\",\n scene3_yaxis_title_text=\"omega_y\",\n scene3_zaxis_title_text=\"omega_z\",\n )\n pio.write_html(fig, filename)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute range info (min, min positive, max) from data | def _computeRangeFromData(data):
if data is None:
return None
dataRange = min_max(data, min_positive=True, finite=True)
if dataRange.minimum is None: # Only non-finite data
return None
if dataRange is not None:
min_positive = dataRange.min_positive
if min_positive is None:
min_positive = float('nan')
return dataRange.minimum, min_positive, dataRange.maximum | [
"def _full_value_range(self):\n min_value, max_value = self._raw_data.data_range\n return max_value - min_value",
"def calcrange(a4lim,data):\r\n a4range=N.intersect1d(N.where(data>a4lim[0])[0],N.where(data<a4lim[1])[0])\r\n return a4range",
"def get_range_parameters(data):\n return data.start, data.stop, data.step",
"def get_range(self):\n return self._max - self._min",
"def getRange(self):\n \n pass",
"def in_range(data, minval=-np.inf, maxval=np.inf) :\n return (minval <= data) & (data < maxval)",
"def summarize_ranges(self, ranges):\n if len(ranges) == 0: return []\n min_ = 'min'\n max_ = 'max'\n for r in ranges:\n if r[0][0] == \"min\":\n r[0][0] = min_\n else:\n min_ = r[0][0]\n if r[-1][1] == \"max\":\n r[-1][1] = max_\n else:\n max_ = r[-1][1]\n return ranges[-1]",
"def range(self):\n xs = sorted(set(x for x, y in self.data.keys()))\n return (min(xs), max(xs) + self.resolution)",
"def get_range(self, start, end):",
"def range_simple(self):\n\n if not self.stack:\n raise StackEmptyError()\n range_min = self.stack[0]\n range_max = self.stack[0]\n for i in self.stack:\n if range_min > i:\n range_min = i\n if range_max < i:\n range_max = i\n return range_min, range_max",
"def get_range(min_val, max_val):\n return range(min_val, max_val)",
"def get_range_parameters(data):\n # seems we only have indexing ops to infer\n # rather than direct accessors\n if len(data) > 1:\n step = data[1] - data[0]\n stop = data[-1] + step\n start = data[0]\n elif len(data):\n start = data[0]\n stop = data[0] + 1\n step = 1\n else:\n start = stop = 0\n step = 1\n\n return start, stop, step",
"def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to",
"def get_min_max_values(data, col1, col2):\n\n return {\n 'ds1_min': data[col1].min(),\n 'ds1_max': data[col1].max(),\n 'ds2_min': data[col2].min(),\n 'ds2_max': data[col2].max()\n }",
"def _rrv_minmax_ ( s ) :\n return s.getMin(),s.getMax()",
"def _parse_vrange(self, data):\n vmin = self.config.get('vmin', np.nanmin(data))\n vmax = self.config.get('vmax', np.nanmax(data))\n vrange = self.config.get('vrange', None)\n\n # Parse vmin, vmax\n if isinstance(vmin, str):\n vmin = np.nanquantile(data, q=float(vmin))\n if isinstance(vmax, str):\n vmax = np.nanquantile(data, q=float(vmax))\n\n # Parse vrange\n if vrange is True:\n vrange = max(abs(np.nanmin(data)), abs(np.nanmax(data)))\n elif isinstance(vrange, str):\n vrange = abs(np.nanquantile(data, q=(float(vrange), 1-float(vrange)))).max()\n\n if vrange is not None:\n if isinstance(vrange, (list, tuple, np.ndarray)):\n vmin, vmax = vrange\n else:\n vmin, vmax = -vrange, vrange\n return vmin, vmax",
"def range(df):\r\n\r\n\tdf_range_dict = dict()\r\n\r\n\tfor i, col in enumerate(df.columns):\r\n\t\tdf_range_dict[col] = [df[col].max(), df[col].min(), df[col].max() - df[col].min()]\r\n\r\n\tdf_range = pd.DataFrame(df_range_dict, index=['Max Value', 'Min Value', 'Range (Max - Min)'])\r\n\tpd.set_option('precision', 2) # set output display precision in 2 decimal places\r\n\r\n\treturn df_range",
"def levels_to_data(mmin, mmax, data):\n # this is needed to make cartopy happy\n mmin_d = np.nanmin(data)\n mmax_d = np.nanmax(data)\n if mmin < mmin_d:\n mmin = mmin_d\n print(\"minimum level changed to make cartopy happy\")\n if mmax > mmax_d:\n mmax = mmax_d\n print(\"maximum level changed to make cartopy happy\")\n return mmin, mmax",
"def get_range(df, col):\n return df[col].min(), df[col].max()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add an isosurface to this item. | def addIsosurface(self, level, color):
isosurface = self._Isosurface(parent=self)
isosurface.setColor(color)
if callable(level):
isosurface.setAutoLevelFunction(level)
else:
isosurface.setLevel(level)
isosurface.sigItemChanged.connect(self._isosurfaceItemChanged)
self._isosurfaces.append(isosurface)
self._updateIsosurfaces()
self.sigIsosurfaceAdded.emit(isosurface)
return isosurface | [
"def add_surface(self,s):\n self.surfaces.append(s)\n s.system=self.surfaces",
"def add_surface_item(self, surfaceitem):\n self._add_surface_item(surfaceitem)",
"def isosurface(self, value=None, flying_edges=True):\n scrange = self._data.GetScalarRange()\n\n if flying_edges:\n cf = vtk.vtkFlyingEdges3D()\n cf.InterpolateAttributesOn()\n else:\n cf = vtk.vtkContourFilter()\n cf.UseScalarTreeOn()\n\n cf.SetInputData(self._data)\n cf.ComputeNormalsOn()\n\n if utils.is_sequence(value):\n cf.SetNumberOfContours(len(value))\n for i, t in enumerate(value):\n cf.SetValue(i, t)\n else:\n if value is None:\n value = (2 * scrange[0] + scrange[1]) / 3.0\n # print(\"automatic isosurface value =\", value)\n cf.SetValue(0, value)\n\n cf.Update()\n poly = cf.GetOutput()\n\n out = vedo.mesh.Mesh(poly, c=None).phong()\n out.mapper().SetScalarRange(scrange[0], scrange[1])\n\n out.pipeline = utils.OperationNode(\n \"isosurface\",\n parents=[self],\n comment=f\"#pts {out.inputdata().GetNumberOfPoints()}\",\n c=\"#4cc9f0:#e9c46a\",\n )\n return out",
"def isosurface(self):\n return self._isosurface()",
"def removeIsosurface(self, isosurface):\n if isosurface not in self.getIsosurfaces():\n _logger.warning(\n \"Try to remove isosurface that is not in the list: %s\",\n str(isosurface))\n else:\n isosurface.sigItemChanged.disconnect(self._isosurfaceItemChanged)\n self._isosurfaces.remove(isosurface)\n self._updateIsosurfaces()\n self.sigIsosurfaceRemoved.emit(isosurface)",
"def add_surface_container(self, surfacecontainer):",
"def _add_surface(\n self, tri, vertices, name, colour=\"red\", paint_with=None, **kwargs\n ):\n pass",
"def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()",
"def addSurface(self, HBSurface):\n assert hasattr(HBSurface, \"isHBSurface\"), \\\n \"%s input is not a Honeybee surface.\" % str(HBSurface)\n\n self._surfaces.append(HBSurface)\n\n # update surface parent\n HBSurface._parent = self",
"def add_stock(self, symbol, quantity, unit_price):\n # TODO write SQL statement to grab unit_price\n stock_price_total = quantity * unit_price # TODO write SQL statement\n # TODO deduct stock quantity from market ??\n self.portfolios.append((symbol, quantity, unit_price))\n self.value += stock_price_total",
"def addWireframe(self, name, wireframe):\n\n self.wireframes[name] = wireframe",
"def add_feature_layer(feature, surf_type, world_to_surf, world_to_obs):\n i = next(feature_counter)\n grid_offset = point.Point(i % feature_cols,\n i // feature_cols) * feature_grid_size\n text = feature_font.render(feature.full_name, True, colors.white)\n rect = text.get_rect()\n rect.center = grid_offset + point.Point(feature_grid_size.x / 2,\n feature_font_size)\n feature_pane.blit(text, rect)\n surf_loc = (features_loc + grid_offset + feature_layer_padding +\n point.Point(0, feature_font_size))\n add_surface(surf_type,\n point.Rect(surf_loc, surf_loc + feature_layer_size),\n world_to_surf, world_to_obs,\n lambda surf: self.draw_feature_layer(surf, feature))",
"def add_entry(self, entry):\n self.libraries['surface'].entries[f'{entry.label}'] = entry",
"def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]",
"def addStockType(self,item):\n #TODO\n #hint: add an item to this.stocklist\n pass",
"def add(self, layer):\n self.layers.append(layer)",
"def addStockType(self, item):\n # TODO\n # hint: Add an item to this.stocklist\n # No. 6\n self.stocklist.append(item)",
"def isosurface(self, volumes=None, level_1=0.0, level_2=None):\n if level_2 is None:\n level_2 = self.DEFAULT_ISOLEVEL\n if volumes is None:\n volumes = self.volumes\n for volume in volumes:\n volume.surface_levels = level_1, level_2",
"def add_layer(self, _layer):\n self.sub.append(_layer)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove an isosurface from this item. | def removeIsosurface(self, isosurface):
if isosurface not in self.getIsosurfaces():
_logger.warning(
"Try to remove isosurface that is not in the list: %s",
str(isosurface))
else:
isosurface.sigItemChanged.disconnect(self._isosurfaceItemChanged)
self._isosurfaces.remove(isosurface)
self._updateIsosurfaces()
self.sigIsosurfaceRemoved.emit(isosurface) | [
"def remove(self):\n\n self.remove_layer()\n self.remove_geo_resources()",
"def remove_surface(self):\n if len(self.contours)>0:\n for contour in self.contours:\n if isinstance(contour,ContourSet):\n for lineset in contour.collections:\n lineset.remove()\n else:\n contour.remove()\n self.contours=[]",
"def remove(self, drawable):\n if self._final:\n raise Exception('cannot remove objects once finalized')\n if drawable not in self._contents:\n raise ValueError('object not currently on the Layer')\n\n _GraphicsContainer.remove(self, drawable)",
"def isosurface(self):\n return self._isosurface()",
"def remove_from_spatial_regriddings(self, item):\n if not isinstance(item, SpatialRegridding):\n raise TypeError(\"item is of incorrect type.\")\n self.__spatial_regriddings.remove(item)",
"def remove_layer(self):\n layer = self.geoserver.get_layer(self.name)\n if layer:\n self.geoserver.delete(layer, purge=True, recurse=True)\n\n # Remove the layer_name from the file resource\n if self.file_resource.get(\"layer_name\"):\n del self.file_resource[\"layer_name\"]\n\n self.file_resource = toolkit.get_action(\"resource_update\")({\"user\": self.username}, self.file_resource)\n\n return True",
"def remove_entry(self, entry):\n self.libraries['surface'].entries.pop(f'{entry.label}')",
"def remove_poly_map(self):\n\n if self.poly_map_layer:\n self.pyslipqt.DeleteLayer(self.poly_map_layer)\n self.poly_map_layer = None",
"def remove_ospf_interface(self, interface):\n for i in self.ospf_interfaces:\n if i.interface_name == interface.interface_name:\n self.ospf_interfaces.remove(i)",
"def remove_poly_view(self):\n\n if self.poly_view_layer:\n self.pyslipqt.DeleteLayer(self.poly_view_layer)\n self.poly_view_layer = None",
"def remove(self, drawable):\n if drawable not in self._contents:\n raise ValueError('Object not currently on the Canvas')\n _GraphicsContainer.remove(self, drawable)",
"def remove(self):\r\n game_ref.remove(self)",
"def remove_ship(self, ship):\n self.ships_list.remove(ship)",
"def remove_layer(self, layer_pos):\n self.stack.pop(layer_pos)\n return",
"def remove_stock(self, stock):\n if stock in self.stocks:\n self.stocks.remove(stock)\n if stock in self.stock_data.keys():\n del self.stock_data[stock]",
"def removelayer(self, layername):\n if self.layers.has_key(layername):\n self.layers.pop(layername)",
"def remove(self):\n if self.parent is None or self.parent() is None:\n raise Exception(f\"Layer '{self.label}' is no longer in a figure/view\")\n else:\n self.parent().remove(self)\n self.parent = None",
"def remove(self) -> None:\n # Remove the shared memory binding being used by this sensor, if applicable.\n if self.is_using_shared_memory:\n assert self.point_cloud_shmem\n self.logger.debug('Lidar - Unbinding shared memory: 'f'{self.point_cloud_shmem_handle}')\n self.point_cloud_shmem.close()\n\n assert self.colour_shmem\n self.logger.debug('Lidar - Unbinding shared memory: 'f'{self.colour_shmem_handle}')\n self.colour_shmem.close()\n\n # Remove this sensor from the simulation.\n self._close_lidar()\n self.logger.debug('Lidar - sensor removed: 'f'{self.name}')",
"def pop(self, layer):\n to_remove = [a for a in self.artists if a.layer is layer]\n for r in to_remove:\n self.remove(r)\n return to_remove"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle update of isosurfaces upon level changed | def _isosurfaceItemChanged(self, event):
if event == Item3DChangedType.ISO_LEVEL:
self._updateIsosurfaces() | [
"def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)",
"def levelChanged(self):\r\n pass",
"def _levelChanged(self, event):\n if event == items.Item3DChangedType.ISO_LEVEL:\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)",
"def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]",
"def updateSurface(self):\n \n pass",
"def isosurface(self, volumes=None, level_1=0.0, level_2=None):\n if level_2 is None:\n level_2 = self.DEFAULT_ISOLEVEL\n if volumes is None:\n volumes = self.volumes\n for volume in volumes:\n volume.surface_levels = level_1, level_2",
"def update(self):\n self.current_level.update()",
"def _computeIsosurface(self):\n data = self.getData(copy=False)\n\n if data is None:\n if self.isAutoLevel():\n self._level = float('nan')\n\n else:\n if self.isAutoLevel():\n st = time.time()\n try:\n level = float(self.getAutoLevelFunction()(data))\n\n except Exception:\n module_ = self.getAutoLevelFunction().__module__\n name = self.getAutoLevelFunction().__name__\n _logger.error(\n \"Error while executing iso level function %s.%s\",\n module_,\n name,\n exc_info=True)\n level = float('nan')\n\n else:\n _logger.info(\n 'Computed iso-level in %f s.', time.time() - st)\n\n if level != self._level:\n self._level = level\n self._updated(Item3DChangedType.ISO_LEVEL)\n\n if numpy.isfinite(self._level):\n st = time.time()\n vertices, normals, indices = MarchingCubes(\n data,\n isolevel=self._level)\n _logger.info('Computed iso-surface in %f s.', time.time() - st)\n\n if len(vertices) != 0:\n return vertices, normals, indices\n\n return None, None, None",
"def toggle_surface(self):",
"def level_changed(self):\n\t\tself.level_needs_saving = True",
"def _update_level_data(self):\n\t\t# taxes, inhabitants\n\t\tself.tax_base = self.session.db.get_settler_tax_income(self.level)\n\t\tself.inhabitants_max = self.session.db.get_settler_inhabitants_max(self.level)\n\t\tif self.inhabitants > self.inhabitants_max: # crop settlers at level down\n\t\t\tself.inhabitants = self.inhabitants_max\n\n\t\t# consumption:\n\t\t# Settler productions are specified to be disabled by default in the db, so we can enable\n\t\t# them here per level.\n\t\tcurrent_lines = self.get_production_lines()\n\t\tfor (prod_line,) in self.session.db.get_settler_production_lines(self.level):\n\t\t\tif not self.has_production_line(prod_line):\n\t\t\t\tself.add_production_by_id(prod_line)\n\t\t\t# cross out the new lines from the current lines, so only the old ones remain\n\t\t\tif prod_line in current_lines:\n\t\t\t\tcurrent_lines.remove(prod_line)\n\t\tfor line in current_lines[:]: # iterate over copy for safe removal\n\t\t\t# all lines, that were added here but are not used due to the current level\n\t\t\tself.remove_production_by_id(line)\n\t\t# update instance graphics\n\t\tself.update_action_set_level(self.level)",
"def UpdateLayers(self):\n pass",
"def updateOverlay(self):\n pass",
"def addIsosurface(self, level, color):\n isosurface = self._Isosurface(parent=self)\n isosurface.setColor(color)\n if callable(level):\n isosurface.setAutoLevelFunction(level)\n else:\n isosurface.setLevel(level)\n isosurface.sigItemChanged.connect(self._isosurfaceItemChanged)\n\n self._isosurfaces.append(isosurface)\n\n self._updateIsosurfaces()\n\n self.sigIsosurfaceAdded.emit(isosurface)\n return isosurface",
"def sceneChanged(data):\n #print 'sceneChanged'\n global SCENE_OPENED\n SCENE_OPENED = True\n refresh_all_aetemplates(force=True)",
"def fDataChanged(self):\n\n self._layerManager.getAimsFeatures()",
"def z_change(self, new_z): \n pass",
"def update(self):\r\n if self.player:\r\n self.player.update()\r\n #check for any enities or update will be error\r\n if self.entities:\r\n for entity in self.entities:\r\n #check to update only the entities that is visible\r\n if self.camera:\r\n translatedRect = self.camera.translate(entity.rect)\r\n if (translatedRect.left <= -32 or translatedRect.right >= 832 or translatedRect.top <= -32 or\r\n translatedRect.bottom >= 632):\r\n continue\r\n entity.update()\r\n if self.camera:\r\n self.camera.update()\r\n\r\n## if self.dialog.visable:\r\n self.dialog.update()",
"def updateLayers(self):\n\t\tself.layers = self.extractLayers()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle updates of isosurfaces level and add/remove | def _updateIsosurfaces(self):
# Sorting using minus, this supposes data 'object' to be max values
sortedIso = sorted(self.getIsosurfaces(),
key=lambda isosurface: - isosurface.getLevel())
self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso] | [
"def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()",
"def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)",
"def _update_level_data(self):\n\t\t# taxes, inhabitants\n\t\tself.tax_base = self.session.db.get_settler_tax_income(self.level)\n\t\tself.inhabitants_max = self.session.db.get_settler_inhabitants_max(self.level)\n\t\tif self.inhabitants > self.inhabitants_max: # crop settlers at level down\n\t\t\tself.inhabitants = self.inhabitants_max\n\n\t\t# consumption:\n\t\t# Settler productions are specified to be disabled by default in the db, so we can enable\n\t\t# them here per level.\n\t\tcurrent_lines = self.get_production_lines()\n\t\tfor (prod_line,) in self.session.db.get_settler_production_lines(self.level):\n\t\t\tif not self.has_production_line(prod_line):\n\t\t\t\tself.add_production_by_id(prod_line)\n\t\t\t# cross out the new lines from the current lines, so only the old ones remain\n\t\t\tif prod_line in current_lines:\n\t\t\t\tcurrent_lines.remove(prod_line)\n\t\tfor line in current_lines[:]: # iterate over copy for safe removal\n\t\t\t# all lines, that were added here but are not used due to the current level\n\t\t\tself.remove_production_by_id(line)\n\t\t# update instance graphics\n\t\tself.update_action_set_level(self.level)",
"def levelChanged(self):\r\n pass",
"def addIsosurface(self, level, color):\n isosurface = self._Isosurface(parent=self)\n isosurface.setColor(color)\n if callable(level):\n isosurface.setAutoLevelFunction(level)\n else:\n isosurface.setLevel(level)\n isosurface.sigItemChanged.connect(self._isosurfaceItemChanged)\n\n self._isosurfaces.append(isosurface)\n\n self._updateIsosurfaces()\n\n self.sigIsosurfaceAdded.emit(isosurface)\n return isosurface",
"def _computeIsosurface(self):\n data = self.getData(copy=False)\n\n if data is None:\n if self.isAutoLevel():\n self._level = float('nan')\n\n else:\n if self.isAutoLevel():\n st = time.time()\n try:\n level = float(self.getAutoLevelFunction()(data))\n\n except Exception:\n module_ = self.getAutoLevelFunction().__module__\n name = self.getAutoLevelFunction().__name__\n _logger.error(\n \"Error while executing iso level function %s.%s\",\n module_,\n name,\n exc_info=True)\n level = float('nan')\n\n else:\n _logger.info(\n 'Computed iso-level in %f s.', time.time() - st)\n\n if level != self._level:\n self._level = level\n self._updated(Item3DChangedType.ISO_LEVEL)\n\n if numpy.isfinite(self._level):\n st = time.time()\n vertices, normals, indices = MarchingCubes(\n data,\n isolevel=self._level)\n _logger.info('Computed iso-surface in %f s.', time.time() - st)\n\n if len(vertices) != 0:\n return vertices, normals, indices\n\n return None, None, None",
"def isosurface(self, volumes=None, level_1=0.0, level_2=None):\n if level_2 is None:\n level_2 = self.DEFAULT_ISOLEVEL\n if volumes is None:\n volumes = self.volumes\n for volume in volumes:\n volume.surface_levels = level_1, level_2",
"def updateSurface(self):\n \n pass",
"def _levelChanged(self, event):\n if event == items.Item3DChangedType.ISO_LEVEL:\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)",
"def update(self):\n self.current_level.update()",
"def update_surfs(self, surf_path, surf_type, offset=None):\n try:\n self.surf[surf_type]\n except KeyError:\n pass\n # Here should be a dialog for confirm, whether adding data or not\n else:\n self._add_surface(surf_path, surf_type, offset)",
"def UpdateLayers(self):\n pass",
"def level_changed(self):\n\t\tself.level_needs_saving = True",
"def updateLayers(self):\n\t\tself.layers = self.extractLayers()",
"def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None",
"def toggle_surface(self):",
"def updateGeometryInfo(self,*args):\r\n self.wf.dataGridView.Rows.Clear()\r\n sceneRoot = Application.ActiveSceneRoot\r\n children = sceneRoot.FindChildren2( \"\", constants.siPolyMeshType, constants.siMeshFamily, True )\r\n for child in children:\r\n vTrans = child.Kinematics.Local.GetTransform2(None).Translation\r\n self.wf.AddRow( child.FullName, vTrans.X, vTrans.Y, vTrans.Z )",
"def fDataChanged(self):\n\n self._layerManager.getAimsFeatures()",
"def updateOverlay(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle update of the cut plane (and take care of mode change) | def _updated(self, event=None):
if event == ItemChangedType.COMPLEX_MODE:
self._syncDataWithParent()
super(ComplexCutPlane, self)._updated(event) | [
"def plane_update(self):\n self.plane.update()",
"def vp_update_after_active_tile_selection(self):\n self.ovm.update_all_debris_detections_areas(self.gm)\n self.main_controls_trigger.transmit('SHOW CURRENT SETTINGS')\n self.vp_draw()",
"def onUpdateFactors(self, evt):\n\t\tif self.blockFactorUpdate:\n\t\t\tprint \"Blocking factor update\"\n\t\t\treturn\n\t\tx, y, z = self.dataUnits[0].dataSource.getOriginalDimensions()\n\t\tfx = 1\n\t\tfy = 1\n\t\tfz = 1\n\t\ttry:\n\t\t\tfx = float(self.factorX.GetValue())\n\t\t\tfy = float(self.factorY.GetValue())\n\t\t\tfz = float(self.factorZ.GetValue())\n\t\texcept:\n\t\t\tpass\n\t\tx *= fx\n\t\ty *= fy\n\t\tz *= fz\n\t\tself.blockDimUpdate = 1\n\t\tself.newDimX.SetValue(\"%d\" % x)\n\t\tself.newDimY.SetValue(\"%d\" % y)\n\t\tself.newDimZ.SetValue(\"%d\" % z)\n\t\tself.currSize = (x, y, z)\n\t\tself.blockDimUpdate = 0",
"def updateSurface(self):\n \n pass",
"def updateMode(self, value):\n\n # If the given value matches one of the accepted modes\n # set the mode of the background and update the GraphicsScene\n if value in [\"Light\", \"Dark\", \"Off\"]:\n self.mode = value\n self.update()\n # Otherwise if a non supported mode is entered (should never reach this code)\n # print an error message to console\n else:\n print(\"Grid mode not supported.\")",
"def updateCurve(self):\n \n pass",
"def _update(self):\n self._execute_lane_changes()\n self._execute_forward_movement()",
"def update(i):\n epoch = i//features.instance_count\n w = pl.weights_tracker[i]\n a = pl.accuracy_tracker[epoch]\n divider.set_data([xmin,xmax],[(-xmin * w[0] - w[2]) / w[1], (-xmax * w[0] - w[2]) / w[1]])\n epoch_tracker.set_text(\"{} {}\".format(epoch + 1, a))\n\n # Keep a shadow of the hyperplane at the end of each epoch\n if i % features.instance_count == 0:\n plot_hyperplane(w,xmin,xmax,iter = i, alpha = .3, color='black',linestyle='dashed')\n\n return divider",
"def mesh_change(attrname, old, new):\n # read discretization parameters\n h = h_slider.value # spatial meshwidth\n k = k_slider.value # temporal meshwidth\n update_mesh(h, k)",
"def update_camera(self):\n if self.tracker is not None:\n self.ratio = self.tracker.get_converted_centroid()\n self.vector = self.calculate_vector() if self.ratio is not None else None",
"def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)",
"def __update_device(self):\n self.logger.debug(\"running\")\n if self.__changed[0]:\n self.__set_device_stim_duration(self.tab.get_stim_dur_val())\n if self.__changed[1]:\n self.__set_device_stim_intensity(self.tab.get_stim_intens_val())\n if self.__changed[2]:\n self.__set_device_upper_isi(self.tab.get_upper_isi_val())\n if self.__changed[3]:\n self.__set_device_lower_isi(self.tab.get_lower_isi_val())\n self.tab.set_config_val(\"Custom\")\n self.__set_change_bools_false()\n self.__set_upload_button(False)\n self.logger.debug(\"done\")",
"def modeChanged(self, user, channel, set, modes, args):",
"def update_display(self):\n #self.optimize_plot_line.setData(self.buffer) \n\n #self.imv.setImage(np.reshape(self.np_data,(self.camera.subarrayh.val, self.camera.subarrayv.val)).T)\n #self.imv.setImage(self.image, autoLevels=False, levels=(100,340))\n if self.autoLevels == False: \n self.imv.setImage((self.displayed_image), \n autoLevels=self.settings.autoLevels.val,\n autoRange=self.settings.autoRange.val,\n levels=(self.level_min, self.level_max))\n else: #levels should not be sent when autoLevels is True, otherwise the image is displayed with them\n self.imv.setImage((self.displayed_image), \n autoLevels=self.settings.autoLevels.val, \n autoRange=self.settings.autoRange.val)\n self.settings.level_min.read_from_hardware()\n self.settings.level_max.read_from_hardware()",
"def doUpdate(data, otherargs):\n x = data.input1.getPoints(colNames='X')\n y = data.input1.getPoints(colNames='Y')\n z = data.input1.getPoints(colNames='Z')\n classification = data.input1.getPoints(colNames='CLASSIFICATION')\n height = numpy.zeros(len(classification), dtype=numpy.float32)\n calcNewClassAndHeight(x, y, z, otherargs.resolution, classification, height, \n otherargs.zArrFiltered, otherargs.zThresh, otherargs.xMin, otherargs.yMax, otherargs.zNull)\n\n data.input1.setPoints(classification, colName='CLASSIFICATION')\n data.input1.setScaling('HEIGHT', lidarprocessor.ARRAY_TYPE_POINTS, otherargs.heightGain, \n otherargs.heightOffset)\n data.input1.setPoints(height, colName='HEIGHT')",
"def updateMode(self, value):\n\n if value in [\"Light\", \"Dark\", \"Off\"]:\n self.mode = value\n self.checkMode()\n self.update()\n else:\n print(\"Block mode not supported.\")",
"def refresh(self):\n self._create_plane()\n self.fs = []",
"def switch_cut_cor(self):\n if self.cut_cor == 41:\n self.cut_cor = 42\n elif self.cut_cor == 42:\n self.cut_cor = 41",
"def update_hdv(self):\n\n dict_var_checkboxes = self.parent.parent.dicom_left_window.dicom_contourage.dict_lines\n\n # Reset figure\n # Preservation du zoom (1)\n x_lim = self.fig.get_xlim()\n y_lim = self.fig.get_ylim()\n \n # On nettoie le graphe\n self.fig.clear()\n\n # Preservation du zoom (2)\n self.fig.set_xlim(x_lim)\n self.fig.set_ylim(y_lim)\n\n # Infos\n self.fig.set_title(\"Histogramme dose/volume\")\n self.fig.set_xlabel('Dose absorbee')\n self.fig.set_ylabel('Pourcentage du volume')\n self.fig.grid(True)\n\n dose_matrix = self.dicom_navigation.slice.get_dose_matrix()\n\n # Cas ou aucune source n'est placee\n if dose_matrix is None:\n self.canvas.draw()\n self.parent.top_info.canvas_HDV.draw()\n return\n\n # On calcule le HDV\n for ROI_id in dict_var_checkboxes:\n if dict_var_checkboxes[ROI_id]['cum'].get() == 1:\n self.add_hdv(ROI_id)\n if dict_var_checkboxes[ROI_id]['diff'].get() == 1:\n self.add_hdv(ROI_id, type_hdv='diff')\n\n # Contraintes\n if self.got_contraintes:\n self.dicom_navigation.get_dicom_contraintes().verifier_les_contraintes_des_hdv_choisis()\n\n # Affichage de la version mise a jour\n self.refresh_HDV()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle data change in the parent this isosurface belongs to | def _parentChanged(self, event):
if event == ItemChangedType.COMPLEX_MODE:
self._syncDataWithParent()
super(ComplexIsosurface, self)._parentChanged(event) | [
"def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)",
"def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()",
"def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexCutPlane, self)._updated(event)",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(copy=False)\n self._updateScenePrimitive()",
"def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()",
"def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True",
"def data_changed(self):\n return",
"def XPLMDataChanged_f(inRefcon):",
"def data_changed(self):\n self.data_changed_signal.emit(self)",
"def _parent_changed(self):\n raise NotImplementedError(\"shouldnt happen, Parentable objects need to be able to change their parent\")",
"def fDataChanged(self):\n\n self._layerManager.getAimsFeatures()",
"def update_data(self):\n pass",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n data = parent.getData(copy=False)\n range_ = parent.getDataRange()\n self._updateData(data, range_)",
"def update_data():\n pass",
"def _levelChanged(self, event):\n if event == items.Item3DChangedType.ISO_LEVEL:\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)",
"def on_parent_changed(self):\n pass",
"def updateGeometryInfo(self,*args):\r\n self.wf.dataGridView.Rows.Clear()\r\n sceneRoot = Application.ActiveSceneRoot\r\n children = sceneRoot.FindChildren2( \"\", constants.siPolyMeshType, constants.siMeshFamily, True )\r\n for child in children:\r\n vTrans = child.Kinematics.Local.GetTransform2(None).Translation\r\n self.wf.AddRow( child.FullName, vTrans.X, vTrans.Y, vTrans.Z )",
"def _notify_parent_change(self):\n for p in self.parameters:\n p._parent_changed(self)",
"def _numberOfPoints_changed(self):\n self.reinitialiseData()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle update of the isosurface (and take care of mode change) | def _updated(self, event=None):
if event == ItemChangedType.COMPLEX_MODE:
self._syncDataWithParent()
elif event in (ItemChangedType.COLORMAP,
Item3DChangedType.INTERPOLATION):
self._updateScenePrimitive()
super(ComplexIsosurface, self)._updated(event) | [
"def updateSurface(self):\n \n pass",
"def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()",
"def toggle_surface(self):",
"def update(self):\n pygame.surfarray.blit_array(self.surface, self.array2d)",
"def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None",
"def updateOverlay(self):\n pass",
"def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexCutPlane, self)._updated(event)",
"def update(self):\r\n if self.player:\r\n self.player.update()\r\n #check for any enities or update will be error\r\n if self.entities:\r\n for entity in self.entities:\r\n #check to update only the entities that is visible\r\n if self.camera:\r\n translatedRect = self.camera.translate(entity.rect)\r\n if (translatedRect.left <= -32 or translatedRect.right >= 832 or translatedRect.top <= -32 or\r\n translatedRect.bottom >= 632):\r\n continue\r\n entity.update()\r\n if self.camera:\r\n self.camera.update()\r\n\r\n## if self.dialog.visable:\r\n self.dialog.update()",
"def plane_update(self):\n self.plane.update()",
"def update(self, *args):\n self.widget.updateGL()",
"def fDataChanged(self):\n\n self._layerManager.getAimsFeatures()",
"def update_surfs(self, surf_path, surf_type, offset=None):\n try:\n self.surf[surf_type]\n except KeyError:\n pass\n # Here should be a dialog for confirm, whether adding data or not\n else:\n self._add_surface(surf_path, surf_type, offset)",
"def _update(self):\n done = self._x.update()\n done &= self._y.update()\n done &= self._width.update()\n done &= self._height.update()\n done &= self._dx.update()\n done &= self._dy.update()\n done &= self._scale.update()\n done &= self._rotation.update()\n if not done: # i.e. the layer is being transformed\n self._transform_cache = None\n self._opacity.update()\n self.update()\n for layer in self:\n layer._update()",
"def update(self, now=True):\n self.Time.update(now=now)\n self.Precess()\n self.ApparentPlace()\n self.RaC, self.DecC = self.RaA, self.DecA\n self.posviolate = False\n self.AltAziConv()\n if prefs.RefractionOn:\n dRA, dDEC = self.Refrac()\n self.RaC += dRA\n self.DecC += dDEC\n if prefs.FlexureOn:\n dRA, dDEC = self.Flex()\n self.RaC += dRA\n self.DecC += dDEC",
"def __update_device(self):\n self.logger.debug(\"running\")\n if self.__changed[0]:\n self.__set_device_stim_duration(self.tab.get_stim_dur_val())\n if self.__changed[1]:\n self.__set_device_stim_intensity(self.tab.get_stim_intens_val())\n if self.__changed[2]:\n self.__set_device_upper_isi(self.tab.get_upper_isi_val())\n if self.__changed[3]:\n self.__set_device_lower_isi(self.tab.get_lower_isi_val())\n self.tab.set_config_val(\"Custom\")\n self.__set_change_bools_false()\n self.__set_upload_button(False)\n self.logger.debug(\"done\")",
"def update_mode(self):\n pass",
"def update_surface(frame):\n \n #fig.suptitle(time[frame])\n im.set_array(surf[frame])\n im.set_extent([np.nanmin(xx[frame]), np.nanmax(xx[frame]), np.nanmin(yy[frame]), np.nanmax(yy[frame])])\n \n line.set_data([(times[:-1] + utc_to_east).plot_date[frame]]*2, ylim)",
"def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]",
"def update(self):\n # Write black to the screen before every blit.\n self.surface.fill(self.theme['BACKGROUND_COLOR'])\n\n for element in self.elements:\n element.updateElement()\n\n # \"Flip\" the display (update the display with the newly created surface.\n pygame.display.flip()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return 3D dataset. This method does not cache data converted to a specific mode; it computes it for each request. | def getData(self, copy=True, mode=None):
if mode is None:
return super(ComplexField3D, self).getData(copy=copy)
else:
return self._convertComplexData(self._data, mode) | [
"def get_3d_train(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_train['3d'][:, to_select, :][:, to_sort, :]",
"def get_dataset(self, cid, type=\"train\"):\n dataset = torch.load(\n os.path.join(self.path, type, \"data{}.pkl\".format(cid)))\n return dataset",
"def cube_data(self):\n cube_data = copy.deepcopy(self.data)\n cube_data.shape = [self.nints * self.ngroups, self.rows, self.columns]\n return cube_data",
"def get_dataset(self):\n return",
"def cufftPlan3d(nx, ny, nz, type_):\n assert isinstance(nx, int)\n assert 2 <= nx <= 16384 # XXX: CUDA 1.0; may change\n assert isinstance(ny, int)\n assert 2 <= ny <= 16384 # XXX: CUDA 1.0; may change\n assert isinstance(nz, int)\n assert 2 <= nz <= 16384 # XXX: CUDA 1.0; may change\n assert type_ in cufftType_enum\n plan = cufftHandle()\n result = _cufftPlan3d(byref(plan), nx, ny, nz, type_)\n checkCufftResult(result)\n return plan",
"def convert_1d_to_3d(data_X, data_Y):\n\n data_X = data_X.tocsr()\n \n data_dim_x = [] # slices along x-axis (has shape of (total_trials * dim_x, dim_z, dim_y))\n data_dim_x_label = [] # contains (total_trials * dim_x) labels\n data_dim_y = [] # slices along y-axis (has shape of (total_trials * dim_y, dim_z, dim_x))\n data_dim_y_label = [] # contains (total_trials * dim_y) labels\n data_dim_z = [] # slices along z-axis (has shape of (total_trials * dim_z, dim_y, dim_x))\n data_dim_z_label = [] # contains (total_trials * dim_z) labels\n\n for num_trial in range(data_X.shape[0]):\n label = data_Y[num_trial]\n data_1d = data_X[num_trial]\n data_3d = np.squeeze(np.asarray(data_1d.todense())).reshape((dim_z, dim_y, dim_x))\n for x in range(dim_x):\n x_slice = data_3d[:,:,x]\n # append only if the slice is not empty \n if x_slice.sum() != 0:\n data_dim_x.append(data_3d[:, :, x])\n data_dim_x_label.append(label)\n for y in range(dim_y):\n y_slice = data_3d[:, y, :]\n if y_slice.sum() != 0:\n data_dim_y.append(data_3d[:, y, :])\n data_dim_y_label.append(label)\n for z in range(dim_z):\n z_slice = data_3d[:, :, z]\n if z_slice.sum() != 0:\n data_dim_z.append(data_3d[z, :, :])\n data_dim_z_label.append(label)\n\n return np.array(data_dim_x), np.array(data_dim_x_label), \\\n np.array(data_dim_y), np.array(data_dim_y_label), \\\n np.array(data_dim_z), np.array(data_dim_z_label)",
"def ggml_view_3d(ctx: ffi.CData, a: ffi.CData, ne0: int, ne1: int, ne2: int, nb1: int, nb2: int, offset: int) -> ffi.CData:\n ...",
"def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r",
"def cells3d():\n\n return _load(\"data/cells3d.tif\")",
"def create_dataset(opt):\r\n dataset_class = find_dataset_using_name(opt.dataset_mode)\r\n datasets = dataset_class(opt)\r\n train_sampler = torch.utils.data.distributed.DistributedSampler(datasets)\r\n data_loader = CustomDatasetDataLoader(opt,datasets,train_sampler)\r\n dataset = data_loader.load_data()\r\n return dataset,train_sampler",
"def get_3Ddata(self, start=0, stop=None, step=1):\n data3d = []\n dcmlist = self.files_in_serie\n # print('stsp ', start, stop, step)\n\n # raw_max = None\n # raw_min = None\n # slope = None\n # inter = None\n\n # get shape 2d\n\n # sometimes there is render in series\n if len(self.files_in_serie) > 1:\n data = self._read_file(dcmlist[0])\n data2d1 = data.pixel_array\n data = self._read_file(dcmlist[1])\n data2d2 = data.pixel_array\n if (data2d1.shape[0] == data2d2.shape[0]) and (\n data2d1.shape[1] == data2d2.shape[1]\n ):\n pass\n else:\n dcmlist.pop(0)\n\n if stop is None:\n stop = len(dcmlist)\n\n # printRescaleWarning = False\n for i in xrange(start, stop, step):\n onefile = dcmlist[i]\n data = self._read_file(onefile)\n new_data2d = data.pixel_array\n # new_data2d, slope, inter = dcmtools.get_pixel_array_from_pdcm(data)\n # mport pdb; pdb.set_trace()\n\n if len(data3d) == 0:\n shp2 = new_data2d.shape\n data3d = np.zeros(\n [len(dcmlist), shp2[0], shp2[1]], dtype=new_data2d.dtype\n )\n slope, inter = dcmtools.get_slope_and_intercept_from_pdcm(data)\n\n # first readed slide is at the end\n\n if (data3d.shape[1] == new_data2d.shape[0]) and (\n data3d.shape[2] == new_data2d.shape[1]\n ):\n data3d[-i - 1, :, :] = new_data2d\n else:\n msg = (\n \"Problem with shape \"\n + \"Data size: \"\n + str(data3d.nbytes)\n + \", shape: \"\n + str(shp2)\n + \"x\"\n + str(len(dcmlist))\n + \" file \"\n + onefile\n )\n logger.warning(msg)\n print(msg)\n\n logger.debug(\n \"Data size: \"\n + str(data3d.nbytes)\n + \", shape: \"\n + str(shp2)\n + \"x\"\n + str(len(dcmlist))\n + \" file \"\n + onefile\n )\n data3d = misc.use_economic_dtype(data3d, slope=slope, inter=inter)\n # if original_dtype == np.uint16 and data3d.dtype == np.int16:\n # data3d = data3d.astype(np.int32)\n # or just force set slope=0.5, inter = 0\n # new_data2d = rescale_pixel_array(data2d, slope, inter)\n # if printRescaleWarning:\n # print(\"Automatic Rescale with slope 0.5\")\n # logger.warning(\"Automatic Rescale with slope 0.5\")\n # data3d = dcmtools.rescale_pixel_array(data3d, slope=slope, inter=inter)\n\n return data3d",
"def get_dataset(self):\n return Dataset.query.get(self.id)",
"def dataset(self):\n return self.zfs.get_dataset(self.dataset_name)",
"def _nc_dataset(url, requests_kwargs: Optional[Dict] = None):\n from netCDF4 import Dataset\n\n data = urlopen(url, requests_kwargs)\n try:\n return Dataset(Path(urlparse(url).path).name, memory=data.read())\n except OSError:\n # if libnetcdf is not compiled with in-memory support fallback to a local tmp file\n data.seek(0)\n with _tempnc(data) as _nc:\n return Dataset(_nc)",
"def __call__(self, data):\n img = data[self.key]\n if self.in_order == 'nhwc':\n img = img.permute(0, 2, 3, 1)\n if self.in_type == 'numpy':\n img = img.cpu().numpy()\n out = self.mde(img)\n if self.out_type == 'numpy':\n out = torch.from_numpy(out)\n if self.out_order == 'nhwc':\n out = out.permute(0, 3, 1, 2)\n return out",
"def __getitem__(self, i):\n return _RMF_HDF5.DataSetIndex3D___getitem__(self, i)",
"def data(dataname = None, package = None, cache = False):\n\t#if dataname == None and data == None:\n\t# from rpy2.robjects import r\n\t# print(r.data())\n\treturn sm.datasets.get_rdataset(dataname = dataname, package = package, cache = cache).data",
"def get_datasset(self):\n self.dataset = self.get_df_from_db(self.database, self.params['table_name'], self.params['columns'])",
"def generate_synthetic_dataset(dataset_name, mesh_file, camera_param_ranges, img_size):\n intrinsic_params = animate_intrinsics(camera_param_ranges, img_size)\n extrinsic_params = animate_extrinsics(camera_param_ranges)\n camera_params = list(itertools.product(intrinsic_params, extrinsic_params))\n print('Using %d camera parameter combinations' % len(camera_params))\n\n filename = 'backup/3d_models/' + mesh_file + '.ply'\n print('Reading mesh file \"%s\"' % filename)\n mesh = read_triangle_mesh(filename)\n\n render_dataset(dataset_name, img_size, camera_params, mesh)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Population prior, i.e. $Categorical(\pi)$. | def prior_z(self) -> distributions.Distribution:
return distributions.Categorical(self.pi) | [
"def test_prior_name(self):\n dim = Dimension(\"yolo\", \"reciprocal\", 1e-10, 1)\n assert dim.prior_name == \"reciprocal\"\n\n dim = Dimension(\"yolo\", \"norm\", 0.9)\n assert dim.prior_name == \"norm\"\n\n dim = Real(\"yolo\", \"uniform\", 1, 2)\n assert dim.prior_name == \"uniform\"\n\n dim = Integer(\"yolo1\", \"uniform\", -3, 6)\n assert dim.prior_name == \"int_uniform\"\n\n dim = Integer(\"yolo1\", \"norm\", -3, 6)\n assert dim.prior_name == \"int_norm\"\n\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, \"lalala\": 0.4}\n dim = Categorical(\"yolo\", categories)\n assert dim.prior_name == \"choices\"",
"def PriorExperiment():\n return Uniform(0.1, 10.0)",
"def test_get_prior_string_dict(self):\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, \"lalala\": 0.4}\n dim = Categorical(\n \"yolo\", categories, shape=2, default_value=[\"asdfa\", \"lalala\"]\n )\n assert dim.get_prior_string() == (\n \"choices({'asdfa': 0.10, 2: 0.20, 3: 0.30, 'lalala': 0.40}, \"\n \"shape=2, default_value=['asdfa', 'lalala'])\"\n )",
"def _prior_probability(self, c):\n p = np.mean(self.y == c)\n return p",
"def analysis(self) -> \"PriorFactor\":\n return self",
"def _compute_mix_prior(self):\n if np.all(self.mix_prior == 1):\n return 0\n return np.dot(np.log(self.mix_weight).T, (self.mix_prior - 1))",
"def get_prior(self):\n return get_prior(self.dataset, self.class_value)",
"def buildConditionalPriorTerm(self):\r\n\r\n # shape is (batch size,)\r\n self.conditional_prior = - T.mean(T.sum(T.exp(self.log_pzgxw)*(self.log_qxgy.dimshuffle(0,'x',1,'x') - self.log_pxgzw), axis=3), axis=[1,2])",
"def __init__(self, prior: Prior):\n # TODO: Consider analytical solution rather than implementing optimisation\n super().__init__(prior.factor, x=prior, name=namer(self.__class__.__name__))\n self.prior = prior\n self.label = f\"PriorFactor({prior.label})\"",
"def bias_prior(self):",
"def prior(self, t):\n return self.p * (1 - self.p)**(t - 1)",
"def get_prior(self):\n class_column = self.data[:, -1]\n freq = np.count_nonzero(class_column == self.class_label)\n count = np.shape(class_column)[0]\n return freq / count",
"def get_population_precondition(self):\n # TODO: Not quite sure what to do with general modifier here?\n if self.role in [\"<<\", \"(+)\"]:\n return self.stoichiometry\n else:\n return 0",
"def set_prior_priorunc_synthetic(self):\n\n lai_coeff_absunc = None\n statevec_absunc = None\n\n #-- \n if self.prior_inifile!=None:\n lai_coeff_absunc, statevec_absunc = self._setprior_from_inifile()\n elif self.use_generic_prior:\n self._setprior_generic_agriculture()\n statevec_absunc = self.generic_prior_unc\n else:\n #-- overall number of time-points in schedule\n npts = self.get_npts()\n\n #-- default prior file\n prior_file = os.path.join(ipt_dir_path, 'mni_stat_jules_2017.csv')\n\n #-- get signature simulator default state\n msg = \"START reading state variables from file ***{}***...\".format(prior_file)\n FileLogger.info(msg)\n state_inst = sv.get_state_csv(fname=prior_file, fmt='%Y-%m-%d %H:%M:%S' )\n msg = \"...reading DONE\"\n FileLogger.info(msg)\n\n #-- LAI,Canopy-Height,Soil-Moisture\n self.prstate = np.empty((3,npts), dtype=np.float64)\n\n for i,date_utc in enumerate(self.schedule_dct['date_utc']):\n idx, timedelt = sv.find_nearest_date_idx(state_inst.date_utc, date_utc)\n # print \"MVMV::nearest={} idx={} timedelt={}\".format(\n # state_inst.date_utc[idx], idx, timedelt)\n #-- LAI\n self.prstate[0,i] = state_inst.lai[idx]\n #-- canopy-height\n self.prstate[1,i] = state_inst.can_height[idx]\n #-- SM\n self.prstate[2,i] = state_inst.soil_moisture[idx]\n\n #-- set uncertainty values\n self._set_priorunc(statevec_absunc=statevec_absunc, lai_coeff_absunc=lai_coeff_absunc)",
"def _setprior_generic_agriculture(self):\n\n #-- number of time-points\n npts = self.get_npts()\n\n #-- LAI,Canopy-Height,Soil-Moisture\n self.prstate = np.empty((3,npts), dtype=np.float64)\n #-- LAI\n self.prstate[0,:] = self.generic_prior[0]\n #-- canopy-height\n self.prstate[1,:] = self.generic_prior[1]\n #-- soil moisture (volumetric)\n self.prstate[2,:] = self.generic_prior[2]",
"def priorLikelihood(self, theta, prior):",
"def initial_cond(m: int, n: int, pa: float = 1./3., pb: float = 1./3.) -> np.array:\n popu = np.random.choice([1, 0, -1], size=(m, n), p=[pa, 1.0 - pa - pb, pb])\n return popu",
"def set_prior(self, verbose=True):\n if not os.path.exists(self.param['PRIOR_FILE']):\n msg = 'PRIOR_FILE ({0}) not found!'\n warnings.warn(msg.format(self.param['PRIOR_FILE']), \n AstropyUserWarning)\n \n return False\n \n # prior_raw = np.loadtxt(self.param['PRIOR_FILE'])\n # prior_header = open(self.param['PRIOR_FILE']).readline()\n # \n # self.prior_mags = np.cast[float](prior_header.split()[2:])\n # self.prior_data = np.zeros((self.NZ, len(self.prior_mags)))\n # \n # for i in range(self.prior_data.shape[1]):\n # self.prior_data[:,i] = np.interp(self.zgrid, prior_raw[:,0], \n # prior_raw[:,i+1], \n # left=0, right=0)\n # \n # self.prior_data /= np.trapz(self.prior_data, self.zgrid, axis=0)\n # \n # if 'PRIOR_FLOOR' in self.param.param_names:\n # prior_floor = self.param['PRIOR_FLOOR']\n # self.prior_data += prior_floor\n # self.prior_data /= np.trapz(self.prior_data, self.zgrid, axis=0)\n \n self.prior_mags, self.prior_data = self.read_prior(zgrid=self.zgrid, \n **self.param.kwargs)\n \n if isinstance(self.param['PRIOR_FILTER'], str):\n ix = self.flux_columns.index(self.param['PRIOR_FILTER'])\n ix = np.arange(self.NFILT) == ix\n else:\n ix = self.f_numbers == int(self.param['PRIOR_FILTER'])\n \n if ix.sum() == 0:\n msg = 'PRIOR_FILTER ({0}) not found in the catalog!'\n warnings.warn(msg.format(self.param['PRIOR_FILTER']), \n AstropyUserWarning)\n \n self.prior_mag_cat = np.zeros(self.NOBJ, dtype=self.ARRAY_DTYPE)-1\n \n else:\n self.prior_mag_cat = self.param['PRIOR_ABZP'] \n self.prior_mag_cat += -2.5*np.log10(np.squeeze(self.fnu[:,ix]))\n self.prior_mag_cat[~np.isfinite(self.prior_mag_cat)] = -1\n \n for i in range(self.NOBJ):\n if self.prior_mag_cat[i] > 0:\n #print(i)\n pz = self._get_prior_mag(self.prior_mag_cat[i], \n self.prior_mags, self.prior_data)\n self.full_logprior[i,:] = np.log(pz)\n\n if verbose:\n print('Read PRIOR_FILE: ', self.param['PRIOR_FILE'])",
"def cat_prior_ent(p, ent_weight=1.0):\n log_prob = -cat_entropy * ent_weight\n return log_prob"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test vertex_areas. Vertex area is the area of all of the triangles that are in contact with that vertex. | def test_vertex_areas(self, faces, point):
number_of_contact_faces = gs.array([3, 5, 5, 5, 5, 5, 3, 5])
triangle_area = 0.5 * 2 * 2
expected = 2 * (number_of_contact_faces * triangle_area) / 3
space = self.Space(faces)
result = space.vertex_areas(point)
assert result.shape == (8,)
assert expected.shape == (8,)
assert gs.allclose(result, expected), result
point = gs.array([point, point])
expected = gs.array([expected, expected])
result = space.vertex_areas(point)
assert point.shape == (2, 8, 3)
assert result.shape == (2, 8), result.shape
assert gs.allclose(result, expected), result | [
"def test_get_triangle_area():\n v1 = (0,0); v2 = (1,0); v3 = (0,2)\n verticies = [v1,v2,v3]\n expected = 1\n computed = get_triangle_area(verticies)\n tol = 1E-14\n success = abs(expected-computed) < tol\n msg = 'computed area={} != {} (expected)'.format(computed,expected)\n assert success,msg",
"def test_regular_polygon_area(self):\n self.assertEqual(10, regular_polygon_area(\n self.values['perimeter'], self.values['apothem']))",
"def test_area_triangle_result(self):\n self.assertTrue(tm.area_triangle([1, 0, 0],\n [0, 1, 0], [0, 0, 0]) == 0.5 and\n np.isclose(tm.area_triangle([1, 0, 0],\n [0, 1, 0], [0, 0, 1]),\n 0.866025403784))",
"def compute_triangle_area(vertices):\n v01 = vertices[0] - vertices[1]\n v02 = vertices[0] - vertices[2]\n cross_prod = np.cross(v01, v02)\n area = 0.5 * np.linalg.norm(cross_prod)\n return area",
"def test_calc_area_triangle_vol():\n three_vols = df.DagmcFile(test_env['three_vols'])\n vol = three_vols.entityset_ranges['volumes'][0]\n three_vols_query = dq.DagmcQuery(three_vols, vol)\n three_vols_query.calc_area_triangle()\n np.testing.assert_almost_equal(\n list(three_vols_query._tri_data['area']), list(np.full(12, 50)))",
"def test_triangle(self):\n result = shape_area.triangle_area(10,5)\n self.assertEqual(result,25)",
"def test_triangle_area(self):\n self.assertEqual(6, triangle_area(\n self.values['base'], self.values['height']))",
"def test_polyarea(self):\n\n xcoords, ycoords = [0, 1, 1, 0, 0], [0, 0, 1, 1, 0]\n xycoords = np.stack((xcoords, ycoords), axis=1)\n\n # Area calculation from separately provided x, y coordinates\n self.assertEqual(po.polyarea(x=xcoords, y=ycoords), 1.)\n # Area calculation from combined x, y coordinates\n self.assertEqual(po.polyarea(coords=xycoords), 1.)",
"def test_triangle_positive_area(self):\n t = Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 ** 0.5, 6.023))\n self.assertEqual(t.area(1), 4.0,\n \"Test of Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 ** 0.5, 6.023)).area(1),\\\n returned value != 4.0.\")\n self.assertEqual(t.area(), 4.013,\n \"Test of Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 ** 0.5, 6.023)).area(1) failed,\\\n returned value != 4.013.\")\n self.assertEqual(t.area(6), 4.012568,\n \"Test of Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 ** 0.5, 6.023)).area(6) failed,\\\n returned value != 4.012568.\")",
"def test_inside_triangle(self):\n\n # defining triangle vertices\n v1x, v1y = 0, 0\n v2x, v2y = 1, 1\n v3x, v3y = 1, 0\n\n # test vertices are inside\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, v1x, v1y))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, v2x, v2y))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, v3x, v3y))\n\n # check line segments are inside\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, 0))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 1, 0.5))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, 0.5))\n\n # check an interior point\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, 0.1))\n\n # check an exterior point\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, -0.5, -0.5))\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, -0.01))\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 1.01, 0.5))\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.49999, 0.5001))",
"def select_area(minArea): \n # Switch in edit mode \n bpy.ops.object.mode_set(mode='EDIT')\n \n # Deselect everything\n bpy.ops.mesh.select_all(action=\"DESELECT\")\n \n # Load mesh\n me = bpy.context.edit_object.data\n bm = bmesh.from_edit_mesh(me)\n # Ensure internal data needed for int subscription is initialized\n bm.faces.ensure_lookup_table()\n\n # Array containing the different areas\n loops = []\n faces = bm.faces\n\n # Loop for detect multiple areas\n while faces:\n faces[0].select_set(True) # Select 1st face\n bpy.ops.mesh.select_linked() # Select all linked faces makes a full loop\n loops.append([f.index for f in faces if f.select])\n bpy.ops.mesh.hide(unselected=False) # Hide the detected loop\n faces = [f for f in bm.faces if not f.hide] # Update faces\n\n # Unhide all faces\n bpy.ops.mesh.reveal()\n print(\"Mesh has {} parts\".format(len(loops)))\n\n print(\"\\nThe face lists are:\")\n for loop in loops:\n print(loop)\n \n # Switch in edit mode \n bpy.ops.object.mode_set(mode='EDIT')\n # Deselect everything\n bpy.ops.mesh.select_all(action=\"DESELECT\")\n # Switch in object mode\n bpy.ops.object.mode_set(mode='OBJECT')\n\n # Loop to select areas are higher than the area min\n area = 0 \n for rows in range(len(loops)):\n area = 0\n for columns in loops[rows]:\n # Calculate the area\n area = area + bpy.context.active_object.data.polygons[columns].area\n print(rows)\n print(area)\n print(minArea)\n # Compare the area with the area min\n if area > minArea:\n for columns in loops[rows]:\n # Select all the faces of the area\n bpy.context.active_object.data.polygons[columns].select = True\n\n # Switch in edit mode \n bpy.ops.object.mode_set(mode='EDIT')",
"def test_polygon_area(self):\n\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])\n A = calculate_polygon_area(P)\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n # Create closed simple polygon (clock wise)\n P = numpy.array([[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]])\n A = calculate_polygon_area(P)\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n A = calculate_polygon_area(P, signed=True)\n msg = 'Calculated signed area was %f, expected -1.0 deg^2' % A\n assert numpy.allclose(A, -1), msg\n\n # Not starting at zero\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[168, -2], [169, -2], [169, -1],\n [168, -1], [168, -2]])\n A = calculate_polygon_area(P)\n\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'test_polygon.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n\n P = geometry[0]\n A = calculate_polygon_area(P)\n\n # Verify against area reported by qgis (only three decimals)\n qgis_area = 0.003\n assert numpy.allclose(A, qgis_area, atol=1.0e-3)\n\n # Verify against area reported by ESRI ARC (very good correspondence)\n esri_area = 2.63924787273461e-3\n assert numpy.allclose(A, esri_area, rtol=0, atol=1.0e-10)",
"def calculate_triangle_areas(self):\n # get three corner points (each with x and y coord) of one triangle (with index 100)\n a, b, c = [np.array([self.triobject.x[k], self.triobject.y[k]]) for k in self.triobject.triangles[100]]\n # Area of the triangle = 1/2 * |AC x AB|_z (x = cross product)\n self.triangle_area_m2 = 1e6 * abs(0.5 * ((c - a)[0] * (b - a)[1] - (c - a)[1] * (b - a)[0])) # in m^2\n # Area of Easter Island in the discretised state\n self.area_map_m2 = self.triangle_area_m2 * self.n_triangles_map\n # Number of gardens per cell (rounded down)\n self.n_gardens_percell = int(self.triangle_area_m2 / self.m.garden_area_m2)\n print(\"Area of triangles in m^2: {}; Area of discretised EI: {}; Nr of gardens per cell: {}\".format(\n self.triangle_area_m2, self.area_map_m2, self.n_gardens_percell))\n return",
"def isInside(x1, y1, x2, y2, x3, y3, x, y):\n # Calculate area of triangle ABC\n A = area (x1, y1, x2, y2, x3, y3)\n \n # Calculate area of triangle PBC\n A1 = area (x, y, x2, y2, x3, y3)\n \n # Calculate area of triangle PAC\n A2 = area (x1, y1, x, y, x3, y3)\n \n # Calculate area of triangle PAB\n A3 = area (x1, y1, x2, y2, x, y)\n \n # Check if sum of A1, A2 and A3\n # is same as A\n if(A == A1 + A2 + A3):\n return True\n else:\n return False",
"def test_area(self):\n self.assertEqual(self.single_pixel.area(), 1)\n self.assertEqual(self.first_rectangle.area(), 12)\n self.assertEqual(self.second_rectangle.area(), 12)",
"def identify_points_in_area(self) -> None:\n self.points_in_area = np.array([])\n self.points_outside_area = np.array([])\n currently_in_area = False\n p1 = None\n\n # Checking whether coordinates are inside of the given area.\n for i in np.arange(np.shape(self.positions)[0]):\n number_of_intersections = 0\n\n # If the ray intersects with the area even times,\n # the point is not inside of the area.\n for border in np.arange(np.shape(self.area_coordinates)[0]):\n for j in np.arange(\n -1,\n np.shape(self.area_coordinates[border])[0] - 1,\n ):\n if self.do_two_line_segments_intersect(\n self.area_coordinates[border][j],\n self.area_coordinates[border][j + 1],\n np.array([self.positions[i][0], self.positions[i][1]]),\n np.array([190, self.positions[i][1]]),\n ):\n number_of_intersections += 1\n\n # If the number of intersections is odd,\n # the point is inside of the given area.\n if number_of_intersections % 2 == 1:\n if not currently_in_area:\n p1 = int(i)\n currently_in_area = True\n else:\n if currently_in_area:\n self.points_in_area = np.append(\n self.points_in_area,\n (p1, int(i) - 1),\n )\n p1 = None\n currently_in_area = False\n\n self.points_in_area = self.points_in_area.astype('int32')\n self.points_in_area = np.reshape(self.points_in_area, (-1, 2))",
"def check_integrity(self):\n\n from anuga.config import epsilon\n from anuga.utilities.numerical_tools import anglediff\n\n N = len(self)\n\n # Get x,y coordinates for all vertices for all triangles\n V = self.get_vertex_coordinates()\n\n# # Check each triangle\n# for i in xrange(0):\n#\n# x0, y0 = V[3*i, :]\n# x1, y1 = V[3*i+1, :]\n# x2, y2 = V[3*i+2, :]\n#\n# # Check that area hasn't been compromised\n# area = self.areas[i]\n# ref = -((x1*y0-x0*y1)+(x2*y1-x1*y2)+(x0*y2-x2*y0))/2\n# msg = 'Triangle %i (%f,%f), (%f,%f), (%f, %f)' % (i, x0,y0,x1,y1,x2,y2)\n# msg += 'Wrong area: %f %f'\\\n# %(area, ref)\n# assert abs((area - ref)/area) < epsilon, msg\n#\n# msg = 'Triangle %i (%f,%f), (%f,%f), (%f, %f)' % (i, x0,y0,x1,y1,x2,y2)\n# msg += ' is degenerate: area == %f' % self.areas[i]\n# assert area > 0.0, msg\n#\n# # Check that points are arranged in counter clock-wise order\n# v0 = [x1-x0, y1-y0]\n# v1 = [x2-x1, y2-y1]\n# v2 = [x0-x2, y0-y2]\n# a0 = anglediff(v1, v0)\n# a1 = anglediff(v2, v1)\n# a2 = anglediff(v0, v2)\n#\n# msg = '''Vertices (%s,%s), (%s,%s), (%s,%s) are not arranged\n# in counter clockwise order''' %(x0, y0, x1, y1, x2, y2)\n# assert a0 < pi and a1 < pi and a2 < pi, msg\n#\n# # Check that normals are orthogonal to edge vectors\n# # Note that normal[k] lies opposite vertex k\n#\n# normal0 = self.normals[i, 0:2]\n# normal1 = self.normals[i, 2:4]\n# normal2 = self.normals[i, 4:6]\n#\n# for u, v in [ (v0, normal2), (v1, normal0), (v2, normal1) ]:\n#\n# # Normalise\n# l_u = num.sqrt(u[0]*u[0] + u[1]*u[1])\n# l_v = num.sqrt(v[0]*v[0] + v[1]*v[1])\n#\n# msg = 'Normal vector in triangle %d does not have unit length' %i\n# assert num.allclose(l_v, 1), msg\n#\n# x = (u[0]*v[0] + u[1]*v[1])/l_u # Inner product\n#\n# msg = 'Normal vector (%f,%f) is not perpendicular to' %tuple(v)\n# msg += ' edge (%f,%f) in triangle %d.' %(tuple(u) + (i,))\n# msg += ' Inner product is %e.' 
%x\n# assert x < epsilon, msg\n\n\n # let's try numpy constructs\n\n x0 = V[0::3, 0]\n y0 = V[0::3, 1]\n x1 = V[1::3, 0]\n y1 = V[1::3, 1]\n x2 = V[2::3, 0]\n y2 = V[2::3, 1]\n\n\n #print 'check areas'\n area = self.areas\n\n ref = -((x1*y0-x0*y1)+(x2*y1-x1*y2)+(x0*y2-x2*y0))/2\n\n\n assert num.sum(num.abs((area - ref)/area)) < epsilon, 'Error in areas'\n\n assert num.all(area > 0.0), 'A negative area'\n\n\n tx0 = x2 - x1\n ty0 = y2 - y1\n a0 = num.sqrt(tx0**2 + ty0**2)\n\n\n tx0 = tx0/a0\n ty0 = ty0/a0\n\n\n tx1 = x0 - x2\n ty1 = y0 - y2\n a1 = num.sqrt(tx1**2 + ty1**2)\n tx1 = tx1/a1\n ty1 = ty1/a1\n\n tx2 = x1 - x0\n ty2 = y1 - y0\n a2 = num.sqrt(tx2**2 + ty2**2)\n tx2 = tx2/a2\n ty2 = ty2/a2\n\n nx0 = self.normals[:,0]\n ny0 = self.normals[:,1]\n nx1 = self.normals[:,2]\n ny1 = self.normals[:,3]\n nx2 = self.normals[:,4]\n ny2 = self.normals[:,5]\n\n\n assert num.all(tx0*nx0 + ty0*ny0 < epsilon), 'Normal not perpendicular to edge'\n assert num.all(tx1*nx1 + ty1*ny1 < epsilon), 'Normal not perpendicular to edge'\n assert num.all(tx2*nx2 + ty2*ny2 < epsilon), 'Normal not perpendicular to edge'\n\n\n #print 'check normals are unit length'\n assert num.all(num.abs(nx0**2 + ny0**2 - 1) < epsilon), 'Normal are not normalised'\n assert num.all(num.abs(nx1**2 + ny1**2 - 1) < epsilon), 'Normal are not normalised'\n assert num.all(num.abs(nx2**2 + ny2**2 - 1) < epsilon), 'Normal are not normalised'\n\n\n\n # Check that neighbour of neighbour is self\n\n # 0 neighbours\n neighs = self.neighbours\n ids = num.arange(len(neighs))\n\n # 0 neighbours\n nid = neighs[:, 0]\n eid = self.neighbour_edges[:, 0]\n nnid = num.argwhere(nid>-1).reshape(-1,)\n nid = nid[nnid]\n eid = eid[nnid]\n id = ids[nnid]\n\n assert num.all(neighs[nid, eid] == id)\n\n # 1 neighbours\n nid = neighs[:, 1]\n eid = self.neighbour_edges[:, 1]\n nnid = num.argwhere(nid>-1).reshape(-1,)\n nid = nid[nnid]\n eid = eid[nnid]\n id = ids[nnid]\n\n assert num.all(neighs[nid, eid] == id)\n\n # 2 neighbours\n nid = neighs[:, 2]\n eid = self.neighbour_edges[:, 2]\n nnid = num.argwhere(nid>-1).reshape(-1,)\n nid = nid[nnid]\n eid = eid[nnid]\n id = ids[nnid]\n\n assert num.all(neighs[nid, eid] == id)\n\n\n\n\n# # Check neighbour structure\n# for i in xrange(N):\n# # For each triangle\n#\n# for k, neighbour_id in enumerate(self.neighbours[i,:]):\n#\n# #Assert that my neighbour's neighbour is me\n# #Boundaries need not fulfill this\n# if neighbour_id >= 0:\n# edge = self.neighbour_edges[i, k]\n# msg = 'Triangle %d has neighbour %d but it does not point back. 
\\n' %(i,neighbour_id)\n# msg += 'Only points to (%s)' %(self.neighbours[neighbour_id,:])\n# assert self.neighbours[neighbour_id, edge] == i ,msg\n\n\n\n #Check that all boundaries have\n # unique, consecutive, negative indices\n\n #L = len(self.boundary)\n #for i in range(L):\n # id, edge = self.boundary_segments[i]\n # assert self.neighbours[id, edge] == -i-1\n\n\n #NOTE: This assert doesn't hold true if there are internal boundaries\n #FIXME: Look into this further.\n #FIXME (Ole): In pyvolution mark 3 this is OK again\n #NOTE: No longer works because neighbour structure is modified by\n # domain set_boundary.\n #for id, edge in self.boundary:\n # assert self.neighbours[id,edge] < 0\n #\n #NOTE (Ole): I reckon this was resolved late 2004?\n #\n #See domain.set_boundary\n\n\n\n #Check integrity of inverted triangle structure\n\n V = self.vertex_value_indices[:] #Take a copy\n V = num.sort(V)\n assert num.allclose(V, list(range(3*N)))\n\n assert num.sum(self.number_of_triangles_per_node) ==\\\n len(self.vertex_value_indices)\n\n \n # Check number of triangles per node\n# count = [0]*self.number_of_nodes\n# for triangle in self.triangles:\n# for i in triangle:\n# count[i] += 1\n\n count = num.bincount(self.triangles.flat)\n\n\n ncount = len(count)\n #print len(count)\n #print len(self.number_of_triangles_per_node)\n\n\n number_of_lone_nodes = self.number_of_nodes - len(self.number_of_triangles_per_node)\n\n\n assert num.allclose(count, self.number_of_triangles_per_node[:ncount])\n\n\n from .neighbour_mesh_ext import check_integrity_c\n\n\n #print self.vertex_value_indices.shape\n #print self.triangles.shape\n #print self.node_index.shape\n #print self.number_of_triangles_per_node.shape\n\n\n #print 'vertex_value_indices', self.vertex_value_indices.dtype\n #print 'triangles',self.triangles.dtype\n #print 'node_index',self.node_index.dtype\n #print 'number_of_triangles_per_node',self.number_of_triangles_per_node.dtype\n\n\t\t\n check_integrity_c(self.vertex_value_indices,\n self.triangles,\n self.node_index,\n self.number_of_triangles_per_node)",
"def test_area(self):\n self.assertEqual(self.rectangle.area(), 24)\n self.assertEqual(self.rectangle_1.area(), 7)\n self.assertEqual(self.rectangle_2.area(), 8)\n self.assertEqual(self.rectangle_3.area(), 10)",
"def compute_areas(points: PointList):\r\n for x, y in itertools.product(range(points.min_x, points.max_x + 1), range(points.min_y, points.max_y + 1)):\r\n other = Point(x, y)\r\n closest = other.closest(points)\r\n if closest is not None:\r\n closest.area += 1\r\n if x == points.min_x or x == points.max_x or y == points.min_y or y == points.max_y:\r\n closest.can_have_infinite_area = True\r\n return points"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
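A note on the row above: the test only checks shapes and expected values, so here is a minimal numpy sketch (not the library's implementation, and with the normalization factor left as a library convention) of accumulating incident-triangle areas onto vertices, which is the per-vertex quantity being tested.

import numpy as np

def vertex_areas_sketch(vertices, faces):
    # vertices: (n_vertices, 3) float array; faces: (n_faces, 3) int array
    v0, v1, v2 = (vertices[faces[:, i]] for i in range(3))
    # Triangle area = 0.5 * |(v1 - v0) x (v2 - v0)|
    tri_areas = 0.5 * np.linalg.norm(np.cross(v1 - v0, v2 - v0), axis=1)
    areas = np.zeros(len(vertices))
    for i in range(3):
        # add each triangle's area to all three of its corner vertices
        np.add.at(areas, faces[:, i], tri_areas)
    return areas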
Test normals. We test this on a space whose initializing point is a cube with sides of length 2 centered at the origin. The cube is meshed with 12 triangles (2 triangles per face). Recall that the magnitude of each normal vector is equal to the area of the face it is normal to. | def test_normals(self, faces, point):
space = self.Space(faces=faces)
cube_normals = gs.array(
[
[0.0, 0.0, 2.0],
[0.0, 0.0, 2.0],
[0.0, 2.0, 0.0],
[0.0, 2.0, 0.0],
[2.0, 0.0, 0.0],
[2.0, 0.0, 0.0],
[0.0, -2.0, 0.0],
[0.0, -2.0, 0.0],
[-2.0, 0.0, 0.0],
[-2.0, 0.0, 0.0],
[0.0, 0.0, -2.0],
[0.0, 0.0, -2.0],
]
)
expected = cube_normals
result = space.normals(point)
are_close = [
(gs.allclose(res, exp) or gs.allclose(res, -exp))
for res, exp in zip(result, expected)
]
assert gs.all(are_close)
point = gs.array([point, point])
result = space.normals(point)
are_close_0 = [
(gs.allclose(res, exp) or gs.allclose(res, -exp))
for res, exp in zip(result[0], expected)
]
are_close_1 = [
(gs.allclose(res, exp) or gs.allclose(res, -exp))
for res, exp in zip(result[1], expected)
]
assert gs.all(gs.array([are_close_0, are_close_1])) | [
"def test_surface_normal(self):\n vertices = np.array([[0, 1, 0], [0, 0, 0], [1, 0, 0]])\n expected = np.array([0, 0, 1])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Test against multiple triangles\n vertices = np.r_[vertices[np.newaxis, :, :], [[[0, 0, 0], [0, 2, 0], [2, 0, 0]]]]\n expected = np.array([[0, 0, 1], [0, 0, -1]])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Some real data\n vertices = np.array([[2.435, -1.82, -0.53], [2.635, -2., -0.58], [2.535, -1.7, -0.58]])\n expected = np.array([0.33424239, 0.11141413, 0.93587869])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Test input validation\n self.assertRaises(ValueError, surface_normal, np.array([[1, 2, 3, 4]]))",
"def test_face_normals_random(self):\n tensor_vertex_size = np.random.randint(1, 3)\n tensor_out_shape = np.random.randint(1, 5, size=tensor_vertex_size)\n tensor_out_shape = tensor_out_shape.tolist()\n tensor_vertex_shape = list(tensor_out_shape)\n tensor_vertex_shape[-1] *= 3\n tensor_index_shape = tensor_out_shape[-1]\n\n for i in range(3):\n vertices = np.random.random(size=tensor_vertex_shape + [3])\n indices = np.arange(tensor_vertex_shape[-1])\n np.random.shuffle(indices)\n indices = np.reshape(indices,\n newshape=[1] * (tensor_vertex_size - 1) \\\n + [tensor_index_shape, 3])\n indices = np.tile(indices, tensor_vertex_shape[:-1] + [1, 1])\n vertices[..., i] = 0.\n expected = np.zeros(shape=tensor_out_shape + [3], dtype=vertices.dtype)\n expected[..., i] = 1.\n faces = normals.gather_faces(vertices, indices)\n\n self.assertAllClose(\n tf.abs(normals.face_normals(faces)), expected, rtol=1e-3)",
"def test_face_normals_preset(self, test_inputs, test_outputs):\n faces = normals.gather_faces(*test_inputs[:2])\n test_inputs = [faces] + list(test_inputs[2:])\n\n self.assert_output_is_correct(\n normals.face_normals, test_inputs, test_outputs, tile=False)",
"def FaceNormals(self):\n\n self.__do_memebers_exist__()\n\n points = np.copy(self.points)\n if points.shape[1] < 3:\n dum = np.zeros((points.shape[0],3))\n dum[:,:points.shape[1]] = points\n points = dum\n\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n faces = self.faces\n elif self.element_type == \"tri\" or self.element_type == \"quad\":\n faces = self.elements\n else:\n raise ValueError(\"Cannot compute face normals on {}\".format(self.element_type))\n\n\n face_coords = self.points[faces[:,:3],:]\n\n p1p0 = face_coords[:,1,:] - face_coords[:,0,:]\n p2p0 = face_coords[:,2,:] - face_coords[:,0,:]\n\n normals = np.cross(p1p0,p2p0)\n norm_normals = np.linalg.norm(normals,axis=1)\n normals[:,0] /= norm_normals\n normals[:,1] /= norm_normals\n normals[:,2] /= norm_normals\n\n # CHECK IF THE NORMAL IS OUTWARD - FOR LINES DIRECTIONALITY DOES NOT MATTER\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n self.GetElementsWithBoundaryFaces()\n meds = self.Medians()\n face_element_meds = meds[self.boundary_face_to_element[:,0],:]\n p1pm = face_coords[:,1,:] - face_element_meds\n # IF THE DOT PROUCT OF NORMALS AND EDGE-MED NODE VECTOR IS NEGATIVE THEN FLIP\n _check = np.einsum(\"ij,ij->i\",normals,p1pm)\n normals[np.less(_check,0.)] = -normals[np.less(_check,0.)]\n\n return normals",
"def test_normal_unit_length(self):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n normals = np.array(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[3:6])\n lengths = np.sum(normals * normals, axis=0)\n np.testing.assert_almost_equal(np.ones_like(lengths), lengths)",
"def _triangle_normals(self):\n \n normals = np.zeros((self.triangles.shape[0], 3))\n components = self.vertices[self.triangles]\n \n for i, c in enumerate(components):\n d0 = c[0] - c[1]\n d1 = c[0] - c[2]\n \n n = np.cross(d0, d1)\n normals[i] = n/np.linalg.norm(n)\n \n return normals",
"def test_normal_always_up(self):\n z_of_normals = []\n for i in range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n z_of_normals += list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[5])\n np.testing.assert_array_less(np.zeros_like(z_of_normals), z_of_normals)",
"def test_vertex_normals_preset(self, test_inputs, test_outputs):\n self.assert_output_is_correct(\n normals.vertex_normals, test_inputs, test_outputs, tile=False)",
"def normals(facets, points):\n nv = [vo.normal(points[i], points[j], points[k]) for i, j, k in facets]\n return vo.indexate(nv)",
"def _vertex_normals(self):\n\n t_norms = self.normals(kind='triangle')\n t_areas = self.areas()\n t_adj = self.triangle_membership()\n \n v_norms = np.zeros((32492, 3))\n\n for i in np.arange(self.vertices.shape[0]):\n \n # compute contribution of each triangle\n weights = (t_areas[t_adj[i]]/t_areas[t_adj[i]].sum())[:, None]\n # weight triangle normals by contribution\n v_norm = weights*t_norms[t_adj[i]]\n # compute mean normal\n v_norm = v_norm.mean(0)\n # normalize to unit length\n v_norm = v_norm / np.linalg.norm(v_norm)\n v_norms[i] = v_norm\n \n return v_norms",
"def normals(triangles):\n edges = np.roll(triangles, -1, axis=1)-np.roll(triangles, +1, axis=1)\n return normalize(null(edges))",
"def generate_normals(v1, v2, v3, normalize_result=True):\n # make vectors relative to v2\n # we assume opengl counter-clockwise ordering\n a = v1 - v2\n b = v3 - v2\n n = cross(b, a)\n if normalize_result:\n n = normalize(n)\n return n",
"def _check_normal_vec(self, i_face, vec_n):\n\n id_o = self.owner[i_face]\n id_n = self.neighbour[i_face]\n\n if id_n >= 0: # For inner faces\n vec_lr = self.centers[id_n] - self.centers[id_o]\n else: # For boundary faces\n vec_lr = self.face_centers[i_face] - self.centers[id_o]\n\n if np.dot(vec_lr, vec_n) < 0.0:\n print('Flip normal vector!')\n return -1.0\n else:\n return 1.0",
"def face_normals(self) -> np.ndarray:\n if self._face_normals is None:\n self.compute_face_normals()\n assert self._face_normals is not None\n return self._face_normals",
"def update_normals(self):\r\n spine = self.centers\r\n self.normals = np.vstack((spine[1, :] - spine[0, :], spine[2:, :] - spine[:-2, :], spine[-1, :] - spine[-2, :]))",
"def parse_normals(lines):\n print \" * Parsing normals\"\n return _parse_vn(lines, \"vn %.6f %.6f %.6f\")",
"def compareNormals():\n computeNormals = False\n if computeNormals:\n r1,r2,r3 = read('r1'),read('r2'),read('r3')\n r = [r1,r2,r3]\n x2 = [like(r1),like(r1),like(r1)]\n x3 = [like(r1),like(r1),like(r1)]\n v = [like(r1),like(r1),like(r1)]\n FlattenerUtil.getFrame(r,None,x2,x3)\n FlattenerUtil.cross(x3,x2,v)\n FlattenerUtil.normalize(v,v)\n write('v1',v[0])\n write('v2',v[1])\n write('v3',v[2])\n v1,v2,v3 = read('v1'),read('v2'),read('v3')\n u1,u2,u3 = read('u1'),read('u2'),read('u3')\n display(sub(v1,u1),cmap=rwb,cmin=-0.2,cmax=0.2,name='v1-u1')\n display(sub(v2,u2),cmap=rwb,cmin=-0.2,cmax=0.2,name='v2-u2')\n display(sub(v3,u3),cmap=rwb,cmin=-0.2,cmax=0.2,name='v3-u3')",
"def testParsingOfNormals(self):\n self.assertEqual(len(self.objStream.GetNormals()), 1289)",
"def calculateMeshNormal(mesh_face_vertices):\n mesh_normal = []\n for mesh in mesh_face_vertices:\n v1x = mesh[1, 0] - mesh[0, 0]\n v1y = mesh[1, 1] - mesh[0, 1]\n v1z = mesh[1, 2] - mesh[0, 2]\n v2x = mesh[2, 0] - mesh[1, 0]\n v2y = mesh[2, 1] - mesh[1, 1]\n v2z = mesh[2, 2] - mesh[1, 2]\n \n normal = np.array([v1y * v2z - v1z * v2y, v1z * v2x - v1x * v2z, v1x * v2y - v1y * v2x])\n normal = normal / np.max((np.linalg.norm(normal), 1e-5))\n normal = (normal + 1) * 127.5\n mesh_normal.append(normal)\n return np.array(mesh_normal)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
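For the row above, the property the test relies on can be sketched directly: half of the cross product of two triangle edges is a face normal whose length equals the triangle's area (matching the expected magnitudes of 2 for that cube). This is an illustrative sketch only; the library's own routine may differ, and the sign depends on vertex ordering, which is why the test accepts either orientation.

import numpy as np

def face_normals_sketch(vertices, faces):
    # One normal per triangular face; its norm equals the face area.
    v0, v1, v2 = (vertices[faces[:, i]] for i in range(3))
    return 0.5 * np.cross(v1 - v0, v2 - v0)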
Test surface metric matrices. | def test_surface_metric_matrices(self, faces, point):
space = self.Space(faces=faces)
result = space.surface_metric_matrices(point=point)
assert result.shape == (
space.n_faces,
2,
2,
), result.shape
point = gs.array([point, point])
result = space.surface_metric_matrices(point=point)
assert result.shape == (2, space.n_faces, 2, 2) | [
"def surface_test(xgrid, ygrid):\n\txfactor = 2*numpy.pi/20\n\tyfactor = 2*numpy.pi/11\n\treturn numpy.sin(xgrid*xfactor) * numpy.cos(ygrid*yfactor)",
"def test_surfacegrid():\n elem = omf.surface.TensorGridSurface()\n elem.tensor_u = [1.0, 1.0]\n elem.tensor_v = [2.0, 2.0, 2.0]\n assert elem.validate()\n assert elem.location_length(\"vertices\") == 12\n assert elem.location_length(\"faces\") == 6\n elem.axis_v = [1.0, 1.0, 0]\n with pytest.raises(ValueError):\n elem.validate()\n elem.axis_v = \"Y\"\n elem.offset_w = np.random.rand(12)\n elem.validate()\n elem.offset_w = np.random.rand(6)\n with pytest.raises(ValueError):\n elem.validate()",
"def surface_metric_matrices(self, point):\n one_forms = self.surface_one_forms(point)\n\n return self._surface_metric_matrices_from_one_forms(one_forms)",
"def _test_surface_under_function(info):\n assert isinstance(info, TestInfo)\n data = [\n # Each element is a tuple (function, x_lo, x_hi, delta_x, correct_result, func_text)\n (lambda x: 2.0, -1.0, 2.0, 0.01, 6.0, \"lambda x: 2.0\"),\n (lambda x: 3 - x, 0.0, 3.0, 0.0005, 4.5, \"lambda x: 3 - x\"),\n (lambda x: x**2 - 1, -2.0, 2.0, 0.001, 2.0*(-2.0/3.0 + 4.0/3.0), \"lambda x: x**2 - 1\")\n ]\n num_failures = 0\n for func, x_lo, x_hi, delta_x, correct_result, func_text in data:\n result = utility.compute_surface_under_function(func, x_lo, x_hi, None, delta_x)\n if abs(result - correct_result) > 0.001:\n print(\"FAILURE: func=\" + func_text +\n \", x_lo=\" + str(x_lo) +\n \", x_hi=\" + str(x_hi) +\n \", delta_x=\" + str(delta_x) +\n \", correct_result=\" + str(correct_result) +\n \", computed_result=\" + str(result)\n )\n num_failures += 1\n return num_failures",
"def test_symmetry_surface_average_2(self):\n\n def test(grid, basis, true_avg=1):\n transform = Transform(grid, basis)\n\n # random data with specified average on each surface\n coeffs = np.random.rand(basis.num_modes)\n coeffs[np.where((basis.modes[:, 1:] == [0, 0]).all(axis=1))[0]] = 0\n coeffs[np.where((basis.modes == [0, 0, 0]).all(axis=1))[0]] = true_avg\n\n # compute average for each surface in grid\n values = transform.transform(coeffs)\n numerical_avg = surface_averages(grid, values, expand_out=False)\n if isinstance(grid, ConcentricGrid):\n # values closest to axis are never accurate enough\n numerical_avg = numerical_avg[1:]\n np.testing.assert_allclose(\n numerical_avg,\n true_avg,\n err_msg=str(type(grid)) + \" \" + str(grid.sym),\n )\n\n M = 10\n M_grid = 23\n test(\n QuadratureGrid(L=M_grid, M=M_grid, N=0),\n FourierZernikeBasis(L=M, M=M, N=0),\n )\n test(\n LinearGrid(L=M_grid, M=M_grid, N=0, sym=True),\n FourierZernikeBasis(L=M, M=M, N=0, sym=\"cos\"),\n )\n test(\n ConcentricGrid(L=M_grid, M=M_grid, N=0),\n FourierZernikeBasis(L=M, M=M, N=0),\n )\n test(\n ConcentricGrid(L=M_grid, M=M_grid, N=0, sym=True),\n FourierZernikeBasis(L=M, M=M, N=0, sym=\"cos\"),\n )",
"def test_compare_outputs_surface_form(self):\n # load models\n options = [\n {\"surface form\": cap} for cap in [\"false\", \"differential\", \"algebraic\"]\n ]\n model_combos = [\n ([pybamm.lead_acid.LOQS(opt) for opt in options]),\n ([pybamm.lead_acid.Full(opt) for opt in options]),\n ]\n\n for models in model_combos:\n # load parameter values (same for all models)\n param = models[0].default_parameter_values\n param.update({\"Current function [A]\": 1})\n for model in models:\n param.process_model(model)\n\n # set mesh\n var_pts = {\"x_n\": 5, \"x_s\": 5, \"x_p\": 5}\n\n # discretise models\n discs = {}\n for model in models:\n geometry = model.default_geometry\n param.process_geometry(geometry)\n mesh = pybamm.Mesh(geometry, model.default_submesh_types, var_pts)\n disc = pybamm.Discretisation(mesh, model.default_spatial_methods)\n disc.process_model(model)\n discs[model] = disc\n\n # solve model\n solutions = []\n t_eval = np.linspace(0, 3600 * 20, 100)\n for model in models:\n solution = pybamm.CasadiSolver().solve(model, t_eval)\n solutions.append(solution)\n\n # compare outputs\n comparison = StandardOutputComparison(solutions)\n comparison.test_all(skip_first_timestep=True)",
"def test_comp_surface(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface()\n\n a = result\n b = test_dict[\"S_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)\n\n b = comp_surface(test_obj.slot)\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)",
"def test_random_surface():\n np.random.seed(0)\n vertices = np.random.random((10, 2))\n faces = np.random.randint(10, size=(6, 3))\n values = np.random.random(10)\n data = (vertices, faces, values)\n layer = Surface(data)\n assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])\n assert np.all(layer.vertices == vertices)\n assert np.all(layer.faces == faces)\n assert np.all(layer.vertex_values == values)\n assert layer._data_view.shape[1] == 2",
"def test_symmetry_surface_average_1(self):\n\n def test(grid):\n r = grid.nodes[:, 0]\n t = grid.nodes[:, 1]\n z = grid.nodes[:, 2] * grid.NFP\n true_surface_avg = 5\n function_of_rho = 1 / (r + 0.35)\n f = (\n true_surface_avg\n + np.cos(t)\n - 0.5 * np.cos(z)\n + 3 * np.cos(t) * np.cos(z) ** 2\n - 2 * np.sin(z) * np.sin(t)\n ) * function_of_rho\n np.testing.assert_allclose(\n surface_averages(grid, f),\n true_surface_avg * function_of_rho,\n rtol=1e-15,\n err_msg=type(grid),\n )\n\n # these tests should be run on relatively low resolution grids,\n # or at least low enough so that the asymmetric spacing test fails\n L = [3, 3, 5, 3]\n M = [3, 6, 5, 7]\n N = [2, 2, 2, 2]\n NFP = [5, 3, 5, 3]\n sym = np.asarray([True, True, False, False])\n # to test code not tested on grids made with M=.\n even_number = 4\n n_theta = even_number - sym\n\n # asymmetric spacing\n with pytest.raises(AssertionError):\n theta = 2 * np.pi * np.asarray([t**2 for t in np.linspace(0, 1, max(M))])\n test(LinearGrid(L=max(L), theta=theta, N=max(N), sym=False))\n\n for i in range(len(L)):\n test(LinearGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i], sym=sym[i]))\n test(LinearGrid(L=L[i], theta=n_theta[i], N=N[i], NFP=NFP[i], sym=sym[i]))\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, 2 * np.pi, n_theta[i]),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, 2 * np.pi, n_theta[i] + 1),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )\n test(QuadratureGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i]))\n test(ConcentricGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i], sym=sym[i]))\n # nonuniform spacing when sym is False, but spacing is still symmetric\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, np.pi, n_theta[i]),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, np.pi, n_theta[i] + 1),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )",
"def test_matrix_measure_2(self):\n observed = np.array([0, 1, 0, 1, 0, 0, 1])\n calculated = np.array([0, 1, 1, 0, 0, 0, 1])\n\n measure = evaluation.classification_model_performance_matrix(observed, calculated)\n expected_measure = np.array([[3, 1], [1, 2]])\n\n np.testing.assert_array_almost_equal(measure, expected_measure)",
"def kernel_test(slabs, data, backend):\n with use_reflect_backend(backend) as abeles:\n R = abeles(data[:, 0], slabs)\n assert R.shape == data[:, 1].shape\n\n np.testing.assert_allclose(R, data[:, 1], rtol=8e-5)",
"def test_matrix_measure_1(self):\n observed = np.array([0, 1, 1, 0, 0, 0, 1])\n calculated = np.array([0, 1, 1, 0, 0, 0, 1])\n\n measure = evaluation.classification_model_performance_matrix(observed, calculated)\n expected_measure = np.array([[4, 0], [0, 3]])\n\n np.testing.assert_array_almost_equal(measure, expected_measure)",
"def _surface_metric_matrices_from_one_forms(one_forms):\n ndim = one_forms.ndim\n transpose_axes = tuple(range(ndim - 2)) + tuple(reversed(range(ndim - 2, ndim)))\n transposed_one_forms = gs.transpose(one_forms, axes=transpose_axes)\n return gs.matmul(one_forms, transposed_one_forms)",
"def test_score_matrix_score(self):\n ### FILL IN ###\n # this should be very similar to test match matrix\n M = ScoreMatrix(\"M\",2,2)\n M.set_score(1,1,3)\n self.assertEqual(M.get_score(1,1),3)",
"def test_massmatrix_result(self):\n testtrimesh = tm.TriMesh(self.testdatatriangles,\n self.testdatavertices)\n (self.assertTrue(np.allclose(\n testtrimesh.massmatrix(),\n self.testdatamassmatrix)))",
"def test_calc_tris_per_surf_vol():\n three_vols = df.DagmcFile(test_env['three_vols'])\n vol = three_vols.entityset_ranges['volumes'][0]\n three_vols_query = dq.DagmcQuery(three_vols, vol)\n three_vols_query.calc_tris_per_surf()\n assert(sorted(three_vols_query._surf_data['tri_per_surf']) == list(np.full(6,2)))",
"def test_surface():\n elem = omf.surface.Surface()\n elem.vertices = np.random.rand(10, 3)\n elem.triangles = np.random.randint(9, size=[5, 3])\n assert elem.validate()\n assert elem.location_length(\"vertices\") == 10\n assert elem.location_length(\"faces\") == 5\n elem.triangles.array[0, 0] = -1\n with pytest.raises(ValueError):\n elem.validate()\n elem.triangles.array[0, 0] = 10\n with pytest.raises(ValueError):\n elem.validate()",
"def test_comparison_of_rates(self): \n tensor = True\n# matrix = True\n \n dim = self.H1.dim\n KT = numpy.zeros((dim,dim), dtype=REAL)\n KM = numpy.zeros((dim,dim), dtype=REAL)\n \n if tensor:\n #print(self.H1)\n LT = LindbladForm(self.H1, self.sbi1, as_operators=False)\n \n for n in range(2):\n for m in range(2):\n #print(n,m,numpy.real(RT.data[n,n,m,m]))\n KT[n,m] = numpy.real(LT.data[n,n,m,m])\n \n KM = numpy.zeros((dim,dim))\n KM[0,0] = -self.rates[1]\n KM[1,1] = -self.rates[0]\n KM[0,1] = self.rates[0]\n KM[1,0] = self.rates[1]\n \n \n numpy.testing.assert_allclose(KT,KM, rtol=1.0e-2)",
"def test_score_matrix_score(self):\n score_matrix = ScoreMatrix('test', 3, 4)\n score_matrix.set_score(1,2,10)\n self.assertEqual(score_matrix.get_score(1,2), 10)\n # this should be very similar to test match matrix\n return"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
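One of the negatives in the row above shows the metric matrices being formed as one_forms @ one_forms^T. A self-contained numpy sketch of that construction, assuming (as an illustrative choice, not necessarily the library's) that a face's one-forms are its two edge vectors:

import numpy as np

def surface_metric_matrices_sketch(vertices, faces):
    v0, v1, v2 = (vertices[faces[:, i]] for i in range(3))
    one_forms = np.stack([v1 - v0, v2 - v0], axis=-2)   # (n_faces, 2, 3)
    # 2x2 first fundamental form per face: g = J J^T
    return one_forms @ np.swapaxes(one_forms, -1, -2)   # (n_faces, 2, 2)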
Check that the energy of a path of surfaces is positive at each timestep. | def test_path_energy_per_time_is_positive(
self, space, a0, a1, b1, c1, d1, a2, path, atol
):
n_times = len(path)
space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2)
energy = space.metric.path_energy_per_time(path)
self.assertAllEqual(energy.shape, (n_times - 1, 1))
result = gs.all(energy > -1 * atol)
self.assertTrue(result)
expected_shape = (2, n_times - 1, 1)
path = gs.array([path, path])
energy = space.metric.path_energy_per_time(path)
self.assertAllEqual(energy.shape, expected_shape)
result = gs.all(energy > -1 * atol)
self.assertTrue(result) | [
"def test_path_energy_is_positive(self, space, a0, a1, b1, c1, d1, a2, path, atol):\n space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2)\n\n energy = space.metric.path_energy(path)\n self.assertAllEqual(energy.shape, ())\n result = gs.all(energy > -1 * atol)\n self.assertTrue(result)\n\n path = gs.array([path, path])\n energy = space.metric.path_energy(path)\n self.assertAllEqual(energy.shape, (2,))\n result = gs.all(energy > -1 * atol)\n self.assertTrue(result)",
"def testfreeenergy(self) -> None:\r\n assert round(abs(0.14339635634084155 - self.data.freeenergy), self.freeenergy_places) == 0",
"def testfreeenergy(self) -> None:\r\n assert round(abs(-381.88826585 - self.data.freeenergy), self.freeenergy_places) == 0",
"def isstationary(self):\n if np.all(np.abs(self.arroots) > 1.0):\n return True\n else:\n return False",
"def check_ts(self, log=True):\n if any([spc.e0 is None for spc in self.r_species + self.p_species + [self.ts_species]]):\n logging.error(\"Could not get E0's of all species participating in reaction {0}. Cannot check TS E0.\".format(\n self.label))\n return True\n r_e0 = sum([spc.e0 for spc in self.r_species])\n p_e0 = sum([spc.e0 for spc in self.p_species])\n if self.ts_species.e0 < r_e0 or self.ts_species.e0 < p_e0:\n if log:\n logging.error('TS of reaction {0} has a lower E0 value than expected:\\nReactants: {1} kJ/mol\\nTS:'\n ' {2} kJ/mol\\nProducts: {3} kJ/mol'.format(self.label, r_e0, self.ts_species.e0, p_e0))\n return False\n if log:\n logging.info('Reaction {0} has the following path energies:\\nReactants: {1} kJ/mol'\n '\\nTS: {2} kJ/mol\\nProducts: {3} kJ/mol'.format(self.label, r_e0, self.ts_species.e0, p_e0))\n return True",
"def test_conjecture_4():\n return all(\n is_e_positive(csf[path])\n for path in csf\n )",
"def test_volume_surface_empty(self):\n for k in (0, -1, 1, 1.75, 0.325, 1/7, -1.75, -0.325, -1/7):\n s = space(fake_curvature=k) \n for name in ('sphere_s1', 'sphere_v2', 'sphere_s2', 'sphere_v3'):\n self.assertTrue(getattr(s, name)(0) == 0)",
"def test_negative_electrode_potential_profile(self):\n np.testing.assert_array_almost_equal(self.phi_s_n(self.t, x=0), 0, decimal=5)",
"def check_quantities(self):\n \n try:\n self.veg_diameter = self.domain.quantities['veg_diameter'].centroid_values\n self.veg_spacing = self.domain.quantities['veg_spacing'].centroid_values\n \n self.veg = num.zeros(self.depth.shape)\n self.veg[self.veg_spacing > 0] = (self.veg_diameter[self.veg_spacing > 0] /\n self.veg_spacing[self.veg_spacing > 0]**2)\n \n self.ad = self.veg * self.veg_diameter\n self.calculate_drag_coefficient()\n \n self.quantity_flag = True\n \n except:\n \n print 'Vegetation quantities not yet defined. Continuing without veg'",
"def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False",
"def is_edge_phase(x, x_last):\n _x = x/(2*np.pi)\n _x = round(_x - round(_x), 5)\n _x_last = x_last/(2*np.pi)\n _x_last = round(_x_last - round(_x_last), 5)\n if _x == 0.0 or (_x_last < 0.0 and _x > 0.0):\n return True\n else:\n return False",
"def min_energy_storage_rule(_m, g, y, s, t):\r\n\r\n return - m.q[g, y, s, t] <= 0",
"def run_one_step(self, dt):\n self.tldiffusion(dt)\n\n # Test code stability for timestep dt\n # Raise unstability error if local slope is reversed by erosion\n # and deposition during a timestep dt\n elev_dif = self.elev - self.elev[self.receiver]\n s = elev_dif[np.where(self.grid.at_node[\"flow__sink_flag\"] == 0)]\n if np.any(s < -1) is True:\n raise ValueError(\n \"The component is unstable\" \" for such a large timestep \" \"on this grid\"\n )\n else:\n pass",
"def test_valid_times(self):\n self.assertGreaterEqual(len(self.traj.t), 1)\n\n self.assertTrue(np.all(np.logical_and(self.traj.t >= 0.0,\n self.traj.t <=\n np.float(self.time))),\n \"Invalid time array.\")",
"def negative_paths_available(times, row):\n neg_paths = [i for i, j in enumerate(times[row]) if j < 0]\n return neg_paths",
"def _is_negative_orthant(w: WorkloadSpace, eps: float = 1e-6) -> bool:\n # casting to bool to satisfy mypy disagreement with type np.bool_\n return bool(np.all(w <= eps) and np.any(w < -eps))",
"def is_equidistant(self) -> bool:\n if len(self.time) < 3:\n return True\n return len(self.time.to_series().diff().dropna().unique()) == 1",
"def _check_spd(matrix):\n if not is_spd(matrix, decimal=7):\n raise ValueError(\"Expected a symmetric positive definite matrix.\")",
"def check_ephem():\n import subprocess\n import astropy.time as t\n import astropy.units as u\n \n fits = 'ic8n04wpq_flt.fits.gz'\n \n files = glob.glob('ic8n07*flt.fits.gz')\n tstart = []\n tend = []\n for fits in files:\n if 'gz' in fits:\n p = subprocess.Popen('gunzip -c %s | dfits - | fitsort EXPSTART EXPEND | tail -1 ' %(fits), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n expstart, expend = np.cast[float](stdout.split())\n else:\n head = pyfits.getheader(fits, ext=0)\n expstart, expend = head['EXPSTART'], head['EXPEND']\n #\n tstart.append(expstart)\n tend.append(expend)\n \n tstart = t.Time(tstart, format='mjd', scale='utc')\n tend = t.Time(tend, format='mjd', scale='utc')\n #plt.plot_date(texp.plot_date, np.ones(2), color='blue', alpha=0.5, linestyle='-', linewidth=4)\n \n ### Read SHADOW ephemeris\n e_ttag, e_d, e_item, e_comment = np.loadtxt('shadow_ephem.dat', unpack=True, dtype=str)\n\n ttag = []\n for tt in e_ttag:\n ttag.append('2013:'+tt)\n \n tshadow = t.Time(ttag, scale='utc', format='yday')\n i = e_comment[0] == 'EXIT'\n tshad_entry = tshadow[i::2]\n tshad_exit = tshadow[i+1::2]\n \n dt = t.TimeDelta(120*u.second)\n \n ### Show exposures\n NEXP = len(tstart)\n for i in range(NEXP):\n texp = t.Time([tstart[i], tend[i]])\n plt.plot_date(texp.plot_date, np.ones(2)*10, color='blue', alpha=0.5, linestyle='-', linewidth=1)\n plt.fill_between(texp.plot_date, 0.5*np.ones(2), 1.5*np.ones(2), color='blue', alpha=0.5)\n \n plt.gcf().autofmt_xdate()\n \n ### Show SHADOW\n dt = t.TimeDelta(2*u.hour)\n ok = (tshad_entry > (tstart[0]-dt)) & (tshad_entry < (tend[-1]+dt))\n \n ix = np.arange(len(ok))[ok]\n for i in ix:\n tshad = t.Time([tshad_entry[i], tshad_exit[i]])\n #plt.plot_date(tshad.plot_date, np.ones(2)+1, color='black', alpha=0.5, linestyle='-', linewidth=1)\n plt.fill_between(tshad.plot_date, 1.6*np.ones(2), 2.1*np.ones(2), color='black', alpha=0.5)\n \n ### Set xlimits\n dt = t.TimeDelta(10*u.minute)\n xlim = t.Time([tstart[0]-dt, tend[-1]+dt])\n plt.xlim(xlim.plot_date)\n \n plt.ylim(0,2.6)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that the total energy of a path of surfaces is positive. | def test_path_energy_is_positive(self, space, a0, a1, b1, c1, d1, a2, path, atol):
space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2)
energy = space.metric.path_energy(path)
self.assertAllEqual(energy.shape, ())
result = gs.all(energy > -1 * atol)
self.assertTrue(result)
path = gs.array([path, path])
energy = space.metric.path_energy(path)
self.assertAllEqual(energy.shape, (2,))
result = gs.all(energy > -1 * atol)
self.assertTrue(result) | [
"def test_path_energy_per_time_is_positive(\n self, space, a0, a1, b1, c1, d1, a2, path, atol\n ):\n n_times = len(path)\n space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2)\n\n energy = space.metric.path_energy_per_time(path)\n\n self.assertAllEqual(energy.shape, (n_times - 1, 1))\n result = gs.all(energy > -1 * atol)\n self.assertTrue(result)\n\n expected_shape = (2, n_times - 1, 1)\n path = gs.array([path, path])\n energy = space.metric.path_energy_per_time(path)\n self.assertAllEqual(energy.shape, expected_shape)\n result = gs.all(energy > -1 * atol)\n self.assertTrue(result)",
"def testfreeenergy(self) -> None:\r\n assert round(abs(0.14339635634084155 - self.data.freeenergy), self.freeenergy_places) == 0",
"def testfreeenergy(self) -> None:\r\n assert round(abs(-381.88826585 - self.data.freeenergy), self.freeenergy_places) == 0",
"def isstationary(self):\n if np.all(np.abs(self.arroots) > 1.0):\n return True\n else:\n return False",
"def check_ts(self, log=True):\n if any([spc.e0 is None for spc in self.r_species + self.p_species + [self.ts_species]]):\n logging.error(\"Could not get E0's of all species participating in reaction {0}. Cannot check TS E0.\".format(\n self.label))\n return True\n r_e0 = sum([spc.e0 for spc in self.r_species])\n p_e0 = sum([spc.e0 for spc in self.p_species])\n if self.ts_species.e0 < r_e0 or self.ts_species.e0 < p_e0:\n if log:\n logging.error('TS of reaction {0} has a lower E0 value than expected:\\nReactants: {1} kJ/mol\\nTS:'\n ' {2} kJ/mol\\nProducts: {3} kJ/mol'.format(self.label, r_e0, self.ts_species.e0, p_e0))\n return False\n if log:\n logging.info('Reaction {0} has the following path energies:\\nReactants: {1} kJ/mol'\n '\\nTS: {2} kJ/mol\\nProducts: {3} kJ/mol'.format(self.label, r_e0, self.ts_species.e0, p_e0))\n return True",
"def test_conjecture_4():\n return all(\n is_e_positive(csf[path])\n for path in csf\n )",
"def test_volume_surface_empty(self):\n for k in (0, -1, 1, 1.75, 0.325, 1/7, -1.75, -0.325, -1/7):\n s = space(fake_curvature=k) \n for name in ('sphere_s1', 'sphere_v2', 'sphere_s2', 'sphere_v3'):\n self.assertTrue(getattr(s, name)(0) == 0)",
"def test_negative_electrode_potential_profile(self):\n np.testing.assert_array_almost_equal(self.phi_s_n(self.t, x=0), 0, decimal=5)",
"def check_quantities(self):\n \n try:\n self.veg_diameter = self.domain.quantities['veg_diameter'].centroid_values\n self.veg_spacing = self.domain.quantities['veg_spacing'].centroid_values\n \n self.veg = num.zeros(self.depth.shape)\n self.veg[self.veg_spacing > 0] = (self.veg_diameter[self.veg_spacing > 0] /\n self.veg_spacing[self.veg_spacing > 0]**2)\n \n self.ad = self.veg * self.veg_diameter\n self.calculate_drag_coefficient()\n \n self.quantity_flag = True\n \n except:\n \n print 'Vegetation quantities not yet defined. Continuing without veg'",
"def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False",
"def is_edge_phase(x, x_last):\n _x = x/(2*np.pi)\n _x = round(_x - round(_x), 5)\n _x_last = x_last/(2*np.pi)\n _x_last = round(_x_last - round(_x_last), 5)\n if _x == 0.0 or (_x_last < 0.0 and _x > 0.0):\n return True\n else:\n return False",
"def min_energy_storage_rule(_m, g, y, s, t):\r\n\r\n return - m.q[g, y, s, t] <= 0",
"def run_one_step(self, dt):\n self.tldiffusion(dt)\n\n # Test code stability for timestep dt\n # Raise unstability error if local slope is reversed by erosion\n # and deposition during a timestep dt\n elev_dif = self.elev - self.elev[self.receiver]\n s = elev_dif[np.where(self.grid.at_node[\"flow__sink_flag\"] == 0)]\n if np.any(s < -1) is True:\n raise ValueError(\n \"The component is unstable\" \" for such a large timestep \" \"on this grid\"\n )\n else:\n pass",
"def test_valid_times(self):\n self.assertGreaterEqual(len(self.traj.t), 1)\n\n self.assertTrue(np.all(np.logical_and(self.traj.t >= 0.0,\n self.traj.t <=\n np.float(self.time))),\n \"Invalid time array.\")",
"def negative_paths_available(times, row):\n neg_paths = [i for i, j in enumerate(times[row]) if j < 0]\n return neg_paths",
"def _is_negative_orthant(w: WorkloadSpace, eps: float = 1e-6) -> bool:\n # casting to bool to satisfy mypy disagreement with type np.bool_\n return bool(np.all(w <= eps) and np.any(w < -eps))",
"def is_equidistant(self) -> bool:\n if len(self.time) < 3:\n return True\n return len(self.time.to_series().diff().dropna().unique()) == 1",
"def _check_spd(matrix):\n if not is_spd(matrix, decimal=7):\n raise ValueError(\"Expected a symmetric positive definite matrix.\")",
"def check_ephem():\n import subprocess\n import astropy.time as t\n import astropy.units as u\n \n fits = 'ic8n04wpq_flt.fits.gz'\n \n files = glob.glob('ic8n07*flt.fits.gz')\n tstart = []\n tend = []\n for fits in files:\n if 'gz' in fits:\n p = subprocess.Popen('gunzip -c %s | dfits - | fitsort EXPSTART EXPEND | tail -1 ' %(fits), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n expstart, expend = np.cast[float](stdout.split())\n else:\n head = pyfits.getheader(fits, ext=0)\n expstart, expend = head['EXPSTART'], head['EXPEND']\n #\n tstart.append(expstart)\n tend.append(expend)\n \n tstart = t.Time(tstart, format='mjd', scale='utc')\n tend = t.Time(tend, format='mjd', scale='utc')\n #plt.plot_date(texp.plot_date, np.ones(2), color='blue', alpha=0.5, linestyle='-', linewidth=4)\n \n ### Read SHADOW ephemeris\n e_ttag, e_d, e_item, e_comment = np.loadtxt('shadow_ephem.dat', unpack=True, dtype=str)\n\n ttag = []\n for tt in e_ttag:\n ttag.append('2013:'+tt)\n \n tshadow = t.Time(ttag, scale='utc', format='yday')\n i = e_comment[0] == 'EXIT'\n tshad_entry = tshadow[i::2]\n tshad_exit = tshadow[i+1::2]\n \n dt = t.TimeDelta(120*u.second)\n \n ### Show exposures\n NEXP = len(tstart)\n for i in range(NEXP):\n texp = t.Time([tstart[i], tend[i]])\n plt.plot_date(texp.plot_date, np.ones(2)*10, color='blue', alpha=0.5, linestyle='-', linewidth=1)\n plt.fill_between(texp.plot_date, 0.5*np.ones(2), 1.5*np.ones(2), color='blue', alpha=0.5)\n \n plt.gcf().autofmt_xdate()\n \n ### Show SHADOW\n dt = t.TimeDelta(2*u.hour)\n ok = (tshad_entry > (tstart[0]-dt)) & (tshad_entry < (tend[-1]+dt))\n \n ix = np.arange(len(ok))[ok]\n for i in ix:\n tshad = t.Time([tshad_entry[i], tshad_exit[i]])\n #plt.plot_date(tshad.plot_date, np.ones(2)+1, color='black', alpha=0.5, linestyle='-', linewidth=1)\n plt.fill_between(tshad.plot_date, 1.6*np.ones(2), 2.1*np.ones(2), color='black', alpha=0.5)\n \n ### Set xlimits\n dt = t.TimeDelta(10*u.minute)\n xlim = t.Time([tstart[0]-dt, tend[-1]+dt])\n plt.xlim(xlim.plot_date)\n \n plt.ylim(0,2.6)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
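A rough sketch of the discrete path-energy bookkeeping that the two rows above exercise. The elastic metric with parameters a0, a1, b1, c1, d1, a2 is replaced here by a plain Euclidean squared norm, so only the output shapes ((n_times - 1, 1) per-time, scalar total) and the non-negativity are meant to carry over.

import numpy as np

def path_energy_per_time_sketch(path):
    # path: (n_times, n_vertices, 3) discretized path of surfaces
    n_times = len(path)
    diffs = path[1:] - path[:-1]
    sq_norms = np.sum(diffs.reshape(n_times - 1, -1) ** 2, axis=-1)
    # scale by (n_times - 1) so the sum approximates the continuous energy integral
    return ((n_times - 1) * sq_norms).reshape(n_times - 1, 1)

def path_energy_sketch(path):
    return np.sum(path_energy_per_time_sketch(path))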
Goes through each neuron; each neuron has a chance of mutating equal to the learning rate of the network. There is a 20% chance of a physical mutation. | def mutate(self):
#First, mutate masses
for neuronNum in range(self.neuronCounter - 1):
if self.learningRate > random.random():
self.neurons[neuronNum].mutate()
else:
continue
#Now determine physical mutations
if random.random() < 0.2:
try:
physMutation = random.choice(['a','l','c'])
if physMutation == 'a':
self.addNeuron(random.choice([0,1,2]))
elif physMutation == 'l':
begin = random.randint(1,self.neuronCounter - 1)
end = random.randint(1, self.neuronCounter - 1)
self.link(begin, end)
else:
begin = random.randint(1,self.neuronCounter - 1)
end = random.choice(self.neurons[begin].outDic.keys())
self.cut(begin, end)
except:
return self
return self | [
"def weight_mutate(self):\n\n starting_pol = int(self.n_elites)\n while starting_pol < self.pop_size:\n # Output bias weights\n for w in range(self.n_outputs):\n rnum = random.uniform(0, 1)\n if rnum <= self.mut_chance:\n weight = self.population[\"pop{0}\".format(starting_pol)][\"b_out\"][w]\n mutation = np.random.normal(0, self.mut_rate) * weight\n self.population[\"pop{0}\".format(starting_pol)][\"b_out\"][w] += mutation\n\n # Output layer weights\n for w in range(self.mem_block_size):\n rnum = random.uniform(0, 1)\n if rnum <= self.mut_chance:\n weight = self.population[\"pop{0}\".format(starting_pol)][\"p_out\"][w]\n mutation = (np.random.normal(0, self.mut_rate)) * weight\n self.population[\"pop{0}\".format(starting_pol)][\"p_out\"][w] += mutation\n\n starting_pol += 1\n\n self.mutate_igate()\n self.mutate_rgate()\n self.mutate_wgate()\n self.mutate_block()\n self.mutate_mem_weights()",
"def mutate_nonstructural(self):\n # TODO consider clamping weights and biases?\n for link in self.gene_links:\n # Disable/Enable links\n if event(link_toggle_prob): # Chance of toggling link\n link.enabled = True if link.enabled is False else False\n if link.enabled is False and event(link_enable_prob): # Chance of enabling a disabled link\n link.enabled = True\n # Mutate weights\n if event(weight_mutate_rate):\n if event(weight_replace_rate): # replace with random weight\n link.weight = random.uniform(weight_init_min, weight_init_max)\n else: # adjust weight\n link.weight += random.uniform(-uniform_weight_scale, uniform_weight_scale)\n for node in self.gene_nodes:\n # Mutate bias\n if event(bias_mutate_rate):\n if event(bias_replace_rate): # replace with random bias\n node.bias = random.uniform(bias_init_min, bias_init_max)\n else: # adjust bias\n node.bias += random.uniform(-uniform_weight_scale, uniform_weight_scale)\n # Mutate activation func\n if node.can_modify:\n if event(change_act_prob):\n node.act_func = self.act_set.get_random_activation_func()\n # reinit freq amp and vshift when act func changes\n if node.act_func.__name__[0] == \"g\":\n node.freq = random.uniform(-gauss_freq_range, gauss_freq_range)\n node.amp = random.uniform(-func_amp_range, func_amp_range)\n node.vshift = random.uniform(-gauss_vshift_range, gauss_vshift_range)\n elif node.act_func.__name__[0] == \"s\":\n node.freq = random.uniform(-sin_freq_range, sin_freq_range)\n node.amp = random.uniform(-func_amp_range, func_amp_range)\n node.vshift = random.uniform(-sin_vshift_range, sin_vshift_range)\n # Adjust freq amp and vshift of activation function\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\":\n node.freq += random.uniform(-guass_freq_adjust, guass_freq_adjust)\n elif node.act_func.__name__[0] == \"s\":\n node.freq += random.uniform(-sin_freq_adjust, sin_freq_adjust)\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\" or node.act_func.__name__[0] == \"s\":\n node.amp += random.uniform(-func_amp_adjust, func_amp_adjust)\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\" or node.act_func.__name__[0] == \"s\":\n node.vshift += random.uniform(-func_vshift_adjust, func_vshift_adjust)\n # Mutate substrate width/height rectangles\n if event(width_mutate_prob):\n if event(0.5):\n self.substrate_width += 1\n elif self.substrate_width > 1:\n self.substrate_width -= 1\n if event(height_mutate_prob):\n if event(0.5):\n self.substrate_height += 1\n elif self.substrate_height > 1:\n self.substrate_height -= 1\n \"\"\" ES-HyperNeat - no longer used\n # Mutate QuadTree variance\n if event(var_mutate_prob):\n self.var_thresh += np.random.normal(scale=gauss_var_scale)\n self.var_thresh = self.var_thresh if self.var_thresh > 0 else 0\n # Mutate QuadTree band thresh\n if event(band_mutate_prob):\n self.band_thresh += np.random.normal(scale=gauss_band_scale)\n self.band_thresh = self.band_thresh if self.band_thresh > 0 else 0\n \"\"\"",
"def mutate(self, probability, rate):\n for i in range(self.T):\n shape = np.shape(self.weights[i])\n weights = self.weights[i].flatten()\n for j in range(len(weights)):\n if np.random.uniform(0, 1) < probability:\n weights[j] = weights[j] + rate * np.random.normal(0, 1 / np.sqrt(shape[0]))\n self.weights[i] = weights.reshape(shape)\n for j in range(len(self.biases[i])):\n if np.random.uniform(0, 1) < probability:\n self.biases[i][j] = self.biases[i][j] + rate * np.random.normal(0, 1)",
"def mutate(self, probability, rate):\n for i in range(self.number_of_transitions):\n shape = np.shape(self.weights[i])\n size = self.weights[i].size\n weights = self.weights[i].flatten()\n for j in range(len(weights)):\n if np.random.uniform(0, 1) < probability:\n weights[j] = weights[j] + rate * np.random.normal(0, 1 / np.sqrt(shape[0]))\n self.weights[i] = weights.reshape(shape)\n for j in range(len(self.biases[i])):\n if np.random.uniform(0, 1) < probability:\n self.biases[i][j] = self.biases[i][j] + rate * np.random.normal(0, 1)",
"def mutation_process(self, new_population):\n for s in sample(new_population, int(round(Problem.MUTATION_RATE*Problem.NB_POPULATION))):\n s.mutate()",
"def mutate(self):\n\n if len(self.genes) < 250:\n for g in self.genes:\n\n if MUTATION_CHANCE < random.random(): # random.random() gives float in [0,1)\n g.mutate()\n\n else:\n k = int(MUTATION_CHANCE*len(self.genes))\n for g in random.sample(self.genes,int(k)): #int(k)\n g.mutate()\n\n #To add random gene\n if ADD_GENE_CHANCE < random.random():\n self.genes.append(Gene(self.size)) #Call to Gene to add to genes list\n\n #To randomly remove genes\n\n if REM_GENE_CHANCE < random.random() and len(self.genes)>0:\n self.genes.remove(random.choice(self.genes))",
"def _mutate(self, individuals):\n for cur in individuals:\n if random.random() < self.mutation_probability:\n self.op.mutate(cur['individual'])\n cur['fitness'] = None",
"def mutate1(self, probability):\n for i in range(self.number_of_transitions):\n shape = np.shape(self.weights[i])\n size = self.weights[i].size\n weights = self.weights[i].flatten()\n for j in range(len(weights)):\n if np.random.uniform(0, 1) < probability:\n weights[j] = np.random.normal(0, 1 / np.sqrt(shape[0]))\n self.weights[i] = weights.reshape(shape)\n for j in range(len(self.biases[i])):\n if np.random.uniform(0, 1) < probability:\n self.biases[i][j] = np.random.normal(0, 1)",
"def mutate_all(self):\n for indiv in self.individuals[self.elite_count:]:\n self.update_mutation_factor()\n new_genotype = indiv.genotype\n for j in range(self.num_vars):\n if random.random() < self.mut_rate:\n offset = (2*(random.random()-0.5)\n *self.mutation_factor\n *(self.ub[j]-self.lb[j]))\n # print \"offset:\",offset\n # if offset > 0 and offset < 1:\n # offset = 1\n # elif offset < 0 and offset > -1:\n # offset = -1\n # else:\n # offset = int(offset)\n new_genotype[j] = int(round(new_genotype[j]+offset))\n new_genotype[j] %= (self.fitness_function.ub[j])\n if new_genotype[j] > self.ub[j]:\n new_genotype[j] = self.ub[j]\n elif new_genotype[j] < self.lb[j]:\n new_genotype[j] = self.lb[j]\n indiv.genotype = new_genotype\n for i in range(len(self.individuals)):\n for j in range(i):\n if self.individuals[i].genotype == self.individuals[j].genotype:\n self.individuals[i] = Individual(self.fitness_function)\n break",
"def mutate(self):\n\n #print('modifalpha', self.alpha)\n modifalpha = self.alpha+np.random.normal(0,0.01,self.noms); # Determining std dev for mutation can be a parameteric study\n #print('modifalpha', modifalpha)\n pusum = np.sum(modifalpha);\n if pusum == 0: # Then complete weightage assigned to target model alone\n self.alpha = np.zeros(self.noms)\n self.alpha[-1] = 1\n else:\n self.alpha = modifalpha/pusum",
"def _mutate(self, individual: List[List[float]]) -> None:\n for i in range(len(individual[0])):\n if random.random() < self.config.gene_mutation_probability:\n individual[0][i] = np.random.normal(individual[0][i])",
"def __mutate(self, chromosomes, mutation_probability):\n\n for chromosome in chromosomes:\n for i in range(self.chromosome_size):\n if random.randint(1, 100) <= mutation_probability:\n logging.getLogger().debug(\n \"---> Mutation in Chromosome \" + str(\n chromosome.chromosome_id) + \"in gene \" + str(i)\n + \" <---\")\n chromosome.genes[i] = random.choice(self.gene_pool)",
"def mutate(genome, config):\n\n if utils.rand_uni_val() < config.CONNECTION_MUTATION_RATE:\n for c_gene in genome.connection_genes:\n if utils.rand_uni_val() < config.CONNECTION_PERTURBATION_RATE:\n perturb = utils.rand_uni_val() * random.choice([1, -1])\n c_gene.weight += perturb\n else:\n c_gene.set_rand_weight()\n\n if utils.rand_uni_val() < config.ADD_NODE_MUTATION_RATE:\n genome.add_node_mutation()\n\n if utils.rand_uni_val() < config.ADD_CONNECTION_MUTATION_RATE:\n genome.add_connection_mutation()",
"def mutate(self, model):\n\n for i in range(self.numPredictors):\n if np.random.random() < self.mutationRate:\n model[i] = not model[i]\n\n return",
"def mutation(self, mut_rate):\n mut_rand = 1 - 2 * (np.random.rand(*self.array.shape) < mut_rate)\n self.array = self.array * mut_rand",
"def explore(self):\n for k, v in self._hyperparameters.items():\n mutation = random.choice([0.8, 1.2])\n self._hyperparameters[k] = mutation * v",
"def __init__(self, neurons, random=True, silent=False):\n\n self.neurons = neurons\n self.silent = silent\n\n # Set weights\n lastneuron = 0\n self.weights = []\n\n for neuron in self.neurons:\n if lastneuron != 0:\n x = np.random.rand(neuron, lastneuron) * 2.0 - 1.0\n if not random:\n for y in range(len(x)):\n for z in range(len(x[y])):\n x[y][z] = 0.0\n self.weights.append(x)\n lastneuron = neuron",
"def mutate(self): \n for atom in self.dict:\n\n randNum=random.random()\n if randNum>self.rateCromo:\n continue #rateChromo is the mutation acceptance rate for the entire chromosome\n\n for idx in range(len(self.dict[atom])):\n #idx is the index of each chromosome\n nums=self.dict[atom][idx][1].split(',')\n var=self.changePar[atom][idx][2]\n #self.b.write(str(nums)+'\\t'+str(var)+'\\n')\n for indx in range(len(nums)):\n if random.random()>self.rateGene:\n continue #rateGene is the mutation acceptance rate for some specific gene = a specific parameter\n print('****Mutation!!****: '+ str(self.rateGene))\n\n if '.' in nums[indx]:\n #self.__class__.logs.write('Num: b\\t'+str(nums[indx])+'var:'+str(var)+'\\n')\n nums[indx]=self._randomChooser(float(nums[indx]),var)\n #self.__class__.logs.write('Num: a\\t'+str(self.__class__.ccc)+'\\t'+str(nums[indx])+'\\n')\n #nums[indx]=float(nums[indx])+0.0001\n self.dict[atom][idx][1]=','.join(str(u) for u in nums)\n return self.dict",
"def mutate(self, std_dev):\n for link in self.links:\n link.mutate(std_dev)\n self.bias += np.random.normal(0, std_dev)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if a CLOUD device has already been added. | def is_cloud_device_already_added(self):
for entry in self._async_current_entries():
if entry.unique_id is not None and entry.unique_id == f"{DOMAIN}Cloud":
return True
return False | [
"def has_device(self, device_key):\r\n return self._devices.has_key(device_key)",
"def devicename_exists(devicename):\n # Initialize key variables\n exists = False\n\n # Get information on agent from database\n data = GetDevice(devicename)\n if data.exists() is True:\n exists = True\n\n # Return\n return exists",
"def has_devices(self):\n return len(self.__devices_list) > 0",
"def add_dev_test(self, new_dev):\n if 'cid' in new_dev:\n devices = [self.outlets, self.bulbs, self.switches, self.fans]\n was_found = False\n for dev in chain(*devices):\n if dev.cid == new_dev.get('cid') and\\\n new_dev.get('subDeviceNo', 0) == dev.sub_device_no:\n was_found = True\n break\n if not was_found:\n _LOGGER.debug(\"Adding device - %s\", new_dev)\n return True\n return False",
"def is_found(self):\n return bool(self._get_devices_info())",
"def device_exists(device):\n return os.path.exists('/sys/class/net/%s' % device)",
"def checkServiceAdded(self, name):\n for group in self.pinGroups:\n if group[0] == str(name):\n return True\n\n # otherwise return 0\n return False",
"def chip_add(chip):\n global _chip_list\n\n if _chip_list:\n exists = any(chip.chipset == item.chipset and\n chip.platform == item.platform and\n chip.subtype == item.subtype and\n chip.rev_num == item.rev_num and\n chip.pmic_model0 == item.pmic_model0 and\n chip.pmic_model1 == item.pmic_model1 and\n chip.pmic_model2 == item.pmic_model2 and\n chip.pmic_model3 == item.pmic_model3 for item in _chip_list)\n\n if exists:\n # Duplicated\n return False\n\n _chip_list.append(chip)\n return True",
"def client_exists(self, client_phone_number):\n for client in self.get_client_list():\n if client_phone_number == client.get_phone_number():\n return True\n return False",
"def verify_device_is_connected():\r\n Android.send_adb_command()\r\n open_file = open('adb_device_list.txt', 'r')\r\n temp = open_file.readlines()\r\n open_file.close()\r\n os.remove(\"adb_device_list.txt\")\r\n if len(temp) > 1:\r\n if 'device' in temp[1]:\r\n logging.info('Device is connected to the system')\r\n return True\r\n else:\r\n logging.error('Device is not connected to the system')\r\n return False\r\n else:\r\n logging.error('Device is not connected to the system')\r\n return False",
"def exists_device_node(self, device_node: Path) -> bool:\n try:\n self.get_by_path(device_node)\n except HardwareNotFound:\n return False\n return True",
"def isExistingSameDevice(config_db, deviceName, table):\n settings = config_db.get_table(table)\n for key,values in settings.items():\n if \"remote_device\" in values and deviceName == values[\"remote_device\"]:\n return True\n\n return False",
"def is_existing(self):\n return self.backend.is_existing",
"def is_product_added(self: object) -> bool:\n product_name = self.driver.find_element(*CartPageLocators.IPHONE_PRODUCT_NAME)\n wait = WebDriverWait(self.driver, 10)\n wait.until(expected_conditions.visibility_of_element_located(CartPageLocators.IPHONE_PRODUCT_NAME))\n if product_name.text == \"iPhone\":\n logging.info(\"You added iPhone!\")\n return True\n print('Cart is empty!')\n return False",
"def _is_device_connected(self, device):\n if device.lower() in [x.lower() for x in self.daq.getString('/zi/devices/connected').split(',')]:\n return True\n else:\n return False",
"def idx_device_exists(idx_device):\n # Initialize key variables\n exists = False\n\n # Fix values passed\n if isinstance(idx_device, int) is False:\n idx_device = None\n\n # Get information on agent from database\n data = GetIDXDevice(idx_device)\n if data.exists() is True:\n exists = True\n\n # Return\n return exists",
"def check_existed_did(self):\n for wallet in self.wallet_state_manager.wallets.values():\n if (\n wallet.type() == WalletType.DECENTRALIZED_ID\n and self.did_info.origin_coin.name() == wallet.did_info.origin_coin.name()\n ):\n self.log.warning(f\"DID {self.did_info.origin_coin} already existed, ignore the wallet creation.\")\n raise ValueError(\"Wallet already exists\")",
"def register_client(self, new_client):\n for client in self.get_client_list():\n if client.get_phone_number() == new_client.get_phone_number():\n # print(\"\\Client of given phone number already exists!\\n\")\n return False\n self.get_client_list().append(new_client)\n return True",
"def system_valid(self):\n return self.udev.devices_exist"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load the IMDB reviews dataset. Code adapted from the code for | def load_imdb_dataset():
(x_train, y_train), (x_test, y_test) = imdb.load_data(
path="./datasets", num_words=_IMDB_CONFIG["max_features"])
num_train = _IMDB_CONFIG["num_train"]
x_train, x_val = x_train[:num_train], x_train[num_train:]
y_train, y_val = y_train[:num_train], y_train[num_train:]
def preprocess(x, y, max_length):
x = sequence.pad_sequences(x, maxlen=max_length)
y = onp.array(y)
x = onp.array(x)
return x, y
max_length = _IMDB_CONFIG["max_len"]
x_train, y_train = preprocess(x_train, y_train, max_length=max_length)
x_val, y_val = preprocess(x_val, y_val, max_length=max_length)
x_test, y_test = preprocess(x_test, y_test, max_length=max_length)
data_info = {"num_classes": 2}
return (x_train, y_train), (x_test, y_test), (x_val, y_val), data_info | [
"def load_dataset():\n with open(\"../openreview-dataset/results/authors.json\", \"r\") as f:\n authors = json.load(f)\n\n with open(\"../openreview-dataset/results/papers.json\", \"r\") as f:\n papers = json.load(f)\n\n with open(\"../openreview-dataset/results/reviews.json\", \"r\") as f:\n reviews = json.load(f)\n\n with open(\"../openreview-dataset/results/confs.json\", \"r\") as f:\n confs = json.load(f)\n\n return (authors, papers, reviews, confs)",
"def load_movies_reviews():\n data = pd.read_csv(CSV_PATH + MOVIES_REVIEWS_CSV_NAME).T.to_dict()\n for i in range(len(data)):\n movie_id = Movies.query.filter(Movies.title == data[i]['Title'].strip()).first().id\n review = data[i]['Reviews'].strip()\n rating = float(data[i]['Rating'])*100000\n review_exist = Reviews.query.filter(Reviews.review == review).first()\n if not review_exist:\n db.session.add(Reviews(movie_id=movie_id, review=review, rating=int(rating)))\n db.session.commit()\n db.session.close()\n db.session.close()",
"def load_train_test_imdb_data(data_dir):\r\n\r\n print(\"... IMDB loading \\t\\n\")\r\n data = {}\r\n for split in [\"train\", \"test\"]:\r\n data[split] = []\r\n for sentiment in [\"neg\", \"pos\"]:\r\n score = 1 if sentiment == \"pos\" else 0\r\n\r\n path = os.path.join(data_dir, split, sentiment)\r\n file_names = os.listdir(path)\r\n for f_name in file_names:\r\n with open(os.path.join(path, f_name), encoding=\"latin-1\") as f:\r\n review = f.read()\r\n data[split].append([review, score])\r\n\r\n np.random.shuffle(data[\"train\"]) \r\n\r\n return data[\"train\"], data[\"test\"]",
"def prepare_review_data():\n with open(REVIEW_FILE, 'r') as fread:\n reviews = fread.read()\n with open(LABEL_FILE, 'r') as fread:\n labels = fread.read()\n return reviews, labels",
"def load_reviews(id_reviews=(), load_polarities=False, load_sentences=False, load_words=False, load_deptrees=False):\n from loacore.conf import DB_TIMEOUT\n reviews = []\n conn = sql.connect(DB_PATH, timeout=DB_TIMEOUT)\n c = conn.cursor()\n if len(id_reviews) > 0:\n for id_review in id_reviews:\n c.execute(\"SELECT ID_Review, Review.ID_File, File_Index, Review \"\n \"FROM Review WHERE ID_Review = \" + str(id_review) + \" ORDER BY File_Index\")\n result = c.fetchone()\n if result is not None:\n reviews.append(Review(result[0], result[1], result[2], result[3]))\n else:\n c.execute(\"SELECT ID_Review, Review.ID_File, File_Index, Review FROM Review\")\n results = c.fetchall()\n for result in results:\n reviews.append(Review(result[0], result[1], result[2], result[3]))\n\n conn.close()\n\n if load_polarities:\n # Load Polarities\n import loacore.load.polarity_load as polarity_load\n polarity_load.load_polarities_in_reviews(reviews)\n\n if load_sentences:\n # Load Sentences\n import loacore.load.sentence_load as sentence_load\n sentence_load.load_sentences_in_reviews(reviews, load_words=load_words, load_deptrees=load_deptrees)\n\n return reviews",
"def Preprocess_IMDB(path=\"datasets/raw/aclImdb/\"):\n output_path = \"datasets/preprocessed/IMDB_Data\"\n\n neg = glob.glob(os.path.join(path, 'test', 'neg', '*'))\n neg += glob.glob(os.path.join(path, 'train', 'neg', '*'))\n neg_data = [io.open(fname, 'r', encoding='utf-8').readlines() for fname in neg]\n neg_data = [sentence[0] for sentence in neg_data]\n\n\n pos = glob.glob(os.path.join(path, 'test', 'pos', '*'))\n pos += glob.glob(os.path.join(path, 'train', 'pos', '*'))\n pos_data = [io.open(fname, 'r', encoding='utf-8').readlines() for fname in pos]\n pos_data = [sentence[0] for sentence in pos_data]\n\n labels = compute_labels(pos_data, neg_data)\n text, labels = shuffle_data(pos_data + neg_data, labels)\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n # split data in 70%/20%/10% train/test/dev split\n train_len = ((len(text) / 10) * 7) + (len(text) % 10)\n test_len = (len(text) / 10) * 2\n dev_len = len(text) / 10\n\n trX = text[0:train_len]\n teX = text[train_len:train_len + test_len]\n vaX = text[train_len + test_len: train_len + test_len + dev_len]\n\n trY = labels[0:train_len]\n teY = labels[train_len:train_len + test_len]\n vaY = labels[train_len + test_len: train_len + test_len + dev_len]\n\n dat1 = pd.DataFrame({'label': trY})\n dat2 = pd.DataFrame({'sentence': trX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"train_binary_sent.csv\"), encoding='utf-8', index=False)\n\n\n dat1 = pd.DataFrame({'label': teY})\n dat2 = pd.DataFrame({'sentence': teX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"test_binary_sent.csv\"), encoding='utf-8', index=False)\n\n dat1 = pd.DataFrame({'label': vaY})\n dat2 = pd.DataFrame({'sentence': vaX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"dev_binary_sent.csv\"), encoding='utf-8', index=False)",
"def load_ratings():\n filepath = \"./seed_data/u.data\"\n ratings = open(filepath)\n\n for rating in ratings:\n rating = rating.rstrip().split()\n\n db_rating = Rating(movie_id=rating[1], user_id=rating[0],\n score=rating[2])\n db.session.add(db_rating)\n\n db.session.commit()",
"def load_reviews_data(reviews_data_path):\r\n with open(reviews_data_path) as csvfile:\r\n return list(csv.DictReader(csvfile, delimiter='\\t'))\r\n raise NotImplementedError",
"def load_data(glove_dict):\n print(\"loading data\")\n filename = check_file('reviews.tar.gz',14839260)\n extract_data(filename)\n dir= os.path.dirname(__file__)\n\n files= glob.glob(os.path.join(dir,\n 'data2/pos/*'))\n files.extend(glob.glob(os.path.join(dir,\n 'data2/neg/*')))\n\n data = np.empty([total_reviews, review_word_limit])\n\n file_idx = 0;\n for f in files:\n with open(f, 'r') as openf:\n s = openf.read()\n s = clean_line(s)\n words = s.split(\" \")\n # for word in words:\n word_count = 0\n while word_count < review_word_limit:\n if words:\n word = words.pop(0)\n if(word in string.punctuation or any(char.isdigit() for char in word)):\n continue\n data[file_idx][word_count] = glove_dict.get(word, 0)\n else:\n data[file_idx][word_count] = 0\n word_count += 1\n file_idx += 1\n print(\"file\", file_idx, \"done\")\n print(data[:5])\n # np.save(\"data\", data)\n return data",
"def load_reviews(self):\n\n self.reviews = defaultdict(dict)\n np.random.seed(7)\n # populate reviews dict\n for review_type in [\"positive\", \"negative\"]:\n for cat in self.categories:\n file_path = os.path.join(\n self._init_file_dir, \"reviews/{}/{}.review\".format(cat, review_type)\n )\n reviews_raw = BeautifulSoup(\n open(file_path).read(), features=\"html.parser\"\n )\n self.reviews[review_type][cat] = [\n self.strip_non_printable(review.text)\n for review in reviews_raw.find_all(\"review_text\")\n ]\n\n # merge all categories into one\n self.reviews[review_type] = list(\n chain(*list(self.reviews[review_type].values()))\n )\n np.random.shuffle(self.reviews[review_type], seed=7)\n\n # save tokenized reviews to cache to speedup build process\n with open(self.cached_path, \"w\") as fp:\n json.dump(self.reviews, fp)",
"def imdb_dataset(directory='data/', data_type='imdb', preprocessing=False, fine_grained=False,\n verbose=False, text_length=5000, share_id='1X7YI7nDpKEPio2J-eH7uWCiWoiw2jbSP'):\n\n # other dataset have been set before, only imdb should be set here\n if preprocessing and data_type == 'imdb':\n share_id = '1naVVErkRQNNJXTA6X_X6YrJY0jPOeuPh'\n\n if preprocessing:\n gdd.download_file_from_google_drive(share_id, data_type + '_preprocessed.zip', directory + data_type)\n if fine_grained:\n train_file, test_file = 'preprocessed_fine_grained_train.csv', 'preprocessed_fine_grained_test.csv'\n else:\n train_file, test_file = 'preprocessed_train.csv', 'preprocessed_test.csv'\n else:\n gdd.download_file_from_google_drive(share_id, data_type + '_original.zip', directory + data_type)\n if fine_grained:\n train_file, test_file = 'original_fine_grained_train.csv', 'original_fine_grained_test.csv'\n else:\n train_file, test_file = 'original_train.csv', 'original_test.csv'\n\n if verbose:\n min_train_length, avg_train_length, max_train_length = sys.maxsize, 0, 0\n min_test_length, avg_test_length, max_test_length = sys.maxsize, 0, 0\n\n ret = []\n for file_name in [train_file, test_file]:\n csv_file = np.array(pd.read_csv(os.path.join(directory, data_type, file_name), header=None)).tolist()\n examples = []\n for label, text in csv_file:\n label, text = str(label), str(text)\n if preprocessing:\n if len(text.split()) > text_length:\n text = ' '.join(text.split()[:text_length])\n elif preprocessing is None:\n text = text_preprocess(text, data_type)\n if len(text.split()) == 0:\n continue\n if verbose:\n if file_name == train_file:\n avg_train_length += len(text.split())\n if len(text.split()) > max_train_length:\n max_train_length = len(text.split())\n if len(text.split()) < min_train_length:\n min_train_length = len(text.split())\n if file_name == test_file:\n avg_test_length += len(text.split())\n if len(text.split()) > max_test_length:\n max_test_length = len(text.split())\n if len(text.split()) < min_test_length:\n min_test_length = len(text.split())\n examples.append({'label': label, 'text': text})\n ret.append(Dataset(examples))\n\n if verbose:\n print('[!] train samples: {} length--(min: {}, avg: {}, max: {})'.\n format(len(ret[0]), min_train_length, round(avg_train_length / len(ret[0])), max_train_length))\n print('[!] test samples: {} length--(min: {}, avg: {}, max: {})'.\n format(len(ret[1]), min_test_length, round(avg_test_length / len(ret[1])), max_test_length))\n return tuple(ret)",
"def loadDataSet():\t\r\n\t#词条切分后的文档集合\r\n\tpostingList=[['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],\r\n\t\t\t\t['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],\r\n\t\t\t\t['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],\r\n\t\t\t\t['stop', 'posting', 'stupid', 'worthless', 'garbage'],\r\n\t\t\t\t['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],\r\n\t\t\t\t['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]\r\n\t#类别标签的集合\r\n\tclassVec = [0,1,0,1,0,1] #1 is abusive, 0 not\r\n\t#词条切分后的文档集合和类别标签结合\r\n\treturn postingList, classVec",
"def extract_imdb_reviews(review_file):\n\n print(f'Decoding {review_file} ...')\n with open(review_file, encoding='utf-8') as f:\n raw = f.read()\n\n print('Extracting review text and labels ...')\n trash = {'<sssss>', '-rrb-', '-lrb-'}\n lines = raw.split('\\n')[:-1]\n reviews = []\n for line in lines:\n chunks = line.split('\\t\\t')\n label = chunks[2]\n review = ' '.join(w for w in chunks[3].split() if w not in trash)\n reviews.append((review, label))\n\n return reviews",
"def prepare_imdb_data(data, labels):\n \n #Combine positive and negative reviews and labels\n data_train = data['train']['pos'] + data['train']['neg']\n data_test = data['test']['pos'] + data['test']['neg']\n labels_train = labels['train']['pos'] + labels['train']['neg']\n labels_test = labels['test']['pos'] + labels['test']['neg']\n \n #Shuffle reviews and corresponding labels within training and test sets\n data_train, labels_train = shuffle(data_train, labels_train)\n data_test, labels_test = shuffle(data_test, labels_test)\n \n # Return a unified training data, test data, training labels, test labets\n return data_train, data_test, labels_train, labels_test",
"def load_ratings():\n\n print \"Ratings\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Rating.query.delete()\n\n # Read u.data file and insert data\n for row in open(\"seed_data/u.data\"):\n row = row.rstrip()\n user_id, movie_id, score, timestamp = row.split(\"\\t\")\n\n user_id = int(user_id)\n movie_id = int(movie_id)\n score = int(score)\n\n #from rating class take the movie_id and make it equal to the movie_id \n #from the for loop above. We are calling it to make an instance of the rating\n #class\n rating = Rating(movie_id=movie_id, user_id=user_id, score=score)\n \n #We need to add to the session or it won't ever be stored\n db.session.add(rating)\n\n #Once we're done, we should commit our work\n db.session.commit()",
"def load_movielens1m(path):\n if not os.path.isfile(path):\n data_dir = os.path.dirname(path)\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(data_dir)\n download_dataset(\n 'http://files.grouplens.org/datasets/movielens/ml-1m.zip', path)\n\n zp = zipfile.ZipFile(path, 'r')\n content = zp.read('ml-1m/ratings.dat')\n data_list = content.split('\\n')\n\n output1 = open('train', 'w')\n output2 = open('test', 'w')\n num_users = 0\n num_movies = 0\n corpus = []\n for item in data_list:\n term = item.split('::')\n if len(term) < 3:\n continue\n user_id = int(term[0]) - 1\n movie_id = int(term[1]) - 1\n rating = int(term[2])\n corpus.append((user_id, movie_id, rating))\n num_users = max(num_users, user_id + 1)\n num_movies = max(num_movies, movie_id + 1)\n\n corpus_data = np.array(corpus)\n np.random.shuffle(corpus_data)\n np.random.shuffle(corpus_data)\n N = np.shape(corpus_data)[0]\n Ndv = N // 20 * 17\n Ndv2 = N // 10 * 9\n train = corpus_data[:Ndv, :]\n valid = corpus_data[Ndv:Ndv2, :]\n test = corpus_data[Ndv2:, :]\n\n for i in range(np.shape(train)[0]):\n output1.write('%d\\t%d\\t%d\\n' % (train[i, 0], train[i, 1], train[i, 2]))\n output1.close()\n for i in range(np.shape(test)[0]):\n output2.write('%d\\t%d\\t%d\\n' % (test[i, 0], test[i, 1], test[i, 2]))\n output2.close() \n\n return num_movies, num_users, train, valid, test",
"def load_data2(reviews_path):\n df2 = pd.read_csv(reviews_path)\n # substituting 0 (negative) for all reviews rated 0 to 3 and 1 (positive) for all reviews rated 4-5\n # renaming columns to 'label' containing ratings and 'text' containing reviews to match df1\n df2['label'] = np.where(df2['review_rating'] < 4, 0, 1)\n df2['text'] = df2['review_text']\n df2 = df2 [['text', 'label']]\n return df2",
"def get_imdb_data(vocabulary_size, max_len):\n print(\"Getting IMDB data with vocabulary_size %d\" % vocabulary_size)\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.imdb.load_data(\n num_words=vocabulary_size)\n x_train = tf.keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_len)\n x_test = tf.keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_len)\n return x_train, y_train, x_test, y_test",
"def load_movies():\n filepath = \"./seed_data/u.item\"\n movies = open(filepath)\n\n for movie in movies:\n movie = movie.rstrip().split('|')\n title = movie[1][:-7]\n title = title.decode(\"latin-1\")\n if movie[2]:\n date = datetime.strptime(movie[2], '%d-%b-%Y')\n else:\n date = None\n db_movie = Movie(\n movie_id = movie[0], title = title, \n released_at = date, imdb_url = movie[4])\n db.session.add(db_movie)\n\n db.session.commit()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse name and seed for uci regression data. E.g. yacht_2 is the yacht dataset with seed 2. | def _parse_uci_regression_dataset(name_str):
pattern_string = "(?P<name>[a-z]+)_(?P<seed>[0-9]+)"
pattern = re.compile(pattern_string)
matched = pattern.match(name_str)
if matched:
name = matched.group("name")
seed = matched.group("seed")
return name, seed
return None, None | [
"def get_uci_data(name) -> Tuple[chex.Array, chex.Array]:\n spec = DATA_SPECS.get(name)\n if spec is None:\n raise ValueError('Unknown dataset: {}. Available datasets:\\n{}'.format(\n name, '\\n'.join(DATA_SPECS.keys())))\n with tf.io.gfile.GFile(spec.path) as f:\n df = pd.read_csv(f)\n labels = df.pop(spec.label).to_numpy().astype(np.float32)\n for ex in spec.excluded:\n _ = df.pop(ex)\n features = df.to_numpy().astype(np.float32)\n return features, labels",
"def load(name, y_interest):\n mat = pd.read_csv(name)\n mat = mat.set_index('Biopsy')\n y = mat[y_interest]\n fold = mat[\"fold\"]\n\n return y, fold",
"def read_UCI_Dataset(path):\n df = pd.read_csv(path, header=None)\n df_values = df.values\n col_num = df_values.shape[1]\n data = df_values[:, 0:col_num - 1]\n label = df_values[:, col_num - 1]\n return data, label",
"def USPS_data():\n\n data = []\n for i in range(1, 6):\n train = pandas.read_csv('data/USPS/train_' + str(i) + '.csv', header=None)\n test = pandas.read_csv('data/USPS/test_' + str(i) + '.csv', header=None)\n # data.append({\n # 'train_Y': label_to_num(train.ix[:, 0:2].values),\n # 'train_X': train.ix[:, 3:].values,\n # 'test_Y': label_to_num(test.ix[:, 0:2].values),\n # 'test_X': test.ix[:, 3:].values,\n # 'id': i\n # })\n data.append({\n 'train_Y': train.ix[:, 0:2].values,\n 'train_X': train.ix[:, 3:].values,\n 'test_Y': test.ix[:, 0:2].values,\n 'test_X': test.ix[:, 3:].values,\n 'id': i\n })\n\n return data",
"def dataset_preparation():\r\n with open('../data/patterns_num.txt', 'r') as f:\r\n data = f.readlines()\r\n X, Y = [], []\r\n for line in data:\r\n x, y = line.split('\\t')\r\n if len(x) > 5 and x not in X: # better results are achieved excluding short query patterns\r\n X.append(x.replace(\"X\", \"\").replace(\"Y\", \"\").lower())\r\n Y.append(int(y.replace('\\n', '')))\r\n test_size = 0.2\r\n # print('Test size:', test_size, '\\nWrong classifications:\\n')\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=42, stratify=Y)\r\n return X_train, y_train, X_test, y_test",
"def test_load_UCR_UEA_dataset():\n X, y = load_UCR_UEA_dataset(name=\"UnitTest\")\n assert isinstance(X, pd.DataFrame) and isinstance(y, np.ndarray)\n assert X.shape == (42, 1) and y.shape == (42,)",
"def __parse_sample_name(self):\n pattern = '(.*)(P53)(XR|NT)(\\d+)([A-Z]?|Ctr)?.*'\n vals = re.findall(pattern, self.sample_name.replace('_', ''))[0]\n self.cell_type = vals[0]\n self.treatment_type = vals[2]\n self.treatment_time = vals[3]\n if vals[3]:\n self.treatment_repeat = vals[4]",
"def load_data(): \n\tdf = pandas.read_csv('data/iris.data', header=None)\n\ty = df.iloc[0:df.shape[0], 4].values\n\ty = np.where(y == 'Iris-setosa', 0, y)\n\ty = np.where(y == 'Iris-versicolor', 1, y)\n\ty = np.where(y == 'Iris-virginica', 2, y)\n\tx = df.iloc[0:df.shape[0], 0:4].values\n\tx = tuple(x)\n\ty = tuple(y)\n\ttraining_inputs = x[0:40] + x[50:90] + x[100:140]\n\ttraining_results = y[0:40] + y[50:90] + y[100:140]\n\ttraining_data = (training_inputs, training_results)\n\ttest_inputs = x[40:50] + x[90:100] + x[140:150]\n\ttest_results = y[40:50] + y[90:1000] + y[140:150]\n\ttest_data = (test_inputs, test_results)\n\treturn (training_data, test_data)",
"def esm1_t6_43M_UR50S():\n return load_model_and_alphabet_hub(\"esm1_t6_43M_UR50S\")",
"def _load_vowel_test():\n vowel_data = np.loadtxt(_VOWEL_TEST_PATH, delimiter=',', skiprows=1)\n X = vowel_data[:, -10:]\n y = vowel_data[:, 1].astype(int)\n return (X, y)",
"def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset",
"def load_data_test(self):\n data_set = list(open(self.DATA_DIR + 'TREC_10.label', encoding='utf-8', errors='replace').readlines())\n data_set_cleaned = [self.clean_str(sent) for sent in data_set]\n Y_Test = [s.split(' ')[0].split(':')[0] for s in data_set_cleaned]\n X_Test = [s.split(\" \")[1:] for s in data_set_cleaned]\n return X_Test, Y_Test",
"def esm1_t34_670M_UR100():\n return load_model_and_alphabet_hub(\"esm1_t34_670M_UR100\")",
"def feature_extraction(_data):\n # Find the digits in the given string Example - data='18-20' digits = '1820'\n digits = str(''.join(c for c in _data if c.isdigit()))\n # calculate the length of the string\n len_digits = len(digits)\n # splitting digits in to values example - digits = '1820' ages = [18, 20]\n ages = [int(digits[i:i + 2]) for i in range(0, len_digits, 2)]\n # checking for special character in the given data\n special_character = '.+-<>?'\n spl_char = ''.join([c for c in list(special_character) if c in _data])\n # handling decimal age data\n if len_digits == 3:\n spl_char = '.'\n age = \"\".join([str(ages[0]), '.', str(ages[1])])\n # normalizing\n age = int(float(age) - 0.5)\n ages = [age]\n # Finding the maximum, minimum, average age values\n max_age = 0\n min_age = 0\n mean_age = 0\n if len(ages):\n max_age = max(ages)\n min_age = min(ages)\n if len(ages) == 2:\n mean_age = int((max_age + min_age) / 2)\n else:\n mean_age = max_age\n # specially added for 18 years cases\n only_18 = 0\n is_y = 0\n if ages == [18]:\n only_18 = 1\n if 'y' in _data or 'Y' in _data:\n is_y = 1\n under_18 = 0\n if 1 < max_age < 18:\n under_18 = 1\n above_65 = 0\n if mean_age >= 65:\n above_65 = 1\n # verifying whether digit is found in the given string or not.\n # Example - data='18-20' digits_found=True data='????' digits_found=False\n digits_found = 1\n if len_digits == 1:\n digits_found = 1\n max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = 0, 0, 0, 0, 0, 0, 0\n elif len_digits == 0:\n digits_found, max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = -1, -1, -1, -1, -1, -1, -1, -1\n \n feature = {\n 'ages': tuple(ages),\n 'len(ages)': len(ages),\n 'spl_chr': spl_char,\n 'is_digit': digits_found,\n 'max_age': max_age,\n 'mean_age': mean_age,\n 'only_18': only_18,\n 'is_y': is_y,\n 'above_65': above_65,\n 'under_18': under_18\n }\n\n return feature",
"def parse_IAU_name(name):\n # First see if there is a source type acronym\n if diag:\n print \"parse_IAU_name: received\",name\n parts = name.split()\n if len(parts) == 1:\n designation = parts[0]\n elif len(parts) == 2:\n acronym, designation = parts\n else:\n raise(\"Invalid format: \"+name)\n # Now process the designation\n flag = designation[0].upper()\n if flag == \"G\":\n # Galactic coordinates\n longitude,latitude,sign = split_on_sign(name[1:])\n X = parse_decimal_angle(longitude)\n Y = parse_decimal_angle(latitude)\n elif flag == \"J\":\n # Julian epoch celestial coordinates\n ra,dec,sign = split_on_sign(name[1:])\n X = parse_sexagesimal_angle(ra)\n Y = parse_sexagesimal_angle(dec)\n elif flag == \"B\":\n # Besselian epoch celestial coordinates\n ra,dec,sign = split_on_sign(name[1:])\n X = parse_sexagesimal_angle(ra)\n Y = parse_sexagesimal_angle(dec)\n elif designation[0].isdigit():\n # This should be Besselian but who knows?\n # If it is Besselian there should be at least four digits in RA\n # otherwise it could be galactic\n x,y,sign = split_on_sign(name)\n if len(x) > 3:\n X = parse_sexagesimal_angle(x)\n Y = parse_sexagesimal_angle(y)\n flag = \"B\"\n else:\n X = parse_decimal_angle(x)\n Y = parse_decimal_angle(y)\n flag = \"G\"\n else:\n return \"?\",None,None\n if sign == \"-\":\n Y = -Y\n return flag,X,Y",
"def main():\n unhashtagger = Unhashtagger()\n unhashtagger.train(iter_lines(DATA_DIR + '/training_orig.txt'))\n results = unhashtagger.evaluate()\n\n print('Precision: %5.2f%%' % (results['precision'] * 100))\n print('Recall: %5.2f%%' % (results['recall'] * 100))\n print('F1 score: %5.2f%%' % (results['f1'] * 100))",
"def _process_input_seed(self):\n\n Tcmb = 2.72548 * u.K # 0.00057 K\n Tfir = 70 * u.K\n ufir = 0.2 * u.eV / u.cm ** 3\n Tnir = 5000 * u.K\n unir = 0.2 * u.eV / u.cm ** 3\n\n # Allow for seed_photon_fields definitions of the type 'CMB-NIR-FIR' or 'CMB'\n if type(self.seed_photon_fields) != list:\n self.seed_photon_fields = self.seed_photon_fields.split('-')\n\n self.seeduf = {}\n self.seedT = {}\n self.seedisotropic = {}\n self.seedtheta = {}\n for idx, inseed in enumerate(self.seed_photon_fields):\n if isinstance(inseed, six.string_types):\n if inseed == 'CMB':\n self.seedT[inseed] = Tcmb\n self.seeduf[inseed] = 1.0\n self.seedisotropic[inseed] = True\n elif inseed == 'FIR':\n self.seedT[inseed] = Tfir\n self.seeduf[inseed] = (ufir / (ar * Tfir ** 4)).decompose()\n self.seedisotropic[inseed] = True\n elif inseed == 'NIR':\n self.seedT[inseed] = Tnir\n self.seeduf[inseed] = (unir / (ar * Tnir ** 4)).decompose()\n self.seedisotropic[inseed] = True\n else:\n log.warning('Will not use seed {0} because it is not '\n 'CMB, FIR or NIR'.format(inseed))\n raise TypeError\n elif type(inseed) == list and (len(inseed) == 3 or len(inseed) == 4):\n isotropic = len(inseed) == 3\n\n if isotropic:\n name, T, uu = inseed\n self.seedisotropic[name] = True\n else:\n name, T, uu, theta = inseed\n self.seedisotropic[name] = False\n self.seedtheta[name] = validate_scalar('{0}-theta'.format(name),\n theta, physical_type='angle')\n\n validate_scalar('{0}-T'.format(name), T, domain='positive',\n physical_type='temperature')\n self.seed_photon_fields[idx] = name\n self.seedT[name] = T\n if uu == 0:\n self.seeduf[name] = 1.0\n else:\n # pressure has same physical type as energy density\n validate_scalar('{0}-u'.format(name), uu,\n domain='positive', physical_type='pressure')\n self.seeduf[name] = (uu / (ar * T ** 4)).decompose()\n else:\n log.warning(\n 'Unable to process seed photon field: {0}'.format(inseed))\n raise TypeError",
"def scrap_initial_training_data():\n\tdata_interest_obj = np.zeros((50,3)) #for objective functions\n\tfor i in range(50):\n\t\tdata = pd.read_csv('Meshes/gen_1/random_design'+str(i)+'/history.csv',\n\t\t\theader=0, usecols=[' \"CD\" ',' \"CL\" ',' \"CMz\" '])\n\t\tinterest = np.array([data.iloc[len(data)-1]])\n\t\tdata_interest_obj[i] = interest \n\n\t\"\"\"Scraping the data of interest from random designs as design variables\"\"\"\n\tdata_interest_dv = np.zeros((1,28)) #for design variables\n\tfor i in range(50):\n\t\tdata_upper = np.genfromtxt('Designs/initial_samples/control_points/random_design'\n\t\t\t+ str(i) + '.dat', skip_header=1, skip_footer=17,\n\t\t\tusecols=(1), delimiter=' ')\n\t\tdata_lower = np.genfromtxt('Designs/initial_samples/control_points/random_design'\n\t\t\t+ str(i) + '.dat', skip_header=17, skip_footer=1,\n\t\t\tusecols=(1), delimiter=' ')\n\t\tdata_upper = np.array([data_upper])\n\t\tdata_lower = np.array([data_lower])\n\t\tinterest = np.append(data_upper, data_lower, axis=1)\n\t\tdata_interest_dv = np.append(data_interest_dv, interest, axis=0)\n\tdata_interest_dv = np.delete(data_interest_dv, 0, 0)\n\n\t\"\"\"Saving to dat files\"\"\"\n\tnp.savetxt('Data/Training/X.dat',\n\t\t\t\tdata_interest_dv,\n\t \t\t\tdelimiter=' ',\n\t \t\t\theader='',\n\t \t\t\tfooter='')\n\tnp.savetxt('Data/Training/OUT.dat',\n\t\t\t\tdata_interest_obj,\n\t \t\t\tdelimiter=' ',\n\t \t\t\theader='',\n\t \t\t\tfooter='')",
"def _parse_seed_course_1(self, raw_course):\n raw_course = raw_course.strip()\n\n try:\n if len(raw_course) == 0:\n self.__seed_course_1 = None\n elif (re.match(\"[LSY]\", raw_course.upper())):\n self.__seed_course_1 = raw_course.upper()\n else:\n raise line_format_errors.FieldParseError(\"seed_course_1\")\n except Exception:\n raise line_format_errors.FieldParseError(\"seed_course_1\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reshapes batch to have first axes size equal n_split. | def batch_split_axis(batch, n_split):
x, y = batch
n = x.shape[0]
n_new = n / n_split
assert n_new == int(n_new), (
"First axis cannot be split: batch dimension was {} when "
"n_split was {}.".format(x.shape[0], n_split))
n_new = int(n_new)
return tuple(arr.reshape([n_split, n_new, *arr.shape[1:]]) for arr in (x, y)) | [
"def split_last_dimension(x, n):\n x_shape = common_layers.shape_list(x)\n m = x_shape[-1]\n if isinstance(m, int) and isinstance(n, int):\n assert m % n == 0\n return tf.reshape(x, x_shape[:-1] + [n, m // n])",
"def _reshape_to_batchsize(im):\n sequence_ims = tf.split(im, num_or_size_splits=sequences_per_batch, axis=0)\n sequence_ims = [tf.squeeze(i) for i in sequence_ims]\n return tf.concat(sequence_ims, axis=0)",
"def _split_batch_dimension(new_batch: int, data: jnp.ndarray) -> jnp.ndarray:\n # The first dimension will be used for allocating to a specific ensemble\n # member, and the second dimension is the parallelized batch dimension, and\n # the remaining dimensions are passed as-is to the wrapped network.\n # We use Fortan (F) order so that each input batch i is allocated to\n # ensemble member k = i % new_batch.\n return jnp.reshape(data, (new_batch, -1) + data.shape[1:], order='F')",
"def reshape_to_batch(array):\n if len(array.shape) == 2:\n array = numpy.expand_dims(array, axis=2)\n array = numpy.expand_dims(array, axis=0)\n return array",
"def split_last_dim(self, x, n):\n old_shape = list(x.size())\n last = old_shape[-1]\n new_shape = old_shape[:-1] + [n] + [last // n if last else None]\n ret = x.view(new_shape)\n return ret.permute(0, 2, 1, 3)",
"def reshape(self, nr, nc):\n\n self.matrix.matrixReshape(nr, nc)",
"def _reshape(self, arr: np.ndarray) -> np.ndarray:\n return arr.reshape(self.TileHeight.value, self.TileWidth.value, self.bands,)",
"def split(array, nrows, ncols):\r\n r, h = array.shape\r\n return (array.reshape(h//nrows, nrows, -1, ncols)\r\n .swapaxes(1, 2)\r\n .reshape(-1, nrows, ncols))",
"def ReshapeDim(x, dim, dim_reshape_segments=None):\n if dim_reshape_segments is None:\n return x\n assert x.shape[dim] % dim_reshape_segments == 0\n new_shape = list(x.shape[0:dim])\n new_shape.append(dim_reshape_segments)\n new_shape.append(x.shape[dim] // dim_reshape_segments)\n new_shape.extend(d for d in x.shape[dim + 1:])\n return tf.reshape(x, new_shape)",
"def reshape(x, shape):\n return Reshape(shape)(x)",
"def split(x, axis, split_size):\n assert axis < x.ndim, 'Dimension out of range!'\n\n if isinstance(split_size, int):\n _split_size = [x.shape[axis] // split_size] * split_size\n\n elif isinstance(split_size, (list, tuple)):\n _split_size = split_size\n else:\n raise TypeError\n\n if x.ndim == 0:\n\n return [x for _ in range(len(_split_size))]\n\n return T.split(x, splits_size=_split_size, n_splits=len(_split_size), axis=axis)",
"def batch_split(self) -> np.array:\n pass",
"def batchify(data, batch_size):\n n_batch = data.shape[0] // batch_size\n data = data[:n_batch * batch_size]\n data = data.reshape((batch_size, n_batch)).T\n return data",
"def reshape_d(sequence, batch_size, num_steps):\n batch_length = batch_size * num_steps\n num_batches = sequence // batch_size\n if num_batches * batch_length > (len(sequence) - 1):\n num_batches -= 1\n # Round up batch\n X = sequence[: num_batches * batch_length]\n y = sequence[1: num_batches * batch_length + 1]\n X_splits = np.split(X, batch_size)\n y_splits = np.split(y, batch_size)\n # Stack batches\n X = np.stack(X_splits)\n y = np.stack(y_splits)\n return X, y",
"def batchify(data, batch_size):\n\n nbatch = data.shape[0] // batch_size\n data = data[:nbatch * batch_size]\n data = data.reshape((batch_size, nbatch)).T\n return data",
"def _reshape_X(X):\n if len(X.shape) != 3:\n return X.reshape((X.shape[0],1,X.shape[1]))\n return X",
"def coord_reshape(dat, n_dim=3):\n dat = dat.reshape([dat.shape[0], int(dat.shape[1] / n_dim), n_dim])\n return dat",
"def reshape(arr):\r\n reshape_arr = np.empty((3,240,320),dtype='float32')\r\n reshape_arr[0,:,:] = arr[:,:,0]\r\n reshape_arr[1,:,:] = arr[:,:,1]\r\n reshape_arr[2,:,:] = arr[:,:,2]\r\n return reshape_arr",
"def windows_partition(x, window_size):\n\n B, H, W, C = x.shape\n x = x.reshape([B, H//window_size, window_size, W//window_size, window_size, C])\n x = x.transpose([0, 1, 3, 2, 4, 5])\n x = x.reshape([-1, window_size, window_size, C]) #(num_windows*B, window_size, window_size, C)\n return x"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Iterate through the spike waveforms belonging in the current trace view. | def _iter_spike_waveforms(
interval=None, traces_interval=None, model=None, supervisor=None,
n_samples_waveforms=None, get_best_channels=None, show_all_spikes=False):
m = model
p = supervisor
sr = m.sample_rate
a, b = m.spike_times.searchsorted(interval)
s0, s1 = int(round(interval[0] * sr)), int(round(interval[1] * sr))
ns = n_samples_waveforms
k = ns // 2
for show_selected in (False, True):
for i in range(a, b):
t = m.spike_times[i]
c = m.spike_clusters[i]
is_selected = c in p.selected
# Show non selected spikes first, then selected spikes so that they appear on top.
if is_selected is not show_selected:
continue
# Skip non-selected spikes if requested.
if (not show_all_spikes and c not in supervisor.selected):
continue
# cg = p.cluster_meta.get('group', c)
channel_ids, channel_amps = get_best_channels(c)
s = int(round(t * sr)) - s0
# Skip partial spikes.
if s - k < 0 or s + k >= (s1 - s0): # pragma: no cover
continue
# Extract the waveform.
wave = Bunch(
data=traces_interval[s - k:s + ns - k, channel_ids],
channel_ids=channel_ids,
start_time=(s + s0 - k) / sr,
spike_id=i,
spike_time=t,
spike_cluster=c,
channel_amps=channel_amps, # for each of the channel_ids, the relative amp
select_index=p.selected.index(c) if c in p.selected else None,
)
assert wave.data.shape == (ns, len(channel_ids))
yield wave | [
"def waveforms(self):\n return list(self._waveforms)",
"def get_template_spike_waveforms(self, template_id):\n spike_ids = self.get_template_spikes(template_id)\n channel_ids = self.get_template_channels(template_id)\n return self.get_waveforms(spike_ids, channel_ids)",
"def scan_waveforms(pmtea,list_of_events=[0]):\n \n for event in list_of_events:\n plot_waveforms(PDF.get_waveforms(pmtea,event_number=event))\n wait()",
"def spikeGen(self):\n\t\tself.config.assertParams(\"spike.params\")\n\t\tparams = self.config.getStringListConfig(\"spike.params\")[0]\n\t\t\n\t\t#gap pameters\n\t\tgsampler = self.__simpleSampler(params[0])\t\n\t\t\n\t\t#width sampler\n\t\twsampler = self.__simpleSampler(params[1])\n\t\t\n\t\t#incr value sampler\n\t\tivsampler = self.__simpleSampler(params[2], False)\n\n\t\t# random noise sampler\n\t\trsampler = self.__genRandSampler()\n\t\t\n\t\tgap = gsampler.sample()\n\t\tsampTm = self.pastTm\n\t\tinSpike = False\n\t\tpreVal = rsampler.sample()\n\t\tiga = 0\n\t\tisp = 0\n\t\t\n\t\twhile (sampTm < self.curTm):\n\t\t\tif inSpike:\t\n\t\t\t\tif isp <= hwidth:\n\t\t\t\t\tcurVal = preVal + vinc\n\t\t\t\telse:\n\t\t\t\t\tcurVal = preVal - vinc\n\t\t\t\tcurVal += rsampler.sample() if rsampler is not None else 0\n\t\t\t\tpreVal = curVal\n\t\t\t\tisp += 1\n\t\t\t\tif isp == width:\n\t\t\t\t\tinSpike = False\n\t\t\t\t\tgap = gsampler.sample()\n\t\t\telse:\n\t\t\t\tcurVal = rsampler.sample() if rsampler is not None else 0\n\t\t\t\tpreVal = curVal\n\t\t\t\tiga += 1\n\t\t\t\tif iga == gap:\n\t\t\t\t\tvinc = ivsampler.sample()\n\t\t\t\t\twidth = wsampler.sample()\n\t\t\t\t\thwidth = int((width + 1) / 2)\n\t\t\t\t\tisp = 0\n\t\t\t\t\tinPike = True\n\t\t\t\t\t\n\t\t\tif self.tsValType == \"int\":\n\t\t\t\tcurVal = int(curVal)\n\t\t\t\t\n\t\t\t#date time\n\t\t\tdt = self.__getDateTime(sampTm)\n\t\t\t\t\n\t\t\trec = self.ouForm.format(dt, curVal)\n\t\t\tsampTm += self.sampIntv\n\t\t\tyield rec",
"def get_waveforms(self, spike_ids, channel_ids=None):\n if self.traces is None and self.spike_waveforms is None:\n return\n # Create the output array.\n ns = len(spike_ids)\n nsw = self.n_samples_waveforms\n channel_ids = np.arange(self.n_channels) if channel_ids is None else channel_ids\n\n nc = len(channel_ids)\n out = np.zeros((ns, nsw, nc), dtype=np.float64)\n\n # Extract the spike waveforms.\n for i, ts in enumerate(self.spike_samples[spike_ids]):\n if self.spike_waveforms is not None: # pragma: no cover\n # NOTE: this has not be extensively tested yet.\n # Precomputed waveforms in spikes.waveforms.npy\n ind = self.spike_waveforms.channel_ids[i, :]\n channel_common = np.intersect1d(channel_ids, ind)\n if len(channel_ids) > 0:\n cols0 = _index_of(channel_common, channel_ids)\n cols1 = _index_of(channel_common, ind)\n assert len(cols0) == len(cols1)\n out[i, :, cols0] = self.spike_waveforms.waveforms[i, :, cols1]\n else:\n # Extract waveforms on the fly from raw data.\n out[i, ...] = _extract_waveforms(\n self.traces, ts, channel_ids=channel_ids, n_samples_waveforms=nsw)\n out[i, ...] -= np.median(out[i, ...], axis=0)\n return out",
"def iter_spectra(self):\n for record in self.session.query(SpectrumLibraryIndexRecord).order_by(\n SpectrumLibraryIndexRecord.number).yield_per(10000):\n yield record",
"def _get_spikes(self, **kwargs):\n data = kwargs['data']\n threshold = kwargs['threshold']\n\n # parameter dictionary\n p = data['p']\n\n print p['path_combos']\n\n # load spike analysis functions\n spike_analysis = Spikes()\n\n # temporary store for current trial data. all dictionary entries will be transferred to full data structure (all trials)\n spike_dic = {}\n\n # print trial\n # parameter dictionary\n spike_dic['p'] = copy.copy(p)\n\n # iterate over polarity\n for polarity_key, polarity in data.iteritems():\n\n # exclude parameter dictionary\n if polarity_key != 'p':\n\n spike_dic[polarity_key]={}\n # iterate over pathways\n for path_key, path in polarity.iteritems():\n\n # exclude time vectors\n if path_key!='t':\n\n spike_dic[polarity_key][path_key]={}\n p_path = p['p_path'][path_key]\n\n # temporary dictionary\n dtemp={}\n\n dtemp['p'] = copy.copy(p)\n\n dtemp['p_path']=copy.copy(p_path)\n\n # list all dendritic spikes [spike times]\n dtemp['spikes_dend']=[] \n # for each dendritic spike in 'spikes_dend', list the distance from the soma, [distances]\n dtemp['spikes_dend_dist']=[]\n # for each activated segment, whether there was a spike at all, and where it was initiated\n # 0= no spike, 1=dendrite first, 2=soma first\n dtemp['spikes_first'] = []\n # list spike times for each segment, same organization as 'seg_idx', {tree_key}[sec_num][seg_num][spike_times]\n dtemp['spike_times'] ={}\n # list weight change for each segment, same organization as 'seg_idx', {tree_key}[sec_num][seg_num=dw]\n dtemp['dw']={}\n\n # add soma data\n dtemp['spike_times']['soma'] = [[]]\n spike_times = spike_analysis.detect_spikes(data[polarity_key][path_key]['soma_v'][0][0], threshold=threshold)['times'][0]\n dtemp['spike_times']['soma'][0].append(spike_times)\n dtemp['spikes_soma'] = spike_times\n\n # iterate over trees\n for tree_key, tree in p_path['seg_idx'].iteritems():\n # add dimension\n dtemp['spike_times'][tree_key] =[]\n dtemp['dw'][tree_key] =[]\n # iterate over sections\n for sec_i, sec in enumerate(tree):\n # add dimensions\n dtemp['spike_times'][tree_key].append([])\n dtemp['dw'][tree_key].append([])\n sec_num = p['p_path'][path_key]['sec_idx'][tree_key][sec_i]\n # iterate over segments\n for seg_i, seg in enumerate(sec):\n seg_num=seg\n distance = p['seg_dist'][tree_key][sec_num][seg_num]\n\n # list of spike times [spike_times]\n spike_times = spike_analysis.detect_spikes(data[polarity_key][path_key][tree_key+'_v'][sec_i][seg_i], threshold=threshold)['times'][0]\n\n # scalar weight change\n dw = data[polarity_key][path_key][tree_key+'_gbar'][sec_i][seg_i][-1]/data[polarity_key][path_key][tree_key+'_gbar'][sec_i][seg_i][0]\n\n # add to dtemp structure, each segment contains a list of spike times\n dtemp['spike_times'][tree_key][sec_i].append(spike_times)\n # each segment is a scalar weight change\n dtemp['dw'][tree_key][sec_i].append(dw)\n\n # record whether there was a spike in soma or dendrite first [all segments]\n # no spike=0, dend first=1, soma first=2\n # if there are dendritic spikes\n if len(spike_times)>0:\n \n # iterate through spike times\n for spike_i, spike_time in enumerate(spike_times):\n\n # add to list of all dendritic spike times for this trial/cell\n # [all dendritic spike times]\n dtemp['spikes_dend'].append(spike_time)\n # [all dendritic spike distances]\n dtemp['spikes_dend_dist'].append(distance)\n\n # if this is the first spike\n if spike_i==0:\n \n # if there is also a somatic spike\n if len(dtemp['spikes_soma'])>0:\n \n # if the somatic spike occurs first\n if 
spike_time > dtemp['spikes_soma'][0]:\n\n # store as soma first\n dtemp['spikes_first'].append(2)\n \n # otherwise the spike is dend first\n else:\n dtemp['spikes_first'].append(1)\n \n # if there is a dendritic but no somatic spike, it is dend first\n else:\n dtemp['spikes_first'].append(1)\n \n # otherwise no spike at all\n else:\n dtemp['spikes_first'].append(0)\n\n # create nested list of spike times with dimensions [pulse time bin][spike times]\n # nested list of time bins [bin number][time1, time2]\n dtemp['time_bins'] = []\n # nested list of dendritic spike times [time bin][spike times]\n dtemp['spikes_dend_bin'] = []\n # nested list of somatic spike times [time bin][spike times]\n dtemp['spikes_soma_bin'] = []\n # nested list of dendritic spike times normalized to input pulse onset time [time bin][spike times]\n dtemp['spikes_dend_bin_norm'] = []\n # nested list of somatic spike times normalized to input pulse onset time [time bin][spike times]\n dtemp['spikes_soma_bin_norm'] = []\n # nested list of dendritic spike distances [time bin][distances]\n dtemp['spikes_dend_dist_bin'] = []\n # nested list of timing difference between dendrite and soma [time bin][spike time differences], soma first=negative, dendrite first=positive, if no soma spike=no subtraction(positive)\n dtemp['spikes_dend_diff_bin'] = []\n # fraction of synapses with a spike in each bin, [time bin][fraction]\n dtemp['syn_frac_bin'] = []\n\n if kwargs['weakpath_bins']:\n pulses = p['pulses']\n else:\n pulses = p['p_path'][path_key]['pulses']\n # iterate through pulses in the current pathway\n for pulse_i, pulse in enumerate(range(int(pulses))):\n\n if kwargs['weakpath_bins'] and path_key=='weak':\n # determine time bins\n dtemp['time_bins'].append([])\n time1 = (p['warmup'] + 1000/p['pulse_freq']*pulse_i)/p['dt'] +1 \n time2 = (p['warmup'] + 1000/p['pulse_freq']*(pulse_i+1))/p['dt']\n dtemp['time_bins'][pulse_i].append(time1)\n dtemp['time_bins'][pulse_i].append(time2)\n else:\n # determine time bins\n dtemp['time_bins'].append([])\n time1 = (p_path['warmup'] + 1000/p_path['pulse_freq']*pulse_i)/p['dt'] +1 \n time2 = (p_path['warmup'] + 1000/p_path['pulse_freq']*(pulse_i+1))/p['dt']\n dtemp['time_bins'][pulse_i].append(time1)\n dtemp['time_bins'][pulse_i].append(time2)\n\n\n # get spike times that fall within the current bin \n binned_spikes_dist = []\n binned_spikes_dend =[]\n binned_spikes_dend_norm =[]\n binned_spikes_soma = []\n binned_spikes_soma_norm = []\n \n # if there are any somatic spikes\n if len(dtemp['spikes_soma'])>0:\n # list spikes in current bin, return empty list if no spikes\n binned_spikes_soma = [spike for spike_i, spike in enumerate(dtemp['spikes_soma']) if (spike > time1 and spike <= time2)]\n # if there are spikes in the current bin, normalized to the pulse onset\n if len(binned_spikes_soma)>0:\n binned_spikes_soma_norm = [spike-time1 for spike in binned_spikes_soma]\n \n # if there are any dendritic spikes\n if len(dtemp['spikes_dend'])>0:\n # list spikes in current bin, return empty list if no spikes\n binned_spikes_dend = [spike for spike_i, spike in enumerate(dtemp['spikes_dend']) if (spike > time1 and spike <= time2)]\n\n binned_spikes_dist = [dtemp['spikes_dend_dist'][spike_i] for spike_i, spike in enumerate(dtemp['spikes_dend']) if (spike > time1 and spike <= time2)]\n\n # if there are spikes in the current bin, normalized to the pulse onset\n if len(binned_spikes_dend)>0:\n binned_spikes_dend_norm = [spike-time1 for spike in binned_spikes_dend]\n \n \n # difference between dendritic 
and somatic spike times (dendrite first is positive, soma first is negative)\n # if there are somatic and dendritic spikes in the current bin\n if len(binned_spikes_soma)>0 and len(binned_spikes_dend)>0:\n # list of time differences\n binned_spikes_dend_diff = [binned_spikes_soma[0]-spike for spike_i, spike in enumerate(binned_spikes_dend)]\n else: \n # otherwise dendritic spiek list remains unchanged (positive)\n binned_spikes_dend_diff = binned_spikes_dend_norm\n\n # fraction of synapses that spike in current bin\n binned_distances = list(set(binned_spikes_dist))\n binned_syn_frac = float(len(binned_distances))/float(p_path['syn_num'])\n \n\n # add spike times for current bin to list of all bins\n # [bin number][list of spike times]\n dtemp['spikes_dend_bin'].append(binned_spikes_dend)\n dtemp['spikes_soma_bin'].append(binned_spikes_soma)\n dtemp['spikes_dend_bin_norm'].append(binned_spikes_dend_norm)\n dtemp['spikes_soma_bin_norm'].append(binned_spikes_soma_norm)\n dtemp['spikes_dend_dist_bin'].append(binned_spikes_dist)\n dtemp['spikes_dend_diff_bin'].append(binned_spikes_dend_diff)\n dtemp['syn_frac_bin'].append(binned_syn_frac)\n\n # fraction of synapses that spike at all during simulation\n distances = list(set(dtemp['spikes_dend_dist']))\n dtemp['syn_frac'] = float(len(distances))/float(p_path['syn_num'])\n\n # update main data structure\n # for each data type\n for dtype_key, dtype in dtemp.iteritems():\n\n spike_dic[polarity_key][path_key][dtype_key]=dtype\n\n return spike_dic",
"def spiketimes_each_frequency(spikeTimesFromEventOnset, trialIndexForEachSpike, currentFreq, uniqFreq):\n for freq in uniqFreq:\n trialsThisFreq = np.flatnonzero(currentFreq == freq)\n spikeTimesThisFreq = spikeTimesFromEventOnset[np.in1d(trialIndexForEachSpike, trialsThisFreq)]\n trialIndicesThisFreq = trialIndexForEachSpike[np.in1d(trialIndexForEachSpike, trialsThisFreq)]\n yield freq, spikeTimesThisFreq, trialIndicesThisFreq",
"def AllSpikeTimes(self):\n blah = []\n for neur in self.neurons:\n blah.append(np.array(neur.spikes))\n\n return blah",
"def getSpikeTimes(self):\n \n return self.spks*self.dt",
"def getSpikeTimes(self):\n\n return self.spks * self.dt",
"def stop_waveforms(wfm_dict):\n for key in wfm_dict:\n try:\n wfm_dict[key].ref.stop()\n except Exception as e:\n print(\"Failed to stop %s\"%str(key))",
"def get_cluster_spike_waveforms(self, cluster_id):\n spike_ids = self.get_cluster_spikes(cluster_id)\n channel_ids = self.get_cluster_channels(cluster_id)\n return self.get_waveforms(spike_ids, channel_ids)",
"def save_spike_waveforms(\n self, n_samples_waveforms=None, n_channels_max=None): # pragma: no cover\n\n if self.traces is None:\n logger.warning(\n \"Spike waveforms could not be extracted as the raw data file is not available.\")\n return\n\n path = self.dir_path / 'spikes.waveforms.npy'\n path_ind = self.dir_path / 'spikes.waveformsChannels.npy'\n\n # Determine the waveforms array shape.\n ns = self.n_spikes\n assert ns > 0\n nsw = n_samples_waveforms or self.n_samples_waveforms or 80\n assert nsw > 0\n nc = n_channels_max or 16\n assert nc > 0\n shape = (ns, nsw, nc)\n\n # Create the memmap npy file for writing.\n out = open_memmap(\n path, mode='w+', dtype=self.dtype, shape=shape, fortran_order=False)\n out_ind = open_memmap(\n path_ind, mode='w+', dtype=np.int32, shape=(ns, nc), fortran_order=False)\n # Iterate over all spikes.\n logger.info(\"Extract waveforms to %s and %s...\", path.name, path_ind.name)\n\n best_channels = {t: self.get_template(t).channel_ids[:nc] for t in range(self.n_templates)}\n\n for i, (s, t) in tqdm(enumerate(zip(self.spike_samples, self.spike_templates)), total=ns):\n # Find the best channel ids for the spike's template.\n c = best_channels[t]\n # Extract the waveforms and write them in the file.\n ncl = min(len(c), nc)\n out[i, :, :ncl] = _extract_waveforms(\n self.traces, s, channel_ids=c, n_samples_waveforms=nsw)\n # Save the best channels to the auxiliary file, putting -1 for unused channels.\n out_ind[i, :ncl] = c\n out_ind[i, ncl:] = -1\n\n return out, out_ind",
"def __fire(self):\n self.Vtrace.append(self.Vreset)\n for i in xrange(self.__network.nNeurons()):\n psp = self.__network.psp(i,self.__neuronID)\n self.__network.neuron(i).receiveSpike(psp)",
"def get_ser_spktimes(self):\n\n spktimes = []\n for sweep_no in range(self.get_no_sweeps()):\n spktimes_singlesweep = []\n for cell_no in range(self.get_no_ser_neurons()):\n spktimes_singlesweep.append(\n np.where(self.ser_spktrains[sweep_no, cell_no, :] > 0.5)[0]\n * self.get_dt()\n )\n spktimes.append(spktimes_singlesweep)\n return spktimes",
"def _get_all_spectra(self):\n pass",
"def __iter__(self):\n\n batch_sp = []\n batch_noise = []\n batch_mix = []\n batch_count = 0\n\n while True:\n\n # Randomizing wav lists\n random.shuffle(self._lst_spk_files)\n random.shuffle(self._lst_noise_files)\n\n for spk_file, noise_file in zip(self._lst_spk_files, self._lst_noise_files):\n\n # Read wav files\n sig_spk = self.__read_wav_file(spk_file)\n sig_noise = self.__read_wav_file(noise_file)\n\n # Align signal\n min_length = min(sig_spk.shape[0], sig_noise.shape[0])\n\n if min_length < self._fftsize:\n raise Exception(\"ERROR: Too short signals in dataset\")\n\n sig_spk = sig_spk[:min_length]\n sig_noise = sig_noise[:min_length]\n\n # Generate need SNR\n need_snr = random.uniform(self._min_snr, self._max_snr)\n\n # Calc scaled signals\n sig_spk, sig_noise = self.__mix_with_snr(sig_spk, sig_noise, need_snr)\n\n # Calc STFT\n stft_spk = stft(sig_spk, fftsize=self._fftsize, overlap=self._overlap)\n stft_noise = stft(sig_noise, fftsize=self._fftsize, overlap=self._overlap)\n stft_mix = stft_spk + stft_noise\n\n # Skip small segments\n frames, bin = stft_mix.shape\n if frames <= self._context_size:\n continue\n\n # Collect batch\n i = 0\n while i + self._context_size < frames:\n\n batch_sp.append(stft_spk[i:i + self._context_size, :])\n batch_noise.append(stft_noise[i:i + self._context_size, :])\n batch_mix.append(stft_mix[i:i + self._context_size, :])\n\n i += self._context_size // 2\n batch_count += 1\n\n if batch_count == self._batch_size:\n sp = np.array(batch_sp).reshape((self._batch_size,\n self._context_size, -1))\n noise = np.array(batch_noise).reshape((self._batch_size,\n self._context_size, -1))\n mix = np.array(batch_mix).reshape((self._batch_size,\n self._context_size, -1))\n yield sp, noise, mix\n\n batch_sp = []\n batch_noise = []\n batch_mix = []\n batch_count = 0",
"def process(self, waveforms):\n return {k: v for f in self.chain for k, v in f(waveforms).items()}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Switch between top and bottom origin for the channels. | def switch_origin(self):
self.origin = 'bottom' if self.origin == 'top' else 'top' | [
"def move_to_origin(self) -> None:\n\n _bb = self.bb()\n if _bb.x < 0:\n self.translate(abs(_bb.x), 0.0)\n else:\n self.translate(-abs(_bb.x), 0.0)\n\n if _bb.y < 0:\n self.translate(0.0, abs(_bb.y))\n else:\n self.translate(0.0, -abs(_bb.y))",
"def transition_to_top(self):\n self.move_to_top_side()",
"def mirrorHoriz():",
"def blend_channels_screen(bottom_chan, top_chan):\n return 1 - (1 - bottom_chan[:,:]) * (1 - top_chan[:,:])",
"def set_current_as_origin(self):\n ps = PoseStamped()\n ps.header.frame_id = \"world\"\n ps.pose.position.x = self.limb.endpoint_pose()[\"position\"].x\n ps.pose.position.y = self.limb.endpoint_pose()[\"position\"].y\n ps.pose.position.z = self.limb.endpoint_pose()[\"position\"].z\n ps.pose.orientation.z = 1\n self.stick_T_hand = transformations.pose_to_list(self.tfl.transformPose(\"right_hand\", ps))",
"def update_ballpos(self,pos):\n if self.options.visualize_switch_xy:\n self.col.set_offsets(pos[:,::-1]) # reverse x-y direction\n else:\n self.col.set_offsets(pos)",
"def reverse_channels(self):\n # raise NotImplementedError\n self.im=self.im[...,::-1]",
"def fl_flip_yorigin():\n _fl_flip_yorigin = library.cfuncproto(\n library.load_so_libforms(), \"fl_flip_yorigin\",\\\n None, [],\\\n \"\"\"void fl_flip_yorigin()\"\"\")\n _fl_flip_yorigin()",
"def backToMiddlePos():\n\tprogMode(True) # Active le couple de servos\n\taxDriver.goToPosition(axDriver.BROADCASTID, 0x1FF) # Renvoie a la position 0x1FF",
"def OnWindowSetTop(self, Top=sentinel):",
"def North(self):\n self.ypos -= 1\n self.runcommands()",
"def __init__(self):\r\n #set up pannel in centre of screen, just above the bottom of the screen.\r\n super(Pannel, self).__init__(image = Pannel.pannel,\r\n x = games.screen.width/2,\r\n y = games.screen.height -11)",
"def set_zlim(self, bottom=None, top=None):\n if isinstance(self._frame, root.TH1F):\n warnings.warn(\"Attempting to set z-axis limits for 2D axes\")\n return\n\n if top is None and np.iterable(bottom):\n bottom, top = bottom\n\n if bottom is None or top is None:\n old_bottom, old_top = self.get_zlim()\n if bottom is None:\n bottom = old_bottom\n if top is None:\n top = old_top\n\n if bottom == top:\n warnings.warn(\n \"Attempting to set identical bottom == top == {} z-axis limits\".format(\n bottom\n ),\n stacklevel=2,\n )\n\n if bottom > top:\n raise ValueError(\"Axis limits must be in increasing order\")\n\n if top <= 0 and self._logz:\n warnings.warn(\n \"Attempting to set non-positive top zlim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n top = self.get_zlim()[1]\n\n elif bottom <= 0 and self._logy:\n warnings.warn(\n \"Attempting to set non-positive bottom zlim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n bottom = self.get_zlim()[0]\n\n self._frame.SetMinimum(bottom)\n self._frame.SetMaximum(top)\n\n self._pad.Modified() # Draw the updated axes\n\n return (bottom, top)",
"def init_over_top_below_bottom(self):\n self.has_over_top = False\n self.has_bellow_bottom = False",
"def __window_forward(self):\n pass",
"def start_smartmirror(self):\n self._face_recognizer = FaceRecognizer()\n self._mirror_display_frame = MirrorDisplay(self, self)\n self.bind('<Escape>', self._stop_smartmirror)\n # Sets this window to fullscreen and makes it stay on top of\n # every other window.\n self.wm_attributes('-fullscreen', True, '-topmost', True)\n self._mirror_display_frame.grid(row=0, column=0)",
"def move(self, origin):\n self.coords = self.coords - origin",
"def mid_top(self):\r\n self.writing_position()\r\n self.half_left()",
"def adjust_position(self):\n if self.ANCHOR_TOP in self.window_anchors:\n desk_size = self.window_manager.get_desktop_size()\n x_pos = self.window_pos[0]\n y_pos = desk_size[1] - self.window_pos[1] - self.size[1]\n self.pos = (x_pos, y_pos)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Half of the duration of the current interval. | def half_duration(self):
if self._interval is not None:
a, b = self._interval
return (b - a) * .5
else:
return self.interval_duration * .5 | [
"def half_step_time(self):\n\n return self.full_step_time() * self.half_to_full_step_time_ratio",
"def _get_half_time(self):\n return self.__half_time",
"def half_life(self) -> u.s:\n return self._get_particle_attribute(\"half_life\", unit=u.s, default=np.nan * u.s)",
"def Interval(self) -> float:",
"def half_life(self):\r\n\r\n # Calculating the half-life\r\n output = np.log(2) / self.mu\r\n\r\n return output",
"def period(self):\n return 0.1",
"def duration(self) -> float:\n return self.path_interval[1] - self.path_interval[0]",
"def duration(self):\n return float('{0:.2f}'.format(self.end_time - self.start_time))",
"def get_interval(self):\n return self.interval * 1000",
"def s_interval(self) -> float:\n return self._s_interval",
"def full_step_time(self):\n\n total_step_time = self.duration()\n return total_step_time / (2 * self.half_to_full_step_time_ratio + (self.num_steps() - 2))",
"def middle_value(self):\n duration = self.__end.get_midpoint() - self.__begin.get_midpoint()\n return float(self.__begin.get_midpoint()) + float(duration) / 2.",
"def _period( self ):\r\n\treturn 2 * pi * sqrt( self.orbital_elements[0]**3 / self.mu_central_body )\r\n\t# http://en.wikipedia.org/wiki/Orbital_period#Calculation\r",
"def period(self, u):\n return (self.xmax - self.xmin)/u",
"def duration(self):\n return self.t_stop - self.t_start",
"def get_halfsteps(self) -> (int, float):\n halfsteps = int(self._cents/100)\n detune = self._cents - 100*halfsteps\n while detune >= 50:\n halfsteps += 1\n detune -= 100\n while detune < -50:\n halfsteps -= 1\n detune += 100\n return halfsteps, detune",
"def timestep(self):\n return dt_round( datetime.timedelta(seconds=self.conf_float('dt')) )",
"def period(self):\n return 1.0 / self.freq",
"def duration(self):\n return self.end_abs - self.start"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Go to a specific time (in seconds). | def go_to(self, time):
half_dur = self.half_duration
self.set_interval((time - half_dur, time + half_dur)) | [
"def soak_time(self, soaktime):\n time.sleep(0.1)\n time_now = time.strftime(\"%H:%M\")\n print str(soaktime) + ' minute soak time starts @ ' + time_now \n time_in_sec = soaktime * 60\n time.sleep(time_in_sec)\n print 'Finished soaking'",
"def sec_forward():\r\n player.set_time(player.get_time() + 1000)",
"def advanceTime(self, amount):\n self.currentSeconds += amount",
"def set_time(self, sec):\n self.set_timed(round(sec * 10.0))",
"def sleep(self, seconds=60):\n\t\ttime.sleep(seconds)",
"def advance_time_seconds(seconds):\r\n advance_time_delta(datetime.timedelta(0, seconds))",
"def set_time(self, time):\n self.game_inst.set_time(time)",
"def timer(self, minutes):\n\t\tseconds = minutes * 60\n\t\ttime.sleep(int(seconds))\n\t\tprint(\"Your time is up.\")",
"def setturntimeout(self, seconds):\n self._turntimeout = seconds",
"def seek_time(self, time):\n pass",
"def sleep(seconds):\r\n time.sleep(seconds)",
"def sleep(self, seconds):\n time.sleep(seconds)",
"def run_time(self, speed: int, time: int, then: Stop = Stop.HOLD, wait: bool = True):\n ...",
"def go_then_wait(self, position, seconds):\n self.go(position)\n self.wait(seconds)",
"def wait_up_to_second(second, time_template=None):\r\n current_second = datetime.datetime.now().second\r\n target_second = int(second)\r\n\r\n if current_second > target_second:\r\n sleep_time = 60 - (current_second - target_second)\r\n else:\r\n sleep_time = target_second - current_second\r\n\r\n if sleep_time:\r\n print('Waiting {} second(s)'.format(sleep_time))\r\n time.sleep(sleep_time)\r\n\r\n if time_template:\r\n return Utils.get_current_time(time_template)",
"def set_time(self, time):\n pass",
"def wait_up_to_second(second, time_template=None):\n current_second = datetime.datetime.now().second\n target_second = int(second)\n\n if current_second > target_second:\n sleep_time = 60 - (current_second - target_second)\n else:\n sleep_time = target_second - current_second\n\n if sleep_time:\n print('Waiting {} second(s)'.format(sleep_time))\n time.sleep(sleep_time)\n\n if time_template:\n return Utils.get_current_time(time_template)",
"def sleep(seconds):\n time.sleep(seconds)",
"def switch_to_countdown(self, seconds):\n self.render_state = RenderState.COUNTDOWN\n self.synchronized_start = datetime.now() + timedelta(seconds=seconds)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Shift the interval by a given delay (in seconds). | def shift(self, delay):
self.go_to(self.time + delay) | [
"def shift(self, delay):\n self.__begin.shift(delay)\n self.__end.shift(delay)",
"def delay(self, delay: int):\n if not delay >= 0:\n raise pyrado.ValueErr(given=delay, ge_constraint=\"0\")\n self._delay = round(delay) # round returns int",
"def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)",
"def delay(ms):\n pass",
"def change_delay(self, delay):\n print(\"Delay \" + str(delay))\n # Done once\n int_to_four_bytes = struct.Struct('<I').pack\n # Done many times (you need to mask here, because your number is >32 bits)\n y1, y2, y3, y4 = int_to_four_bytes(int(delay) & 0xFFFFFFFF)\n data = bytearray([3, y1, y2, y3, y4])\n self.change_leds(data)",
"def delay():\r\n time.sleep(2)",
"def sleepDelay(ms):\n time.sleep(ms/1000.0)",
"def _delay(self):\n if not self.next_scheduled:\n self.next_scheduled = self.clock_func() + self.interval\n return\n while True:\n current = self.clock_func()\n if current >= self.next_scheduled:\n extratime = current - self.next_scheduled\n self.next_scheduled = current + self.interval - extratime\n return\n delay_amt = self.next_scheduled - current\n #Call for 0, because that might be meaningful to sleep_func.\n if self.allow_negative_sleep or delay_amt >= 0: \n self.sleep_func(self.next_scheduled - current)",
"def delay(ms: int, /) -> None:",
"def positioner_delay(self, pdelay):\n self.put(\"PDLY\", pdelay, wait=True)",
"def SetDelay(self, delay, timeUnit):\n callResult = self._Call(\"SetDelay\", delay, timeUnit)",
"def SetStepDelay(self,delay=200): \n self.Bus.Transaction(chr(self.Address)+chr(0x43)+chr(delay))",
"def sleep(delay_sec, keep_conn=False):\n\n res = _change(\"STSLEEP {:d}\".format(delay_sec), reset=False, keep_conn=keep_conn)\n\n return res",
"def update(self, delay: float) -> None:\n self._time += delay",
"def udelay(us):\n pass",
"def pg_set_delay(self,channel,delay):#Delay in seconds\n delv = self.calc_delay(delay)#Calculate the counter value the delay counter must count upto before enabling the channel\n self.PG_UTIL.write(ch2_data,0x0)#Disable the signal generator, write the delay value to the delay controller\n self.PG_AUX[channel].write(ch2_data,delv)\n self.chdelays[channel]=delay#Save the delay setting\n self.PG_UTIL.write(ch2_data,0x1)#Restart the signal generator",
"def accurate_delay(delay):\n _ = time.perf_counter() + delay\n while time.perf_counter() < _:\n pass",
"def sleep(self, duration):\n time.sleep(duration)",
"def _delay(self, delay):\n self.cv.after(delay)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Go to end of the recording. | def go_to_end(self):
self.go_to(self.duration) | [
"def endRecording(self):\r\n return self.vmrun('endRecording')",
"def end_step(self):\n self.fh.end_step()",
"def stop(self):\n self.recording = False",
"def stop_recording(self):\n self.disarm()\n self._recorder.join()",
"def end(self):\n self.my_print(\"\\t[DONE]\", msg_types.INFO)\n self.in_progress = False",
"def endReplay(self):\r\n return self.vmrun('endReplay')",
"def end(self):\n self.kill_flag.value = True\n while (not self.pseye.thread_complete.value) or (not self.saver.saving_complete.value):\n pass",
"def stop_recording(self):\n\n\t\tself.eyetribe.stop_recording()\n\t\tself.recording = False",
"def end(self):\n self.f.close()\n print(\"Macro recorded, filename \" + self.name)",
"def end_drive(self):\n # TODO\n self.location = self.destination\n self.is_idle = True\n self.destination = None",
"def end(self) -> None:\n self.status = ExperimentStatus.SUCCEED\n self.ended_at = int(datetime.utcnow().timestamp())",
"def stop(self):\n self.__instance.seek(self.__instance.frames)",
"def stop_recording(self):\n self.is_recording = False # Ends the recording\n self.has_recorded += 1\n time.sleep(.1) # Gives time to return the data\n\n #only if we are actually getting streaming audio data\n if not GlobalSettings.USE_USB_MIC and (self.buffered_audio_data) > 0:\n elapsed_time = time.time() - self.start_recording_time\n print(\"recorded speech from Tega for \" + str(elapsed_time) + \" seconds\")\n print('RECORDING SUCCESSFUL, writing to wav')\n wav_file = wave.open(self.WAV_OUTPUT_FILENAME_PREFIX + self.expected_text + '_' + str(self.recording_index) + '.wav', 'wb')\n wav_file.setnchannels(AudioRecorder.CHANNELS)\n wav_file.setsampwidth(2)\n wav_file.setframerate(AudioRecorder.RATE)\n wav_file.writeframes(b''.join(self.buffered_audio_data))\n wav_file.close()",
"def stop_recording(self):\n self.flag_event.clear()\n self.statusBar().showMessage('Recording Stopped')\n print('boo ya')",
"def _on_key_release(self, key):\n if key is self.TRIGGER_KEY:\n print(\"End Recording\")\n self.do_record = False",
"def finish_episode(self):\n self.terminated = True",
"def end(self):\n\n # Close the prediction and\n # release the camera\n self.__predict_start = False\n self.__cap.release()",
"def _exit_print(self):\n if self.cur_frame >= self.config.MAX_FRAMES:\n self.stopped = True",
"def _track_finished(self, *_args):\n if not self.loop:\n self.stop()\n else:\n self.seek(0.)\n self.player.play()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Jump to the next spike from the first selected cluster. | def go_to_next_spike(self, ):
self._jump_to_spike(+1) | [
"def go_to_previous_spike(self, ):\n self._jump_to_spike(-1)",
"def goto_node(self):\n p = self.get_position()\n if p and p != self.c.p:\n self.c.selectPosition(p)",
"def jump(self):\n self.lyrics = self.disk[self.index]",
"def _next(self, _):\n self.notebook.SetSelection(self.idx+1)",
"def choose_next_player(self):\n player_index = self.players.index(self.current_player)\n if self.direction_clock_wise:\n if player_index >= len(self.players) - 1:\n self.current_player = self.players[0]\n else:\n self.current_player = self.players[player_index + 1]\n else:\n if player_index <= 0:\n self.current_player = self.players[len(self.players) - 1]\n else:\n self.current_player = self.players[player_index - 1]",
"def jumpToInsertionPoint(self):\n selectedNode = base.direct.selected.last\n if selectedNode:\n # Check if its a dna node\n dnaNode = self.findDNANode(selectedNode)\n if dnaNode:\n # Place the new node path at the current grid origin\n selectedNode.setPos(base.direct.grid, 0, 0, 0)\n # Initialize angle to match last object\n selectedNode.setHpr(self.getLastAngle(), 0, 0)\n # Now update DNA pos and hpr to reflect final pose\n dnaNode.setPos(selectedNode.getPos())\n dnaNode.setHpr(selectedNode.getHpr())\n # Update grid to get ready for the next object\n self.autoPositionGrid()",
"def nextTarget(self):\n if self.stepsToTarget:\n t = self.stepsToTarget.pop(0)\n self.currentTarget = pygame.Vector2(t[0]+0.5, t[1]+1)*TILE_WIDTH\n movementX = self.currentTarget.x - posToVect(self.pos).x\n self.direction = 0 if movementX > 0 else 1 if movementX < 0 else self.direction\n self.pos = t\n else:\n self.stepsToTarget = None\n self.currentTarget = None\n self.finalTarget = None",
"def next(self):\n\n # re add the commented part if you want to solve only after pressing next and at the highest step\n if self.hasToSolve:# and self.step == self.highestStep:\n if not self.solve():\n # If solve fails, E.G. cant connect to server or invalid info was received.\n return 0\n self.hasToSolve = False\n\n\n\n if len(self.plan) > self.step:\n self.step += 1\n\n if self.step - 1 == self.highestStep:\n self.highestStep += 1\n\n self.executeStep()\n\n for e in self.elevators:\n e.next()\n\n return 1",
"def next(self):\n next_index = self.current_target_index + 1\n self.current_target_index = next_index % self.targets_amount\n updated_pos = self.positions[self.current_target_index]\n self.current_target = updated_pos\n return updated_pos",
"def next_turn(self):\n self.lead = next(self.turn_cycle)",
"def go_first(self):\n # update current as the head\n self.current = self.head",
"def goto_next_level(self, *args):\n self.manager.current = self.manager.next()\n self.reset()",
"def step(self):\n # Only choose another seat if not seated yet or if the student has the\n # characteristic to change its seat again\n if not self.seated:\n # choose and move to seat\n self.choose_seat()\n\n if self.will_to_change_seat:\n # With the given probability the student searches for a better seat\n r = self.model.rand.uniform()\n if r < self.moving_prob:\n # Compare current seat utility with all other available seats.\n # If there is a much better one, move\n try:\n seat = self.model.seats[self.pos]\n self.choose_seat(seat)\n except:\n return",
"def select_leader(self, roulette_wheel):\n global pareto_front\n if len(pareto_front) < len(pareto_front[0][1]) +1:\n self.leader_i = random.choice(pareto_front)[0]\n return\n\n r = random.random()\n for i in range(len(pareto_front)):\n if r <= roulette_wheel[i]:\n self.leader_i = pareto_front[i][0]\n else:\n self.leader_i = random.choice(pareto_front)[0]",
"def moveToNextELM(self, ):\n self.ELMstart_current_ind = min(self.ELMstart_current_ind + 1, len(self.ELMonsets) - 1)\n self.snapSlider(self.ELMstart_current_ind, POI='first')",
"def jump_to(self, n):\r\n self.__token_counter = n\r\n self.__current_token = self.__tokens_list[n]",
"def next_current_player(self):\n self.index = not self.index\n self.currentPlayer = self.players[self.index]\n # print(self.currentPlayer.name, namestr(self.currentPlayer.color), \"Score:\", self.currentPlayer.score)",
"def move_to_first_free():\n while beepers_present():\n move()",
"def __next_key(self):\n self.current_kf_idx += 1\n if self.current_kf_idx >= len(self.key_frames)-1:\n self.__next_loop()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Jump to the previous spike from the first selected cluster. | def go_to_previous_spike(self, ):
self._jump_to_spike(-1) | [
"def go_to_next_spike(self, ):\n self._jump_to_spike(+1)",
"def jump_to_previous(self):\n self.nvim.command('silent! wincmd p')",
"def _prev(self, _):\n self.notebook.SetSelection(self.idx-1)",
"def jumpBack(self):\n if self.currentTrajectory is None:\n return\n \n self.start_up_in_progress = 0 #Controller will use low gains \n self.TrajectoryTimeDirection = 1 \n if self.cut_play_active == True:\n self.currentlyPublishingSampleIndex = self.currentTrajectoryInPoint\n else:\n self.currentlyPublishingSampleIndex = 0\n self.PublisherMode = PandaPublisherModeEnum.publishSample\n print(\"jumpback!\")\n return True",
"def jump(self):\n self.lyrics = self.disk[self.index]",
"def goto_node(self):\n p = self.get_position()\n if p and p != self.c.p:\n self.c.selectPosition(p)",
"def go_prev(self):\n # update current as the previous node\n self.current = self.current.prev_node",
"def MoveToPreviousSlide(self):\n pass",
"def previous(self, event):\n self.result = -1",
"def previousRange(self):\r\n if (self.selectedmap > 0):\r\n self.pickMap(self.selectedmap-1)",
"def moveToPrevELM(self, ):\n self.ELMstart_current_ind = max(self.ELMstart_current_ind - 1, 0)\n self.snapSlider(self.ELMstart_current_ind, POI='first')",
"def select_prev(self, event):\n self.controller.view.select_next_shape(-1)",
"def step_back():\r\n\r\n global index\r\n index -= 1",
"def continue_backtracking(self):\n self.movement = 1\n # Get direction in which the previous cell lies, to which we wish to backtrack to.\n direction = self.path_map[self.x][self.y].previous\n # Translate that direction into a possibly needed rotation of the robot,\n # considering the current heading.\n # This sets the rotation to -90, 0 or 90 to face the given direction.\n self.rotation = self.direction_to_rotation[self.heading].get(direction,\n 0)",
"def jumpToInsertionPoint(self):\n selectedNode = base.direct.selected.last\n if selectedNode:\n # Check if its a dna node\n dnaNode = self.findDNANode(selectedNode)\n if dnaNode:\n # Place the new node path at the current grid origin\n selectedNode.setPos(base.direct.grid, 0, 0, 0)\n # Initialize angle to match last object\n selectedNode.setHpr(self.getLastAngle(), 0, 0)\n # Now update DNA pos and hpr to reflect final pose\n dnaNode.setPos(selectedNode.getPos())\n dnaNode.setHpr(selectedNode.getHpr())\n # Update grid to get ready for the next object\n self.autoPositionGrid()",
"def prev(self):\n if self.place_stack and len(self.place_stack) > 2:\n print(self.gs.get_place(self.place_stack[-2]))\n else:\n self.curr()",
"def go_first(self):\n # update current as the head\n self.current = self.head",
"def moveToPrevPOI(self, ):\n self.POI_current_ind = max(self.POI_current_ind - 1, 0)\n POI = self.POIs[self.POI_current_ind]\n POI_realtime_ind = Conversion.valtoind(POI, self.dtime)\n self.xTimeSlider.setValue(POI_realtime_ind)\n \n self.updatexPlotText()",
"def restart(self):\n self.idx = 0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Toggle between showing all spikes or selected spikes. | def toggle_highlighted_spikes(self, checked):
self.show_all_spikes = checked
self.set_interval() | [
"def toggle_surface_mode(self):\n for poly in self.poly_list:\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n if poly in self.hole_list:\n poly.setBrush(QBrush(QColor(255, 255, 255)))\n else:\n poly.setBrush(QBrush(QColor(0, 0, 0, 50)))\n\n # Disable the selection of edges and hide the marker if there is one\n for edge in self.edge_list:\n edge.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, False)\n\n if edge.childItems()[0].childItems():\n text = edge.childItems()[0].childItems()[0]\n text.setVisible(False)\n\n # Hide markers on points\n for point in self.point_marker_list:\n if point.childItems():\n point.childItems()[0].setVisible(False)",
"def highQToggle(self, clicked):\n self.rbFitHighQ.setEnabled(clicked)\n self.rbFixHighQ.setEnabled(clicked)\n self.txtNptsHighQ.setEnabled(clicked)\n self.txtPowerHighQ.setEnabled(clicked and not self._high_fit)",
"def toggle_solo(self):\n selected_tracks = self.project.selected_tracks\n self.make_only_selected_track()\n self.project.perform_action(7)\n self.project.selected_tracks = selected_tracks",
"def __toggleAll(self):\n aw = self.activeWindow()\n if aw:\n aw.foldAll()",
"def toggle(self):\n self.checked = not self.checked\n if self.command:\n self.command(self.name)",
"def toggle_pivot():\n for piv_switcher in get_one_switcher():\n piv_switcher.toggle()",
"def toggle(self):\r\n return self._filter.toggle",
"def toggleFilter(self):\r\n self.setFilterBool(not self.getFilterBool())",
"def toggle(self):\n self._interrupt_flash()\n GPIO.output(self.pin, GPIO.LOW if self.on else GPIO.HIGH)\n self.on = not self.on",
"def toggleShutter(self):\r\n\r\n jump = self.ser.write('ens\\r'.encode())\r\n\r\n self.ser.read(size=jump + 2)",
"def set_highlighted_spikes(self, spikes=[]):\n \n if len(spikes) == 0:\n # do update only if there were previously selected spikes\n do_update = len(self.highlighted_spikes) > 0\n self.highlight_mask[:] = 0\n else:\n do_update = True\n self.highlight_mask[:] = 0\n if len(spikes) > 0:\n ind = self.find_indices_from_spikes(spikes)\n self.highlight_mask[ind] = 1\n \n if do_update:\n self.paint_manager.set_data(\n highlight=self.highlight_mask,\n visual='waveforms')\n \n self.highlighted_spikes = spikes",
"def toggle_valve():\n new_status = not tank_valve_open\n print(\"- Toggling valve status to '{}'.\".format(\"Open\" if new_status\n else \"Closed\"))\n set_valve_open(new_status)",
"def toggle_selection(self):\n if self.selection_visible:\n self.canvas.itemconfig(self.selection, state='hidden')\n self.selection_visible = False\n else:\n self.canvas.itemconfig(self.selection, state='normal')\n self.selection_visible = True",
"def change(self):\r\n\r\n # If checkboxes are available, check status and set boat speed reference line visibility accordingly.\r\n if self.cb:\r\n if self.cb_bt.checkState() == QtCore.Qt.Checked:\r\n for item in self.bt:\r\n item.set_visible(True)\r\n else:\r\n for item in self.bt:\r\n item.set_visible(False)\r\n # GGA\r\n if self.cb_gga.checkState() == QtCore.Qt.Checked:\r\n for item in self.gga:\r\n item.set_visible(True)\r\n # self.gga[0].set_visible(True)\r\n elif self.gga is not None:\r\n for item in self.gga:\r\n item.set_visible(False)\r\n # self.gga[0].set_visible(False)\r\n # VTG\r\n if self.cb_vtg.checkState() == QtCore.Qt.Checked:\r\n for item in self.vtg:\r\n item.set_visible(True)\r\n # self.vtg[0].set_visible(True)\r\n elif self.vtg is not None:\r\n for item in self.vtg:\r\n item.set_visible(False)\r\n # self.vtg[0].set_visible(False)\r\n\r\n # Draw canvas\r\n self.canvas.draw()",
"def toggle_ensurance(self):\r\n\r\n self.ensurance_active = not self.ensurance_active",
"def startup_toggle(self, clicked):\n if clicked:\n self.startup = True\n self.good_pieces = [0, 0, 0, 0, 0, 0]\n self.defect_list = [14, 14, 14, 14, 14, 14]\n for station in range(1, 7):\n self.station_defect_dict[station].setEnabled(False)\n\n else:\n self.startup = False\n self.good_pieces = [1, 1, 1, 1, 1, 1]\n self.defect_list = [0, 0, 0, 0, 0, 0]\n for station in range(1, 7):\n self.station_defect_dict[station].setEnabled(True)",
"def selected_spikes(self, value):\n value = _as_array(value)\n # Make sure there are less spikes than n_spikes_max.\n self._selected_spikes = self._subset(value)",
"def show_sobel_fit(self):\n\n # set feature finder to display the raw video only\n setattr(self.ff_pipe, 'showRaw', False)\n setattr(self.ff_pipe, 'showFit', False)\n setattr(self.ff_pipe, 'showAdjFit', False)\n setattr(self.ff_pipe, 'showSobel', True)\n # change button states\n self.btnRawVid.setEnabled(True)\n self.btnRawFit.setEnabled(True)\n self.btnAdjFit.setEnabled(True)\n self.btnSobFit.setEnabled(False)",
"def toggleProbe(self, probe, vis):\n for p in self.probes:\n if p.name == probe:\n p.setSelected(self.canvas, vis)\n p.setVisible(self.canvas, vis)\n break\n scatter = self.scatters[probe]\n scatter.set_visible(vis)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Toggle automatic scaling of the traces. | def toggle_auto_scale(self, checked):
logger.debug("Set auto scale to %s.", checked)
self.auto_scale = checked | [
"def ontogglescale(self, event):\n self._onToggleScale(event)\n try:\n # mpl >= 1.1.0\n self.figure.tight_layout()\n except:\n self.figure.subplots_adjust(left=0.1, bottom=0.1)\n try:\n self.figure.delaxes(self.figure.axes[1])\n except:\n pass",
"def _onToggleScale(self, event):\r\n if self.get_yscale() == 'log':\r\n self.set_yscale('linear')\r\n else:\r\n self.set_yscale('log')\r\n self.subplot.figure.canvas.draw_idle()",
"def clickAutoscale(self, event):\n self.axes.autoscale_view()",
"def enable_autoscale(self, en=True):\n return _qtgui_swig.time_sink_f_sptr_enable_autoscale(self, en)",
"def enable_autoscale(self, en=True):\n return _qtgui_swig.number_sink_sptr_enable_autoscale(self, en)",
"def enable_autoscale(self, en=True):\n return _qtgui_swig.time_sink_f_enable_autoscale(self, en)",
"def enable_autoscale(self, en=True):\n return _qtgui_swig.freq_sink_f_sptr_enable_autoscale(self, en)",
"def enable_autoscale(self, en=True):\n return _qtgui_swig.number_sink_enable_autoscale(self, en)",
"def enable_autoscale(self, en=True):\n return _qtgui_swig.histogram_sink_f_sptr_enable_autoscale(self, en)",
"def enable_autoscale(self, en=True):\n return _qtgui_swig.freq_sink_c_sptr_enable_autoscale(self, en)",
"def enable_autoscale(self, en=True):\n return _qtgui_swig.time_sink_c_sptr_enable_autoscale(self, en)",
"def scaling_enabled(self):\n return False",
"def enable_autoscale(self, en=True):\n return _qtgui_swig.freq_sink_f_enable_autoscale(self, en)",
"def enable_autoscale(self, en=True):\n return _qtgui_swig.freq_sink_c_enable_autoscale(self, en)",
"def enable_autoscale(self, en=True):\n return _qtgui_swig.time_sink_c_enable_autoscale(self, en)",
"def _relim_trace_callback(self, *_):\n if self.relim_var.get():\n for axis in self.plot.axes:\n axis.set_autoscale_on(True)",
"def enable_autoscale(self, *args, **kwargs):\n return _qtgui_swig.time_raster_sink_b_sptr_enable_autoscale(self, *args, **kwargs)",
"def enable_autoscale(self, en=True):\n return _qtgui_swig.histogram_sink_f_enable_autoscale(self, en)",
"def scale_mode():\r\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Select a cluster by clicking on a spike. | def on_mouse_click(self, e):
if 'Control' in e.modifiers:
# Get mouse position in NDC.
box_id, _ = self.canvas.stacked.box_map(e.pos)
channel_id = np.nonzero(self.channel_y_ranks == box_id)[0]
# Find the spike and cluster closest to the mouse.
db = self.data_bounds
# Get the information about the displayed spikes.
wt = [(t, s, c, ch) for t, s, c, ch in self._waveform_times if channel_id in ch]
if not wt:
return
# Get the time coordinate of the mouse position.
mouse_pos = self.canvas.panzoom.window_to_ndc(e.pos)
mouse_time = Range(NDC, db).apply(mouse_pos)[0][0]
# Get the closest spike id.
times, spike_ids, spike_clusters, channel_ids = zip(*wt)
i = np.argmin(np.abs(np.array(times) - mouse_time))
# Raise the select_spike event.
spike_id = spike_ids[i]
cluster_id = spike_clusters[i]
emit('select_spike', self, channel_id=channel_id,
spike_id=spike_id, cluster_id=cluster_id)
if 'Shift' in e.modifiers:
# Get mouse position in NDC.
box_id, _ = self.canvas.stacked.box_map(e.pos)
channel_id = int(np.nonzero(self.channel_y_ranks == box_id)[0][0])
emit('select_channel', self, channel_id=channel_id, button=e.button) | [
"def click(self, event):\n x, y = self.canvas.invert([event.x, event.y])\n i, j = int(floor(x)), int(floor(y))\n patch = self.get_cell(i, j)\n if patch and patch.state == \"green\":\n cluster = self.get_cluster(patch)\n self.show_cluster(cluster)",
"def selected_clusters(self, value):\n # TODO: smarter subselection: select n_spikes_max/n_clusters spikes\n # per cluster, so that the number of spikes per cluster is independent\n # from the sizes of the clusters.\n value = _as_array(value)\n # All spikes from the selected clusters.\n spikes = _spikes_in_clusters(self._spike_clusters, value)\n # Make sure there are less spikes than n_spikes_max.\n self.selected_spikes = self._subset(spikes)",
"def selectPointsUnderCursor(self):\n spw = self.spw\n sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids is None:\n return\n #t0 = time.time()\n spw.SelectSpikes(sids, on=self.selecting)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()",
"def wheelchair_select(self) -> None:\n self.wheelchair_container.click()",
"def select_section(view, mn_consts, section_x, section_y):\n view.selectAt(section_x, section_y, mn_consts.infoSetSelection)",
"def click_seleccion(self):\n self.button.click(finiquito_masivo_catalog.LINK_SELECCION)",
"def selectNode( self, event ):\n canvas = event.widget.master\n item = canvas.widgetToItem.get( event.widget, None )\n self.selectItem( item, canvas )",
"def selectPointsUnderCursor(self):\n #spw = self.spw\n #sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids == None:\n return\n #t0 = time.time()\n #if not sw.panel.maxed_out:\n # spw.SelectSpikes(sids, on=self.selecting)\n #else:\n # # for speed, while the mouse is held down and the sort panel is maxed out,\n # # don't call SelectSpikes, only call it once when the mouse is released\n self.collected_sids.append(sids)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()",
"def navigate_clusters_page(self, timeout=120):\n log.info(\"Navigate into Clusters Page\")\n self.check_element_presence(\n (\n self.acm_page_nav[\"Infrastructure\"][1],\n self.acm_page_nav[\"Infrastructure\"][0],\n ),\n timeout=timeout,\n )\n self.choose_expanded_mode(\n mode=True, locator=self.acm_page_nav[\"Infrastructure\"]\n )\n self.do_click(locator=self.acm_page_nav[\"Clusters_page\"], timeout=timeout)",
"def get_cluster_by_id(self, context, cluster_id):",
"def tool_selection_click_ok_btn(driver, class_name, index):\r\n\r\n proximity_button = driver.find_elements_by_class_name(class_name)\r\n proximity_button[index].click()\r\n time.sleep(2)",
"def select(self, key):\n if self.association == 0:\n data = self.actor.inputdata().GetPointData()\n self.actor.mapper().SetScalarModeToUsePointData()\n else:\n data = self.actor.inputdata().GetCellData()\n self.actor.mapper().SetScalarModeToUseCellData()\n\n if isinstance(key, int):\n key = data.GetArrayName(key)\n\n arr = data.GetArray(key)\n if not arr:\n return\n\n nc = arr.GetNumberOfComponents()\n if nc == 1:\n data.SetActiveScalars(key)\n elif nc >= 2:\n if \"rgb\" in key.lower():\n data.SetActiveScalars(key)\n # try:\n # self.actor.mapper().SetColorModeToDirectScalars()\n # except AttributeError:\n # pass\n else:\n data.SetActiveVectors(key)\n elif nc >= 4:\n data.SetActiveTensors(key)\n\n try:\n self.actor.mapper().SetArrayName(key)\n self.actor.mapper().ScalarVisibilityOn()\n # .. could be a volume mapper\n except AttributeError:\n pass",
"def select(self, device):\n self.tk.call('snack::mixer', 'select', device)",
"def find_cluster(self, id):\n raise NotImplementedError",
"def setCluster(self, c):\n self.cluster = c",
"def pick_cluster(session, picks, pickwindow, pickaveraging_norm, counter):\n # | | /\\\n # | | / \\ /\\\n # | | /\\ /\\ / \\ / \\ /\\\n # _____________|/\\__|/ \\ / \\ / \\ / \\ / \\ /\\_________\n # | | \\ / \\ / \\ / \\ / \\/\n # | | \\/ \\ / \\ / \\/\n # | | \\/ \\/\n\n # pickwindow: ---- better to set pickwindow==t_up, t_up is to clean closed picks\n # STA1 E -----------|----|--------------------|--------------\n # STA1 N ------------|-------------------------|-------------\n # STA1 Z -------------|-------------------------|------------\n # stack -----------|||--|--------------------|||------------\n # cluster STA1 --------|---|---------------------|------------- chen highly recommend to use norm=='L2' to lower the effect of outlier, L2 takes median\n # ARGUE: whether only take the median or mean of the picks from different stations? won't count the followings after first one\n #\n\n picks_new = []\n # only one pick in picks\n if len(picks) == 1:\n cluster = []\n cluster.append(picks[0])\n cluster_time = []\n cluster_time.append(picks[0].time)\n picks[0].modified_id = 1 + counter # assign modified id to picks\n counter += 1\n pickave, pickstd = datetime_statistics(cluster_time,\n pickaveraging_norm)\n # append the row to the picks_new, not only the pick time\n picks_new.append(picks[0])\n pick_modified = PickModified(picks[0].sta, picks[0].chan, picks[0].net,\n picks[0].loc, picks[0].time,\n picks[0].phase, round(pickstd, 3),\n picks[0].assoc_id)\n session.add(pick_modified)\n session.commit()\n\n # more than one pick in picks\n else:\n j = 0\n counter = 1 + counter\n while True:\n i = j\n cluster = []\n cluster.append(picks[i])\n cluster_time = []\n cluster_time.append(picks[i].time)\n channel = []\n channel.append(picks[i].chan)\n picks[i].modified_id = counter\n while True:\n # cluster picks of different channels; notice that for the\n # closed picks on the same station, those picks behind the\n # first pick could be separated lonely or separated cluster\n if picks[i + 1].chan not in channel \\\n and (picks[i + 1].time -\n picks[i].time).total_seconds() < pickwindow:\n cluster.append(picks[i + 1])\n cluster_time.append(picks[i + 1].time)\n channel.append(picks[i + 1].chan)\n # assign modified id to picks\n picks[i + 1].modified_id = counter\n i = i + 1\n # make sure do not go over the range limit because j=i+1\n # below, jump out inner while loop\n if i == len(picks) - 1:\n break\n # elif is dealing with the exactly same picks, probably from\n # processing same stream twice\n # and picks[i+1].snr==picks[i].snr and picks[i+1].phase==\n # picks[i].phase and picks[i+1].uncert==picks[i].uncert:\n elif picks[i + 1].sta == picks[i].sta and picks[i + 1].chan == \\\n picks[i].chan and picks[i + 1].time == picks[i].time:\n cluster.append(picks[i + 1])\n cluster_time.append(picks[i + 1].time)\n channel.append(picks[i + 1].chan)\n # assign modified id to picks\n picks[i + 1].modified_id = counter\n i += 1\n # make sure do not go over the range limit\n # because j=i+1 below, jump out inner while loop\n if i == len(picks) - 1:\n break\n else:\n break\n pickave, pickstd = datetime_statistics(cluster_time,\n pickaveraging_norm)\n\n # append whole rows to the picks_new, not only the pick time\n for pick in cluster:\n if (pick.time - pickave).total_seconds() >= 0:\n break\n picks_new.append(pick)\n pick_modified = PickModified(pick.sta, pick.chan, pick.net,\n pick.loc, pick.time, pick.phase,\n round(pickstd, 3), pick.assoc_id)\n session.add(pick_modified)\n session.commit()\n # next cluster\n j = i 
+ 1\n counter = counter + 1\n\n # jump outer while loop and compare last two picks. For the\n # situation that last one is ungrouped, use if statement to add\n # in picks_new\n if j >= len(picks) - 1:\n if (picks[-1].time - picks[-2].time).total_seconds() > \\\n pickwindow:\n picks_new.append(picks[-1])\n # assign modified id to picks\n picks[-1].modified_id = counter\n pick_modified = PickModified(picks[-1].sta, picks[-1].chan,\n picks[-1].net, picks[-1].loc,\n picks[-1].time,\n picks[-1].phase,\n round(pickstd, 3),\n picks[-1].assoc_id)\n session.add(pick_modified)\n session.commit()\n else:\n if picks[-1] in cluster:\n counter -= 1\n else:\n picks[-1].modified_id = counter\n pick_modified = PickModified(picks[-1].sta,\n picks[-1].chan,\n picks[-1].net,\n picks[-1].loc,\n picks[-1].time,\n picks[-1].phase,\n round(pickstd, 3),\n picks[-1].assoc_id)\n session.add(pick_modified)\n session.commit()\n break\n\n return picks_new, counter",
"def selectVertex(self, addToSelection: bool) -> None:\n ...",
"def test_selecting_nodes_clicking_them_discovered(self):\n with Nodes()as n:\n for node in n.nodes_discovered:\n node.parent.click()\n self.assertTrue(\n node.checkbox.find_element_by_tag_name('input').\n is_selected(),\n 'Discovered node is selected')",
"def set_cluster(cls, value):\n cls.set(\"cluster\", value, \"general\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Overloading the addition operator for particles types | def __add__(self, other):
if isinstance(other, type(self)):
# always create new particles, since otherwise c = a + b changes a as well!
p = particles(self)
p.pos[:] = self.pos + other.pos
p.vel[:] = self.vel + other.vel
p.m = self.m
p.q = self.q
return p
else:
raise DataError("Type error: cannot add %s to %s" % (type(other), type(self))) | [
"def __add__(self, other):\n if not isinstance(other, Particle):\n return NotImplemented\n mnew = self.mass + other.mass\n vnew = (self.momentum() + other.momentum()) / mnew\n return Particle(mnew, vnew)",
"def scalar_add(self, other: Numeric) -> \"Price\":",
"def __add__(self, other):\r\n # TODO : Implementasi operator overloading\r\n pass",
"def __add__(self, p):\n return Point(self.x + p.x, self.y + p.y)",
"def __add__(self, other: Any) -> Union[Var, AdditionPart]: # type: ignore\n if is_quantified_unit(other):\n if other.unit == self.unit:\n return QuantifiedUnit(self.value + other.value, self.unit)\n return Addition([self, other])\n if is_addition_part(other):\n return other.__radd__(self)\n return Var(f\"{self}+{other}\")",
"def add(self, x, y):\n return x + y",
"def __add__(self, other: AdditionPart) -> \"Addition\":\n if is_addition_part(other):\n return Addition(cast(List[AdditionPart], self.parts) + [other])\n raise TypeError(\n \"unsupported operand type(s) for +: \"\n f\"'{self.__class__.__name__}' and '{other.__class__.__name__}'\"\n )",
"def scalar_add(self, other: Numeric) -> \"Money\":",
"def vec_add(vec1: vec3d, vec2: vec3d) -> vec3d:\n return vec1 + vec2",
"def __iadd__(self, *args):\n return _almathswig.Velocity3D___iadd__(self, *args)",
"def __add__(self, *args):\n return _almathswig.Velocity3D___add__(self, *args)",
"def __add__(self,other):\n return Vector(self.x + other.x, self.y+other.y)\n pass",
"def __add__(self, other):\n if isinstance(other, quat):\n return quat(self.w+other.w, self.x+other.x,\n self.y+other.y, self.z+other.z)\n else:\n raise TypeError, \"unsupported operand type for +\"",
"def __iadd__(self, other): \n return self + other",
"def __add__(self,other):\n if isinstance(other, point):\n return self.add_points(other)\n else:\n return self.add_points_tuple(other)",
"def __add__(self, *args):\n return _almathswig.Velocity6D___add__(self, *args)",
"def __add__( self, v ) :\n\n d = []\n if ( type( v ) == type( 1 ) ) or ( type( v ) == type( 1. ) ) :\n for p in self.data : d.append( p + v )\n else :\n data = endl1dmathmisc.valid1dClassType(v, \"endl1dmath.__add__\", \"addition\")\n if ( len( self.data ) != len( data ) ) : raise Exception( \"\\nError in endl1dmath.__add__: data lengths differ.\" )\n i = 0\n for p in self.data :\n d.append( p + data[i] )\n i += 1\n return endl1dmath( d, checkDataType = 0, yLabel = self.yLabel )",
"def __add__(self, vector):\n ## point + vector = point\n if isinstance(vector, Vector3D):\n newpoint = self.copy()\n newpoint += vector\n return newpoint\n\n return NotImplemented",
"def __add__(self, *args):\n return _almathswig.Position6D___add__(self, *args)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Overloading the subtraction operator for particles types | def __sub__(self, other):
if isinstance(other, type(self)):
# always create new particles, since otherwise c = a - b changes a as well!
p = particles(self)
p.pos[:] = self.pos - other.pos
p.vel[:] = self.vel - other.vel
p.m = self.m
p.q = self.q
return p
else:
raise DataError("Type error: cannot subtract %s from %s" % (type(other), type(self))) | [
"def scalar_subtract(self, other: Numeric) -> \"Price\":",
"def __sub__(self, other):\n if type(other) in (types.IntType, types.FloatType):\n return self.__add__(-other)\n return self.totalSeconds() - other.totalSeconds()",
"def __sub__(self, other: Any) -> Union[Var, AdditionPart]: # type: ignore\n if is_quantified_unit(other):\n if other.unit == self.unit:\n return QuantifiedUnit(self.value - other.value, self.unit)\n return Addition([self, -other])\n if is_addition_part(other):\n return (-other).__rsub__(self)\n return Var(f\"{self}-{other}\")",
"def __sub__(self, *args):\n return _almathswig.Velocity3D___sub__(self, *args)",
"def scalar_subtract(self, other: Numeric) -> \"Money\":",
"def subtract(self, other: \"Price\") -> \"Price\":",
"def vars_subtract ( self , var1 , var2 , name = '' , title = '' ) :\n\n f1 = isinstance ( var1 , num_types )\n f2 = isinstance ( var2 , num_types )\n\n if f1 and f2 :\n ##\n res = float ( var1 ) - float ( var2 )\n return ROOT.RooRealConstant.value ( res ) \n elif f1 :\n ## \n var1 = ROOT.RooRealConstant.value ( var1 ) \n return self.vars_subtract ( var1 , var2 , name , title )\n elif f2 :\n ## shortcut \n if 0 == var2 : return var1 ## SHORTCUT\n #\n var2 = ROOT.RooRealConstant.value ( var2 ) \n return self.vars_subtract ( var1 , var2 , name , title )\n\n self.aux_keep.append ( var1 )\n self.aux_keep.append ( var2 )\n\n result = Ostap.MoreRooFit.Subtraction ( var1 , var2 )\n self.aux_keep.append ( result )\n \n return result",
"def __sub__( self, v ) :\n\n d = []\n if ( type( v ) == type( 1 ) ) or ( type( v ) == type( 1. ) ) :\n for p in self.data : d.append( p - v )\n else :\n data = endl1dmathmisc.valid1dClassType(v, \"endl1dmath.__sub__\", \"subtraction\")\n if ( len( self.data ) != len( data ) ) : raise Exception( \"\\nError in endl1dmath.__add__: data lengths differ.\" )\n i = 0\n for p in self.data :\n d.append( p - data[i] )\n i += 1\n return endl1dmath( d, checkDataType = 0, yLabel = self.yLabel )",
"def __sub__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec - other.elec\n p.magn[:] = self.magn - other.magn\n return p\n else:\n raise DataError(\"Type error: cannot subtract %s from %s\" % (type(other), type(self)))",
"def __sub__(self, other):\n if isinstance(other, quat):\n return quat(self.w-other.w, self.x-other.x,\n self.y-other.y, self.z-other.z)\n else:\n raise TypeError, \"unsupported operand type for +\"",
"def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts - other, self.volt_unit, self.freq, self.freq_unit)\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n volt_sum = self.volts - other.volts\n return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit)",
"def __sub__(self, *args):\n return _almathswig.Velocity6D___sub__(self, *args)",
"def __sub__(self, obj):\n if isinstance(obj, Vector):\n if self.m != obj.m:\n raise exc.ComformabilityError(\n \"vectors must have the same length\")\n data = [self[i] - obj[i] for i in range(self.m)]\n elif Vector.is_numeric(obj):\n data = [self[i] - obj for i in range(self.m)]\n else:\n raise TypeError(\n \"cannot subtract object of type \" + type(obj) +\n \" to vector\")\n return Vector(self.m, data)",
"def Subtract(self, *args):\n return _Quantity.Quantity_Period_Subtract(self, *args)",
"def __sub__(self, other):\n if isinstance(other, Vec2Array):\n if len(self) == len(other):\n return self.from_points(\n a - b for a, b in zip(self, other))\n else:\n raise ValueError(\n \"cannot subtract arrays with different lengths\")\n else:\n try:\n b = Vec2(*other)\n except Exception:\n return NotImplemented\n return self.from_points(a - b for a in self)",
"def __sub__(self, other):\n return self.__add__(other * -1)",
"def __sub__(self, rhs):\n return TensorIndex(_poly_op(lib.PLAIDML_INT_OP_SUB, self, rhs))",
"def __sub__(self, other):\n if isinstance(other, Position):\n other = other.index\n return self.index - other",
"def __neg__(self):\n return UnaryMinus(self)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Overloading the addition operator for fields types | def __add__(self, other):
if isinstance(other, type(self)):
# always create new fields, since otherwise c = a + b changes a as well!
p = fields(self)
p.elec[:] = self.elec + other.elec
p.magn[:] = self.magn + other.magn
return p
else:
raise DataError("Type error: cannot add %s to %s" % (type(other), type(self))) | [
"def __add__(self, other):\n if isinstance(other, NXfield):\n return NXfield(value=self.nxdata+other.nxdata, name=self.nxname,\n attrs=self.attrs)\n else:\n return NXfield(value=self.nxdata+other, name=self.nxname,\n attrs=self.attrs)",
"def __iadd__(self, other): \n return self + other",
"def __add__(self, other: Any) -> ColumnOperators:\n return self.operate(add, other)",
"def __add__(self, other):\r\n # TODO : Implementasi operator overloading\r\n pass",
"def __add__(self, other):\n try:\n total = {self.var: 1, other.var: 1}\n return AutoDiffReverse(self.val + other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val + other, None, {self.var: 1})",
"def scalar_add(self, other: Numeric) -> \"Money\":",
"def __radd__(self, other): \n return self + other",
"def __add__(self, other: Any) -> Union[Var, AdditionPart]: # type: ignore\n if is_quantified_unit(other):\n if other.unit == self.unit:\n return QuantifiedUnit(self.value + other.value, self.unit)\n return Addition([self, other])\n if is_addition_part(other):\n return other.__radd__(self)\n return Var(f\"{self}+{other}\")",
"def scalar_add(self, other: Numeric) -> \"Price\":",
"def __add__(self, other):\r\n return self.add(other)",
"def __radd__(self, T):\n return self+T",
"def __add__(self, other: AdditionPart) -> \"Addition\":\n if is_addition_part(other):\n return Addition(cast(List[AdditionPart], self.parts) + [other])\n raise TypeError(\n \"unsupported operand type(s) for +: \"\n f\"'{self.__class__.__name__}' and '{other.__class__.__name__}'\"\n )",
"def _add(self, other):\n if isinstance(other, self.__class__):\n sum_ = self._ip_dec + other._ip_dec\n elif isinstance(other, int):\n sum_ = self._ip_dec + other\n else:\n other = self.__class__(other)\n sum_ = self._ip_dec + other._ip_dec\n return sum_",
"def __add__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum + other, self._imNum)\n if isinstance(other, complex):\n return Complex(self._reNum + other.real, self._imNum + other.imag)\n return Complex(self._reNum + other._reNum, self._imNum + other._imNum)",
"def __add__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(self, other)",
"def operator_addition(A, B):",
"def __radd__(self, other: Any) -> Var:\n return Var(f\"{other}+{self}\")",
"def __add__(self, other):\n if isinstance(other, EncryptedNumber):\n return self._add_encrypted(other)\n else:\n return self._add_scalar(other)",
"def plus(self, a, b):\n return a + b"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Overloading the subtraction operator for fields types | def __sub__(self, other):
if isinstance(other, type(self)):
# always create new fields, since otherwise c = a - b changes a as well!
p = fields(self)
p.elec[:] = self.elec - other.elec
p.magn[:] = self.magn - other.magn
return p
else:
raise DataError("Type error: cannot subtract %s from %s" % (type(other), type(self))) | [
"def scalar_subtract(self, other: Numeric) -> \"Price\":",
"def __sub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: 1})",
"def scalar_subtract(self, other: Numeric) -> \"Money\":",
"def __sub__(self, other):\n if isinstance(other, NXfield):\n return NXfield(value=self.nxdata-other.nxdata, name=self.nxname,\n attrs=self.attrs)\n else:\n return NXfield(value=self.nxdata-other, name=self.nxname,\n attrs=self.attrs)",
"def __sub__(self, other):\n if type(other) in (types.IntType, types.FloatType):\n return self.__add__(-other)\n return self.totalSeconds() - other.totalSeconds()",
"def subtract(self, other: \"Price\") -> \"Price\":",
"def __rsub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: -1})",
"def subtractAllNumericHas (self, other):\n \n if self.hasId():\n if other.hasId():\n self.id -= other.id\n \n if self.hasVal():\n if other.hasVal():\n self.val -= other.val\n \n \n pass",
"def subtract(self,f):\n pass",
"def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts - other, self.volt_unit, self.freq, self.freq_unit)\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n volt_sum = self.volts - other.volts\n return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit)",
"def subtract(self, other, label=None, atol=1.0E-12):\n # check the two solutions share the same grid\n assert numpy.allclose(self.x, other.x, atol=atol)\n assert numpy.allclose(self.y, other.y, atol=atol)\n assert self.values.shape == other.values.shape\n if not label:\n label = self.label + '-subtracted'\n return Field(label=label,\n time_step=self.time_step,\n x=self.x, y=self.y,\n values=self.values - other.values)",
"def __sub__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Subtract, value)\n return out",
"def __sub__(self, other: Any) -> Union[Var, AdditionPart]: # type: ignore\n if is_quantified_unit(other):\n if other.unit == self.unit:\n return QuantifiedUnit(self.value - other.value, self.unit)\n return Addition([self, -other])\n if is_addition_part(other):\n return (-other).__rsub__(self)\n return Var(f\"{self}-{other}\")",
"def __sub__(self, other):\n return self.__add__(other * -1)",
"def __neg__(self):\n return UnaryMinus(self)",
"def subtract(self, other: \"Money\") -> \"Money\":",
"def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps - other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n amp_sum = self.amps - other.amps\n return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit)",
"def vars_subtract ( self , var1 , var2 , name = '' , title = '' ) :\n\n f1 = isinstance ( var1 , num_types )\n f2 = isinstance ( var2 , num_types )\n\n if f1 and f2 :\n ##\n res = float ( var1 ) - float ( var2 )\n return ROOT.RooRealConstant.value ( res ) \n elif f1 :\n ## \n var1 = ROOT.RooRealConstant.value ( var1 ) \n return self.vars_subtract ( var1 , var2 , name , title )\n elif f2 :\n ## shortcut \n if 0 == var2 : return var1 ## SHORTCUT\n #\n var2 = ROOT.RooRealConstant.value ( var2 ) \n return self.vars_subtract ( var1 , var2 , name , title )\n\n self.aux_keep.append ( var1 )\n self.aux_keep.append ( var2 )\n\n result = Ostap.MoreRooFit.Subtraction ( var1 , var2 )\n self.aux_keep.append ( result )\n \n return result",
"def __sub__ (self,other):\n if (self.debug): print(f'enter fraction.__sub__ with {other}')\n f2 = fraction(-1*other.value[0],other.value[1])\n f3 = self.__add__(f2)\n return f3"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
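The comment in the subtraction record above ("always create new fields, since otherwise c = a - b changes a as well") is the key point: the overload must allocate a fresh container before the in-place slice assignment. A minimal sketch of the same idea, using a hypothetical EMFields class instead of the original fields/DataError types:

    import numpy as np

    class EMFields:
        """Hypothetical stand-in for the electromagnetic fields container above."""
        def __init__(self, elec, magn):
            self.elec = np.array(elec, dtype=float)
            self.magn = np.array(magn, dtype=float)

        def __sub__(self, other):
            if not isinstance(other, EMFields):
                raise TypeError(f"cannot subtract {type(other)} from {type(self)}")
            # Allocate a new object first, so c = a - b never mutates a.
            out = EMFields(self.elec, self.magn)
            out.elec[:] = self.elec - other.elec
            out.magn[:] = self.magn - other.magn
            return out

    a = EMFields([1.0, 2.0], [3.0, 4.0])
    b = EMFields([0.5, 0.5], [1.0, 1.0])
    c = a - b
    print(c.elec, a.elec)   # [0.5 1.5] [1. 2.]  (a is unchanged)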
PSNR between two images | def _psnr(img1, img2):
mse = np.mean((img1 - img2) ** 2)
if mse == 0:
return 100
PIXEL_MAX = 1
return (20 * math.log10(PIXEL_MAX)) - (10 * math.log10(mse)) | [
"def _comput_PSNR(self, imgs1, imgs2):\n N = imgs1.size()[0]\n imdiff = imgs1 - imgs2\n imdiff = imdiff.view(N, -1)\n rmse = torch.sqrt(torch.mean(imdiff**2, dim=1))\n psnr = 20*torch.log(255/rmse)/math.log(10) # psnr = 20*log10(255/rmse)\n psnr = torch.sum(psnr)\n return psnr",
"def computePSNR(img1, img2, pad_y=0, pad_x=0):\n if pad_y != 0 and pad_x != 0:\n img1_u = (np.clip(img1, 0, 255.0)[pad_y:-pad_y, pad_x:-pad_x, ...]).astype(dtype=np.uint8)\n img2_u = (np.clip(img2, 0, 255.0)[pad_y:-pad_y, pad_x:-pad_x, ...]).astype(dtype=np.uint8)\n else:\n img1_u = (np.clip(img1, 0, 255.0)).astype(dtype=np.uint8)\n img2_u = (np.clip(img2, 0, 255.0)).astype(dtype=np.uint8)\n imdiff = (img1_u).astype(dtype=np.float32) - (img2_u).astype(dtype=np.float32)\n rmse = np.sqrt(np.mean(np.power(imdiff[:], 2)))\n return 20.0 * np.log10(255.0 / rmse)",
"def calculate_psnr(img0, img1, data_range=None):\n psnr = skm.peak_signal_noise_ratio(img0, img1, data_range=data_range) \n return psnr",
"def tf_psnr(im1, im2):\n mse = tf.losses.mean_squared_error(labels=im2 * 255.0, predictions=im1 * 255.0)\n return 10.0 * (tf.log(255.0 ** 2 / mse) / tf.log(10.0))",
"def compute_psnr(self, real_img, fake_img):\n assert inspect.currentframe().f_code.co_name == f'compute_{self.dist_name}', 'Not the right score function.'\n return 1 / psnr(real_img, fake_img, data_range=ds.MAX_NORM - ds.MIN_NORM)",
"def PSNR(y_true, y_pred):\n return tf.image.psnr(y_true,y_pred,1)",
"def test_psnr_with_two_completely_different_sets(self):\n low = np.zeros((10, 500, 500, 1), dtype=np.uint8)\n high = np.ones((10, 500, 500, 1), dtype=np.uint8) * 255\n\n avg_psnr = np.array(psnr(high, low)).mean()\n self.assertEqual(avg_psnr, 0.0)",
"def compute_psnr_and_ssim(image1, image2, border_size=0):\r\n if len(image1.shape) == 2:\r\n image1 = image1.reshape(image1.shape[0], image1.shape[1], 1)\r\n if len(image2.shape) == 2:\r\n image2 = image2.reshape(image2.shape[0], image2.shape[1], 1)\r\n\r\n if image1.shape[0] != image2.shape[0] or image1.shape[1] != image2.shape[1] or image1.shape[2] != image2.shape[2]:\r\n return None\r\n\r\n image1 = trim_image_as_file(image1)\r\n image2 = trim_image_as_file(image2)\r\n\r\n if border_size > 0:\r\n image1 = image1[border_size:-border_size, border_size:-border_size, :]\r\n image2 = image2[border_size:-border_size, border_size:-border_size, :]\r\n\r\n psnr = peak_signal_noise_ratio(image1, image2, data_range=255)\r\n ssim = structural_similarity(image1, image2, win_size=11, gaussian_weights=True, multichannel=True, K1=0.01, K2=0.03,\r\n sigma=1.5, data_range=255)\r\n return psnr, ssim",
"def calc_psnr(x: torch.Tensor, y: torch.Tensor):\n mse = calc_mse(x, y)\n psnr = -10.0 * torch.log10(mse)\n return psnr",
"def psnr(orig, recon):\n\n err = orig - recon\n l2_sq_err = np.dot(err.ravel(), err.ravel())\n ly, lx = orig.shape\n snr = 10*np.log10(255*255*lx*ly/l2_sq_err)\n return snr",
"def _check_PSNR(self, dataset, is_test=False):\n\n # process one image per iter for test phase\n if is_test:\n batch_size = 1\n else:\n batch_size = self.batch_size\n\n dataloader = DataLoader(dataset, batch_size=batch_size,\n shuffle=False, num_workers=4)\n \n avr_psnr = 0\n avr_ssim = 0\n \n # book keeping variables for test phase\n psnrs = [] # psnr for each image\n ssims = [] # ssim for each image\n proc_time = [] # processing time\n outputs = [] # output for each image\n\n for batch, (input_batch, label_batch) in enumerate(dataloader):\n input_batch, label_batch = self._wrap_variable(input_batch,\n label_batch,\n self.use_gpu)\n if is_test:\n start = time.time()\n output_batch = self.model(input_batch)\n elapsed_time = time.time() - start\n else:\n output_batch = self.model(input_batch)\n\n # ssim is calculated with the normalize (range [0, 1]) image\n ssim = pytorch_ssim.ssim(output_batch + 0.5, label_batch + 0.5, size_average=False)\n ssim = torch.sum(ssim.data)\n avr_ssim += ssim\n\n # calculate PSRN\n output = output_batch.data\n label = label_batch.data\n\n output = (output + 0.5)*255\n label = (label + 0.5)*255\n \n output = output.squeeze(dim=1)\n label = label.squeeze(dim=1)\n \n psnr = self._comput_PSNR(output, label)\n avr_psnr += psnr\n \n # save psnrs and outputs for statistics and generate image at test time\n if is_test:\n psnrs.append(psnr)\n ssims.append(ssim)\n proc_time.append(elapsed_time)\n np_output = output.cpu().numpy()\n outputs.append(np_output[0])\n \n epoch_size = len(dataset)\n avr_psnr /= epoch_size\n avr_ssim /= epoch_size\n stats = (psnrs, ssims, proc_time)\n\n return avr_psnr, avr_ssim, stats, outputs",
"def psnr(ref, tgt):\n return metrics.peak_signal_noise_ratio(image_true=ref,\n image_test=tgt)",
"def psnr(label, outputs, max_val=1.):\n label = label.cpu().detach().numpy()\n outputs = outputs.cpu().detach().numpy()\n # PSNR = -10. * np.log10(np.mean(np.square(outputs - label)))\n img_diff = outputs - label\n rmse = math.sqrt(np.mean((img_diff) ** 2))\n if rmse == 0:\n return 100\n else:\n PSNR = 20 * math.log10(max_val / rmse)\n return PSNR",
"def batch_psnr(test_image, target_image, max=1.):\n psnr = 0\n num_images = test_image.shape[0]\n for i in range(num_images):\n psnr += calc_psnr(test_image[i], target_image[i], max=max)\n psnr /= num_images\n return psnr",
"def test_compute_rate_psnr(self):\n path_to_before_hevc = 'hevc/temp/luminance_before_hevc.yuv'\n path_to_after_hevc = 'hevc/temp/luminance_after_hevc.yuv'\n path_to_cfg = 'hevc/configuration/intra.cfg'\n path_to_bitstream = 'hevc/temp/bitstream.bin'\n qp = 42\n path_to_storage = 'hevc/pseudo_visualization/compute_rate_psnr/'\n list_rotation = [0, 11, 4]\n positions_top_left = numpy.array([[300], [200]], dtype=numpy.int32)\n \n rgb_uint8 = tls.read_image_mode('hevc/pseudo_data/rgb_nightshot.jpg',\n 'RGB')\n (height_initial, width_initial, _) = rgb_uint8.shape\n height_surplus = height_initial % 8\n width_surplus = width_initial % 8\n \n # The 2nd and the 3rd dimension of `luminance_uint8`\n # must be divisible by 8 as the height and the width\n # of the images inserted into HEVC must be divisible\n # by the minimum CU size.\n luminances_uint8 = numpy.expand_dims(tls.rgb_to_ycbcr(rgb_uint8)[0:height_initial - height_surplus, 0:width_initial - width_surplus, 0],\n axis=0)\n (rate, psnr) = hevc.hevc.compute_rate_psnr(luminances_uint8,\n path_to_before_hevc,\n path_to_after_hevc,\n path_to_cfg,\n path_to_bitstream,\n qp,\n path_to_storage,\n list_rotation,\n positions_top_left)\n print('Rate: {}'.format(rate[0]))\n print('PSNR: {}'.format(psnr[0]))",
"def PSNR(ground_truth_images: np.ndarray, noisy_images: np.ndarray) -> List[float]:\n validate_inputs(ground_truth_images, noisy_images)\n\n psnr_acumulated = []\n\n quantity_of_images = ground_truth_images.shape[0]\n\n if need_to_normalize(ground_truth_images):\n ground_truth_images = normalize(ground_truth_images, \\\n interval=(0,255), data_type='int')\n \n if need_to_normalize(noisy_images):\n noisy_images = normalize(noisy_images, \\\n interval=(0,255), data_type='int')\n \n for i in range(quantity_of_images):\n psnr_image = psnr(\n ground_truth_images[i,:,:,0], \n noisy_images[i,:,:,0],\n data_range=256\n )\n psnr_acumulated.append(psnr_image)\n\n # psnr_acumulated = np.array(psnr_acumulated)\n\n # return psnr_acumulated.mean()\n return psnr_acumulated",
"def _check_PSNR(self, dataset, is_test=False):\n\n # process one image per iter for test phase\n if is_test:\n batch_size = 1\n else:\n batch_size = 1 # self.batch_size\n\n dataloader = DataLoader(dataset, batch_size=batch_size,\n shuffle=False, num_workers=1)\n\n avr_psnr = 0\n avr_ssim = 0\n\n # book keeping variables for test phase\n psnrs = [] # psnr for each image\n ssims = [] # ssim for each image\n proc_time = [] # processing time\n outputs = [] # output for each image\n names = []\n\n for batch, sample in enumerate(dataloader):\n input_batch, label_batch, name = sample['lr'], sample['hr'], sample['im_name']\n\n # Wrap with torch Variable\n input_batch, label_batch = self._wrap_variable(input_batch,\n label_batch,\n self.use_gpu)\n\n if is_test:\n start = time.time()\n if self.model_name in ['TDAN']:\n output_batch = chop_forward(input_batch, self.model, 4)\n #output_batch = chop_forward(input_batch, self.model, 4)\n #output_batch = forward_x8(input_batch, self.model).unsqueeze(0)\n #print(output_batch.size())\n # _, lrs = self.model(input_batch)\n # output_batch = lrs[:, -1, :, :, :]\n else:\n output_batch = self.model(input_batch)\n elapsed_time = time.time() - start\n else:\n if self.model_name in ['TDAN']:\n #output_batch, _ = self.model(input_batch)\n output_batch = chop_forward(input_batch, self.model, 4)\n else:\n output_batch = self.model(input_batch)\n # ssim is calculated with the normalize (range [0, 1]) image\n ssim = pytorch_ssim.ssim(output_batch + 0.5, label_batch + 0.5, size_average=False)\n ssim = torch.sum(ssim.data)\n avr_ssim += ssim\n\n # calculate PSRN\n output = output_batch.data\n label = label_batch.data\n\n output = (output + 0.5) * 255\n label = (label + 0.5) * 255\n\n output = quantize(output, 255)\n label = quantize(label, 255)\n # diff = input - target\n\n output = output.squeeze(dim=0)\n label = label.squeeze(dim=0)\n\n psnr = self._comput_PSNR(output / 255.0, label / 255.0)\n # print(psnr)\n avr_psnr += psnr\n\n # save psnrs and outputs for statistics and generate image at test time\n if is_test:\n psnrs.append(psnr)\n ssims.append(ssim)\n proc_time.append(elapsed_time)\n np_output = output.cpu().numpy()\n outputs.append(np_output)\n names.append(name)\n\n epoch_size = len(dataset)\n avr_psnr /= epoch_size\n avr_ssim /= epoch_size\n stats = (psnrs, ssims, proc_time)\n\n return avr_psnr, avr_ssim, stats, outputs, names",
"def calculateSNR(self):\n pass",
"def compute_rate_psnr(luminances_uint8, path_to_before_hevc, path_to_after_hevc, path_to_cfg,\n path_to_bitstream, qp, path_to_storage, list_rotation, positions_top_left):\n # If `luminances_uint8.ndim` is not equal to 3,\n # the unpacking below raises a `ValueError` exception.\n (nb_images, height, width) = luminances_uint8.shape\n rate = numpy.zeros(nb_images)\n psnr = numpy.zeros(nb_images)\n for i in range(nb_images):\n luminance_uint8 = luminances_uint8[i, :, :]\n \n # The function `compress_hevc` ensures that\n # `luminances_uint8.dtype` is equal to `numpy.uint8`.\n luminance_after_hevc_uint8 = compress_hevc(numpy.expand_dims(luminance_uint8, axis=2),\n path_to_before_hevc,\n path_to_after_hevc,\n path_to_cfg,\n path_to_bitstream,\n qp,\n False)\n reconstruction_uint8 = numpy.squeeze(luminance_after_hevc_uint8, axis=2)\n psnr[i] = tls.psnr_2d(luminance_uint8, reconstruction_uint8)\n nb_bytes = os.stat(path_to_bitstream).st_size\n rate[i] = float(8*nb_bytes)/(height*width)\n os.remove(path_to_bitstream)\n \n paths = [os.path.join(path_to_storage, 'reconstruction_{}.png'.format(i))]\n paths += [os.path.join(path_to_storage, 'reconstruction_{0}_crop_{1}.png'.format(i, index_crop)) for index_crop in range(positions_top_left.shape[1])]\n tls.visualize_rotated_luminance(reconstruction_uint8,\n i in list_rotation,\n positions_top_left,\n paths)\n return (rate, psnr)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
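For reference, the two log terms in the PSNR query above are just the expanded form of PSNR = 10·log10(PIXEL_MAX² / MSE). A small sketch with a numeric check, assuming images normalised to [0, 1] as the PIXEL_MAX = 1 constant implies:

    import numpy as np

    def psnr(img1, img2, pixel_max=1.0):
        """PSNR in dB: 20*log10(MAX) - 10*log10(MSE) == 10*log10(MAX**2 / MSE)."""
        diff = np.asarray(img1, dtype=float) - np.asarray(img2, dtype=float)
        mse = np.mean(diff ** 2)
        if mse == 0:
            return float("inf")  # identical images; the record above caps this at 100
        return 20 * np.log10(pixel_max) - 10 * np.log10(mse)

    a = np.zeros((4, 4))
    b = np.full((4, 4), 0.1)   # MSE = 0.01
    print(psnr(a, b))          # 10*log10(1 / 0.01) = 20.0 dB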
The names of the roles performed by the model. This is required by QtQuick. | def roleNames(self):
return self._roles | [
"def roles(self):\n return self.m_roles",
"def object_role_names(self):\n return (object_role.name for object_role in self.object_roles)",
"def roles(self):\n return list(self.roleNamed.values())",
"def object_role_names(self):\n return [object_role.name for object_role in self.object_roles]",
"def roles():\n pass",
"def roles(self):\n return self.token.get(\"roles\", [])",
"def roles(self):\n return self.SUPPORTED_ROLES",
"def rolenames(self):\n try:\n return self.roles.split(',')\n except Exception:\n return []",
"def list_roles(self):\n return list(self.roles)",
"def listRoleInfo( self ):\n return self._roles.values()",
"def listRoleInfo(self):\n return self._roles.values()",
"def present_roles(self):\n print(\"User\" + str(self.unique_id) + \": roles=\")\n for group in self._roles:\n print(\"\\tGroup\" + str(group) + \" -> [\"\n + self.get_role_from_type(group, roles_influence) + \", \"\n + self.get_role_from_type(group, roles_neighbors) + \", \"\n + self.get_role_from_type(group, roles_activities) + \", \"\n + self.get_role_from_type(group, roles_attitude) + \"]\")\n print('')",
"def get_roles(role):",
"def roles(self):\n # TODO: The admin interface only allows a subset of the roles\n # listed in model.py since it uses the OPDS representation of\n # the data, and some of the roles map to the same MARC code.\n CODES = Contributor.MARC_ROLE_CODES\n marc_to_role = dict()\n for role in [\n Contributor.ACTOR_ROLE,\n Contributor.ADAPTER_ROLE,\n Contributor.AFTERWORD_ROLE,\n Contributor.ARTIST_ROLE,\n Contributor.ASSOCIATED_ROLE,\n Contributor.AUTHOR_ROLE,\n Contributor.COMPILER_ROLE,\n Contributor.COMPOSER_ROLE,\n Contributor.CONTRIBUTOR_ROLE,\n Contributor.COPYRIGHT_HOLDER_ROLE,\n Contributor.DESIGNER_ROLE,\n Contributor.DIRECTOR_ROLE,\n Contributor.EDITOR_ROLE,\n Contributor.ENGINEER_ROLE,\n Contributor.FOREWORD_ROLE,\n Contributor.ILLUSTRATOR_ROLE,\n Contributor.INTRODUCTION_ROLE,\n Contributor.LYRICIST_ROLE,\n Contributor.MUSICIAN_ROLE,\n Contributor.NARRATOR_ROLE,\n Contributor.PERFORMER_ROLE,\n Contributor.PHOTOGRAPHER_ROLE,\n Contributor.PRODUCER_ROLE,\n Contributor.TRANSCRIBER_ROLE,\n Contributor.TRANSLATOR_ROLE,\n ]:\n marc_to_role[CODES[role]] = role\n return marc_to_role",
"def role_strings(self):\n return [s[RoleInfo.STRING] for s in [v for item in self.role_strings_info.values() for v in item] if s[RoleInfo.STRING]]",
"def role_list(self):\n method = '/auth/role/list'\n data = {}\n return self.call_rpc(method, data=data)",
"def roles(self):\n return Collection(self, PATH_ROLES)",
"def role(self):\n role = self.description[10] \n return role",
"def get_event_roles(self):\n return list(self.event_role_names)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
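roleNames() is the QtQuick hook that maps integer roles to the byte-string names QML delegates bind to (e.g. model.name). A sketch of where the method typically sits in a list model; PySide6 is assumed, and the RunListModel class and its roles are illustrative only:

    from PySide6.QtCore import Qt, QAbstractListModel, QModelIndex

    class RunListModel(QAbstractListModel):
        NameRole = Qt.UserRole + 1
        StatusRole = Qt.UserRole + 2

        def __init__(self, runs=None, parent=None):
            super().__init__(parent)
            self._runs = list(runs or [])
            self._roles = {self.NameRole: b"name", self.StatusRole: b"status"}

        def rowCount(self, parent=QModelIndex()):
            return len(self._runs)

        def data(self, index, role=Qt.DisplayRole):
            if not index.isValid():
                return None
            run = self._runs[index.row()]
            if role == self.NameRole:
                return run["name"]
            if role == self.StatusRole:
                return run["status"]
            return None

        def roleNames(self):
            # Required by QtQuick so delegates can refer to roles by name.
            return self._roles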
The outline of the command used to perform a horizontal run | def horizontalCommand(self):
return self._horizontal_command | [
"def print_horizontal_rule():\n\n print \"******************************************\"",
"def print_prologue(self, command_name: str, argv: List[str]) -> None:\n # command_text = Text(f\"{command_name}\")\n # if len(argv) > 1:\n # command_text.append(f\" {' '.join(argv[1:])}\")\n # command_text.stylize(\"green on blue bold underline\")\n #\n # prologue_text = Text(\"Running command \").append(command_text)\n # panel = Panel(prologue_text)\n # self._console.print(panel)",
"def _display_command(self):\n idx = self.current_idx # Local copy to avoid race condition updates\n output = self.outputs[idx]\n if output is None:\n self.screen.addstr('Waiting for command to run...')\n return\n\n # Set row limits\n top_line = self.top_lines[idx]\n top_line = 0 if len(output) < self.max_line else min(max(top_line, 0), len(output)-self.max_line)\n bottom_line = min(top_line+self.max_line, len(output)) # Last page may not be full\n self.top_lines[idx] = top_line\n\n # Crop output to fit screen height & width\n output = [line[:self.max_col-1] for line in output[top_line:bottom_line]]\n self.screen.addstr(b'\\n'.join(output))",
"def explainerdashboard_cli(ctx):",
"def _show_topline(self):\n\n self.scr.erase()\n r = 3\n if self.nosep:\n return 3\n\n if self.topline is None:\n self.scr.resize(3, self.max_w)\n self.addstr('\\n ')\n return r\n\n elif isinstance(self.topline, inspection.ArgSpec):\n fn = self.topline[0]\n args = self.topline[1][0]\n kwargs = self.topline[1][3]\n _args = self.topline[1][1]\n _kwargs = self.topline[1][2]\n is_bound_method = self.topline[2]\n in_arg = self.topline[3]\n if PY3:\n kwonly = self.topline[1][4]\n kwonly_defaults = self.topline[1][5] or dict()\n self.scr.resize(3, self.max_w)\n h, w = self.scr.getmaxyx()\n\n self.addstr('\\n ')\n self.addstr(fn,\n app.get_colpair('name') | curses.A_BOLD)\n self.addstr(': (', app.get_colpair('name'))\n max_h = app.clirepl.scr.getmaxyx()[0]\n\n if is_bound_method and isinstance(in_arg, int):\n in_arg += 1\n\n punctuation_colpair = app.get_colpair('punctuation')\n\n for k, i in enumerate(args):\n _, x = self.scr.getyx()\n ln = len(str(i))\n kw = None\n if kwargs and k + 1 > len(args) - len(kwargs):\n kw = repr(kwargs[k - (len(args) - len(kwargs))])\n ln += len(kw) + 1\n\n if ln + x >= w:\n ty = self.scr.getbegyx()[0]\n if not self.down and ty > 0:\n h += 1\n self.scr.mvwin(ty - 1, 1)\n self.scr.resize(h, w)\n elif self.down and h + r < max_h - ty:\n h += 1\n self.scr.resize(h, w)\n else:\n break\n r += 1\n self.addstr('\\n\\t')\n\n if str(i) == 'self' and k == 0:\n color = app.get_colpair('name')\n else:\n color = app.get_colpair('token')\n\n if k == in_arg or i == in_arg:\n color |= curses.A_BOLD\n\n if not PY3:\n # See issue #138: We need to format tuple unpacking correctly\n # We use the undocumented function inspection.strseq() for\n # that. Fortunately, that madness is gone in Python 3.\n self.addstr(inspect.strseq(i, str), color)\n else:\n self.addstr(str(i), color)\n if kw is not None:\n self.addstr('=', punctuation_colpair)\n self.addstr(kw, app.get_colpair('token'))\n if k != len(args) - 1:\n self.addstr(', ', punctuation_colpair)\n\n if _args:\n if args:\n self.addstr(', ', punctuation_colpair)\n self.addstr('*%s' % (_args, ),\n app.get_colpair('token'))\n\n if PY3 and kwonly:\n if not _args:\n if args:\n self.addstr(', ', punctuation_colpair)\n self.addstr('*', punctuation_colpair)\n marker = object()\n for arg in kwonly:\n self.addstr(', ', punctuation_colpair)\n color = app.get_colpair('token')\n if arg == in_arg:\n color |= curses.A_BOLD\n self.addstr(arg, color)\n default = kwonly_defaults.get(arg, marker)\n if default is not marker:\n self.addstr('=', punctuation_colpair)\n self.addstr(repr(default),\n app.get_colpair('token'))\n\n if _kwargs:\n if args or _args or (PY3 and kwonly):\n self.addstr(', ', punctuation_colpair)\n self.addstr('**%s' % (_kwargs, ),\n app.get_colpair('token'))\n self.addstr(')', punctuation_colpair)\n return r\n\n elif isinstance(self.topline, inspection.CommandSpec):\n name = self.topline[0]\n\n self.scr.resize(3, self.max_w)\n\n self.addstr('\\n ')\n self.addstr(name, app.get_colpair('name') | curses.A_BOLD)\n self.addstr(': ', app.get_colpair('name'))\n self.addstr('command', app.get_colpair('command'))\n return r\n\n elif isinstance(self.topline, inspection.KeySpec):\n name = self.topline[0]\n\n self.scr.resize(3, self.max_w)\n\n self.addstr('\\n ')\n self.addstr(name, app.get_colpair('name') | curses.A_BOLD)\n self.addstr(': ', app.get_colpair('name'))\n self.addstr('keyword', app.get_colpair('keyword'))\n self.docstring = \"\"\n return r\n\n elif isinstance(self.topline, inspection.ImpSpec):\n obj_name = self.topline[0]\n obj = 
self.topline[1]\n\n self.scr.resize(3, self.max_w)\n\n self.addstr('\\n ')\n\n if obj is None:\n class_name = 'module'\n elif inspect.isclass(obj):\n class_name = 'class'\n elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):\n class_name = obj.__class__.__name__\n else:\n class_name = 'unknown'\n\n if not (self.docstring is not None and len(self.items) < 2) and class_name == \"module\":\n try:\n for summary in self.docstring.split('\\n'):\n if summary.strip:\n break\n except:\n summary = \"\"\n self.addstr(summary, app.get_colpair('keyword'))\n else:\n\n self.addstr(obj_name, app.get_colpair('name') | curses.A_BOLD)\n self.addstr(': ', app.get_colpair('string'))\n self.addstr(class_name, app.get_colpair('keyword'))\n return r\n\n elif isinstance(self.topline, inspection.ObjSpec):\n obj_name = self.topline[0]\n obj = self.topline[1]\n\n self.scr.resize(3, self.max_w)\n\n self.addstr('\\n ')\n\n val = repr(obj)\n\n self.addstr(obj_name, app.get_colpair('name') | curses.A_BOLD)\n if val:\n if len(val) > self.max_w - 8 - len(obj_name):\n val = val[:self.max_w - 11 - len(obj_name)] + '...'\n self.addstr(' = ', app.get_colpair('string'))\n self.addstr(val, app.get_colpair('keyword'))\n self.docstring = \"\"\n return r\n\n elif isinstance(self.topline, inspection.NoSpec):\n self.scr.resize(3, self.max_w)\n self.addstr('\\n ')\n return r",
"def show(cmd, *args, **argv):\n \n context = argv[\"context\"]\n \n commands = context.commands\n context.resolver.import_module(context.commands)\n from _prettytable import PrettyTable\n cmd_title = ansi.RED + ansi.BOLD + \"commands:\" + ansi.RESET\n cmd_desc = ansi.RED + ansi.BOLD + \"description:\" + ansi.RESET\n pt = PrettyTable([cmd_title, cmd_desc])\n pt.align = \"l\"\n pt.valign = \"t\"\n pt.border = False\n for command in commands:\n module = context.resolver.get_module(command)\n func = context.resolver.get_func(module,command)\n note = context.resolver.get_func_doc(func)[\"note\"]\n notes = note.split(\"\\n\")\n note = \"\".join(notes)\n cmd_n = ansi.BOLD + ansi.GREEN + command + ansi.RESET\n cmd_d = ansi.BOLD + ansi.GREEN +note + ansi.RESET\n pt.add_row([cmd_n, cmd_d])\n \n context.write(\"%s%s%s\" % (ansi.YELLOW, pt.get_string(), ansi.RESET))",
"def print_horiz_border_section():\n print('+ ', end='')\n print('- '*4, end='')",
"def image(self):\n return '%% %s -> %s\\n%s' % (self.command_line_image(),\n self.status,\n self.cmd_out)",
"def getRunupline(self):\n\n\n pass",
"def display(self, grid):\n for i in range(grid.height):\n print(\"-\" + \"-------\"*grid.width)\n for j in range(grid.width):\n if not j:\n print(\"|\", end=\"\") # begin row with vertical line\n a = self.actions.get((i, j), ' ')\n print(\" %s |\" % a, end=\"\")\n print(\"\") # new line\n print(\"-\" + \"-------\"*grid.width, end='\\n\\n')",
"def double_line():\n print (\"=============================================================\")",
"def get_command(self) -> str:\n return 'title'",
"def add_indented_commands(self, commands, *, heading, max_size: Optional[Any] = ...):\n ...",
"def _run_command_prefix_output(cmd, component):\n output = run_command(cmd, component, return_output=True)\n for line in output.splitlines():\n click.echo(click.style(f\"[{component}] \", bold=True) + line)",
"def command():\n pass",
"def _print_results_header(self):\n print(\"\\033[94m\"+\"Summary\\n\"+\"-\"*32+\"\\033[0m\")\n print(\"Subroutine: {}\".format(self.mc_sample.__name__))\n print(\"Num Runs: {:2.1e}\".format(self.num_runs))\n print(\"-\"*32+'\\n')",
"def do_overview(self):\n summaries = []\n for name, cmd in self.base.commands.iteritems():\n summaries.append(' %-14s %s\\n' % (name, cmd.get_summary()))\n summaries.sort()\n sys.stdout.write('Usage: %s COMMAND ARGUMENTS...\\n\\n' \\\n 'Available commands:\\n' % (self.base.scriptname, ))\n for line in summaries:\n sys.stdout.write(line)",
"def verticalCommand(self):\n return self._vertical_command",
"def header_pyhelp():\n print('\\33[30;42m-' * 35 + f'\\n{\"SISTEMA DE AJUDA PyHelp\":^35}\\n' + '-' * 35)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The outline of the command used to perform a vertical run | def verticalCommand(self):
return self._vertical_command | [
"def vertical_line(t, n):\n lt(t)\n fd(t,n)\n rt(t)",
"def inner_vertical(self):\n raise NotImplementedError()",
"def show(cmd, *args, **argv):\n \n context = argv[\"context\"]\n \n commands = context.commands\n context.resolver.import_module(context.commands)\n from _prettytable import PrettyTable\n cmd_title = ansi.RED + ansi.BOLD + \"commands:\" + ansi.RESET\n cmd_desc = ansi.RED + ansi.BOLD + \"description:\" + ansi.RESET\n pt = PrettyTable([cmd_title, cmd_desc])\n pt.align = \"l\"\n pt.valign = \"t\"\n pt.border = False\n for command in commands:\n module = context.resolver.get_module(command)\n func = context.resolver.get_func(module,command)\n note = context.resolver.get_func_doc(func)[\"note\"]\n notes = note.split(\"\\n\")\n note = \"\".join(notes)\n cmd_n = ansi.BOLD + ansi.GREEN + command + ansi.RESET\n cmd_d = ansi.BOLD + ansi.GREEN +note + ansi.RESET\n pt.add_row([cmd_n, cmd_d])\n \n context.write(\"%s%s%s\" % (ansi.YELLOW, pt.get_string(), ansi.RESET))",
"def drawLineVertical(self, *args):\n return _pyupm_lcd.EBOLED_drawLineVertical(self, *args)",
"def vertical_char(self):\n ...",
"def vertical(self, height=20, character=\"|\"):\n his = \"\"\"\"\"\"\n xl = [\"%.2f\" % n for n in self.h[1]]\n lxl = [len(l) for l in xl]\n bars = self.h[0] / max(self.h[0]) * height\n his += \" \" * (np.max(bars) + 2 + np.max(lxl)) + \"%s\\n\" % np.max(self.h[0])\n for i, c in enumerate(bars):\n line = xl[i] + \" \" * (np.max(lxl) - lxl[i]) + \": \" + character * c + \"\\n\"\n his += line\n return his",
"def _display_command(self):\n idx = self.current_idx # Local copy to avoid race condition updates\n output = self.outputs[idx]\n if output is None:\n self.screen.addstr('Waiting for command to run...')\n return\n\n # Set row limits\n top_line = self.top_lines[idx]\n top_line = 0 if len(output) < self.max_line else min(max(top_line, 0), len(output)-self.max_line)\n bottom_line = min(top_line+self.max_line, len(output)) # Last page may not be full\n self.top_lines[idx] = top_line\n\n # Crop output to fit screen height & width\n output = [line[:self.max_col-1] for line in output[top_line:bottom_line]]\n self.screen.addstr(b'\\n'.join(output))",
"def vertical(self):\n return self._vertical",
"def explainerdashboard_cli(ctx):",
"def print_prologue(self, command_name: str, argv: List[str]) -> None:\n # command_text = Text(f\"{command_name}\")\n # if len(argv) > 1:\n # command_text.append(f\" {' '.join(argv[1:])}\")\n # command_text.stylize(\"green on blue bold underline\")\n #\n # prologue_text = Text(\"Running command \").append(command_text)\n # panel = Panel(prologue_text)\n # self._console.print(panel)",
"def active_vertical_lines(self):\n val = ((self._block[1] & 0xF0) << 4) + self._block[0]\n return (val + 1) * 2",
"def help_movie(self):\n print_say(\"Jarvis - movie command\", self)\n print_say(\"List of commands:\", self)\n print_say(\"movie cast\", self)\n print_say(\"movie director\", self)\n print_say(\"movie plot\", self)\n print_say(\"movie producer\", self)\n print_say(\"movie rating\", self)\n print_say(\"movie year\", self)",
"def _draw_vertical_aid_line(self, pos, with_outer=True):\n return None",
"def do_show_run_params(self,line):\n pprint(self.run_params)\n print termstyle.green(\"*** End of TRex running parameters ***\")",
"def toVerticalLineSegmentToolMode(self):\n\n self.log.debug(\"Entered toVerticalLineSegmentToolMode()\")\n\n # Only do something if it is not currently in this mode.\n if self.toolMode != \\\n PriceBarChartGraphicsView.ToolMode['VerticalLineSegmentTool']:\n\n self.toolMode = \\\n PriceBarChartGraphicsView.ToolMode['VerticalLineSegmentTool']\n\n self.setCursor(QCursor(Qt.ArrowCursor))\n self.setDragMode(QGraphicsView.NoDrag)\n\n # Clear out internal working variables.\n self.clickOnePointF = None\n self.clickTwoPointF = None\n self.verticalLineSegmentGraphicsItem = None\n\n scene = self.scene()\n if scene != None:\n scene.clearSelection()\n\n items = scene.items()\n for item in items:\n self.setGraphicsItemFlagsPerCurrToolMode(item)\n \n self.log.debug(\"Exiting toVerticalLineSegmentToolMode()\")",
"def command(self, command):\r\n\t\tvim.command(command)",
"def _show_topline(self):\n\n self.scr.erase()\n r = 3\n if self.nosep:\n return 3\n\n if self.topline is None:\n self.scr.resize(3, self.max_w)\n self.addstr('\\n ')\n return r\n\n elif isinstance(self.topline, inspection.ArgSpec):\n fn = self.topline[0]\n args = self.topline[1][0]\n kwargs = self.topline[1][3]\n _args = self.topline[1][1]\n _kwargs = self.topline[1][2]\n is_bound_method = self.topline[2]\n in_arg = self.topline[3]\n if PY3:\n kwonly = self.topline[1][4]\n kwonly_defaults = self.topline[1][5] or dict()\n self.scr.resize(3, self.max_w)\n h, w = self.scr.getmaxyx()\n\n self.addstr('\\n ')\n self.addstr(fn,\n app.get_colpair('name') | curses.A_BOLD)\n self.addstr(': (', app.get_colpair('name'))\n max_h = app.clirepl.scr.getmaxyx()[0]\n\n if is_bound_method and isinstance(in_arg, int):\n in_arg += 1\n\n punctuation_colpair = app.get_colpair('punctuation')\n\n for k, i in enumerate(args):\n _, x = self.scr.getyx()\n ln = len(str(i))\n kw = None\n if kwargs and k + 1 > len(args) - len(kwargs):\n kw = repr(kwargs[k - (len(args) - len(kwargs))])\n ln += len(kw) + 1\n\n if ln + x >= w:\n ty = self.scr.getbegyx()[0]\n if not self.down and ty > 0:\n h += 1\n self.scr.mvwin(ty - 1, 1)\n self.scr.resize(h, w)\n elif self.down and h + r < max_h - ty:\n h += 1\n self.scr.resize(h, w)\n else:\n break\n r += 1\n self.addstr('\\n\\t')\n\n if str(i) == 'self' and k == 0:\n color = app.get_colpair('name')\n else:\n color = app.get_colpair('token')\n\n if k == in_arg or i == in_arg:\n color |= curses.A_BOLD\n\n if not PY3:\n # See issue #138: We need to format tuple unpacking correctly\n # We use the undocumented function inspection.strseq() for\n # that. Fortunately, that madness is gone in Python 3.\n self.addstr(inspect.strseq(i, str), color)\n else:\n self.addstr(str(i), color)\n if kw is not None:\n self.addstr('=', punctuation_colpair)\n self.addstr(kw, app.get_colpair('token'))\n if k != len(args) - 1:\n self.addstr(', ', punctuation_colpair)\n\n if _args:\n if args:\n self.addstr(', ', punctuation_colpair)\n self.addstr('*%s' % (_args, ),\n app.get_colpair('token'))\n\n if PY3 and kwonly:\n if not _args:\n if args:\n self.addstr(', ', punctuation_colpair)\n self.addstr('*', punctuation_colpair)\n marker = object()\n for arg in kwonly:\n self.addstr(', ', punctuation_colpair)\n color = app.get_colpair('token')\n if arg == in_arg:\n color |= curses.A_BOLD\n self.addstr(arg, color)\n default = kwonly_defaults.get(arg, marker)\n if default is not marker:\n self.addstr('=', punctuation_colpair)\n self.addstr(repr(default),\n app.get_colpair('token'))\n\n if _kwargs:\n if args or _args or (PY3 and kwonly):\n self.addstr(', ', punctuation_colpair)\n self.addstr('**%s' % (_kwargs, ),\n app.get_colpair('token'))\n self.addstr(')', punctuation_colpair)\n return r\n\n elif isinstance(self.topline, inspection.CommandSpec):\n name = self.topline[0]\n\n self.scr.resize(3, self.max_w)\n\n self.addstr('\\n ')\n self.addstr(name, app.get_colpair('name') | curses.A_BOLD)\n self.addstr(': ', app.get_colpair('name'))\n self.addstr('command', app.get_colpair('command'))\n return r\n\n elif isinstance(self.topline, inspection.KeySpec):\n name = self.topline[0]\n\n self.scr.resize(3, self.max_w)\n\n self.addstr('\\n ')\n self.addstr(name, app.get_colpair('name') | curses.A_BOLD)\n self.addstr(': ', app.get_colpair('name'))\n self.addstr('keyword', app.get_colpair('keyword'))\n self.docstring = \"\"\n return r\n\n elif isinstance(self.topline, inspection.ImpSpec):\n obj_name = self.topline[0]\n obj = 
self.topline[1]\n\n self.scr.resize(3, self.max_w)\n\n self.addstr('\\n ')\n\n if obj is None:\n class_name = 'module'\n elif inspect.isclass(obj):\n class_name = 'class'\n elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):\n class_name = obj.__class__.__name__\n else:\n class_name = 'unknown'\n\n if not (self.docstring is not None and len(self.items) < 2) and class_name == \"module\":\n try:\n for summary in self.docstring.split('\\n'):\n if summary.strip:\n break\n except:\n summary = \"\"\n self.addstr(summary, app.get_colpair('keyword'))\n else:\n\n self.addstr(obj_name, app.get_colpair('name') | curses.A_BOLD)\n self.addstr(': ', app.get_colpair('string'))\n self.addstr(class_name, app.get_colpair('keyword'))\n return r\n\n elif isinstance(self.topline, inspection.ObjSpec):\n obj_name = self.topline[0]\n obj = self.topline[1]\n\n self.scr.resize(3, self.max_w)\n\n self.addstr('\\n ')\n\n val = repr(obj)\n\n self.addstr(obj_name, app.get_colpair('name') | curses.A_BOLD)\n if val:\n if len(val) > self.max_w - 8 - len(obj_name):\n val = val[:self.max_w - 11 - len(obj_name)] + '...'\n self.addstr(' = ', app.get_colpair('string'))\n self.addstr(val, app.get_colpair('keyword'))\n self.docstring = \"\"\n return r\n\n elif isinstance(self.topline, inspection.NoSpec):\n self.scr.resize(3, self.max_w)\n self.addstr('\\n ')\n return r",
"def print_horizontal_rule():\n\n print \"******************************************\"",
"def image(self):\n return '%% %s -> %s\\n%s' % (self.command_line_image(),\n self.status,\n self.cmd_out)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The current number of runs | def count(self):
return len(self._runs) | [
"def get_runs(self) -> int:",
"def run_count(self) -> int:\n return self._run_count",
"def run_count(self):\n return self._run_count",
"def increment_run_count(self):\n self.run_count += 1",
"def next_run_idx(self):\n return self.num_runs",
"def get_num_of_executed_iters(self):\r\n return self.num_of_executed_iters",
"def number_of_launches(self):\n return self._number_of_launches",
"def NextRunNumber( self ):\r\n return self.LastRunNumber() + 1 # let the numbering be global - look for the last run number in *any* mode\r",
"def iterations(self):\n return self._task_count",
"def run_number(self):\n return self._runNumber",
"def num_trials(self):",
"def getRunningTaskCount():",
"def get_runs_count(self):\n if self.config_file == \"\" or self.section == \"\":\n print(\"Configuration error. Call set_config() before \"\n \"get_runs_count()\")\n sys.exit(1)\n return self.config.get_runs_count()",
"def runningWork(self):\n return self.workCount",
"def __len__(self) -> int:\n\n\t\treturn len(self._runs)",
"def get_run_number(self):\n if not self.thread:\n return 0\n with self.thread.data_lock:\n return self.thread.run_number",
"def number_of_active_runners(self):\n return self._number_of_active_runners",
"def NumIterations(self):\n\t\treturn self._get_attribute('numIterations')",
"def number_of_iterations(self) -> int:\n return self._solution.info.iter"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
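The horizontalCommand, verticalCommand and count records above are plain accessors; a hypothetical container showing how such getters usually coexist (all names and command strings below are illustrative):

    class RunnerConfig:
        """Hypothetical holder for run command outlines and completed runs."""
        def __init__(self, horizontal_command, vertical_command):
            self._horizontal_command = horizontal_command
            self._vertical_command = vertical_command
            self._runs = []

        @property
        def horizontalCommand(self):
            return self._horizontal_command

        @property
        def verticalCommand(self):
            return self._vertical_command

        @property
        def count(self):
            # Number of runs recorded so far.
            return len(self._runs)

    cfg = RunnerConfig("scan --axis=x {target}", "scan --axis=y {target}")
    cfg._runs.append("run-001")
    print(cfg.count)   # 1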
Function to perform a 5-year moving-window filter for a single land cover value (such as Forest as 1) for all years in an image. Calls the function mask5. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central position of five consecutive years, and if the extremities of the consecutive years are identical but the centre position is not, then the central pixels are reclassified to match their temporal neighbour class. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few. | def applyWindow5years(imagem, value, bandNames):
img_out = imagem.select(bandNames[0])
for i in np.arange(1, len(bandNames)-3):
img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))
img_out = img_out.addBands(imagem.select(bandNames[-3]))
img_out = img_out.addBands(imagem.select(bandNames[-2]))
img_out = img_out.addBands(imagem.select(bandNames[-1]))
return img_out | [
"def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out",
"def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out",
"def prepare_ERA5_moisture_flux(era5_path=era5_path):\n import xarray as xr\n from aux_gps import save_ncfile\n from aux_gps import anomalize_xr\n import numpy as np\n from aux_gps import convert_wind_direction\n from dask.diagnostics import ProgressBar\n ds = xr.open_dataset(\n era5_path / 'ERA5_UVQ_4xdaily_israel_1996-2019.nc', chunks={'level': 5})\n # ds = ds.resample(time='D', keep_attrs=True).mean(keep_attrs=True)\n # ds.attrs['action'] = 'resampled to 1D from 12:00UTC data points'\n mf = (ds['q'] * ds['u']).to_dataset(name='qu')\n mf.attrs = ds.attrs\n mf['qu'].attrs['units'] = ds['u'].attrs['units']\n mf['qu'].attrs['long_name'] = 'U component of moisture flux'\n mf['qu'].attrs['standard_name'] = 'eastward moisture flux'\n mf['qv'] = ds['q'] * ds['v']\n mf['qv'].attrs['units'] = ds['v'].attrs['units']\n mf['qv'].attrs['long_name'] = 'V component moisture flux'\n mf['qv'].attrs['standard_name'] = 'northward moisture flux'\n mf['qf'], mf['qfdir'] = convert_wind_direction(u=mf['qu'], v=mf['qv'])\n mf['qf'].attrs['units'] = ds['v'].attrs['units']\n mf['qf'].attrs['long_name'] = 'moisture flux magnitude'\n # mf['qfdir'] = 270 - np.rad2deg(np.arctan2(mf['qv'], mf['qu']))\n mf['qfdir'].attrs['units'] = 'deg'\n mf['qfdir'].attrs['long_name'] = 'moisture flux direction (meteorological)'\n mf = mf.sortby('latitude')\n mf = mf.sortby('level', ascending=False)\n comp = dict(zlib=True, complevel=9)\n encoding_mf = {var: comp for var in mf}\n mf_delayed = mf.to_netcdf(era5_path / 'ERA5_MF_4xdaily_israel_1996-2019.nc',\n 'w', encoding=encoding_mf, compute=False)\n mf_anoms = anomalize_xr(mf, freq='MS', time_dim='time')\n mf_anoms_mean = mf_anoms.mean('latitude').mean('longitude')\n encoding_mf_anoms = {var: comp for var in mf_anoms}\n mf_anoms_delayed = mf_anoms_mean.to_netcdf(era5_path / 'ERA5_MF_anomalies_4xdaily_israel_mean_1996-2019.nc',\n 'w', encoding=encoding_mf_anoms, compute=False)\n with ProgressBar():\n results = mf_delayed.compute()\n with ProgressBar():\n results1 = mf_anoms_delayed.compute()\n # save_ncfile(mf, era5_path, 'ERA5_MF_4xdaily_israel_1996-2019.nc')\n # mf_anoms = anomalize_xr(mf, freq='MS', time_dim='time')\n # mf_anoms_mean = mf_anoms.mean('latitude').mean('longitude')\n # save_ncfile(mf_anoms_mean, era5_path,\n # 'ERA5_MF_anomalies_4xdaily_israel_mean_1996-2019.nc')\n return",
"def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)",
"def get_S1(\n self,\n year,\n month,\n day,\n tempfilter=True,\n tempfilter_radius=7,\n applylcmask=False,\n mask_globcover=True,\n dualpol=True,\n trackflt=None,\n maskwinter=False,\n masksnow=True,\n explicit_t_mask=None,\n ascending=False,\n maskLIA=True,\n ):\n\n def computeLIA(image):\n # comput the local incidence angle (LIA) based on the srtm and the s1 viewing angle\n # get the srtm\n srtm = ee.Image(\"USGS/SRTMGL1_003\")\n srtm_slope = ee.Terrain.slope(srtm)\n srtm_aspect = ee.Terrain.aspect(srtm)\n # get the S1 incidence angle\n inc = ee.Image(image).select(\"angle\")\n # comput the LIA\n s = srtm_slope.multiply(\n ee.Image.constant(277)\n .subtract(srtm_aspect)\n .multiply(math.pi / 180)\n .cos()\n )\n lia = inc.subtract(\n ee.Image.constant(90).subtract(ee.Image.constant(90).subtract(s))\n ).abs()\n # add band to current image\n return image.addBands(\n lia.select([\"angle\"], [\"lia\"]).reproject(srtm.projection())\n )\n\n def maskterrain(image):\n # mask for terrain, local incidence angle and high and low backscatter\n tmp = ee.Image(image)\n # srtm dem\n if maskLIA == False:\n gee_srtm = ee.Image(\"USGS/SRTMGL1_003\")\n gee_srtm_slope = ee.Terrain.slope(gee_srtm)\n mask = gee_srtm_slope.lt(20)\n else:\n lia = tmp.select(\"lia\")\n mask = lia.gt(20).bitwiseAnd(lia.lt(45))\n mask2 = tmp.lt(0).bitwiseAnd(tmp.gt(-25))\n mask = mask.bitwiseAnd(mask2)\n tmp = tmp.updateMask(mask)\n\n return tmp\n\n def masklc(image):\n # load land cover info\n corine = ee.Image(\"users/felixgreifeneder/corine\")\n\n # create lc mask\n valLClist = [10, 11, 12, 13, 18, 19, 20, 21, 26, 27, 28, 29]\n\n lcmask = (\n corine.eq(valLClist[0])\n .bitwiseOr(corine.eq(valLClist[1]))\n .bitwiseOr(corine.eq(valLClist[2]))\n .bitwiseOr(corine.eq(valLClist[3]))\n .bitwiseOr(corine.eq(valLClist[4]))\n .bitwiseOr(corine.eq(valLClist[5]))\n .bitwiseOr(corine.eq(valLClist[6]))\n .bitwiseOr(corine.eq(valLClist[7]))\n .bitwiseOr(corine.eq(valLClist[8]))\n .bitwiseOr(corine.eq(valLClist[9]))\n .bitwiseOr(corine.eq(valLClist[10]))\n .bitwiseOr(corine.eq(valLClist[11]))\n )\n\n tmp = ee.Image(image)\n\n tmp = tmp.updateMask(lcmask)\n return tmp\n\n def mask_lc_globcover(image):\n\n tmp = ee.Image(image)\n\n # load lc\n glbcvr = ee.Image(\"ESA/GLOBCOVER_L4_200901_200912_V2_3\").select(\"landcover\")\n\n valLClist = [\n 11,\n 14,\n 20,\n 30,\n 40,\n 50,\n 60,\n 70,\n 90,\n 100,\n 110,\n 120,\n 130,\n 140,\n 150,\n 160,\n 170,\n 180,\n 190,\n 200,\n 210,\n 220,\n 230,\n ]\n\n lcmask = (\n glbcvr.eq(valLClist[0])\n .bitwiseOr(glbcvr.eq(valLClist[1]))\n .bitwiseOr(glbcvr.eq(valLClist[2]))\n .bitwiseOr(glbcvr.eq(valLClist[3]))\n .bitwiseOr(glbcvr.eq(valLClist[4]))\n .bitwiseOr(glbcvr.eq(valLClist[5]))\n .bitwiseOr(glbcvr.eq(valLClist[6]))\n .bitwiseOr(glbcvr.eq(valLClist[7]))\n .bitwiseOr(glbcvr.eq(valLClist[8]))\n .bitwiseOr(glbcvr.eq(valLClist[9]))\n .bitwiseOr(glbcvr.eq(valLClist[10]))\n .bitwiseOr(glbcvr.eq(valLClist[11]))\n .bitwiseOr(glbcvr.eq(valLClist[12]))\n .bitwiseOr(glbcvr.eq(valLClist[13]))\n .bitwiseOr(glbcvr.eq(valLClist[14]))\n .bitwiseOr(glbcvr.eq(valLClist[15]))\n .bitwiseOr(glbcvr.eq(valLClist[16]))\n .bitwiseOr(glbcvr.eq(valLClist[17]))\n .bitwiseOr(glbcvr.eq(valLClist[18]))\n .bitwiseOr(glbcvr.eq(valLClist[19]))\n .bitwiseOr(glbcvr.eq(valLClist[20]))\n .bitwiseOr(glbcvr.eq(valLClist[21]))\n .bitwiseOr(glbcvr.eq(valLClist[22]))\n )\n\n tmp = tmp.updateMask(lcmask)\n\n return tmp\n\n def setresample(image):\n image = image.resample()\n return image\n\n def toln(image):\n\n tmp = ee.Image(image)\n\n # 
Convert to linear\n vv = ee.Image(10).pow(tmp.select(\"VV\").divide(10))\n if dualpol == True:\n vh = ee.Image(10).pow(tmp.select(\"VH\").divide(10))\n\n # Convert to ln\n out = vv.log()\n if dualpol == True:\n out = out.addBands(vh.log())\n out = out.select([\"constant\", \"constant_1\"], [\"VV\", \"VH\"])\n else:\n out = out.select([\"constant\"], [\"VV\"])\n\n return out.set(\"system:time_start\", tmp.get(\"system:time_start\"))\n\n def tolin(image):\n\n tmp = ee.Image(image)\n\n # Covert to linear\n vv = ee.Image(10).pow(tmp.select(\"VV\").divide(10))\n if dualpol == True:\n vh = ee.Image(10).pow(tmp.select(\"VH\").divide(10))\n\n # Convert to\n if dualpol == True:\n out = vv.addBands(vh)\n out = out.select([\"constant\", \"constant_1\"], [\"VV\", \"VH\"])\n else:\n out = vv.select([\"constant\"], [\"VV\"])\n\n return out.set(\"system:time_start\", tmp.get(\"system:time_start\"))\n\n def todb(image):\n\n tmp = ee.Image(image)\n\n return (\n ee.Image(10)\n .multiply(tmp.log10())\n .set(\"system:time_start\", tmp.get(\"system:time_start\"))\n )\n\n def applysnowmask(image):\n\n tmp = ee.Image(image)\n sdiff = tmp.select(\"VH\").subtract(snowref)\n wetsnowmap = sdiff.lte(-2.6).focal_mode(100, \"square\", \"meters\", 3)\n\n return tmp.updateMask(wetsnowmap.eq(0))\n\n def projectlia(image):\n tmp = ee.Image(image)\n trgtprj = tmp.select(\"VV\").projection()\n tmp = tmp.addBands(tmp.select(\"angle\").reproject(trgtprj), [\"angle\"], True)\n return tmp\n\n def apply_explicit_t_mask(image):\n\n t_mask = ee.Image(\"users/felixgreifeneder/\" + explicit_t_mask)\n mask = t_mask.eq(0)\n return image.updateMask(mask)\n\n ee.Reset()\n ee.Initialize()\n\n # load S1 data\n gee_s1_collection = ee.ImageCollection(\"COPERNICUS/S1_GRD\")\n\n # Filter the image collection\n gee_s1_filtered = (\n gee_s1_collection.filter(ee.Filter.eq(\"instrumentMode\", \"IW\"))\n .filterBounds(self.roi)\n .filter(ee.Filter.eq(\"platform_number\", \"A\"))\n .filter(ee.Filter.listContains(\"transmitterReceiverPolarisation\", \"VV\"))\n )\n\n if ascending == True:\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.eq(\"orbitProperties_pass\", \"ASCENDING\")\n )\n else:\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.eq(\"orbitProperties_pass\", \"DESCENDING\")\n )\n\n if dualpol == True:\n # Consider only dual-pol scenes\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.listContains(\"transmitterReceiverPolarisation\", \"VH\")\n )\n\n if trackflt is not None:\n # Specify track\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.eq(\"relativeOrbitNumber_start\", trackflt)\n )\n\n if maskwinter == True:\n # Mask winter based on DOY\n gee_s1_filtered = gee_s1_filtered.filter(ee.Filter.dayOfYear(121, 304))\n\n # add LIA\n if maskLIA == True:\n # compute the local incidence angle if it shall be used for masking\n gee_s1_filtered = gee_s1_filtered.map(computeLIA)\n s1_lia = gee_s1_filtered.select(\"lia\")\n else:\n s1_lia = None\n\n s1_angle = gee_s1_filtered.select(\"angle\")\n\n if applylcmask == True:\n # apply land-cover mask based on Corine\n gee_s1_filtered = gee_s1_filtered.map(masklc)\n if mask_globcover == True:\n # apply land-cover mask based on globcover\n gee_s1_filtered = gee_s1_filtered.map(mask_lc_globcover)\n\n # Enable bilinear resampling (instead of NN)\n gee_s1_filtered = gee_s1_filtered.map(setresample)\n\n if explicit_t_mask == None:\n # apply masking based on the terraing (LIA)\n gee_s1_filtered = gee_s1_filtered.map(maskterrain)\n else:\n # apply specific terrain mask\n gee_s1_filtered = 
gee_s1_filtered.map(apply_explicit_t_mask)\n\n if masksnow == True:\n # automatic wet snow masking\n gee_s1_linear_vh = gee_s1_filtered.map(tolin).select(\"VH\")\n snowref = ee.Image(10).multiply(\n gee_s1_linear_vh.reduce(ee.Reducer.intervalMean(5, 100)).log10()\n )\n gee_s1_filtered = gee_s1_filtered.map(applysnowmask)\n\n #### SHOULD BE IF STATEMENT HERE\n\n # create a list of availalbel dates\n tmp = gee_s1_filtered.getInfo()\n tmp_ids = [x[\"properties\"][\"system:index\"] for x in tmp[\"features\"]]\n \n dates = np.array(\n [\n dt.date(year=int(x[17:21]), month=int(x[21:23]), day=int(x[23:25]))\n for x in tmp_ids\n ]\n )\n \n if not len(dates):\n raise Exception(\n \"There are no S1 images with the selected filters, please consider \"\n \"changing the area of interest or selecting a different orbit\"\n )\n \n # find the closest acquisitions\n doi = dt.date(year=year, month=month, day=day)\n doi_index = np.argmin(np.abs(dates - doi))\n date_selected = dates[doi_index]\n\n # filter imagecollection for respective date\n gee_s1_drange = gee_s1_filtered.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n s1_angle_drange = s1_angle.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n if maskLIA == True:\n s1_lia_drange = s1_lia.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n if gee_s1_drange.size().getInfo() > 1:\n if maskLIA == True:\n s1_lia = s1_lia_drange.mosaic()\n s1_angle = s1_angle_drange.mosaic()\n s1_sig0 = gee_s1_drange.mosaic()\n s1_lia = ee.Image(s1_lia.copyProperties(s1_lia_drange.first()))\n s1_sig0 = ee.Image(s1_sig0.copyProperties(gee_s1_drange.first()))\n else:\n s1_sig0 = ee.Image(gee_s1_drange.first())\n s1_angle = ee.Image(s1_angle_drange.first())\n s1_lia = ee.Image(s1_lia_drange.first())\n\n # fetch image from image collection\n # get the track number\n s1_sig0_info = s1_sig0.getInfo()\n track_nr = s1_sig0_info[\"properties\"][\"relativeOrbitNumber_start\"]\n\n # only uses images of the same track\n gee_s1_filtered = gee_s1_filtered.filterMetadata(\n \"relativeOrbitNumber_start\", \"equals\", track_nr\n )\n\n if tempfilter == True:\n # despeckle\n radius = tempfilter_radius\n units = \"pixels\"\n gee_s1_linear = gee_s1_filtered.map(tolin)\n gee_s1_dspckld_vv = self._multitemporalDespeckle(\n gee_s1_linear.select(\"VV\"),\n radius,\n units,\n {\"before\": -12, \"after\": 12, \"units\": \"month\"},\n )\n gee_s1_dspckld_vv = gee_s1_dspckld_vv.map(todb)\n gee_s1_fltrd_vv = gee_s1_dspckld_vv.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n s1_sig0_vv = gee_s1_fltrd_vv.mosaic()\n\n if dualpol == True:\n gee_s1_dspckld_vh = self._multitemporalDespeckle(\n gee_s1_linear.select(\"VH\"),\n radius,\n units,\n {\"before\": -12, \"after\": 12, \"units\": \"month\"},\n )\n gee_s1_dspckld_vh = gee_s1_dspckld_vh.map(todb)\n gee_s1_fltrd_vh = gee_s1_dspckld_vh.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n s1_sig0_vh = gee_s1_fltrd_vh.mosaic()\n\n if dualpol == True:\n s1_sig0 = s1_sig0_vv.addBands(s1_sig0_vh).select(\n [\"constant\", \"constant_1\"], [\"VV\", \"VH\"]\n )\n else:\n s1_sig0 = s1_sig0_vv.select([\"constant\"], [\"VV\"])\n\n # extract information\n s1_sig0_vv = s1_sig0.select(\"VV\")\n s1_sig0_vv = 
s1_sig0_vv.clip(self.roi)\n if dualpol == True:\n s1_sig0_vh = s1_sig0.select(\"VH\")\n s1_sig0_vh = s1_sig0_vh.clip(self.roi)\n\n gee_s1_ln = gee_s1_filtered.map(toln)\n gee_s1_lin = gee_s1_filtered.map(tolin)\n k1vv = ee.Image(gee_s1_ln.select(\"VV\").mean()).clip(self.roi)\n k2vv = ee.Image(gee_s1_ln.select(\"VV\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n mean_vv = ee.Image(gee_s1_lin.select(\"VV\").mean()).clip(self.roi)\n std_vv = ee.Image(gee_s1_lin.select(\"VV\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n\n if dualpol == True:\n k1vh = ee.Image(gee_s1_ln.select(\"VH\").mean()).clip(self.roi)\n k2vh = ee.Image(gee_s1_ln.select(\"VH\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n mean_vh = ee.Image(gee_s1_lin.select(\"VH\").mean()).clip(self.roi)\n std_vh = ee.Image(gee_s1_lin.select(\"VH\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n\n # export\n if dualpol == False:\n self.S1_SIG0_VV_db = s1_sig0_vv\n self.S1_ANGLE = s1_angle\n self.K1VV = k1vv\n self.K2VV = k2vv\n self.S1_DATE = date_selected\n else:\n self.S1_SIG0_VV_db = s1_sig0_vv\n self.S1_SIG0_VH_db = s1_sig0_vh\n self.S1_ANGLE = s1_angle\n self.K1VV = k1vv\n self.K1VH = k1vh\n self.K2VV = k2vv\n self.K2VH = k2vh\n self.S1_DATE = date_selected\n self.S1MEAN_VV = mean_vv\n self.S1MEAN_VH = mean_vh\n self.S1STD_VV = std_vv\n self.S1STD_VH = std_vh\n\n if maskLIA == True:\n self.S1_LIA = s1_lia",
"def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = 
ee.Image('USDA/NASS/CDL/2017').select('cultivated'). \\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands",
"def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)",
"def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask",
"def add_ResNet_roi_conv5_head_for_masks(model, blob_in, dim_in, spatial_scale):\n model.RoIFeatureTransform(\n blob_in,\n blob_out='_[mask]_pool5',\n blob_rois='mask_rois',\n method=cfg.MRCNN.ROI_XFORM_METHOD,\n resolution=cfg.MRCNN.ROI_XFORM_RESOLUTION,\n sampling_ratio=cfg.MRCNN.ROI_XFORM_SAMPLING_RATIO,\n spatial_scale=spatial_scale\n )\n\n dilation = cfg.MRCNN.DILATION\n stride_init = int(cfg.MRCNN.ROI_XFORM_RESOLUTION / 7) # by default: 2\n\n s, dim_in = ResNet.add_stage(\n model,\n '_[mask]_res5',\n '_[mask]_pool5',\n 3,\n dim_in,\n 2048,\n 512,\n dilation,\n stride_init=stride_init\n )\n\n return s, 2048",
"def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img",
"def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered",
"def ResNet_roi_conv5_head_for_masks(dim_in):\n dilation = cfg.MRCNN.DILATION\n stride_init = cfg.MRCNN.ROI_XFORM_RESOLUTION // 7 # by default: 2\n module, dim_out = ResNet.add_stage(dim_in, 2048, 512, 3, dilation, stride_init)\n return module, dim_out",
"def _getTransformedLandmarks5(self) -> Landmarks5:\n if self._transformedLandmarks5 is None:\n self._transformedLandmarks5 = self.estimatorCollection.warper.makeWarpTransformationWithLandmarks(\n self, \"L5\"\n )\n return self._transformedLandmarks5",
"def _getTransformedLandmarks5(self) -> Landmarks5:\n if self._transformedLandmarks5 is None:\n warper = self.estimatorCollection.warper\n self._transformedLandmarks5 = warper.makeWarpTransformationWithLandmarks(self, \"L5\") # type: ignore\n return self._transformedLandmarks5 # type: ignore",
"def load_chaz_storms(path, mask_distance=None, mask_coordinate=(0.0, 0.0),\n mask_category=None, categorization=\"NHC\"):\n\n\n # Load the mat file and extract pertinent data\n #data = xarray.open_dataset(path)\n data = netCDF4.Dataset(path)\n print(data.dimensions.keys())\n print(data.variables.keys())\n\n # Days from 1-1-1950\n # start_date = datetime.datetime(1950, 1, 1)\n #stormIDs = data.variables['time']['stormID'][:]\n\n #stormIDs = data['time']['stormID']\n storms = []\n\n time_length = data['Mwspd'].shape[0]\n num_tracks = data['Mwspd'].shape[1]\n num_intensities = data['Mwspd'].shape[2]\n\n for i in range(num_tracks):\n\n # Extract initial data ranges\n for n in range(num_intensities):\n\n # Use intensity to find non-nans and extract correct arrays\n max_wind_speed = numpy.array(data.variables['Mwspd'][:, i, n])\n index_set = (numpy.isnan(max_wind_speed) - 1).nonzero()[0]\n #print(\"\")\n #print(\"Max Wind speed\")\n #print(max_wind_speed)\n\n index = len(index_set)\n t = numpy.array(data.variables['time'][0:index, i])\n x = numpy.array(data.variables['longitude'][0:index, i])\n y = numpy.array(data.variables['latitude'][0:index, i])\n\n #print(\"\")\n #print(x)\n #print(\"\")\n #print(y)\n\n # Remove zero-length intensities\n if len(index_set) > 0:\n # Create storm object\n storm = clawpack.geoclaw.surge.storm.Storm()\n storm.ID = i * num_intensities + n\n\n # Initialize the date set\n\n storm.t = [datetime.datetime(2000, 1, 1, 0) + \\\n datetime.timedelta(hours=6) * i\n for i in range(len(index_set))]\n storm.time_offset = storm.t[0]\n #storm.t = t[index_set]\n ## Add fields with proper non-nan values\n #storm.t[0] = datetime.datetime(2007, 1, 1, 0)\n #for i in range(1, index_set):\n # storm.t[i] = storm.t[i-1] + datetime.timedelta(hours = 6)\n\n #storm.t = t[index_set]\n #storm.t -= storm.t[0]\n #storm.t *= 24.0 * 60.0**2\n\n ## Check for missing last time point and adjust index set\n #if storm.t[-1] < 0:\n # index_set = index_set[:-1]\n # #print(index_set)\n # storm.t = storm.t[:-1]\n\n storm.eye_location = numpy.empty((len(index_set), 2))\n x[index_set] = x[index_set] - 360.0 * numpy.ones(len(index_set))\n storm.eye_location[:, 0] = x[index_set]\n storm.eye_location[:, 1] = y[index_set]\n\n #storm.eye_location = numpy.empty((2, len(index_set)))\n #storm.eye_location[0, :] = x[index_set]\n #storm.eye_location[1, :] = y[index_set]\n\n # TODO: Convert from knots\n storm.max_wind_speed = max_wind_speed[index_set]\n #print(\"Storm Max Wind Speed in Knots\") \n #print(storm.max_wind_speed)\n #print(\" \") \n # Assumed values\n storm.storm_radius = 500000 * numpy.ones(len(index_set))\n\n\n # Calculate Radius of Max Wind\n C0 = 218.3784 * numpy.ones(len(index_set))\n storm.max_wind_radius = C0 - 1.2014 * storm.max_wind_speed + \\\n (storm.max_wind_speed / 10.9884)**2 - \\\n (storm.max_wind_speed / 35.3052)**3 - \\\n 145.5090 * \\\n numpy.cos(storm.eye_location[:, 1] * 0.0174533)\n \n #storm.max_wind_radius = units.convert(storm.max_wind_radius, 'nmi', 'm')\n #storm.max_wind_speed = units.convert(storm.max_wind_speed,\n # 'knots', 'm/s')\n \n #units.convert(storm.max_wind_radius, 'nmi', 'm')\n #units.convert(storm.max_wind_speed,\n # 'knots', 'm/s')\n\n # Define maximum radius for all sotrms\n storm.storm_radius = 50e3 * numpy.ones(len(index_set))\n\n # From Kossin, J. P. 
WAF 2015\n a = -0.0025\n b = -0.36\n c = 1021.36\n storm.central_pressure = ( a * storm.max_wind_speed**2\n + b * storm.max_wind_speed\n + c)\n\n include_storm = True\n if mask_distance is not None:\n distance = numpy.sqrt((storm.eye_location[:, 0] -\n mask_coordinate[0])**2 +\n (storm.eye_location[:, 1] -\n mask_coordinate[1])**2)\n inlcude_storm = numpy.any(distance < mask_distance)\n\n if mask_category is not None:\n category = storm.category(categorization=categorization)\n include_storm = numpy.any(category > mask_category)\n #raise NotImplementedError(\"Category masking not implemented.\")\n\n if include_storm:\n storms.append(storm)\n return storms",
"def quantile_5(self):\n return mstats.mquantiles(self.masked_img, prob=0.05, axis=0).flatten()",
"def init_year(ts):\n ncfn = f\"/mesonet/data/era5/{ts.year}_era5land_hourly.nc\"\n if os.path.isfile(ncfn):\n LOG.info(\"Cowardly refusing to overwrite: %s\", ncfn)\n return\n nc = ncopen(ncfn, \"w\")\n nc.title = f\"ERA5 Hourly Reanalysis {ts.year}\"\n nc.platform = \"Grided Observations\"\n nc.description = \"ERA5 hourly analysis\"\n nc.institution = \"Iowa State University, Ames, IA, USA\"\n nc.source = \"Iowa Environmental Mesonet\"\n nc.project_id = \"IEM\"\n nc.realization = 1\n nc.Conventions = \"CF-1.0\"\n nc.contact = \"Daryl Herzmann, akrherz@iastate.edu, 515-294-5978\"\n nc.history = f\"{datetime.datetime.now():%d %B %Y} Generated\"\n nc.comment = \"No Comment at this time\"\n\n # Setup Dimensions\n nc.createDimension(\"lat\", (iemre.NORTH - iemre.SOUTH) * 10.0 + 1)\n nc.createDimension(\"lon\", (iemre.EAST - iemre.WEST) * 10.0 + 1)\n ts2 = datetime.datetime(ts.year + 1, 1, 1)\n days = (ts2 - ts).days\n LOG.info(\"Year %s has %s days\", ts.year, days)\n nc.createDimension(\"time\", int(days) * 24)\n nc.createDimension(\"soil_level\", 4)\n\n ncv = nc.createVariable(\"soil_level\", float, (\"soil_level\",))\n ncv.units = \"m\"\n # midpoints\n ncv[:] = [0.03, 0.14, 0.64, 1.94]\n\n # Setup Coordinate Variables\n lat = nc.createVariable(\"lat\", float, (\"lat\",))\n lat.units = \"degrees_north\"\n lat.long_name = \"Latitude\"\n lat.standard_name = \"latitude\"\n lat.axis = \"Y\"\n lat[:] = np.arange(iemre.SOUTH, iemre.NORTH + 0.001, 0.1)\n\n lon = nc.createVariable(\"lon\", float, (\"lon\",))\n lon.units = \"degrees_east\"\n lon.long_name = \"Longitude\"\n lon.standard_name = \"longitude\"\n lon.axis = \"X\"\n lon[:] = np.arange(iemre.WEST, iemre.EAST + 0.001, 0.1)\n\n tm = nc.createVariable(\"time\", float, (\"time\",))\n tm.units = f\"Hours since {ts.year}-01-01 00:00:0.0\"\n tm.long_name = \"Time\"\n tm.standard_name = \"time\"\n tm.axis = \"T\"\n tm.calendar = \"gregorian\"\n tm[:] = np.arange(0, int(days) * 24)\n\n # 0->65535\n tmpk = nc.createVariable(\n \"tmpk\", np.uint16, (\"time\", \"lat\", \"lon\"), fill_value=65535\n )\n tmpk.units = \"K\"\n tmpk.scale_factor = 0.01\n tmpk.long_name = \"2m Air Temperature\"\n tmpk.standard_name = \"2m Air Temperature\"\n tmpk.coordinates = \"lon lat\"\n\n # 0->65535 0 to 655.35\n dwpk = nc.createVariable(\n \"dwpk\", np.uint16, (\"time\", \"lat\", \"lon\"), fill_value=65335\n )\n dwpk.units = \"K\"\n dwpk.scale_factor = 0.01\n dwpk.long_name = \"2m Air Dew Point Temperature\"\n dwpk.standard_name = \"2m Air Dew Point Temperature\"\n dwpk.coordinates = \"lon lat\"\n\n # NOTE: we need to store negative numbers here, gasp\n # -32768 to 32767 so -98 to 98 mps\n uwnd = nc.createVariable(\n \"uwnd\", np.int16, (\"time\", \"lat\", \"lon\"), fill_value=32767\n )\n uwnd.scale_factor = 0.003\n uwnd.units = \"meters per second\"\n uwnd.long_name = \"U component of the wind\"\n uwnd.standard_name = \"U component of the wind\"\n uwnd.coordinates = \"lon lat\"\n\n # NOTE: we need to store negative numbers here, gasp\n # -32768 to 32767 so -98 to 98 mps\n vwnd = nc.createVariable(\n \"vwnd\", np.int16, (\"time\", \"lat\", \"lon\"), fill_value=32767\n )\n vwnd.scale_factor = 0.003\n vwnd.units = \"meters per second\"\n vwnd.long_name = \"V component of the wind\"\n vwnd.standard_name = \"V component of the wind\"\n vwnd.coordinates = \"lon lat\"\n\n # 0->65535 0 to 327.675\n p01m = nc.createVariable(\n \"p01m\", np.uint16, (\"time\", \"lat\", \"lon\"), fill_value=65535\n )\n p01m.units = \"mm\"\n p01m.scale_factor = 0.005\n p01m.long_name = 
\"Precipitation\"\n p01m.standard_name = \"Precipitation\"\n p01m.coordinates = \"lon lat\"\n p01m.description = \"Precipitation accumulation for the hour valid time\"\n\n # NOTE: Condensation is + and Evapration is -\n # -128 to 127 for -25 to 25\n ncv = nc.createVariable(\n \"evap\", np.int8, (\"time\", \"lat\", \"lon\"), fill_value=127\n )\n ncv.units = \"mm\"\n ncv.scale_factor = 0.4\n ncv.long_name = \"Evaporation\"\n ncv.standard_name = \"Evaporation\"\n ncv.coordinates = \"lon lat\"\n ncv.description = \"Evaporation for the hour valid time\"\n\n # 0 -> 65535 so 0 to 1966\n ncv = nc.createVariable(\n \"rsds\", np.uint16, (\"time\", \"lat\", \"lon\"), fill_value=65535\n )\n ncv.units = \"W m-2\"\n ncv.scale_factor = 0.03\n ncv.long_name = \"surface_downwelling_shortwave_flux_in_air\"\n ncv.standard_name = \"surface_downwelling_shortwave_flux_in_air\"\n ncv.coordinates = \"lon lat\"\n ncv.description = \"Global Shortwave Irradiance\"\n\n # 0->255 [213 333]\n ncv = nc.createVariable(\n \"soilt\",\n np.uint8,\n (\"time\", \"soil_level\", \"lat\", \"lon\"),\n fill_value=255,\n )\n ncv.units = \"K\"\n ncv.add_offset = 213.0\n ncv.scale_factor = 0.5\n ncv.long_name = \"Soil Temperature\"\n ncv.standard_name = \"Soil Temperature\"\n ncv.coordinates = \"lon lat\"\n\n # 0->255 [0 0.8] Hope this works?\n ncv = nc.createVariable(\n \"soilm\",\n np.uint8,\n (\"time\", \"soil_level\", \"lat\", \"lon\"),\n fill_value=255,\n )\n ncv.units = \"m^3 m^-3\"\n ncv.scale_factor = 0.0031\n ncv.long_name = \"Volumetric Soil Moisture\"\n ncv.standard_name = \"Volumetric Soil Moisture\"\n ncv.coordinates = \"lon lat\"\n\n nc.close()",
"def calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim",
"def load_nimbus5(filepath):\n assert os.path.isfile(filepath), 'Nimbus-5 data file cannot be found at {0}.'.format(filepath)\n with h5py.File(filepath, 'r') as data:\n ice_conc = data['Raster Image #0'].value.astype(float32)\n # note: missing data encoded as 157, land/coast/lake masks as 168/178/120 respectively, and\n # ocean (ice-free) as 125; here, ocean is changed to 0% ice, other flags changed to NaN\n ice_conc[logical_or.reduce((ice_conc == 157,ice_conc == 168,ice_conc == 178,ice_conc == 120))] = NaN\n ice_conc[ice_conc == 125] = 0\n return ice_conc"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to perform a 4-year moving window filter for a single land cover value (such as Forest as 1) for all years in an image. Calls the function mask4. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central positions of four consecutive years, and if the extremities of the window are identical but the centre positions are not, the central pixels are reclassified to match the class of their temporal neighbours. This function can be applied to whichever land cover values the user chooses, such as all of the land cover values or a select few. | def applyWindow4years(imagem, value, bandNames):
img_out = imagem.select(bandNames[0])
for i in np.arange(1, len(bandNames)-2):
img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))
img_out = img_out.addBands(imagem.select(bandNames[-2]))
img_out = img_out.addBands(imagem.select(bandNames[-1]))
return img_out | [
"def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out",
"def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out",
"def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)",
"def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)",
"def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img",
"def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask",
"def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = 
ee.Image('USDA/NASS/CDL/2017').select('cultivated'). \\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands",
"def load_chaz_storms(path, mask_distance=None, mask_coordinate=(0.0, 0.0),\n mask_category=None, categorization=\"NHC\"):\n\n\n # Load the mat file and extract pertinent data\n #data = xarray.open_dataset(path)\n data = netCDF4.Dataset(path)\n print(data.dimensions.keys())\n print(data.variables.keys())\n\n # Days from 1-1-1950\n # start_date = datetime.datetime(1950, 1, 1)\n #stormIDs = data.variables['time']['stormID'][:]\n\n #stormIDs = data['time']['stormID']\n storms = []\n\n time_length = data['Mwspd'].shape[0]\n num_tracks = data['Mwspd'].shape[1]\n num_intensities = data['Mwspd'].shape[2]\n\n for i in range(num_tracks):\n\n # Extract initial data ranges\n for n in range(num_intensities):\n\n # Use intensity to find non-nans and extract correct arrays\n max_wind_speed = numpy.array(data.variables['Mwspd'][:, i, n])\n index_set = (numpy.isnan(max_wind_speed) - 1).nonzero()[0]\n #print(\"\")\n #print(\"Max Wind speed\")\n #print(max_wind_speed)\n\n index = len(index_set)\n t = numpy.array(data.variables['time'][0:index, i])\n x = numpy.array(data.variables['longitude'][0:index, i])\n y = numpy.array(data.variables['latitude'][0:index, i])\n\n #print(\"\")\n #print(x)\n #print(\"\")\n #print(y)\n\n # Remove zero-length intensities\n if len(index_set) > 0:\n # Create storm object\n storm = clawpack.geoclaw.surge.storm.Storm()\n storm.ID = i * num_intensities + n\n\n # Initialize the date set\n\n storm.t = [datetime.datetime(2000, 1, 1, 0) + \\\n datetime.timedelta(hours=6) * i\n for i in range(len(index_set))]\n storm.time_offset = storm.t[0]\n #storm.t = t[index_set]\n ## Add fields with proper non-nan values\n #storm.t[0] = datetime.datetime(2007, 1, 1, 0)\n #for i in range(1, index_set):\n # storm.t[i] = storm.t[i-1] + datetime.timedelta(hours = 6)\n\n #storm.t = t[index_set]\n #storm.t -= storm.t[0]\n #storm.t *= 24.0 * 60.0**2\n\n ## Check for missing last time point and adjust index set\n #if storm.t[-1] < 0:\n # index_set = index_set[:-1]\n # #print(index_set)\n # storm.t = storm.t[:-1]\n\n storm.eye_location = numpy.empty((len(index_set), 2))\n x[index_set] = x[index_set] - 360.0 * numpy.ones(len(index_set))\n storm.eye_location[:, 0] = x[index_set]\n storm.eye_location[:, 1] = y[index_set]\n\n #storm.eye_location = numpy.empty((2, len(index_set)))\n #storm.eye_location[0, :] = x[index_set]\n #storm.eye_location[1, :] = y[index_set]\n\n # TODO: Convert from knots\n storm.max_wind_speed = max_wind_speed[index_set]\n #print(\"Storm Max Wind Speed in Knots\") \n #print(storm.max_wind_speed)\n #print(\" \") \n # Assumed values\n storm.storm_radius = 500000 * numpy.ones(len(index_set))\n\n\n # Calculate Radius of Max Wind\n C0 = 218.3784 * numpy.ones(len(index_set))\n storm.max_wind_radius = C0 - 1.2014 * storm.max_wind_speed + \\\n (storm.max_wind_speed / 10.9884)**2 - \\\n (storm.max_wind_speed / 35.3052)**3 - \\\n 145.5090 * \\\n numpy.cos(storm.eye_location[:, 1] * 0.0174533)\n \n #storm.max_wind_radius = units.convert(storm.max_wind_radius, 'nmi', 'm')\n #storm.max_wind_speed = units.convert(storm.max_wind_speed,\n # 'knots', 'm/s')\n \n #units.convert(storm.max_wind_radius, 'nmi', 'm')\n #units.convert(storm.max_wind_speed,\n # 'knots', 'm/s')\n\n # Define maximum radius for all sotrms\n storm.storm_radius = 50e3 * numpy.ones(len(index_set))\n\n # From Kossin, J. P. 
WAF 2015\n a = -0.0025\n b = -0.36\n c = 1021.36\n storm.central_pressure = ( a * storm.max_wind_speed**2\n + b * storm.max_wind_speed\n + c)\n\n include_storm = True\n if mask_distance is not None:\n distance = numpy.sqrt((storm.eye_location[:, 0] -\n mask_coordinate[0])**2 +\n (storm.eye_location[:, 1] -\n mask_coordinate[1])**2)\n inlcude_storm = numpy.any(distance < mask_distance)\n\n if mask_category is not None:\n category = storm.category(categorization=categorization)\n include_storm = numpy.any(category > mask_category)\n #raise NotImplementedError(\"Category masking not implemented.\")\n\n if include_storm:\n storms.append(storm)\n return storms",
"def get_S1(\n self,\n year,\n month,\n day,\n tempfilter=True,\n tempfilter_radius=7,\n applylcmask=False,\n mask_globcover=True,\n dualpol=True,\n trackflt=None,\n maskwinter=False,\n masksnow=True,\n explicit_t_mask=None,\n ascending=False,\n maskLIA=True,\n ):\n\n def computeLIA(image):\n # comput the local incidence angle (LIA) based on the srtm and the s1 viewing angle\n # get the srtm\n srtm = ee.Image(\"USGS/SRTMGL1_003\")\n srtm_slope = ee.Terrain.slope(srtm)\n srtm_aspect = ee.Terrain.aspect(srtm)\n # get the S1 incidence angle\n inc = ee.Image(image).select(\"angle\")\n # comput the LIA\n s = srtm_slope.multiply(\n ee.Image.constant(277)\n .subtract(srtm_aspect)\n .multiply(math.pi / 180)\n .cos()\n )\n lia = inc.subtract(\n ee.Image.constant(90).subtract(ee.Image.constant(90).subtract(s))\n ).abs()\n # add band to current image\n return image.addBands(\n lia.select([\"angle\"], [\"lia\"]).reproject(srtm.projection())\n )\n\n def maskterrain(image):\n # mask for terrain, local incidence angle and high and low backscatter\n tmp = ee.Image(image)\n # srtm dem\n if maskLIA == False:\n gee_srtm = ee.Image(\"USGS/SRTMGL1_003\")\n gee_srtm_slope = ee.Terrain.slope(gee_srtm)\n mask = gee_srtm_slope.lt(20)\n else:\n lia = tmp.select(\"lia\")\n mask = lia.gt(20).bitwiseAnd(lia.lt(45))\n mask2 = tmp.lt(0).bitwiseAnd(tmp.gt(-25))\n mask = mask.bitwiseAnd(mask2)\n tmp = tmp.updateMask(mask)\n\n return tmp\n\n def masklc(image):\n # load land cover info\n corine = ee.Image(\"users/felixgreifeneder/corine\")\n\n # create lc mask\n valLClist = [10, 11, 12, 13, 18, 19, 20, 21, 26, 27, 28, 29]\n\n lcmask = (\n corine.eq(valLClist[0])\n .bitwiseOr(corine.eq(valLClist[1]))\n .bitwiseOr(corine.eq(valLClist[2]))\n .bitwiseOr(corine.eq(valLClist[3]))\n .bitwiseOr(corine.eq(valLClist[4]))\n .bitwiseOr(corine.eq(valLClist[5]))\n .bitwiseOr(corine.eq(valLClist[6]))\n .bitwiseOr(corine.eq(valLClist[7]))\n .bitwiseOr(corine.eq(valLClist[8]))\n .bitwiseOr(corine.eq(valLClist[9]))\n .bitwiseOr(corine.eq(valLClist[10]))\n .bitwiseOr(corine.eq(valLClist[11]))\n )\n\n tmp = ee.Image(image)\n\n tmp = tmp.updateMask(lcmask)\n return tmp\n\n def mask_lc_globcover(image):\n\n tmp = ee.Image(image)\n\n # load lc\n glbcvr = ee.Image(\"ESA/GLOBCOVER_L4_200901_200912_V2_3\").select(\"landcover\")\n\n valLClist = [\n 11,\n 14,\n 20,\n 30,\n 40,\n 50,\n 60,\n 70,\n 90,\n 100,\n 110,\n 120,\n 130,\n 140,\n 150,\n 160,\n 170,\n 180,\n 190,\n 200,\n 210,\n 220,\n 230,\n ]\n\n lcmask = (\n glbcvr.eq(valLClist[0])\n .bitwiseOr(glbcvr.eq(valLClist[1]))\n .bitwiseOr(glbcvr.eq(valLClist[2]))\n .bitwiseOr(glbcvr.eq(valLClist[3]))\n .bitwiseOr(glbcvr.eq(valLClist[4]))\n .bitwiseOr(glbcvr.eq(valLClist[5]))\n .bitwiseOr(glbcvr.eq(valLClist[6]))\n .bitwiseOr(glbcvr.eq(valLClist[7]))\n .bitwiseOr(glbcvr.eq(valLClist[8]))\n .bitwiseOr(glbcvr.eq(valLClist[9]))\n .bitwiseOr(glbcvr.eq(valLClist[10]))\n .bitwiseOr(glbcvr.eq(valLClist[11]))\n .bitwiseOr(glbcvr.eq(valLClist[12]))\n .bitwiseOr(glbcvr.eq(valLClist[13]))\n .bitwiseOr(glbcvr.eq(valLClist[14]))\n .bitwiseOr(glbcvr.eq(valLClist[15]))\n .bitwiseOr(glbcvr.eq(valLClist[16]))\n .bitwiseOr(glbcvr.eq(valLClist[17]))\n .bitwiseOr(glbcvr.eq(valLClist[18]))\n .bitwiseOr(glbcvr.eq(valLClist[19]))\n .bitwiseOr(glbcvr.eq(valLClist[20]))\n .bitwiseOr(glbcvr.eq(valLClist[21]))\n .bitwiseOr(glbcvr.eq(valLClist[22]))\n )\n\n tmp = tmp.updateMask(lcmask)\n\n return tmp\n\n def setresample(image):\n image = image.resample()\n return image\n\n def toln(image):\n\n tmp = ee.Image(image)\n\n # 
Convert to linear\n vv = ee.Image(10).pow(tmp.select(\"VV\").divide(10))\n if dualpol == True:\n vh = ee.Image(10).pow(tmp.select(\"VH\").divide(10))\n\n # Convert to ln\n out = vv.log()\n if dualpol == True:\n out = out.addBands(vh.log())\n out = out.select([\"constant\", \"constant_1\"], [\"VV\", \"VH\"])\n else:\n out = out.select([\"constant\"], [\"VV\"])\n\n return out.set(\"system:time_start\", tmp.get(\"system:time_start\"))\n\n def tolin(image):\n\n tmp = ee.Image(image)\n\n # Covert to linear\n vv = ee.Image(10).pow(tmp.select(\"VV\").divide(10))\n if dualpol == True:\n vh = ee.Image(10).pow(tmp.select(\"VH\").divide(10))\n\n # Convert to\n if dualpol == True:\n out = vv.addBands(vh)\n out = out.select([\"constant\", \"constant_1\"], [\"VV\", \"VH\"])\n else:\n out = vv.select([\"constant\"], [\"VV\"])\n\n return out.set(\"system:time_start\", tmp.get(\"system:time_start\"))\n\n def todb(image):\n\n tmp = ee.Image(image)\n\n return (\n ee.Image(10)\n .multiply(tmp.log10())\n .set(\"system:time_start\", tmp.get(\"system:time_start\"))\n )\n\n def applysnowmask(image):\n\n tmp = ee.Image(image)\n sdiff = tmp.select(\"VH\").subtract(snowref)\n wetsnowmap = sdiff.lte(-2.6).focal_mode(100, \"square\", \"meters\", 3)\n\n return tmp.updateMask(wetsnowmap.eq(0))\n\n def projectlia(image):\n tmp = ee.Image(image)\n trgtprj = tmp.select(\"VV\").projection()\n tmp = tmp.addBands(tmp.select(\"angle\").reproject(trgtprj), [\"angle\"], True)\n return tmp\n\n def apply_explicit_t_mask(image):\n\n t_mask = ee.Image(\"users/felixgreifeneder/\" + explicit_t_mask)\n mask = t_mask.eq(0)\n return image.updateMask(mask)\n\n ee.Reset()\n ee.Initialize()\n\n # load S1 data\n gee_s1_collection = ee.ImageCollection(\"COPERNICUS/S1_GRD\")\n\n # Filter the image collection\n gee_s1_filtered = (\n gee_s1_collection.filter(ee.Filter.eq(\"instrumentMode\", \"IW\"))\n .filterBounds(self.roi)\n .filter(ee.Filter.eq(\"platform_number\", \"A\"))\n .filter(ee.Filter.listContains(\"transmitterReceiverPolarisation\", \"VV\"))\n )\n\n if ascending == True:\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.eq(\"orbitProperties_pass\", \"ASCENDING\")\n )\n else:\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.eq(\"orbitProperties_pass\", \"DESCENDING\")\n )\n\n if dualpol == True:\n # Consider only dual-pol scenes\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.listContains(\"transmitterReceiverPolarisation\", \"VH\")\n )\n\n if trackflt is not None:\n # Specify track\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.eq(\"relativeOrbitNumber_start\", trackflt)\n )\n\n if maskwinter == True:\n # Mask winter based on DOY\n gee_s1_filtered = gee_s1_filtered.filter(ee.Filter.dayOfYear(121, 304))\n\n # add LIA\n if maskLIA == True:\n # compute the local incidence angle if it shall be used for masking\n gee_s1_filtered = gee_s1_filtered.map(computeLIA)\n s1_lia = gee_s1_filtered.select(\"lia\")\n else:\n s1_lia = None\n\n s1_angle = gee_s1_filtered.select(\"angle\")\n\n if applylcmask == True:\n # apply land-cover mask based on Corine\n gee_s1_filtered = gee_s1_filtered.map(masklc)\n if mask_globcover == True:\n # apply land-cover mask based on globcover\n gee_s1_filtered = gee_s1_filtered.map(mask_lc_globcover)\n\n # Enable bilinear resampling (instead of NN)\n gee_s1_filtered = gee_s1_filtered.map(setresample)\n\n if explicit_t_mask == None:\n # apply masking based on the terraing (LIA)\n gee_s1_filtered = gee_s1_filtered.map(maskterrain)\n else:\n # apply specific terrain mask\n gee_s1_filtered = 
gee_s1_filtered.map(apply_explicit_t_mask)\n\n if masksnow == True:\n # automatic wet snow masking\n gee_s1_linear_vh = gee_s1_filtered.map(tolin).select(\"VH\")\n snowref = ee.Image(10).multiply(\n gee_s1_linear_vh.reduce(ee.Reducer.intervalMean(5, 100)).log10()\n )\n gee_s1_filtered = gee_s1_filtered.map(applysnowmask)\n\n #### SHOULD BE IF STATEMENT HERE\n\n # create a list of availalbel dates\n tmp = gee_s1_filtered.getInfo()\n tmp_ids = [x[\"properties\"][\"system:index\"] for x in tmp[\"features\"]]\n \n dates = np.array(\n [\n dt.date(year=int(x[17:21]), month=int(x[21:23]), day=int(x[23:25]))\n for x in tmp_ids\n ]\n )\n \n if not len(dates):\n raise Exception(\n \"There are no S1 images with the selected filters, please consider \"\n \"changing the area of interest or selecting a different orbit\"\n )\n \n # find the closest acquisitions\n doi = dt.date(year=year, month=month, day=day)\n doi_index = np.argmin(np.abs(dates - doi))\n date_selected = dates[doi_index]\n\n # filter imagecollection for respective date\n gee_s1_drange = gee_s1_filtered.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n s1_angle_drange = s1_angle.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n if maskLIA == True:\n s1_lia_drange = s1_lia.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n if gee_s1_drange.size().getInfo() > 1:\n if maskLIA == True:\n s1_lia = s1_lia_drange.mosaic()\n s1_angle = s1_angle_drange.mosaic()\n s1_sig0 = gee_s1_drange.mosaic()\n s1_lia = ee.Image(s1_lia.copyProperties(s1_lia_drange.first()))\n s1_sig0 = ee.Image(s1_sig0.copyProperties(gee_s1_drange.first()))\n else:\n s1_sig0 = ee.Image(gee_s1_drange.first())\n s1_angle = ee.Image(s1_angle_drange.first())\n s1_lia = ee.Image(s1_lia_drange.first())\n\n # fetch image from image collection\n # get the track number\n s1_sig0_info = s1_sig0.getInfo()\n track_nr = s1_sig0_info[\"properties\"][\"relativeOrbitNumber_start\"]\n\n # only uses images of the same track\n gee_s1_filtered = gee_s1_filtered.filterMetadata(\n \"relativeOrbitNumber_start\", \"equals\", track_nr\n )\n\n if tempfilter == True:\n # despeckle\n radius = tempfilter_radius\n units = \"pixels\"\n gee_s1_linear = gee_s1_filtered.map(tolin)\n gee_s1_dspckld_vv = self._multitemporalDespeckle(\n gee_s1_linear.select(\"VV\"),\n radius,\n units,\n {\"before\": -12, \"after\": 12, \"units\": \"month\"},\n )\n gee_s1_dspckld_vv = gee_s1_dspckld_vv.map(todb)\n gee_s1_fltrd_vv = gee_s1_dspckld_vv.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n s1_sig0_vv = gee_s1_fltrd_vv.mosaic()\n\n if dualpol == True:\n gee_s1_dspckld_vh = self._multitemporalDespeckle(\n gee_s1_linear.select(\"VH\"),\n radius,\n units,\n {\"before\": -12, \"after\": 12, \"units\": \"month\"},\n )\n gee_s1_dspckld_vh = gee_s1_dspckld_vh.map(todb)\n gee_s1_fltrd_vh = gee_s1_dspckld_vh.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n s1_sig0_vh = gee_s1_fltrd_vh.mosaic()\n\n if dualpol == True:\n s1_sig0 = s1_sig0_vv.addBands(s1_sig0_vh).select(\n [\"constant\", \"constant_1\"], [\"VV\", \"VH\"]\n )\n else:\n s1_sig0 = s1_sig0_vv.select([\"constant\"], [\"VV\"])\n\n # extract information\n s1_sig0_vv = s1_sig0.select(\"VV\")\n s1_sig0_vv = 
s1_sig0_vv.clip(self.roi)\n if dualpol == True:\n s1_sig0_vh = s1_sig0.select(\"VH\")\n s1_sig0_vh = s1_sig0_vh.clip(self.roi)\n\n gee_s1_ln = gee_s1_filtered.map(toln)\n gee_s1_lin = gee_s1_filtered.map(tolin)\n k1vv = ee.Image(gee_s1_ln.select(\"VV\").mean()).clip(self.roi)\n k2vv = ee.Image(gee_s1_ln.select(\"VV\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n mean_vv = ee.Image(gee_s1_lin.select(\"VV\").mean()).clip(self.roi)\n std_vv = ee.Image(gee_s1_lin.select(\"VV\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n\n if dualpol == True:\n k1vh = ee.Image(gee_s1_ln.select(\"VH\").mean()).clip(self.roi)\n k2vh = ee.Image(gee_s1_ln.select(\"VH\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n mean_vh = ee.Image(gee_s1_lin.select(\"VH\").mean()).clip(self.roi)\n std_vh = ee.Image(gee_s1_lin.select(\"VH\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n\n # export\n if dualpol == False:\n self.S1_SIG0_VV_db = s1_sig0_vv\n self.S1_ANGLE = s1_angle\n self.K1VV = k1vv\n self.K2VV = k2vv\n self.S1_DATE = date_selected\n else:\n self.S1_SIG0_VV_db = s1_sig0_vv\n self.S1_SIG0_VH_db = s1_sig0_vh\n self.S1_ANGLE = s1_angle\n self.K1VV = k1vv\n self.K1VH = k1vh\n self.K2VV = k2vv\n self.K2VH = k2vh\n self.S1_DATE = date_selected\n self.S1MEAN_VV = mean_vv\n self.S1MEAN_VH = mean_vh\n self.S1STD_VV = std_vv\n self.S1STD_VH = std_vh\n\n if maskLIA == True:\n self.S1_LIA = s1_lia",
"def request_band_extract(file_prefix, points_layer, region, years, filter_bounds=False):\n roi = ee.FeatureCollection(region)\n plots = ee.FeatureCollection(points_layer)\n for yr in years:\n stack = stack_bands(yr, roi)\n\n if filter_bounds:\n plots = plots.filterBounds(roi)\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = stack.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)\n exit()",
"def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered",
"def resample(directory_workspace, file, directory_main, dst_transform):\n for name in list_band_brdf:\n #Adjust image on Sentinel-2 image with cubic convolution\n resample_band(os.path.join(directory_workspace, file+name+\"_intermediaire.tif\"), os.path.join(directory_workspace, file+name), dst_transform, 1, np.float32)\n for name in list_band_without_brdf:\n if name == \"_toa_band9.tif\":\n #Adjust image on Sentinel-2 image with cubic convolution\n resample_band(os.path.join(directory_workspace, file+name+\"_intermediaire.tif\"), os.path.join(directory_main, file+name), dst_transform, 10000, np.int16)\n else:\n resample_band(os.path.join(directory_workspace, file+name+\"_intermediaire.tif\"), os.path.join(directory_workspace, file+name), dst_transform, 1, np.float32)",
"def make_lightcurve(centroids, bands, band_idx, box_size, aperture_radius):\n band_names = np.sort(list(bands.keys()))\n num_stars= range(len(centroids))\n for star_idx in num_stars:\n xcenters, ycenters = [],[]\n aperture_sums = []\n background = []\n fwhms = []\n obs_time = []\n obs_mjd = []\n ##extract lightcurve (enumerate all frames) in a given band\n for i in tqdm(bands[band_names[band_idx]]):\n #import pdb; pdb.set_trace()\n hdr = fits.open(i)[0].header\n img = fits.open(i)[0].data\n #get dates from fits header\n date=dt.strptime(hdr['DATE-OBS'], '%Y-%m-%d')\n time=dt.strptime(hdr['EXP-STRT'], '%H:%M:%S.%f')\n newdate = time.replace(year=date.year, month=date.month, day=date.day)\n obs_time.append(newdate)\n obs_mjd.append(hdr['MJD-STRT'])\n\n #crop\n #import pdb; pdb.set_trace()\n image_crop = get_crop(img, centroids[star_idx], box_size)\n\n ###aperture photometry###\n #compute centroid\n centroid = get_centroid(image_crop)\n\n xcenters.append(centroid[0])\n ycenters.append(centroid[1])\n\n #compute backgound\n bkg_mean=get_bkg(image_crop, centroid, r_in=20., r_out=30.)\n\n #measure fwhm\n fwhm=get_fwhm(image_crop)\n\n #without aperture photometry\n\n aperture_sum = get_phot(image_crop, centroid, r=aperture_radius)\n\n #minus background wihtin annulus\n #aperture_sum = get_phot2(image_crop,bkg_mean,centroid,r=aperture_radius)\n\n aperture_sums.append(aperture_sum)\n background.append(bkg_mean)\n\n # if fwhm < 10*np.median(fwhms):\n # fwhms.append(fwhm)\n # else:\n # fwhms.append(np.nan)\n fwhms.append(fwhm)\n\n #output as dataframe of given band and star\n\n dfs.append(pd.DataFrame(\n {'{0}_{1}_x'.format(band_names[band_idx], str(star_idx)) : xcenters,\n '{0}_{1}_y'.format(band_names[band_idx], str(star_idx)) : ycenters,\n '{0}_{1}_flux_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : aperture_sums,\n '{0}_{1}_bkg_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : background,\n '{0}_{1}_fwhm_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : fwhms},\n #'airmass' : airmass\n index = obs_time))\n return dfs, band_idx, band_names",
"def sky_groups():\n cam = \"sky\"\n for light, lens, ndc, good, window in [(True, True, False, True, True),\n (True, True, False, True, False),\n (True, True, False, False, False),\n (True, False, False, True, False),\n (True, False, False, False, False),\n (False, True, False, True, True),\n (False, True, False, False, True)]:\n filenames = flatfiles(cam)\n filenames = get_light_sky(filenames, light)\n filenames = get_lens(filenames, lens)\n filenames = get_ndc(filenames, ndc)\n filenames = get_good(filenames, good)\n filenames = get_window_sky(filenames, window)\n images = valid_images(filenames)\n process_images(images, cam, (light, lens, ndc, good, window))",
"def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered",
"def calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim",
"def get_forecast_regions(year, get_b_regions=False):\n\n if year == '2012-13':\n # varsling startet januar 2013\n region_ids = [106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129]\n elif year == '2013-14':\n # Svartisen (131) was started in april 2014\n # Nordenskioldland (130) and Hallingdal (132) was established in april 2014, but not used before th season after.\n region_ids = [106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132]\n elif year == '2014-15':\n # Salten (133) was started in mars 2015.\n # We tested Nordeskioldland (130) in may 2015.\n region_ids = [106, 107, 108, 109, 110, 111, 112, 114, 115, 116, 117, 118, 119, 121, 122, 123, 124, 127, 128, 129, 130, 131, 132, 133]\n if get_b_regions:\n region_ids += [n for n in range(151, 171, 1)]\n elif year == '2015-16':\n region_ids = [106, 107, 108, 109, 110, 111, 112, 114, 115, 116, 117, 118, 119, 121, 122, 123, 124, 127, 128, 129, 130, 131, 132, 133]\n if get_b_regions:\n region_ids += [n for n in range(151, 171, 1)]\n elif year == '2016-17' or year == '2017-18':\n # Total makeover in november 2016\n region_ids = [3003, 3007, 3009, 3010, 3011, 3012, 3013, 3014, 3015, 3016, 3017, 3022, 3023, 3024, 3027, 3028, 3029, 3031, 3032, 3034, 3035]\n if get_b_regions:\n region_ids += [3001, 3002, 3004, 3005, 3006, 3008, 3018, 3019, 3020, 3021, 3025, 3026, 3030, 3033, 3036, 3037, 3038, 3039, 3040, 3041, 3042, 3043, 3044, 3045, 3046]\n elif year == '2018-19':\n # Heiane added 2019 february\n region_ids = [3003, 3007, 3009, 3010, 3011, 3012, 3013, 3014, 3015, 3016, 3017, 3022, 3023, 3024, 3027, 3028, 3029, 3031, 3032, 3034, 3035, 3037]\n if get_b_regions:\n region_ids += [3001, 3002, 3004, 3005, 3006, 3008, 3018, 3019, 3020, 3021, 3025, 3026, 3030, 3033, 3036, 3038, 3039, 3040, 3041, 3042, 3043, 3044, 3045, 3046]\n elif year == '2019-20':\n region_ids = [3003, 3007, 3009, 3010, 3011, 3012, 3013, 3014, 3015, 3016, 3017, 3022, 3023, 3024, 3027, 3028, 3029, 3031, 3032, 3034, 3035, 3037]\n if get_b_regions:\n region_ids += [3001, 3002, 3004, 3005, 3006, 3008, 3018, 3019, 3020, 3021, 3025, 3026, 3030, 3033, 3036, 3038, 3039, 3040, 3041, 3042, 3043, 3044, 3045, 3046]\n elif year == '2020-21':\n region_ids = [3003, 3007, 3009, 3010, 3011, 3012, 3013, 3014, 3015, 3016, 3017, 3022, 3023, 3024, 3027, 3028, 3029, 3031, 3032, 3034, 3035, 3037]\n if get_b_regions:\n region_ids += [3001, 3002, 3004, 3005, 3006, 3008, 3018, 3019, 3020, 3021, 3025, 3026, 3030, 3033, 3036, 3038, 3039, 3040, 3041, 3042, 3043, 3044, 3045, 3046]\n else:\n region_ids = \"No valid period given.\"\n\n return region_ids",
"def cut4(image):\r\n i, j = image.shape\r\n a1 = image[:i // 2, :j // 2]\r\n a2 = image[i // 2:, :j // 2]\r\n a3 = image[:i // 2, j // 2:]\r\n a4 = image[i // 2:, j // 2:]\r\n return a1, a2, a3, a4",
"def read_bands(path, file):\n\n raster = gdal.Open(f'{path}/{file}')\n\n band = []\n for i in range(raster.RasterCount):\n band.append(raster.GetRasterBand(i+1).ReadAsArray())\n\n band1 = band[0] \n band2 = band[1]\n band3 = band[2] \n band4 = band[3]\n\n for i in range(band1.shape[0]):\n for j in range(band1.shape[1]):\n if(band4[i][j]<=0.38): \n band1[i][j] = 0\n band2[i][j] = 0\n band3[i][j] = 0\n band4[i][j] = 0\n \n band4_nonzero = np.count_nonzero(band4)\n return band4_nonzero"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to perform a 3-year moving window filter for a single land cover value (such as Forest, coded as 1) for all years in an image. Calls the function mask3. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central position of three consecutive years, and if the two outer years are identical but the centre position is not, then the central pixels are reclassified to match their temporal neighbours' class. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few. | def applyWindow3years(imagem, value, bandNames):
    #Keep the first year unchanged, since it has no earlier neighbour to compare against
    img_out = imagem.select(bandNames[0])
    #Slide a 3-year window over the interior years and apply mask3 to each consecutive triplet of bands
    for i in np.arange(1, len(bandNames)-1):
        img_out = img_out.addBands(mask3(imagem, value, bandNames[(i-1):(i+2)]))
    #Keep the last year unchanged, since it has no later neighbour to compare against
    img_out = img_out.addBands(imagem.select(bandNames[-1]))
return img_out | [
"def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out",
"def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out",
"def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)",
"def zero_three_cloud_contrast(img):\n # Temprary, I intend to change this slightly later.\n img2 = np.asarray(Image.open(\"Images/Original/KPNO/20171108/r_ut052936s31200.png\").convert(\"L\"))\n\n img3 = np.copy(img.data)\n img1 = np.int16(img.data)\n img2 = np.int16(img2)\n\n # Finds the difference from the \"standard\" .03s image.\n # Then subtracts that value from the entire image to normalize it to\n # standard image color.\n val = img1[510, 510] - img2[510, 510]\n img1 = img1 - val\n\n # Subtracts standard image from current image.\n # Performs closing to clean up some speckling in lower band of image.\n test = io_util.image_diff(img1, img2)\n test = ndimage.grey_closing(test, size=(2, 2))\n\n # Clouds are regions above the average value of the completed transform.\n avg = np.mean(test)\n cond = np.where(test > avg, 0, 1)\n\n # Increases black sky brightness in images where the moon is alone (thanks\n # to low dynamic range the sky is black because the moon is so bright)\n img3 = np.where(img3 < 150, img3 + 40, img3)\n final = np.multiply(img3, cond)\n\n # Find the mask and black out those pixels.\n masking = mask.generate_mask()\n\n final = AllSkyImage(img.name, img.date, img.camera, final)\n final = mask.apply_mask(masking, final)\n\n return final",
"def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)",
"def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img",
"def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = 
ee.Image('USDA/NASS/CDL/2017').select('cultivated'). \\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands",
"def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask",
"def _mask3d(self, n, i, window):\n\n n = np.array(n)\n i = np.array(i)\n\n w2 = (window - 1) // 2\n\n x1, y1, z1 = np.clip(i - w2, 0 * n, n)\n x2, y2, z2 = np.clip(i + w2 + 1, 0 * n, n)\n\n mask = np.zeros(n, dtype=np.bool)\n mask[x1:x2, y1:y2, z1:z2] = True\n\n return mask",
"def load_chaz_storms(path, mask_distance=None, mask_coordinate=(0.0, 0.0),\n mask_category=None, categorization=\"NHC\"):\n\n\n # Load the mat file and extract pertinent data\n #data = xarray.open_dataset(path)\n data = netCDF4.Dataset(path)\n print(data.dimensions.keys())\n print(data.variables.keys())\n\n # Days from 1-1-1950\n # start_date = datetime.datetime(1950, 1, 1)\n #stormIDs = data.variables['time']['stormID'][:]\n\n #stormIDs = data['time']['stormID']\n storms = []\n\n time_length = data['Mwspd'].shape[0]\n num_tracks = data['Mwspd'].shape[1]\n num_intensities = data['Mwspd'].shape[2]\n\n for i in range(num_tracks):\n\n # Extract initial data ranges\n for n in range(num_intensities):\n\n # Use intensity to find non-nans and extract correct arrays\n max_wind_speed = numpy.array(data.variables['Mwspd'][:, i, n])\n index_set = (numpy.isnan(max_wind_speed) - 1).nonzero()[0]\n #print(\"\")\n #print(\"Max Wind speed\")\n #print(max_wind_speed)\n\n index = len(index_set)\n t = numpy.array(data.variables['time'][0:index, i])\n x = numpy.array(data.variables['longitude'][0:index, i])\n y = numpy.array(data.variables['latitude'][0:index, i])\n\n #print(\"\")\n #print(x)\n #print(\"\")\n #print(y)\n\n # Remove zero-length intensities\n if len(index_set) > 0:\n # Create storm object\n storm = clawpack.geoclaw.surge.storm.Storm()\n storm.ID = i * num_intensities + n\n\n # Initialize the date set\n\n storm.t = [datetime.datetime(2000, 1, 1, 0) + \\\n datetime.timedelta(hours=6) * i\n for i in range(len(index_set))]\n storm.time_offset = storm.t[0]\n #storm.t = t[index_set]\n ## Add fields with proper non-nan values\n #storm.t[0] = datetime.datetime(2007, 1, 1, 0)\n #for i in range(1, index_set):\n # storm.t[i] = storm.t[i-1] + datetime.timedelta(hours = 6)\n\n #storm.t = t[index_set]\n #storm.t -= storm.t[0]\n #storm.t *= 24.0 * 60.0**2\n\n ## Check for missing last time point and adjust index set\n #if storm.t[-1] < 0:\n # index_set = index_set[:-1]\n # #print(index_set)\n # storm.t = storm.t[:-1]\n\n storm.eye_location = numpy.empty((len(index_set), 2))\n x[index_set] = x[index_set] - 360.0 * numpy.ones(len(index_set))\n storm.eye_location[:, 0] = x[index_set]\n storm.eye_location[:, 1] = y[index_set]\n\n #storm.eye_location = numpy.empty((2, len(index_set)))\n #storm.eye_location[0, :] = x[index_set]\n #storm.eye_location[1, :] = y[index_set]\n\n # TODO: Convert from knots\n storm.max_wind_speed = max_wind_speed[index_set]\n #print(\"Storm Max Wind Speed in Knots\") \n #print(storm.max_wind_speed)\n #print(\" \") \n # Assumed values\n storm.storm_radius = 500000 * numpy.ones(len(index_set))\n\n\n # Calculate Radius of Max Wind\n C0 = 218.3784 * numpy.ones(len(index_set))\n storm.max_wind_radius = C0 - 1.2014 * storm.max_wind_speed + \\\n (storm.max_wind_speed / 10.9884)**2 - \\\n (storm.max_wind_speed / 35.3052)**3 - \\\n 145.5090 * \\\n numpy.cos(storm.eye_location[:, 1] * 0.0174533)\n \n #storm.max_wind_radius = units.convert(storm.max_wind_radius, 'nmi', 'm')\n #storm.max_wind_speed = units.convert(storm.max_wind_speed,\n # 'knots', 'm/s')\n \n #units.convert(storm.max_wind_radius, 'nmi', 'm')\n #units.convert(storm.max_wind_speed,\n # 'knots', 'm/s')\n\n # Define maximum radius for all sotrms\n storm.storm_radius = 50e3 * numpy.ones(len(index_set))\n\n # From Kossin, J. P. 
WAF 2015\n a = -0.0025\n b = -0.36\n c = 1021.36\n storm.central_pressure = ( a * storm.max_wind_speed**2\n + b * storm.max_wind_speed\n + c)\n\n include_storm = True\n if mask_distance is not None:\n distance = numpy.sqrt((storm.eye_location[:, 0] -\n mask_coordinate[0])**2 +\n (storm.eye_location[:, 1] -\n mask_coordinate[1])**2)\n inlcude_storm = numpy.any(distance < mask_distance)\n\n if mask_category is not None:\n category = storm.category(categorization=categorization)\n include_storm = numpy.any(category > mask_category)\n #raise NotImplementedError(\"Category masking not implemented.\")\n\n if include_storm:\n storms.append(storm)\n return storms",
"def third_octave_band(ref_freq=1000, i_band=None, n_band=18):\n\n if i_band is not None:\n k = i_band\n else:\n k = np.arange(-np.floor((n_band - 1) / 2), np.floor(n_band / 2 + 1))\n\n fc = 2 ** (k / 3) * ref_freq\n fl = fc * 2 ** (-1/6)\n fu = fc * 2 ** (1/6)\n\n return fc, fl, fu",
"def masked_f3kdb(clip: vs.VideoNode,\n rad: int = 16,\n thr: Union[int, List[int]] = 24,\n grain: Union[int, List[int]] = [12, 0],\n mask_args: Dict[str, Any] = {}\n ) -> vs.VideoNode:\n from debandshit import dumb3kdb\n\n deb_mask_args: Dict[str, Any] = dict(brz=(1000, 2750))\n deb_mask_args |= mask_args\n\n bits, clip = _get_bits(clip)\n\n deband_mask = detail_mask(clip, **deb_mask_args)\n\n deband = dumb3kdb(clip, radius=rad, threshold=thr, grain=grain, seed=69420)\n deband_masked = core.std.MaskedMerge(deband, clip, deband_mask)\n deband_masked = deband_masked if bits == 16 else depth(deband_masked, bits)\n return deband_masked",
"def landsat_fmask_cloud_mask_func(img):\n fmask = img.select('cfmask')\n # CLouds (4), shadows (2), and snow (3)\n cloud_mask = fmask.eq(2).Or(fmask.eq(3)).Or(fmask.eq(4))\n # cloud_mask = cloud_mask.Or(fmask.eq(1)) # Water\n # cloud_mask = cloud_mask.Or(\n # fmask.eq(4).multiply(img.select('cfmask_conf').divide(3)))\n img = img.updateMask(cloud_mask.neq(1))\n return img",
"def load_copernicus_ammonia(layers, time_slice, lat_slice, lon_slice, verbose=False):\n xr_layers = []\n\n if 'agl' in layers:\n xr_layers.append(xr.load_dataset(\n './data/copernicus/ammonia/CAMS-GLOB-ANT_Glb_0.1x0.1_anthro_nh3_v4.2_monthly_agl.nc').agl.sel(\n time=time_slice, lat=lat_slice, lon=lon_slice))\n\n if 'ags' in layers:\n xr_layers.append(xr.load_dataset(\n './data/copernicus/ammonia/CAMS-GLOB-ANT_Glb_0.1x0.1_anthro_nh3_v4.2_monthly_ags.nc').ags.sel(\n time=time_slice, lat=lat_slice, lon=lon_slice))\n\n nh3 = sum(xr_layers)\n nh3.name = 'nh3'\n\n if verbose:\n\n shape = gpd.read_file('./shp/lombardia/lombardia.shp').to_crs(epsg=4326)\n\n ncols = len(xr_layers) + 1\n fig, axs = plt.subplots(ncols=ncols, figsize=(8 * ncols, 5))\n\n for i in range(len(xr_layers)):\n shape.plot(ax=axs[i], color='black', alpha=0.5)\n xr_layers[i].mean(dim='time').plot(ax=axs[i], alpha=0.5)\n\n shape.plot(ax=axs[len(xr_layers)], color='black', alpha=0.5)\n nh3.mean(dim='time').plot(ax=axs[len(xr_layers)], alpha=0.5)\n\n plt.show()\n\n return nh3",
"def get_S1(\n self,\n year,\n month,\n day,\n tempfilter=True,\n tempfilter_radius=7,\n applylcmask=False,\n mask_globcover=True,\n dualpol=True,\n trackflt=None,\n maskwinter=False,\n masksnow=True,\n explicit_t_mask=None,\n ascending=False,\n maskLIA=True,\n ):\n\n def computeLIA(image):\n # comput the local incidence angle (LIA) based on the srtm and the s1 viewing angle\n # get the srtm\n srtm = ee.Image(\"USGS/SRTMGL1_003\")\n srtm_slope = ee.Terrain.slope(srtm)\n srtm_aspect = ee.Terrain.aspect(srtm)\n # get the S1 incidence angle\n inc = ee.Image(image).select(\"angle\")\n # comput the LIA\n s = srtm_slope.multiply(\n ee.Image.constant(277)\n .subtract(srtm_aspect)\n .multiply(math.pi / 180)\n .cos()\n )\n lia = inc.subtract(\n ee.Image.constant(90).subtract(ee.Image.constant(90).subtract(s))\n ).abs()\n # add band to current image\n return image.addBands(\n lia.select([\"angle\"], [\"lia\"]).reproject(srtm.projection())\n )\n\n def maskterrain(image):\n # mask for terrain, local incidence angle and high and low backscatter\n tmp = ee.Image(image)\n # srtm dem\n if maskLIA == False:\n gee_srtm = ee.Image(\"USGS/SRTMGL1_003\")\n gee_srtm_slope = ee.Terrain.slope(gee_srtm)\n mask = gee_srtm_slope.lt(20)\n else:\n lia = tmp.select(\"lia\")\n mask = lia.gt(20).bitwiseAnd(lia.lt(45))\n mask2 = tmp.lt(0).bitwiseAnd(tmp.gt(-25))\n mask = mask.bitwiseAnd(mask2)\n tmp = tmp.updateMask(mask)\n\n return tmp\n\n def masklc(image):\n # load land cover info\n corine = ee.Image(\"users/felixgreifeneder/corine\")\n\n # create lc mask\n valLClist = [10, 11, 12, 13, 18, 19, 20, 21, 26, 27, 28, 29]\n\n lcmask = (\n corine.eq(valLClist[0])\n .bitwiseOr(corine.eq(valLClist[1]))\n .bitwiseOr(corine.eq(valLClist[2]))\n .bitwiseOr(corine.eq(valLClist[3]))\n .bitwiseOr(corine.eq(valLClist[4]))\n .bitwiseOr(corine.eq(valLClist[5]))\n .bitwiseOr(corine.eq(valLClist[6]))\n .bitwiseOr(corine.eq(valLClist[7]))\n .bitwiseOr(corine.eq(valLClist[8]))\n .bitwiseOr(corine.eq(valLClist[9]))\n .bitwiseOr(corine.eq(valLClist[10]))\n .bitwiseOr(corine.eq(valLClist[11]))\n )\n\n tmp = ee.Image(image)\n\n tmp = tmp.updateMask(lcmask)\n return tmp\n\n def mask_lc_globcover(image):\n\n tmp = ee.Image(image)\n\n # load lc\n glbcvr = ee.Image(\"ESA/GLOBCOVER_L4_200901_200912_V2_3\").select(\"landcover\")\n\n valLClist = [\n 11,\n 14,\n 20,\n 30,\n 40,\n 50,\n 60,\n 70,\n 90,\n 100,\n 110,\n 120,\n 130,\n 140,\n 150,\n 160,\n 170,\n 180,\n 190,\n 200,\n 210,\n 220,\n 230,\n ]\n\n lcmask = (\n glbcvr.eq(valLClist[0])\n .bitwiseOr(glbcvr.eq(valLClist[1]))\n .bitwiseOr(glbcvr.eq(valLClist[2]))\n .bitwiseOr(glbcvr.eq(valLClist[3]))\n .bitwiseOr(glbcvr.eq(valLClist[4]))\n .bitwiseOr(glbcvr.eq(valLClist[5]))\n .bitwiseOr(glbcvr.eq(valLClist[6]))\n .bitwiseOr(glbcvr.eq(valLClist[7]))\n .bitwiseOr(glbcvr.eq(valLClist[8]))\n .bitwiseOr(glbcvr.eq(valLClist[9]))\n .bitwiseOr(glbcvr.eq(valLClist[10]))\n .bitwiseOr(glbcvr.eq(valLClist[11]))\n .bitwiseOr(glbcvr.eq(valLClist[12]))\n .bitwiseOr(glbcvr.eq(valLClist[13]))\n .bitwiseOr(glbcvr.eq(valLClist[14]))\n .bitwiseOr(glbcvr.eq(valLClist[15]))\n .bitwiseOr(glbcvr.eq(valLClist[16]))\n .bitwiseOr(glbcvr.eq(valLClist[17]))\n .bitwiseOr(glbcvr.eq(valLClist[18]))\n .bitwiseOr(glbcvr.eq(valLClist[19]))\n .bitwiseOr(glbcvr.eq(valLClist[20]))\n .bitwiseOr(glbcvr.eq(valLClist[21]))\n .bitwiseOr(glbcvr.eq(valLClist[22]))\n )\n\n tmp = tmp.updateMask(lcmask)\n\n return tmp\n\n def setresample(image):\n image = image.resample()\n return image\n\n def toln(image):\n\n tmp = ee.Image(image)\n\n # 
Convert to linear\n vv = ee.Image(10).pow(tmp.select(\"VV\").divide(10))\n if dualpol == True:\n vh = ee.Image(10).pow(tmp.select(\"VH\").divide(10))\n\n # Convert to ln\n out = vv.log()\n if dualpol == True:\n out = out.addBands(vh.log())\n out = out.select([\"constant\", \"constant_1\"], [\"VV\", \"VH\"])\n else:\n out = out.select([\"constant\"], [\"VV\"])\n\n return out.set(\"system:time_start\", tmp.get(\"system:time_start\"))\n\n def tolin(image):\n\n tmp = ee.Image(image)\n\n # Covert to linear\n vv = ee.Image(10).pow(tmp.select(\"VV\").divide(10))\n if dualpol == True:\n vh = ee.Image(10).pow(tmp.select(\"VH\").divide(10))\n\n # Convert to\n if dualpol == True:\n out = vv.addBands(vh)\n out = out.select([\"constant\", \"constant_1\"], [\"VV\", \"VH\"])\n else:\n out = vv.select([\"constant\"], [\"VV\"])\n\n return out.set(\"system:time_start\", tmp.get(\"system:time_start\"))\n\n def todb(image):\n\n tmp = ee.Image(image)\n\n return (\n ee.Image(10)\n .multiply(tmp.log10())\n .set(\"system:time_start\", tmp.get(\"system:time_start\"))\n )\n\n def applysnowmask(image):\n\n tmp = ee.Image(image)\n sdiff = tmp.select(\"VH\").subtract(snowref)\n wetsnowmap = sdiff.lte(-2.6).focal_mode(100, \"square\", \"meters\", 3)\n\n return tmp.updateMask(wetsnowmap.eq(0))\n\n def projectlia(image):\n tmp = ee.Image(image)\n trgtprj = tmp.select(\"VV\").projection()\n tmp = tmp.addBands(tmp.select(\"angle\").reproject(trgtprj), [\"angle\"], True)\n return tmp\n\n def apply_explicit_t_mask(image):\n\n t_mask = ee.Image(\"users/felixgreifeneder/\" + explicit_t_mask)\n mask = t_mask.eq(0)\n return image.updateMask(mask)\n\n ee.Reset()\n ee.Initialize()\n\n # load S1 data\n gee_s1_collection = ee.ImageCollection(\"COPERNICUS/S1_GRD\")\n\n # Filter the image collection\n gee_s1_filtered = (\n gee_s1_collection.filter(ee.Filter.eq(\"instrumentMode\", \"IW\"))\n .filterBounds(self.roi)\n .filter(ee.Filter.eq(\"platform_number\", \"A\"))\n .filter(ee.Filter.listContains(\"transmitterReceiverPolarisation\", \"VV\"))\n )\n\n if ascending == True:\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.eq(\"orbitProperties_pass\", \"ASCENDING\")\n )\n else:\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.eq(\"orbitProperties_pass\", \"DESCENDING\")\n )\n\n if dualpol == True:\n # Consider only dual-pol scenes\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.listContains(\"transmitterReceiverPolarisation\", \"VH\")\n )\n\n if trackflt is not None:\n # Specify track\n gee_s1_filtered = gee_s1_filtered.filter(\n ee.Filter.eq(\"relativeOrbitNumber_start\", trackflt)\n )\n\n if maskwinter == True:\n # Mask winter based on DOY\n gee_s1_filtered = gee_s1_filtered.filter(ee.Filter.dayOfYear(121, 304))\n\n # add LIA\n if maskLIA == True:\n # compute the local incidence angle if it shall be used for masking\n gee_s1_filtered = gee_s1_filtered.map(computeLIA)\n s1_lia = gee_s1_filtered.select(\"lia\")\n else:\n s1_lia = None\n\n s1_angle = gee_s1_filtered.select(\"angle\")\n\n if applylcmask == True:\n # apply land-cover mask based on Corine\n gee_s1_filtered = gee_s1_filtered.map(masklc)\n if mask_globcover == True:\n # apply land-cover mask based on globcover\n gee_s1_filtered = gee_s1_filtered.map(mask_lc_globcover)\n\n # Enable bilinear resampling (instead of NN)\n gee_s1_filtered = gee_s1_filtered.map(setresample)\n\n if explicit_t_mask == None:\n # apply masking based on the terraing (LIA)\n gee_s1_filtered = gee_s1_filtered.map(maskterrain)\n else:\n # apply specific terrain mask\n gee_s1_filtered = 
gee_s1_filtered.map(apply_explicit_t_mask)\n\n if masksnow == True:\n # automatic wet snow masking\n gee_s1_linear_vh = gee_s1_filtered.map(tolin).select(\"VH\")\n snowref = ee.Image(10).multiply(\n gee_s1_linear_vh.reduce(ee.Reducer.intervalMean(5, 100)).log10()\n )\n gee_s1_filtered = gee_s1_filtered.map(applysnowmask)\n\n #### SHOULD BE IF STATEMENT HERE\n\n # create a list of availalbel dates\n tmp = gee_s1_filtered.getInfo()\n tmp_ids = [x[\"properties\"][\"system:index\"] for x in tmp[\"features\"]]\n \n dates = np.array(\n [\n dt.date(year=int(x[17:21]), month=int(x[21:23]), day=int(x[23:25]))\n for x in tmp_ids\n ]\n )\n \n if not len(dates):\n raise Exception(\n \"There are no S1 images with the selected filters, please consider \"\n \"changing the area of interest or selecting a different orbit\"\n )\n \n # find the closest acquisitions\n doi = dt.date(year=year, month=month, day=day)\n doi_index = np.argmin(np.abs(dates - doi))\n date_selected = dates[doi_index]\n\n # filter imagecollection for respective date\n gee_s1_drange = gee_s1_filtered.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n s1_angle_drange = s1_angle.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n if maskLIA == True:\n s1_lia_drange = s1_lia.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n if gee_s1_drange.size().getInfo() > 1:\n if maskLIA == True:\n s1_lia = s1_lia_drange.mosaic()\n s1_angle = s1_angle_drange.mosaic()\n s1_sig0 = gee_s1_drange.mosaic()\n s1_lia = ee.Image(s1_lia.copyProperties(s1_lia_drange.first()))\n s1_sig0 = ee.Image(s1_sig0.copyProperties(gee_s1_drange.first()))\n else:\n s1_sig0 = ee.Image(gee_s1_drange.first())\n s1_angle = ee.Image(s1_angle_drange.first())\n s1_lia = ee.Image(s1_lia_drange.first())\n\n # fetch image from image collection\n # get the track number\n s1_sig0_info = s1_sig0.getInfo()\n track_nr = s1_sig0_info[\"properties\"][\"relativeOrbitNumber_start\"]\n\n # only uses images of the same track\n gee_s1_filtered = gee_s1_filtered.filterMetadata(\n \"relativeOrbitNumber_start\", \"equals\", track_nr\n )\n\n if tempfilter == True:\n # despeckle\n radius = tempfilter_radius\n units = \"pixels\"\n gee_s1_linear = gee_s1_filtered.map(tolin)\n gee_s1_dspckld_vv = self._multitemporalDespeckle(\n gee_s1_linear.select(\"VV\"),\n radius,\n units,\n {\"before\": -12, \"after\": 12, \"units\": \"month\"},\n )\n gee_s1_dspckld_vv = gee_s1_dspckld_vv.map(todb)\n gee_s1_fltrd_vv = gee_s1_dspckld_vv.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n s1_sig0_vv = gee_s1_fltrd_vv.mosaic()\n\n if dualpol == True:\n gee_s1_dspckld_vh = self._multitemporalDespeckle(\n gee_s1_linear.select(\"VH\"),\n radius,\n units,\n {\"before\": -12, \"after\": 12, \"units\": \"month\"},\n )\n gee_s1_dspckld_vh = gee_s1_dspckld_vh.map(todb)\n gee_s1_fltrd_vh = gee_s1_dspckld_vh.filterDate(\n date_selected.strftime(\"%Y-%m-%d\"),\n (date_selected + dt.timedelta(days=1)).strftime(\"%Y-%m-%d\"),\n )\n s1_sig0_vh = gee_s1_fltrd_vh.mosaic()\n\n if dualpol == True:\n s1_sig0 = s1_sig0_vv.addBands(s1_sig0_vh).select(\n [\"constant\", \"constant_1\"], [\"VV\", \"VH\"]\n )\n else:\n s1_sig0 = s1_sig0_vv.select([\"constant\"], [\"VV\"])\n\n # extract information\n s1_sig0_vv = s1_sig0.select(\"VV\")\n s1_sig0_vv = 
s1_sig0_vv.clip(self.roi)\n if dualpol == True:\n s1_sig0_vh = s1_sig0.select(\"VH\")\n s1_sig0_vh = s1_sig0_vh.clip(self.roi)\n\n gee_s1_ln = gee_s1_filtered.map(toln)\n gee_s1_lin = gee_s1_filtered.map(tolin)\n k1vv = ee.Image(gee_s1_ln.select(\"VV\").mean()).clip(self.roi)\n k2vv = ee.Image(gee_s1_ln.select(\"VV\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n mean_vv = ee.Image(gee_s1_lin.select(\"VV\").mean()).clip(self.roi)\n std_vv = ee.Image(gee_s1_lin.select(\"VV\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n\n if dualpol == True:\n k1vh = ee.Image(gee_s1_ln.select(\"VH\").mean()).clip(self.roi)\n k2vh = ee.Image(gee_s1_ln.select(\"VH\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n mean_vh = ee.Image(gee_s1_lin.select(\"VH\").mean()).clip(self.roi)\n std_vh = ee.Image(gee_s1_lin.select(\"VH\").reduce(ee.Reducer.stdDev())).clip(\n self.roi\n )\n\n # export\n if dualpol == False:\n self.S1_SIG0_VV_db = s1_sig0_vv\n self.S1_ANGLE = s1_angle\n self.K1VV = k1vv\n self.K2VV = k2vv\n self.S1_DATE = date_selected\n else:\n self.S1_SIG0_VV_db = s1_sig0_vv\n self.S1_SIG0_VH_db = s1_sig0_vh\n self.S1_ANGLE = s1_angle\n self.K1VV = k1vv\n self.K1VH = k1vh\n self.K2VV = k2vv\n self.K2VH = k2vh\n self.S1_DATE = date_selected\n self.S1MEAN_VV = mean_vv\n self.S1MEAN_VH = mean_vh\n self.S1STD_VV = std_vv\n self.S1STD_VH = std_vh\n\n if maskLIA == True:\n self.S1_LIA = s1_lia",
"def masked_f3kdb(clip: vs.VideoNode,\n rad: int = 16,\n thr: Union[int, List[int]] = 24,\n grain: Union[int, List[int]] = [12, 0],\n mask_args: Dict[str, Any] = {},\n show_mask: bool = False) -> vs.VideoNode:\n from debandshit import dumb3kdb\n\n deb_mask_args: Dict[str, Any] = dict(detail_brz=1500, lines_brz=1000)\n deb_mask_args |= mask_args\n\n bits, clip = _get_bits(clip)\n\n deband_mask = detail_mask(clip, **deb_mask_args)\n\n deband = dumb3kdb(clip, radius=rad, threshold=thr, grain=grain)\n deband_masked = core.std.MaskedMerge(deband, clip, deband_mask)\n deband_masked = deband_masked if bits == 16 else depth(deband_masked, bits)\n return deband_masked",
"def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered",
"def sky_groups():\n cam = \"sky\"\n for light, lens, ndc, good, window in [(True, True, False, True, True),\n (True, True, False, True, False),\n (True, True, False, False, False),\n (True, False, False, True, False),\n (True, False, False, False, False),\n (False, True, False, True, True),\n (False, True, False, False, True)]:\n filenames = flatfiles(cam)\n filenames = get_light_sky(filenames, light)\n filenames = get_lens(filenames, lens)\n filenames = get_ndc(filenames, ndc)\n filenames = get_good(filenames, good)\n filenames = get_window_sky(filenames, window)\n images = valid_images(filenames)\n process_images(images, cam, (light, lens, ndc, good, window))",
"def three_FLD(e_spectra, l_spectra, wavelengths, fwhm, band = 'A', plot=True):\n # set the size of the point selection search regions inside and outside of the absorption band\n buffer_in = 5\n buffer_out = 1\n \n # adjust the shoulder skipping and right shoulder search range depending on which band is selected\n if band == 'A':\n out_in_first = 0.7535*fwhm+2.8937 # define amount to skip to left shoulder from minimum\n wl_in = 760 # standard location of O2A absorption feature\n out_in_second = 11 # define amount to skip to right shoulder from minimum\n if band == 'B':\n out_in_first = 0.697*fwhm + 1.245 # define amount to skip to left shoulder from minimum\n wl_in = 687 # standard location of the O2B aboorption band\n out_in_second = 8 # define amount to skip to right shoulder from minimum\n \n # get absorption well minima position\n e_in_index, e_in = stats_on_spectra(wavelengths, wl_in - buffer_in, wl_in + buffer_in, e_spectra, 'min')\n l_in_index, l_in = stats_on_spectra(wavelengths, wl_in - buffer_in, wl_in + buffer_in, l_spectra, 'min')\n # get absorption left and right shoulders\n e_left_index, e_left = stats_on_spectra(wavelengths, wl_in - buffer_out - out_in_first, wl_in - out_in_first, e_spectra, 'mean')\n l_left_index, l_left = stats_on_spectra(wavelengths, wl_in - buffer_out - out_in_first, wl_in - out_in_first, l_spectra, 'mean')\n e_right_index, e_right = stats_on_spectra(wavelengths, wl_in + out_in_second, wl_in + buffer_out + out_in_second, e_spectra, 'mean')\n l_right_index, l_right = stats_on_spectra(wavelengths, wl_in + out_in_second, wl_in + buffer_out + out_in_second, l_spectra, 'mean')\n # interpolate between shoulders using a linear fit\n e_wavelengths_inter = wavelengths[e_left_index:e_right_index + 1]\n l_wavelengths_inter = wavelengths[l_left_index:l_right_index + 1]\n # get equation of straight line between two shoulders\n e_xp = [e_wavelengths_inter[0], e_wavelengths_inter[-1]] # get x values\n l_xp = [l_wavelengths_inter[0], l_wavelengths_inter[-1]]\n e_fp = [e_left, e_right] # get y values\n l_fp = [l_left, l_right]\n e_coefficients = np.polyfit(e_xp, e_fp, 1) # polyfit with 1 DoF for linear fit\n l_coefficients = np.polyfit(l_xp, l_fp, 1)\n # apply fit to wavelengths between shoulders\n e_interpolated = e_wavelengths_inter*e_coefficients[0] + e_coefficients[1]\n l_interpolated = l_wavelengths_inter*l_coefficients[0] + l_coefficients[1]\n # find lineary interpolated value matching the index of absorption well minima\n e_out = e_interpolated[e_in_index - e_left_index]\n l_out = l_interpolated[l_in_index - l_left_index]\n \n if plot == True: # generate plots showing absorption band and selected points\n \n # plot spectra\n plt.plot(wavelengths, e_spectra, color = 'orange')\n plt.plot(wavelengths, l_spectra, color = 'blue')\n \n # plot selected points\n plt.scatter(wavelengths[e_in_index], e_in, label = 'e_in')\n plt.scatter(wavelengths[l_in_index], l_in, label = 'l_in')\n plt.scatter(wavelengths[e_left_index], e_left, label = 'e_left')\n plt.scatter(wavelengths[l_left_index], l_left, label = 'l_left')\n plt.scatter(wavelengths[e_right_index], e_right, label = 'e_right')\n plt.scatter(wavelengths[l_right_index], l_right, label = 'l_right')\n \n # plot interpolation\n plt.plot(e_wavelengths_inter, e_interpolated)\n plt.plot(l_wavelengths_inter, l_interpolated)\n plt.scatter(wavelengths[e_in_index], e_out, label = 'e_out')\n plt.scatter(wavelengths[l_in_index], l_out, label = 'l_out')\n \n #plt.legend()\n if band == 'A':\n plt.xlim(750, 775)\n plt.title('O$_2$A 
Absorption Band')\n \n if band == 'B':\n plt.xlim(680, 700)\n plt.title('O$_2$B Absorption Band')\n plt.xlabel('Wavelength (nm)')\n plt.ylabel('Radiance (mW m−2 sr−1 nm−1)')\n plt.show()\n \n fluorescence = (e_out*l_in - l_out*e_in) / (e_out - e_in) # calculate fluorescence using interpolated values\n \n return(fluorescence)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
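A minimal usage sketch of the temporal window filter above (not part of the dataset row): the asset ID, band names, and the class value 1 for Forest are hypothetical placeholders, and mask3 is assumed to be defined alongside applyWindow3years, as in the related window filters listed as negatives.

import ee
ee.Initialize()

# Hypothetical classification image whose bands are yearly land cover maps
classified = ee.Image('users/example/landcover_stack')
# Band names must be passed in chronological order
band_names = ['classification_2000', 'classification_2001',
              'classification_2002', 'classification_2003']
# Apply the 3-year window filter to the Forest class (assumed to be coded as 1)
forest_filtered = applyWindow3years(classified, 1, band_names)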
A helper function to perform a spatial filter based on connectedPixelCount for one land cover class value. To avoid unwanted modifications to the edges of the pixel groups (blobs), the spatial filter was built on the "connectedPixelCount" function, which is native to the GEE platform and locates connected components (neighbours) that share the same pixel value. Only pixels that do not share connections to a predefined number of identical neighbours are considered isolated. In this filter, a group needs at least the minimum number of connected pixels defined in params (minSize) to be left unmodified. | def majorityFilter(image, params):
params = ee.Dictionary(params)
minSize = ee.Number(params.get('minSize'))
classValue = ee.Number(params.get('classValue'))
#Generate a mask from the class value
classMask = image.eq(classValue)
    #Label each group of connected pixels, counting up to minSize connected pixels
labeled = classMask.mask(classMask).connectedPixelCount(minSize, True)
    #Select the groups with fewer than minSize connected pixels (isolated blobs)
region = labeled.lt(minSize)
    # Square 3x3 kernel (radius 1):
    # [[p(x-1,y+1), p(x,y+1), p(x+1,y+1)],
    #  [p(x-1,y  ), p( x,y ), p(x+1,y  )],
    #  [p(x-1,y-1), p(x,y-1), p(x+1,y-1)]]
kernel = ee.Kernel.square(1)
#Find neighborhood
neighs = image.neighborhoodToBands(kernel).mask(region)
#Reduce to majority pixel in neighborhood
majority = neighs.reduce(ee.Reducer.mode())
#Replace original values for new values
filtered = image.where(region, majority)
return ee.Image(filtered) | [
"def count_neighbours(self, mask):\n from scipy.ndimage.filters import convolve\n\n mask = mask.astype('uint8')\n filter_args = {'mode': self.boundary}\n if self.boundary == 'empty':\n filter_args['mode'] = 'constant'\n filter_args['cval'] = 0\n elif self.boundary == 'filled':\n filter_args['mode'] = 'constant'\n filter_args['cval'] = 1\n\n counts = convolve(mask, self.structure, **filter_args)\n\n return counts",
"def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered",
"def find_connected_components(thresh_image):\n\n rows, cols = thresh_image.shape\n\n # First find the connected components of the image\n # num_labels: the number of connected components found in the image\n # labels: a matrix with the labels for each pixel\n # stats: [top_left_x_coord, top_left_y_coord, width, height, area] statistics for each component\n # centroids: [x_coord, y_coord] of the centroid of each component\n num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh_image)\n\n # Do some filtering based on the characteristics of a shuttle\n for i in range(num_labels):\n\n # Filters out shapes that are too long\n size_ratio = stats[i, 2] / stats[i, 3]\n if size_ratio < 0.5 or size_ratio > 2:\n labels[labels == i] = 0\n\n # Filters out shapes that fit the bounding box too well (likely noise) or too bad (sparse structure)\n area_ratio = stats[i, 4] / (stats[i, 2] * stats[i, 3])\n if area_ratio < 0.4 or area_ratio > 0.9:\n labels[labels == i] = 0\n\n # Filters out shapes too small (in proportion to image size)\n if stats[i, 2] < (rows / 60) or stats[i, 3] < (cols / 60):\n labels[labels == i] = 0\n\n return labels",
"def find1Cpixels(bwImage):\n # fills pixels matching the following neighborhoods:\n hoods = [[[1, 0, 0],\n [0, 1, 0],\n [0, 0, 0]],\n [[0, 1, 0],\n [0, 1, 0],\n [0, 0, 0]],\n [[0, 0, 1],\n [0, 1, 0],\n [0, 0, 0]],\n [[0, 0, 0],\n [1, 1, 0],\n [0, 0, 0]],\n [[0, 0, 0],\n [0, 1, 1],\n [0, 0, 0]],\n [[0, 0, 0],\n [0, 1, 0],\n [1, 0, 0]],\n [[0, 0, 0],\n [0, 1, 0],\n [0, 1, 0]],\n [[0, 0, 0],\n [0, 1, 0],\n [0, 0, 1]]]\n output = np.zeros(bwImage.shape, dtype=np.bool)\n # for each neighborhood, find matching pixels and set them to 1 in the img\n for hood in hoods:\n output = np.logical_or(output,\n ndimage.binary_hit_or_miss(bwImage, hood))\n return output",
"def condensePixels(image,coordinates,grid_width):\r\n gridded_data = []\r\n for i in range(0,len(image[0])-grid_width,grid_width):\r\n grid_line = []\r\n for j in range(0,len(image[0][0])-grid_width,grid_width):\r\n reds = []\r\n greens = []\r\n blues = []\r\n lats = []\r\n lons = []\r\n for m in range(i,i+grid_width):\r\n for n in range(j,j+grid_width):\r\n reds.append(image[0][m][n][0])\r\n greens.append(image[0][m][n][1])\r\n blues.append(image[0][m][n][2])\r\n lats.append(coordinates[m][n][0])\r\n lons.append(coordinates[m][n][1])\r\n grid_line.append([reds,greens, blues,lats,lons])\r\n gridded_data.append(grid_line)\r\n \r\n #Now to go through this gridded list and then create a dataframe that has the most common red, green, and blue per 100\r\n #pixel grid, and also the average latitude and average longitude\r\n compacted_data = []\r\n for i in range(len(gridded_data)):\r\n new_line = []\r\n for j in range(len(gridded_data[0])):\r\n reds_counts = pd.Series(gridded_data[i][j][0]).value_counts()\r\n common_red = np.uint8(pd.Series(reds_counts[reds_counts==max(reds_counts)].index).mean())\r\n greens_counts = pd.Series(gridded_data[i][j][1]).value_counts()\r\n common_green = np.uint8(pd.Series(greens_counts[greens_counts==max(greens_counts)].index).mean())\r\n blues_counts = pd.Series(gridded_data[i][j][2]).value_counts()\r\n common_blue = np.uint8(pd.Series(blues_counts[blues_counts==max(blues_counts)].index).mean())\r\n lat = pd.Series(gridded_data[i][j][3]).mean()\r\n lon = pd.Series(gridded_data[i][j][4]).mean()\r\n new_line.append(np.array([common_red, common_green, common_blue, lat, lon]))\r\n compacted_data.append(np.array(new_line))\r\n \r\n #now to create a new image list to test out what the compacted colors look like\r\n compacted_image = []\r\n for i in range(len(compacted_data)):\r\n line = []\r\n for j in range(len(compacted_data[0])):\r\n line.append(np.array([np.uint8(compacted_data[i][j][0]),np.uint8(compacted_data[i][j][1]),np.uint8(compacted_data[i][j][2])]))\r\n compacted_image.append(np.array(line))\r\n compacted_image = np.array(compacted_image)\r\n compacted_image = [compacted_image]\r\n \r\n return compacted_data, compacted_image",
"def connected_components(self):\n ccs = list(nx.connected_components(self.G))\n return [sum(1 for cc in ccs if any(x.region == i for x in cc)) for i in range(self.args.regions)]",
"def isolated_connected_image_filter(*args, **kwargs):\n import itk\n instance = itk.IsolatedConnectedImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()",
"def connected_components_grid(self, valueFilter):\n C = Grid2(self.W, self.H, -1)\n def helper(u, cid, value):\n count = 0\n if C.pget(u) == -1 and self.pget(u) == value:\n C.pset(u, cid)\n count += 1\n for (v,_) in self.nbors4(u):\n count += helper(v, cid, value)\n return count\n\n compid = 0\n compsizes = {}\n for (u,value) in self.piter():\n if valueFilter and value != valueFilter:\n continue\n size = helper(u, compid, value)\n if size > 0:\n compsizes[compid] = size\n compid += 1\n\n return (C, compsizes)",
"def get_4_neigbours_amount(img):\n ones = np.ones_like(img)\n return convolve(ones, four_neighbors_kernel, mode='constant', cval=0.0)",
"def compute_connected_components(disp, out_labels, d_thresh):\n return _elas.compute_connected_components(disp, out_labels, d_thresh)",
"def count_neighbors(lights, r, c):\n neighbors = 0\n\n if r > 0 and c > 0: # 1\n neighbors += 1 if lights[r - 1][c - 1] == \"#\" else 0\n\n if r > 0: # 2\n neighbors += 1 if lights[r - 1][c] == \"#\" else 0\n\n if r > 0 and c < GRID_SIZE - 1: # 3\n neighbors += 1 if lights[r - 1][c + 1] == \"#\" else 0\n\n if c < GRID_SIZE - 1: # 4\n neighbors += 1 if lights[r][c + 1] == \"#\" else 0\n\n if r < GRID_SIZE - 1 and c < GRID_SIZE - 1: # 5\n neighbors += 1 if lights[r + 1][c + 1] == \"#\" else 0\n\n if r < GRID_SIZE - 1: # 6\n neighbors += 1 if lights[r + 1][c] == \"#\" else 0\n\n if r < GRID_SIZE - 1 and c > 0: # 7\n neighbors += 1 if lights[r + 1][c - 1] == \"#\" else 0\n\n if c > 0: # 8\n neighbors += 1 if lights[r][c - 1] == \"#\" else 0\n\n return neighbors",
"def num_complementary_regions(self):\n g = self._get_puncturefinder_graph()\n # return g.connected_components_number()\n return nx.number_connected_components(g)",
"def _find_connected_components(mask):\n num, labels, stats, centroids = cv2.connectedComponentsWithStats(mask)\n stats = stats[stats[:, 4].argsort()]\n return stats",
"def get_connected_components(img):\n already_labeled = []\n connected_components = []\n pixel_queue = []\n\n def get_fg_neighbors(row,col):\n neighbors = []\n min_row = max(row-1,0)\n max_row = min(row+1, len(img)-1)\n min_col = max(col-1,0)\n max_col = min(col+1, len(img[0])-1)\n for r in xrange(min_row, max_row+1):\n for c in xrange(min_col, max_col+1):\n if img[r][c] == 1 and (r,c) not in already_labeled:\n neighbors.append((r,c))\n return neighbors\n\n for row in xrange(len(img)):\n for col in xrange(len(img[0])):\n if img[row][col] == 1 and (row,col) not in already_labeled:\n already_labeled.append((row,col))\n cc = []\n cc.append((row,col))\n pixel_queue.append((row,col))\n while len(pixel_queue) > 0:\n cur_pixel = pixel_queue[0]\n pixel_queue = pixel_queue[1:]\n neighbors = get_fg_neighbors(cur_pixel[0], cur_pixel[1])\n already_labeled += neighbors\n cc += neighbors\n pixel_queue += neighbors\n connected_components.append(cc)\n return connected_components",
"def count_land_neighbours(self):\n\t\tglobal neibs\n\t\tneibs = np.zeros((hh,wh),int)\n\t\t\n\t\t# Calculate the number of adjacent grids which are lands\n\t\tfor x in range(1,h+1): \n\t\t\tfor y in range(1,w+1):\n\t\t\t\tneibs[x,y] = lscape[x-1,y] \\\n\t\t\t\t\t+ lscape[x+1,y] \\\n\t\t\t\t\t+ lscape[x,y-1] \\\n\t\t\t\t\t+ lscape[x,y+1]",
"def numConnectedFaces(self):\n \n pass",
"def test_count_neighbors(self):\n m, n = 5, 5\n k, p = 0.2, 0.7\n agents = [ConwayAgent(ii, ii & 0x1 == 1) for ii in range(m * n)]\n C = ConwayModel(m, n, k, p, agents)\n\n to_count = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n expected = np.array([[1, 1, 2], [2, 3, 1], [0, 2, 1]])\n result = C.count_neighbors(to_count)\n self.assertTrue(np.all(expected == result))",
"def get_neighbouring_green_cells_count(row, col, rows_num, columns_num, grid):\n\n nearby_green_cells = 0\n\n NEIGHBOURING_ROWS = [-1, -1, -1, 0, +1, +1, +1, 0]\n NEIGHBOURING_COLUMNS = [-1, 0, +1, +1, +1, 0, -1, -1]\n\n for i in range(8):\n current_position = [row + NEIGHBOURING_ROWS[i], col + NEIGHBOURING_COLUMNS[i]]\n if is_valid(current_position, rows_num, columns_num) and grid[current_position[0]][current_position[1]] == \"1\":\n nearby_green_cells += 1\n return nearby_green_cells",
"def spatialFilter(temp_filtered_frame, distances_short_of_targets):\n\n \n #TODO: add a median filtering step before bilateral filter step\n\n some_filtering = cv2.bilateralFilter(temp_filtered_frame,5,30,0)\n lots_filtering = cv2.bilateralFilter(temp_filtered_frame,7,55,0)\n\n #need three channels of distances because spatial filter done on all 3 \n dists_short = np.repeat(distances_short_of_targets[:,:,np.newaxis],3,axis=2)\n\n\n #this is used as a cutoff for spots where no further filtering required\n min_values = np.zeros_like(dists_short)\n min_values.fill(.1)\n\n not_short_elems = np.less(dists_short,min_values)\n\n temp_filter_vals_added = np.where(not_short_elems,temp_filtered_frame,\n np.zeros_like(temp_filtered_frame))\n\n middles = np.zeros_like(dists_short)\n middles.fill(0.45)\n\n #will be anded with one other numpy array to get middle range\n greater_than_zeros = np.greater_equal(dists_short,min_values)\n less_than_highs = np.less(dists_short,middles)\n a_little_short_elems = np.logical_and(greater_than_zeros,less_than_highs)\n\n some_space_filter_vals_added = np.where(a_little_short_elems,\n some_filtering,temp_filter_vals_added)\n\n\n a_lot_short_elems = np.greater_equal(dists_short,middles)\n lots_space_filter_vals_added = np.where(a_lot_short_elems, lots_filtering,\n some_space_filter_vals_added)\n\n return lots_space_filter_vals_added"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
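A minimal usage sketch for the majority filter above, under assumed values: the asset ID and the specific classValue/minSize numbers are hypothetical, but the dictionary keys match the ones read inside majorityFilter, and the list-of-dictionaries form is exactly what the applySpatialFilter wrapper in the next row iterates over.

import ee
ee.Initialize()

# Hypothetical single-year classification image
classified = ee.Image('users/example/classification_2010')

# Reclassify isolated blobs of class 3 smaller than 25 connected pixels
params = {'classValue': 3, 'minSize': 25}
filtered = majorityFilter(classified, params)

# applySpatialFilter (next row) simply chains majorityFilter over a list of such dictionaries
filter_params = [
    {'classValue': 1, 'minSize': 25},
    {'classValue': 3, 'minSize': 25},
]
filtered_all = applySpatialFilter(classified, filter_params)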
Function to perform a spatial filter based on connectedPixelCount for land cover class values defined in filterParams. Calls the function majorityFilter. To avoid unwanted modifications to the edges of the pixel groups (blobs), the spatial filter was built on the "connectedPixelCount" function, which is native to the GEE platform and locates connected components (neighbours) that share the same pixel value. Only pixels that do not share connections to a predefined number of identical neighbours are considered isolated. In this filter, a group needs at least the minimum number of connected pixels defined in params (minSize) to be left unmodified. | def applySpatialFilter(image, filterParams):
#Loop through list of parameters and apply spatial filter using majorityFilter
for params in filterParams:
image = majorityFilter(ee.Image(image),params)
return image | [
"def majorityFilter(image, params):\n params = ee.Dictionary(params)\n minSize = ee.Number(params.get('minSize'))\n classValue = ee.Number(params.get('classValue'))\n \n #Generate a mask from the class value\n classMask = image.eq(classValue)\n \n #Labeling the group of pixels until 100 pixels connected\n labeled = classMask.mask(classMask).connectedPixelCount(minSize, True)\n \n #Select some groups of connected pixels\n region = labeled.lt(minSize)\n \n # Squared kernel with size shift 1\n # [[p(x-1,y+1), p(x,y+1), p(x+1,y+1)]\n # [ p(x-1, y), p( x,y ), p(x+1, y)]\n # [ p(x-1,y-1), p(x,y-1), p(x+1,y-1)]\n kernel = ee.Kernel.square(1)\n \n #Find neighborhood\n neighs = image.neighborhoodToBands(kernel).mask(region)\n\n #Reduce to majority pixel in neighborhood\n majority = neighs.reduce(ee.Reducer.mode())\n \n #Replace original values for new values\n filtered = image.where(region, majority)\n \n return ee.Image(filtered)",
"def find_connected_components(thresh_image):\n\n rows, cols = thresh_image.shape\n\n # First find the connected components of the image\n # num_labels: the number of connected components found in the image\n # labels: a matrix with the labels for each pixel\n # stats: [top_left_x_coord, top_left_y_coord, width, height, area] statistics for each component\n # centroids: [x_coord, y_coord] of the centroid of each component\n num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh_image)\n\n # Do some filtering based on the characteristics of a shuttle\n for i in range(num_labels):\n\n # Filters out shapes that are too long\n size_ratio = stats[i, 2] / stats[i, 3]\n if size_ratio < 0.5 or size_ratio > 2:\n labels[labels == i] = 0\n\n # Filters out shapes that fit the bounding box too well (likely noise) or too bad (sparse structure)\n area_ratio = stats[i, 4] / (stats[i, 2] * stats[i, 3])\n if area_ratio < 0.4 or area_ratio > 0.9:\n labels[labels == i] = 0\n\n # Filters out shapes too small (in proportion to image size)\n if stats[i, 2] < (rows / 60) or stats[i, 3] < (cols / 60):\n labels[labels == i] = 0\n\n return labels",
"def test_clusterFilterMinSize(self):\n # settings - all clusters visible\n settings = clusterFilter.ClusterFilterSettings()\n settings.updateSetting(\"neighbourRadius\", 2.1)\n settings.updateSetting(\"minClusterSize\", 2)\n settings.updateSetting(\"maxClusterSize\", -1)\n \n # set PBC\n self.lattice.PBC[:] = 1\n \n # filter input\n filterInput = base.FilterInput()\n filterInput.inputState = self.lattice\n visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)\n filterInput.visibleAtoms = visibleAtoms\n filterInput.NScalars = 0\n filterInput.fullScalars = np.empty(0, np.float64)\n filterInput.NVectors = 0\n filterInput.fullVectors = np.empty(0, np.float64)\n \n # call filter\n result = self.filter.apply(filterInput, settings)\n self.assertIsInstance(result, base.FilterResult)\n \n # make sure num visible is correct\n self.assertEqual(len(visibleAtoms), len(self.bigClusterIndexes))\n \n # check clusters are correct\n clusterList = result.getClusterList()\n self.assertEqual(len(clusterList), 1)\n cluster = clusterList[0]\n self.assertEqual(len(cluster), len(self.bigClusterIndexes))\n for index in self.bigClusterIndexes:\n self.assertTrue(index in cluster)",
"def majority_mask(\n activations,\n majority_ceiling=None,\n random_state=42,\n):\n clusterer = cluster.KMeans(n_clusters=2, random_state=random_state)\n cluster_labels = clusterer.fit_predict(activations)\n silhouette_scores = silhouette_samples(activations, cluster_labels)\n if majority_ceiling is None:\n majority_ceiling = silhouette_scores.mean()\n mask = (0 <= silhouette_scores) & (silhouette_scores <= majority_ceiling)\n return mask, majority_ceiling",
"def find1Cpixels(bwImage):\n # fills pixels matching the following neighborhoods:\n hoods = [[[1, 0, 0],\n [0, 1, 0],\n [0, 0, 0]],\n [[0, 1, 0],\n [0, 1, 0],\n [0, 0, 0]],\n [[0, 0, 1],\n [0, 1, 0],\n [0, 0, 0]],\n [[0, 0, 0],\n [1, 1, 0],\n [0, 0, 0]],\n [[0, 0, 0],\n [0, 1, 1],\n [0, 0, 0]],\n [[0, 0, 0],\n [0, 1, 0],\n [1, 0, 0]],\n [[0, 0, 0],\n [0, 1, 0],\n [0, 1, 0]],\n [[0, 0, 0],\n [0, 1, 0],\n [0, 0, 1]]]\n output = np.zeros(bwImage.shape, dtype=np.bool)\n # for each neighborhood, find matching pixels and set them to 1 in the img\n for hood in hoods:\n output = np.logical_or(output,\n ndimage.binary_hit_or_miss(bwImage, hood))\n return output",
"def test_clusterFilterMaxSize(self):\n # settings - all clusters visible\n settings = clusterFilter.ClusterFilterSettings()\n settings.updateSetting(\"neighbourRadius\", 2.1)\n settings.updateSetting(\"minClusterSize\", 1)\n settings.updateSetting(\"maxClusterSize\", 2)\n \n # set PBC\n self.lattice.PBC[:] = 1\n \n # filter input\n filterInput = base.FilterInput()\n filterInput.inputState = self.lattice\n visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)\n filterInput.visibleAtoms = visibleAtoms\n filterInput.NScalars = 0\n filterInput.fullScalars = np.empty(0, np.float64)\n filterInput.NVectors = 0\n filterInput.fullVectors = np.empty(0, np.float64)\n \n # call filter\n result = self.filter.apply(filterInput, settings)\n self.assertIsInstance(result, base.FilterResult)\n \n # make sure num visible is correct\n self.assertEqual(len(visibleAtoms), len(self.smallClusterIndexes))\n \n # check clusters are correct\n clusterList = result.getClusterList()\n self.assertEqual(len(clusterList), 1)\n cluster = clusterList[0]\n self.assertEqual(len(cluster), len(self.smallClusterIndexes))\n for index in self.smallClusterIndexes:\n self.assertTrue(index in cluster)",
"def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered",
"def DetectLocalMaxima(F1,F2,thresh = 1.5):\n \n # calculate the threshold thresh\n thresh = thresh*np.std(F1)\n \n # filter the image maxfiltered with max filter with window size of 3x3\n maxfiltered = scipy.ndimage.filters.maximum_filter(F2, size= [3,3])\n\n # find pixels in the maxfiltered image with values larger than the \n # threshold and save the mask as above thresh\n abovethresh = maxfiltered>=thresh\n \n # find pixels in the F2 image with the highest value in the 8 connected \n # neighthood and save the mask as max8neighbor\n max8neighbor = F2 == maxfiltered\n \n # find the pixels (centroidcandidate) that satisfies both criteria (the \n # common sets of pixels that are true in both abovethresh and max8neighbor)\n centroidcandidate = np.multiply(abovethresh,max8neighbor)\n \n # set the edge to be all false, the edge values are artifacts due to \n #limited image size\n centroid = np.zeros(centroidcandidate.shape, dtype=bool)\n centroid[2:centroid.shape[0]-2,2:centroid.shape[1]-2] = centroidcandidate[2:centroid.shape[0]-2,2:centroid.shape[1]-2]\n \n return centroid",
"def count_neighbours(self, mask):\n from scipy.ndimage.filters import convolve\n\n mask = mask.astype('uint8')\n filter_args = {'mode': self.boundary}\n if self.boundary == 'empty':\n filter_args['mode'] = 'constant'\n filter_args['cval'] = 0\n elif self.boundary == 'filled':\n filter_args['mode'] = 'constant'\n filter_args['cval'] = 1\n\n counts = convolve(mask, self.structure, **filter_args)\n\n return counts",
"def majority(self, indices):\n count_neighbor = []\n # BEGIN Workspace 1.2\n for item in indices:\n count_neighbor.append(self._y[item])\n counter = Counter(count_neighbor)\n most_common = [counter.most_common()[0][0]]\n higher = counter.most_common(0)[0][1]\n i = 1\n for item in counter.most_common():\n if counter.most_common()[i][1] == higher:\n most_common.append(counter.most_common()[i][0])\n i += 1\n else:\n break\n total_counts = self.getCounts()\n label = most_common[0]\n highest = total_counts[most_common[0]]\n j = 1\n for item in most_common:\n if total_counts[most_common[j]] > highest:\n label = most_common[j]\n # END Workspace 1.2\n return label",
"def detect_communities(self, strategy=\"louvain\", weight=None,\n n_communities=2, intermediate=False,\n write=False, write_property=None, **kwargs):\n if strategy not in CommunityDetector._strategies.keys():\n raise CommunityDetector.PartitionError(\n f\"Unknown community detection strategy '{strategy}'\")\n partition = getattr(self, CommunityDetector._strategies[strategy])(\n weight=weight, n_communities=n_communities,\n intermediate=intermediate, **kwargs)\n return self._dispatch_processing_result(\n partition, \"Community\", write, write_property)",
"def Sharpness_Center_Filter():\n filter_0 = np.array([[[-1, 0, 0], [-1, 0, 0], [-1, 0, 0]],\n [[-1, 0, 0], [9, 0, 0], [-1, 0, 0]],\n [[-1, 0, 0], [-1, 0, 0], [-1, 0, 0]]],\n dtype=np.int16)\n filter_1 = np.array([[[0, -1, 0], [0, -1, 0], [0, -1, 0]],\n [[0, -1, 0], [0, 9, 0], [0, -1, 0]],\n [[0, -1, 0], [0, -1, 0], [0, -1, 0]]],\n dtype=np.int16)\n filter_2 = np.array([[[0, 0, -1], [0, 0, -1], [0, 0, -1]],\n [[0, 0, -1], [0, 0, 9], [0, 0, -1]],\n [[0, 0, -1], [0, 0, -1], [0, 0, -1]]],\n dtype=np.int16)\n return filter_0, filter_1, filter_2",
"def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img",
"def _filter_clusters(self, y, cluster_labels=None, minority_label=None):\n\n if cluster_labels is None:\n cluster_labels = self.labels\n\n unique_labels = np.unique(cluster_labels)\n\n # Remove label of observations identified as noise by DBSCAN:\n unique_labels = unique_labels[unique_labels != -1]\n\n filtered_clusters = []\n\n for label in unique_labels:\n cluster_obs = y[cluster_labels == label]\n\n minority_obs = cluster_obs[cluster_obs == minority_label].size\n majority_obs = cluster_obs[cluster_obs != minority_label].size\n\n imb_ratio = (majority_obs + 1) / (minority_obs + 1)\n\n if imb_ratio < 1:\n filtered_clusters.append(label)\n\n return filtered_clusters",
"def cpb_filter(biclusters,\n data,\n nclus,\n *args,\n **kwargs):\n data = bibench.util.shuffle(data)\n kwargs['fixed_row'] = -1\n kwargs['fixed_col'] = -1\n shuffle_results = filter(cpb(data, nclus, *args, **kwargs))\n if len(shuffle_results) == 0:\n return biclusters\n\n maxarea = max([b.area() for b in shuffle_results])\n return [b for b in biclusters if b.area() > maxarea]",
"def spatialFilter(temp_filtered_frame, distances_short_of_targets):\n\n \n #TODO: add a median filtering step before bilateral filter step\n\n some_filtering = cv2.bilateralFilter(temp_filtered_frame,5,30,0)\n lots_filtering = cv2.bilateralFilter(temp_filtered_frame,7,55,0)\n\n #need three channels of distances because spatial filter done on all 3 \n dists_short = np.repeat(distances_short_of_targets[:,:,np.newaxis],3,axis=2)\n\n\n #this is used as a cutoff for spots where no further filtering required\n min_values = np.zeros_like(dists_short)\n min_values.fill(.1)\n\n not_short_elems = np.less(dists_short,min_values)\n\n temp_filter_vals_added = np.where(not_short_elems,temp_filtered_frame,\n np.zeros_like(temp_filtered_frame))\n\n middles = np.zeros_like(dists_short)\n middles.fill(0.45)\n\n #will be anded with one other numpy array to get middle range\n greater_than_zeros = np.greater_equal(dists_short,min_values)\n less_than_highs = np.less(dists_short,middles)\n a_little_short_elems = np.logical_and(greater_than_zeros,less_than_highs)\n\n some_space_filter_vals_added = np.where(a_little_short_elems,\n some_filtering,temp_filter_vals_added)\n\n\n a_lot_short_elems = np.greater_equal(dists_short,middles)\n lots_space_filter_vals_added = np.where(a_lot_short_elems, lots_filtering,\n some_space_filter_vals_added)\n\n return lots_space_filter_vals_added",
"def connected_components_grid(self, valueFilter):\n C = Grid2(self.W, self.H, -1)\n def helper(u, cid, value):\n count = 0\n if C.pget(u) == -1 and self.pget(u) == value:\n C.pset(u, cid)\n count += 1\n for (v,_) in self.nbors4(u):\n count += helper(v, cid, value)\n return count\n\n compid = 0\n compsizes = {}\n for (u,value) in self.piter():\n if valueFilter and value != valueFilter:\n continue\n size = helper(u, compid, value)\n if size > 0:\n compsizes[compid] = size\n compid += 1\n\n return (C, compsizes)",
"def condensePixels(image,coordinates,grid_width):\r\n gridded_data = []\r\n for i in range(0,len(image[0])-grid_width,grid_width):\r\n grid_line = []\r\n for j in range(0,len(image[0][0])-grid_width,grid_width):\r\n reds = []\r\n greens = []\r\n blues = []\r\n lats = []\r\n lons = []\r\n for m in range(i,i+grid_width):\r\n for n in range(j,j+grid_width):\r\n reds.append(image[0][m][n][0])\r\n greens.append(image[0][m][n][1])\r\n blues.append(image[0][m][n][2])\r\n lats.append(coordinates[m][n][0])\r\n lons.append(coordinates[m][n][1])\r\n grid_line.append([reds,greens, blues,lats,lons])\r\n gridded_data.append(grid_line)\r\n \r\n #Now to go through this gridded list and then create a dataframe that has the most common red, green, and blue per 100\r\n #pixel grid, and also the average latitude and average longitude\r\n compacted_data = []\r\n for i in range(len(gridded_data)):\r\n new_line = []\r\n for j in range(len(gridded_data[0])):\r\n reds_counts = pd.Series(gridded_data[i][j][0]).value_counts()\r\n common_red = np.uint8(pd.Series(reds_counts[reds_counts==max(reds_counts)].index).mean())\r\n greens_counts = pd.Series(gridded_data[i][j][1]).value_counts()\r\n common_green = np.uint8(pd.Series(greens_counts[greens_counts==max(greens_counts)].index).mean())\r\n blues_counts = pd.Series(gridded_data[i][j][2]).value_counts()\r\n common_blue = np.uint8(pd.Series(blues_counts[blues_counts==max(blues_counts)].index).mean())\r\n lat = pd.Series(gridded_data[i][j][3]).mean()\r\n lon = pd.Series(gridded_data[i][j][4]).mean()\r\n new_line.append(np.array([common_red, common_green, common_blue, lat, lon]))\r\n compacted_data.append(np.array(new_line))\r\n \r\n #now to create a new image list to test out what the compacted colors look like\r\n compacted_image = []\r\n for i in range(len(compacted_data)):\r\n line = []\r\n for j in range(len(compacted_data[0])):\r\n line.append(np.array([np.uint8(compacted_data[i][j][0]),np.uint8(compacted_data[i][j][1]),np.uint8(compacted_data[i][j][2])]))\r\n compacted_image.append(np.array(line))\r\n compacted_image = np.array(compacted_image)\r\n compacted_image = [compacted_image]\r\n \r\n return compacted_data, compacted_image",
"def test_count_neighbors(self):\n m, n = 5, 5\n k, p = 0.2, 0.7\n agents = [ConwayAgent(ii, ii & 0x1 == 1) for ii in range(m * n)]\n C = ConwayModel(m, n, k, p, agents)\n\n to_count = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n expected = np.array([[1, 1, 2], [2, 3, 1], [0, 2, 1]])\n result = C.count_neighbors(to_count)\n self.assertTrue(np.all(expected == result))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to perform a forward moving gap fill for all years in an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The forward gap fill is applied iteratively from the first year of bandNames through the final year, where if the current image has missing data, it is filled with the previous year's values. | def applyForwardNoDataFilter(image, bandNames):
#Get a list of band names from year(1) through the last year
bandNamesEE = ee.List(bandNames[1:])
#Define forwards filter
#In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year
#currentImage = image.select(bandNames[1]), the image for the second year
#previousImage = image.select(bandNames[0]), the first year
#Find where the second year has missing data, replace those values with the values of the first year
#Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill
#and the second band is the first years classification
    #The iteration continues, now with previousImage.select([0]) being the second year with the gap fill applied, and bandName is the third year
def forwardNoDataFilter(bandName, previousImage):
currentImage = image.select(ee.String(bandName))
previousImage = ee.Image(previousImage)
currentImage = currentImage.unmask(previousImage.select([0]))
return currentImage.addBands(previousImage)
#Iterate through all the years, starting with the first year's classification
filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))
filtered = ee.Image(filtered)
return filtered.select(bandNames) | [
"def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)",
"def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out",
"def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered",
"def fill_price_gaps(\n from_date=dt.datetime(1970,1,1),\n to_date=dt.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)\n ):\n #Create a collection of years\n years = []\n cur_year = from_date.year\n while cur_year <= to_date.year:\n years.append(cur_year)\n cur_year += 1\n #Loop each year\n all_year_dates = pd.DataFrame([])\n for year in tqdm(years, total=len(years), desc=\"Loop through years to find dates\"):\n #establish bounding dates\n year_from_date = None if year != from_date.year else from_date\n year_to_date = None if year != to_date.year else to_date\n #Get filtered year dates\n year_dates = create_filtered_year_dates(year, from_date=year_from_date, to_date=year_to_date, )\n #Add to the full list\n all_year_dates = pd.concat([all_year_dates, year_dates])\n #Order the dates (just in case)\n all_year_dates = all_year_dates.sort_values([\"date\"]) \\\n .reset_index(drop=True)\n #Fetch all the tickers\n tickers = sqlaq_to_df(ticker.fetch())\n #Loop through tickers\n errors = []\n run_time = ProcessTime()\n for _,r in tqdm(tickers[[\"id\",\"ticker\"]].iterrows(), total=tickers.shape[0], desc=\"Filling in gaps\"):\n logger.info(f\"Filling gaps in {r.id} -> {r.ticker}\")\n try:\n #Fetch all prices\n dp = sqlaq_to_df(daily_price.fetch(ticker_ids=[r.id]))\n dp[\"date\"] = dp.date.astype(\"datetime64[ns]\")\n #Identify missing dates\n missing_dates = pd.merge(all_year_dates, dp[[\"date\",\"id\"]], on=[\"date\"], how=\"left\")\n #Identify the start date and remove all missing date before that\n start_date = missing_dates[~missing_dates.id.isnull()].date.min()\n missing_dates = missing_dates[missing_dates.date > start_date]\n #Remove all other items which have dates\n missing_dates = missing_dates[missing_dates.id.isnull()]\n #Order remaining dates\n missing_dates = missing_dates.sort_values(\"date\")\n #Create groupings no larger than max_days (in config)\n st_d = None\n date_groups = []\n missing_dates = missing_dates.date.to_list()\n if len(missing_dates):\n for i,d in enumerate(missing_dates):\n if not st_d:\n st_d = d\n else:\n #Append when group gets too big\n if (d - st_d).days > WEB_SCRAPE_MAX_DAYS:\n date_groups.append([st_d, missing_dates[i-1]])\n #Update the start date\n st_d = d\n #Append the last item\n date_groups.append([st_d, d])\n #Scrape the missing prices\n logger.info('Number of webscrapes to perform -> {}'.format(len(date_groups)))\n #For each time frame perform a scrape\n try: #Try loop so as not to miss all following date groups\n for i,dates in enumerate(date_groups):\n logger.info(f\"Running dates {i} -> {dt.datetime.strptime(str(dates[0])[:10], '%Y-%m-%d')} - {dt.datetime.strptime(str(dates[1])[:10], '%Y-%m-%d')}\")\n process_daily_prices(\n r.ticker,\n r.id,\n st_date=dates[0],\n en_date=dates[1],\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e, \"st_date\":dates[0], \"en_dates\":dates[1]})\n #Run an update on th weekly prices\n process_weekly_prices(\n r.id,\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e})\n #Lap\n logger.info(run_time.lap())\n logger.info(run_time.show_latest_lap_time(show_time=True))\n logger.info(f\"GAP FILL RUN TIME - {run_time.end()}\")\n\n logger.info(f'\\nGAP FILL ERROR COUNT -> {len(errors)}')\n if len(errors) > 0:\n logger.info('GAP FILL ERRORS ->')\n for e in errors:\n logger.error(e)",
"def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = 
ee.Image('USDA/NASS/CDL/2017').select('cultivated'). \\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands",
"def missing_year_load_estimator(df_raw,fill):\n\n forecast_years = common.adjust_years(headers_list=list(df_raw.columns))\n\n df_raw['available_years'] = np.nan\n df_raw['year_forecasted'] = np.nan\n\n year_estimate_list = ['{}_{}'.format(common.Headers.estimate, x) for x in forecast_years]\n # Add columns to DataFrame with no files by adding in empty\n df_raw = pd.concat([df_raw, pd.DataFrame(columns=year_estimate_list)], sort=False)\n\n forecast_years = common.adjust_years(headers_list=list(df_raw.columns))\n # todo: maybe add to idx to identify the rows which the values of the loads are negative\n idx = df_raw[forecast_years].isna()\n\n z = (~idx.loc[1, :]).sum()\n\n df_raw['available_years'] = (~idx).sum(1)\n\n d = (df_raw['available_years'] > 1) & (df_raw['available_years'] < len(forecast_years))\n\n df_raw['year_forecasted'] = d\n\n est_row_list = d[d == True].index\n\n years_estimate_df = df_raw.loc[est_row_list, forecast_years]\n\n number_of_years = len(years_estimate_df.columns)\n years_estimate_df.columns = range(number_of_years)\n\n if fill==True:\n\n for j in range(len(est_row_list)):\n\n year_row1 = years_estimate_df.loc[est_row_list[j], :]\n t1 = year_row1.to_frame()\n estimated_array = common.interpolator(t1).T # common.interpolator gets a dataframe with one column and\n # number of indexes equal to the number of years and the missing values as nan, then interpolate the missing\n # values by using indexes as x and y being the values in the dataframe, then gives out a df with\n # the interpolated values\n n = 0\n for i in range(len(forecast_years)):\n if pd.isnull(df_raw.loc[est_row_list[j], forecast_years[i]]):\n df_raw.loc[est_row_list[j], forecast_years[i]] = 1\n df_raw.loc[est_row_list[j], forecast_years[i]] = estimated_array.iloc[0, n]\n df_raw.loc[est_row_list[j], year_estimate_list[i]] = estimated_array.iloc[0, n]\n n += 1\n\n return df_raw",
"def forward_filling(self, input_data):\n output_data = np.array(input_data)\n\n # Gap indices\n gap_list = np.ravel(np.argwhere(output_data == self.gap_value))\n new_gap_list = self._parse_gap_ids(gap_list)\n\n # Iterately fill in the gaps in the time series\n for gap in new_gap_list:\n # The entire time series is used for training until the gap\n first_gap_element_id = gap[0]\n timeseries_train_part = output_data[:first_gap_element_id]\n\n # Make forecast in the gap\n predicted = self.__forecast_in_gap(self.pipeline,\n timeseries_train_part,\n output_data, gap)\n\n # Replace gaps in an array with prediction values\n output_data[gap] = predicted\n return output_data",
"def fill_year(timeseries, value=0):\n # Obtain firts and last date from timeseries\n first_date = timeseries.index.min()\n last_date = timeseries.index.max()\n\n one_year_date = last_date - timedelta(days=365)\n\n ## Obtain the sunday beofre the date of one year ago\n starting_date = one_year_date - timedelta(days=one_year_date.weekday()+1)\n\n assert starting_date.weekday_name == 'Sunday'\n\n\n # Fill dates with mising zero\n date_range_series = create_timeseries(starting_date,\n first_date-timedelta(days=1),\n value)\n\n # Fill the original timeseries\n filled_timeseries = pd.concat([date_range_series, timeseries])\n\n return filled_timeseries",
"def setupYearlyData(self):\n numYears = len(self.yearsToMonths.keys())\n \n # create olrByYear as a 3 dimensional array with dimensions of\n # latitude, longitude, and year\n current_times, current_lats, current_longs = self.values.shape\n self.olrByYear = np.zeros((numYears, current_lats, current_longs), np.float32)\n\n for year in self.yearsToMonths.keys():\n year_dict = self.yearsToMonths[year]\n year_index = year_dict[self.yi]\n monthList = year_dict[self.ml]\n monthIndex = year_dict[self.mi]\n print(\"Getting Year Averages for Year = %d\" % year)\n self.getYearAvgs(self.olrByYear[year_index,:], monthList, monthIndex)",
"def fillna(df, col: str, forward: bool):\n na_prev = len(df)\n report = f'fillna(\"{col}\") ' + ('forward' if forward else 'backward') + ' NA count:'\n while True:\n na = df[col].isna().sum()\n report += f' {na}'\n if na == na_prev or na == 0: break\n na_prev = na\n # df must to be sorted by (ABI, YEAR)\n df.loc[df[col].isna(), col] = df.groupby('ABI')[col].shift(1 if forward else -1)",
"def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out",
"def make_annual(raster_path, year,):\n\n\twith Env(\"outputCoordinateSystem\", raster_path), Env(\"cellSize\", raster_path):\n\n\t\tnp_ras = arcpy.RasterToNumPyArray(raster_path, nodata_to_value=0)\n\t\tfor band_index, band in enumerate(np_ras):\n\t\t\t# print(\"Band Index: {}\".format(band_index))\n\t\t\t# print(\"Band has {} cells below 0 Min value {}\".format((band<0).sum(), band.min()))\n\t\t\tzero_fixed = np.where(band < 0, 0, band) # take the input data and set all locations that are less than 0 ET to 0 and leave everything above 0 as is\n\t\t\t# print(\"Fixed has {} cells below 0 Min value {}\".format((zero_fixed < 0).sum(), zero_fixed.min()))\n\t\t\tnp_ras[band_index] = np.multiply(zero_fixed, get_days_in_month_by_band_and_year(band_index, year)) # multiply the band by the number of days in the month and replace it\n\t\t\t# print(\"Stored has {} cells below 0. Min value {}\".format((np_ras[band_index] < 0).sum(), np_ras[band_index].min()))\n\t\tsummed_months = np.sum(np_ras, axis=0) # sum the bands together into one\n\t\t# print(\"Summed has {} cells below 0. min value{}\".format((summed_months < 0).sum(), summed_months.min()))\n\n\t\tlower_left = lower_left_point(raster_path)\n\n\t\tdesc = arcpy.Describe(os.path.join(str(raster_path), \"Band_1\")) # need to get a band for cell sizes\n\t\ttry:\n\t\t\tcell_width = desc.meanCellWidth\n\t\t\tcell_height = desc.meanCellHeight\n\n\t\t\tannual_raster = arcpy.NumPyArrayToRaster(summed_months, lower_left, cell_width, cell_height)\n\t\tfinally:\n\t\t\tdel desc\n\n\treturn annual_raster",
"def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out",
"def run_year(df, year, variable):\n\n ACCUM_VARS = [\"ACSNOW\", \"PCPT\", \"PCPC\", \"PCPNC\", \"POTEVP\"]\n\n # interpolate accumulation vars at `ind`\n if variable in ACCUM_VARS:\n arr = stack_year_accum(df, year, variable)\n else:\n arr = stack_year(df, year, variable)\n return arr",
"def fill_forward(df):\n df = df.fillna(method='ffill')\n df = df.fillna(method='bfill').fillna(0)\n return df",
"def gap_years_aggregated(mongo_client):\n db = mongo_client[\"nobel\"]\n\n original_categories = sorted(set(db.prizes.distinct(\"category\", {\"year\": \"1901\"})))\n\n pipeline = [\n {\"$match\": {\"category\": {\"$in\": original_categories}}},\n {\"$project\": {\"category\": 1, \"year\": 1}},\n\n # Collect the set of category values for each prize year.\n {\"$group\": {\"_id\": \"$year\", \"categories\": {\"$addToSet\": \"$category\"}}},\n\n # Project categories *not* awarded (i.e., that are missing this year).\n {\"$project\": {\"missing\": {\"$setDifference\": [original_categories, \"$categories\"]}}},\n\n # Only include years with at least one missing category\n {\"$match\": {\"missing.0\": {\"$exists\": True}}},\n\n # Sort in reverse chronological order. Note that \"_id\" is a distinct year at this stage.\n {\"$sort\": OrderedDict([(\"_id\", -1)])},\n ]\n\n for doc in db.prizes.aggregate(pipeline):\n print(\"{year}: {missing}\".format(year=doc[\"_id\"], missing=\", \".join(sorted(doc[\"missing\"]))))",
"def fill_extra_data(df, years_need_data_for, growth_rate, frequency):\n data_year = df.iloc[1:].index.year.unique() # grab all but the first index\n # which years do we not have data for\n no_data_year = {pd.Period(year) for year in years_need_data_for} - {pd.Period(year) for year in data_year}\n # if there is a year we dont have data for\n if len(no_data_year) > 0:\n for yr in no_data_year:\n source_year = pd.Period(max(data_year)) # which year to to apply growth rate to (is this the logic we want??)\n source_data = df.loc[df.index.year == source_year.year] # use source year data\n\n # create new dataframe for missing year\n try:\n new_data_df = pd.DataFrame()\n for col in df.columns:\n new_data = apply_growth(source_data[col], growth_rate, source_year, yr, frequency) # apply growth rate to column\n new_data_df = pd.concat([new_data_df, new_data], axis=1, sort=True)\n # add new year to original data frame\n df = pd.concat([df, new_data_df], sort=True)\n except AttributeError:\n new_data = apply_growth(source_data, growth_rate, source_year, yr, frequency) # apply growth rate to column\n # add new year to original data frame\n df = pd.concat([df, new_data], sort=True)\n return df",
"def forward_fill(marr, maxgap=None):\n # !!!: We should probably port that to C.\n # Initialization ..................\n if np.ndim(marr) > 1:\n raise ValueError,\"The input array should be 1D only!\"\n a = ma.array(marr, copy=True)\n amask = getmask(a)\n if amask is nomask or a.size == 0:\n return a\n #\n adata = getdata(a)\n # Get the indices of the masked values (except a[0])\n idxtofill = amask[1:].nonzero()[0] + 1\n currGap = 0\n if maxgap is not None:\n previdx = -1\n for i in idxtofill:\n if i != previdx + 1:\n currGap = 0\n currGap += 1\n if currGap <= maxgap and not amask[i-1]:\n adata[i] = adata[i-1]\n amask[i] = False\n previdx = i\n else:\n amask[i-maxgap:i] = True\n else:\n for i in idxtofill:\n if not amask[i-1]:\n adata[i] = adata[i-1]\n amask[i] = False\n return a",
"def _forward_fill(data: np.ndarray):\n last_values = None\n\n for row in data:\n if last_values is not None:\n # Get NaN values index\n idx = np.isnan(row)\n # Fill NaN values using last seen values\n row[idx] = last_values[idx]\n\n # Update last seen values\n last_values = row\n\n return data"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to perform a backward moving gap fill for all years in an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The backward gap fill is applied iteratively from the last year of bandNames through the first year, where if the current image has missing data, it is filled with the following year's values. | def applyBackwardNoDataFilter(image, bandNames):
#Get a list of band names to iterate over, from year(-2) through year(0)
bandNamesEE = ee.List(bandNames[:-1]).reverse()
#Define backwards filter
#In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year
#currentImage = image.select(bandNames[-2]), the second to last year
#followingImage = image.select(bandNames[-1]), the final year
#Find where the second to last year has missing data, replace those values with the values of the following year
#Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill
#and the second band is the final years classification
#The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year
def backwardNoDataFilter(bandName, followingImage):
currentImage = image.select(ee.String(bandName))
followingImage = ee.Image(followingImage)
currentImage = currentImage.unmask(followingImage.select([0]))
return currentImage.addBands(followingImage)
#Apply backwards filter, starting with the final year and iterating through to year(0)
filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))
#Re-order bands to be in chronological order
filtered = ee.Image(filtered)
return filtered.select(bandNames) | [
"def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)",
"def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered",
"def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out",
"def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = 
ee.Image('USDA/NASS/CDL/2017').select('cultivated'). \\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands",
"def backward_fill(marr, maxgap=None):\n return forward_fill(marr[::-1], maxgap=maxgap)[::-1]",
"def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out",
"def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out",
"def adjust_ranges(bands, freq):\n # Get the indices of the first and last non-zero elements.\n first = 0\n for k, v in freq.items():\n if v != 0:\n first = k\n break\n rev_keys = list(freq.keys())[::-1]\n last = rev_keys[0]\n for idx in list(freq.keys())[::-1]:\n if freq[idx] != 0:\n last = idx\n break\n # Now adjust the ranges.\n min_key = min(freq.keys())\n max_key = max(freq.keys())\n for idx in range(min_key, first):\n freq.pop(idx)\n bands.pop(idx)\n for idx in range(last + 1, max_key + 1):\n freq.popitem()\n bands.popitem()\n old_keys = freq.keys()\n adj_freq = dict()\n adj_bands = dict()\n\n for idx, k in enumerate(old_keys):\n adj_freq[idx] = freq[k]\n adj_bands[idx] = bands[k]\n\n return adj_bands, adj_freq",
"def gap_years_aggregated(mongo_client):\n db = mongo_client[\"nobel\"]\n\n original_categories = sorted(set(db.prizes.distinct(\"category\", {\"year\": \"1901\"})))\n\n pipeline = [\n {\"$match\": {\"category\": {\"$in\": original_categories}}},\n {\"$project\": {\"category\": 1, \"year\": 1}},\n\n # Collect the set of category values for each prize year.\n {\"$group\": {\"_id\": \"$year\", \"categories\": {\"$addToSet\": \"$category\"}}},\n\n # Project categories *not* awarded (i.e., that are missing this year).\n {\"$project\": {\"missing\": {\"$setDifference\": [original_categories, \"$categories\"]}}},\n\n # Only include years with at least one missing category\n {\"$match\": {\"missing.0\": {\"$exists\": True}}},\n\n # Sort in reverse chronological order. Note that \"_id\" is a distinct year at this stage.\n {\"$sort\": OrderedDict([(\"_id\", -1)])},\n ]\n\n for doc in db.prizes.aggregate(pipeline):\n print(\"{year}: {missing}\".format(year=doc[\"_id\"], missing=\", \".join(sorted(doc[\"missing\"]))))",
"def forward_filling(self, input_data):\n output_data = np.array(input_data)\n\n # Gap indices\n gap_list = np.ravel(np.argwhere(output_data == self.gap_value))\n new_gap_list = self._parse_gap_ids(gap_list)\n\n # Iterately fill in the gaps in the time series\n for gap in new_gap_list:\n # The entire time series is used for training until the gap\n first_gap_element_id = gap[0]\n timeseries_train_part = output_data[:first_gap_element_id]\n\n # Make forecast in the gap\n predicted = self.__forecast_in_gap(self.pipeline,\n timeseries_train_part,\n output_data, gap)\n\n # Replace gaps in an array with prediction values\n output_data[gap] = predicted\n return output_data",
"def missing_year_load_estimator(df_raw,fill):\n\n forecast_years = common.adjust_years(headers_list=list(df_raw.columns))\n\n df_raw['available_years'] = np.nan\n df_raw['year_forecasted'] = np.nan\n\n year_estimate_list = ['{}_{}'.format(common.Headers.estimate, x) for x in forecast_years]\n # Add columns to DataFrame with no files by adding in empty\n df_raw = pd.concat([df_raw, pd.DataFrame(columns=year_estimate_list)], sort=False)\n\n forecast_years = common.adjust_years(headers_list=list(df_raw.columns))\n # todo: maybe add to idx to identify the rows which the values of the loads are negative\n idx = df_raw[forecast_years].isna()\n\n z = (~idx.loc[1, :]).sum()\n\n df_raw['available_years'] = (~idx).sum(1)\n\n d = (df_raw['available_years'] > 1) & (df_raw['available_years'] < len(forecast_years))\n\n df_raw['year_forecasted'] = d\n\n est_row_list = d[d == True].index\n\n years_estimate_df = df_raw.loc[est_row_list, forecast_years]\n\n number_of_years = len(years_estimate_df.columns)\n years_estimate_df.columns = range(number_of_years)\n\n if fill==True:\n\n for j in range(len(est_row_list)):\n\n year_row1 = years_estimate_df.loc[est_row_list[j], :]\n t1 = year_row1.to_frame()\n estimated_array = common.interpolator(t1).T # common.interpolator gets a dataframe with one column and\n # number of indexes equal to the number of years and the missing values as nan, then interpolate the missing\n # values by using indexes as x and y being the values in the dataframe, then gives out a df with\n # the interpolated values\n n = 0\n for i in range(len(forecast_years)):\n if pd.isnull(df_raw.loc[est_row_list[j], forecast_years[i]]):\n df_raw.loc[est_row_list[j], forecast_years[i]] = 1\n df_raw.loc[est_row_list[j], forecast_years[i]] = estimated_array.iloc[0, n]\n df_raw.loc[est_row_list[j], year_estimate_list[i]] = estimated_array.iloc[0, n]\n n += 1\n\n return df_raw",
"def forward_inverse_filling(self, input_data):\n output_data = np.array(input_data)\n\n # Gap indices\n gap_list = np.ravel(np.argwhere(output_data == self.gap_value))\n new_gap_list = self._parse_gap_ids(gap_list)\n\n # Iterately fill in the gaps in the time series\n for batch_index in range(len(new_gap_list)):\n\n preds = []\n weights = []\n # Two predictions are generated for each gap - forward and backward\n for direction_function in [self._forward, self._inverse]:\n weights_list, predicted_list = direction_function(output_data,\n batch_index,\n new_gap_list)\n weights.append(weights_list)\n preds.append(predicted_list)\n\n preds = np.array(preds)\n weights = np.array(weights)\n result = np.average(preds, axis=0, weights=weights)\n\n gap = new_gap_list[batch_index]\n # Replace gaps in an array with prediction values\n output_data[gap] = result\n\n return output_data",
"def make_annual(raster_path, year,):\n\n\twith Env(\"outputCoordinateSystem\", raster_path), Env(\"cellSize\", raster_path):\n\n\t\tnp_ras = arcpy.RasterToNumPyArray(raster_path, nodata_to_value=0)\n\t\tfor band_index, band in enumerate(np_ras):\n\t\t\t# print(\"Band Index: {}\".format(band_index))\n\t\t\t# print(\"Band has {} cells below 0 Min value {}\".format((band<0).sum(), band.min()))\n\t\t\tzero_fixed = np.where(band < 0, 0, band) # take the input data and set all locations that are less than 0 ET to 0 and leave everything above 0 as is\n\t\t\t# print(\"Fixed has {} cells below 0 Min value {}\".format((zero_fixed < 0).sum(), zero_fixed.min()))\n\t\t\tnp_ras[band_index] = np.multiply(zero_fixed, get_days_in_month_by_band_and_year(band_index, year)) # multiply the band by the number of days in the month and replace it\n\t\t\t# print(\"Stored has {} cells below 0. Min value {}\".format((np_ras[band_index] < 0).sum(), np_ras[band_index].min()))\n\t\tsummed_months = np.sum(np_ras, axis=0) # sum the bands together into one\n\t\t# print(\"Summed has {} cells below 0. min value{}\".format((summed_months < 0).sum(), summed_months.min()))\n\n\t\tlower_left = lower_left_point(raster_path)\n\n\t\tdesc = arcpy.Describe(os.path.join(str(raster_path), \"Band_1\")) # need to get a band for cell sizes\n\t\ttry:\n\t\t\tcell_width = desc.meanCellWidth\n\t\t\tcell_height = desc.meanCellHeight\n\n\t\t\tannual_raster = arcpy.NumPyArrayToRaster(summed_months, lower_left, cell_width, cell_height)\n\t\tfinally:\n\t\t\tdel desc\n\n\treturn annual_raster",
"def fill_price_gaps(\n from_date=dt.datetime(1970,1,1),\n to_date=dt.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)\n ):\n #Create a collection of years\n years = []\n cur_year = from_date.year\n while cur_year <= to_date.year:\n years.append(cur_year)\n cur_year += 1\n #Loop each year\n all_year_dates = pd.DataFrame([])\n for year in tqdm(years, total=len(years), desc=\"Loop through years to find dates\"):\n #establish bounding dates\n year_from_date = None if year != from_date.year else from_date\n year_to_date = None if year != to_date.year else to_date\n #Get filtered year dates\n year_dates = create_filtered_year_dates(year, from_date=year_from_date, to_date=year_to_date, )\n #Add to the full list\n all_year_dates = pd.concat([all_year_dates, year_dates])\n #Order the dates (just in case)\n all_year_dates = all_year_dates.sort_values([\"date\"]) \\\n .reset_index(drop=True)\n #Fetch all the tickers\n tickers = sqlaq_to_df(ticker.fetch())\n #Loop through tickers\n errors = []\n run_time = ProcessTime()\n for _,r in tqdm(tickers[[\"id\",\"ticker\"]].iterrows(), total=tickers.shape[0], desc=\"Filling in gaps\"):\n logger.info(f\"Filling gaps in {r.id} -> {r.ticker}\")\n try:\n #Fetch all prices\n dp = sqlaq_to_df(daily_price.fetch(ticker_ids=[r.id]))\n dp[\"date\"] = dp.date.astype(\"datetime64[ns]\")\n #Identify missing dates\n missing_dates = pd.merge(all_year_dates, dp[[\"date\",\"id\"]], on=[\"date\"], how=\"left\")\n #Identify the start date and remove all missing date before that\n start_date = missing_dates[~missing_dates.id.isnull()].date.min()\n missing_dates = missing_dates[missing_dates.date > start_date]\n #Remove all other items which have dates\n missing_dates = missing_dates[missing_dates.id.isnull()]\n #Order remaining dates\n missing_dates = missing_dates.sort_values(\"date\")\n #Create groupings no larger than max_days (in config)\n st_d = None\n date_groups = []\n missing_dates = missing_dates.date.to_list()\n if len(missing_dates):\n for i,d in enumerate(missing_dates):\n if not st_d:\n st_d = d\n else:\n #Append when group gets too big\n if (d - st_d).days > WEB_SCRAPE_MAX_DAYS:\n date_groups.append([st_d, missing_dates[i-1]])\n #Update the start date\n st_d = d\n #Append the last item\n date_groups.append([st_d, d])\n #Scrape the missing prices\n logger.info('Number of webscrapes to perform -> {}'.format(len(date_groups)))\n #For each time frame perform a scrape\n try: #Try loop so as not to miss all following date groups\n for i,dates in enumerate(date_groups):\n logger.info(f\"Running dates {i} -> {dt.datetime.strptime(str(dates[0])[:10], '%Y-%m-%d')} - {dt.datetime.strptime(str(dates[1])[:10], '%Y-%m-%d')}\")\n process_daily_prices(\n r.ticker,\n r.id,\n st_date=dates[0],\n en_date=dates[1],\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e, \"st_date\":dates[0], \"en_dates\":dates[1]})\n #Run an update on th weekly prices\n process_weekly_prices(\n r.id,\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e})\n #Lap\n logger.info(run_time.lap())\n logger.info(run_time.show_latest_lap_time(show_time=True))\n logger.info(f\"GAP FILL RUN TIME - {run_time.end()}\")\n\n logger.info(f'\\nGAP FILL ERROR COUNT -> {len(errors)}')\n if len(errors) > 0:\n logger.info('GAP FILL ERRORS ->')\n for e in errors:\n logger.error(e)",
"def __interpolate_missing_frames__(bboxes):\r\n for entity_idx, entity in enumerate(bboxes):\r\n prev_frame = None\r\n prev_val = None\r\n\r\n for t, val in entity:\r\n if (prev_frame is not None) and (t-1 != prev_frame):\r\n steps = t - prev_frame\r\n diff = (val - prev_val) / steps\r\n\r\n for i in range(steps - 1):\r\n interp = np.round(prev_val + (i+1) * diff).astype(np.int32)\r\n entity.append((prev_frame + i + 1, interp))\r\n\r\n entity.sort(key=lambda x: x[0])\r\n prev_frame += 1\r\n else:\r\n prev_frame = t\r\n prev_val = val\r\n\r\n return bboxes",
"def create_age_bands(\n original_df, age_field_name, start_age=0, end_age=90, years_per_band=5\n):\n df = original_df.copy()\n\n banded_field_name = \"age_band\"\n\n bin_buckets = [item for item in range(start_age, end_age + 1, years_per_band)]\n bin_buckets.append(999)\n lbs = [\n \"(%d, %d]\" % (bin_buckets[i], bin_buckets[i + 1])\n for i in range(len(bin_buckets) - 1)\n ]\n\n df[banded_field_name] = pd.cut(\n x=df[age_field_name], bins=bin_buckets, labels=lbs, include_lowest=True\n ).astype(str)\n df[banded_field_name] = df[banded_field_name].str.strip()\n\n current_last_band_name = f\"({end_age}, 999]\"\n last_band_name = f\"({end_age}plus]\"\n\n df[banded_field_name] = df[banded_field_name].replace(\n current_last_band_name, last_band_name, regex=False\n )\n\n df[banded_field_name] = df[banded_field_name].astype(str)\n df[banded_field_name] = df[banded_field_name].apply(\n lambda x: str(x[x.find(\"(\") + 1 : x.find(\"]\")])\n )\n df[banded_field_name] = df[banded_field_name].apply(lambda x: x.replace(\",\", \" -\"))\n\n # the bands need to be ordered by first age in range for visualisation later\n order = df[df[banded_field_name] != \"na\"][banded_field_name].unique()\n sorted_idx = np.argsort([int(x[:2]) for x in order])\n new_order = order[sorted_idx]\n # fix order for the column\n df[banded_field_name] = pd.Categorical(\n df[banded_field_name], categories=new_order, ordered=True\n )\n\n # remove the string 'na' and replace with numpy nan so that missing rates can be computed later\n df[banded_field_name] = df[banded_field_name].replace(\"na\", np.nan)\n\n return df",
"def get_year_regions_conflict_df(clean_conflict):\n columns = [\"Year\", \"Europe\", \"Middle East\", \"Asia\", \"Africa\", \"America\"]\n years = np.linspace(1945, 2016, num=2016-1945+1, dtype=int)\n years = np.reshape(years, (len(years), 1))\n regions = np.zeros((len(years), 5), dtype=int)\n years_regions = np.concatenate((years,regions),axis=1)\n\n yr_conflict_df = pd.DataFrame(years_regions, columns=columns)\n yr_conflict_df = yr_conflict_df.set_index('Year')\n\n for index in range(len(clean_conflict)):\n year = clean_conflict.get_value(index,'year')\n region = clean_conflict.get_value(index,'region')\n\n if int(region) == 1: yr_conflict_df.set_value(year, 'Europe', yr_conflict_df.get_value(year, 'Europe')+1)\n if int(region) == 2: yr_conflict_df.set_value(year, 'Middle East', yr_conflict_df.get_value(year, 'Middle East')+1)\n if int(region) == 3: yr_conflict_df.set_value(year, 'Asia', yr_conflict_df.get_value(year, 'Asia')+1)\n if int(region) == 4: yr_conflict_df.set_value(year, 'Africa', yr_conflict_df.get_value(year, 'Africa')+1)\n if int(region) == 5: yr_conflict_df.set_value(year, 'America', yr_conflict_df.get_value(year, 'America')+1)\n\n yr_conflict_df.to_csv(\"datasets/colonization_conflict_year_regions.csv\")\n\n return yr_conflict_df",
"def forward_fill(marr, maxgap=None):\n # !!!: We should probably port that to C.\n # Initialization ..................\n if np.ndim(marr) > 1:\n raise ValueError,\"The input array should be 1D only!\"\n a = ma.array(marr, copy=True)\n amask = getmask(a)\n if amask is nomask or a.size == 0:\n return a\n #\n adata = getdata(a)\n # Get the indices of the masked values (except a[0])\n idxtofill = amask[1:].nonzero()[0] + 1\n currGap = 0\n if maxgap is not None:\n previdx = -1\n for i in idxtofill:\n if i != previdx + 1:\n currGap = 0\n currGap += 1\n if currGap <= maxgap and not amask[i-1]:\n adata[i] = adata[i-1]\n amask[i] = False\n previdx = i\n else:\n amask[i-maxgap:i] = True\n else:\n for i in idxtofill:\n if not amask[i-1]:\n adata[i] = adata[i-1]\n amask[i] = False\n return a",
"def fill_year(timeseries, value=0):\n # Obtain firts and last date from timeseries\n first_date = timeseries.index.min()\n last_date = timeseries.index.max()\n\n one_year_date = last_date - timedelta(days=365)\n\n ## Obtain the sunday beofre the date of one year ago\n starting_date = one_year_date - timedelta(days=one_year_date.weekday()+1)\n\n assert starting_date.weekday_name == 'Sunday'\n\n\n # Fill dates with mising zero\n date_range_series = create_timeseries(starting_date,\n first_date-timedelta(days=1),\n value)\n\n # Fill the original timeseries\n filled_timeseries = pd.concat([date_range_series, timeseries])\n\n return filled_timeseries"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to apply forward gap filling and backward gap filling to an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. This function calls applyForwardNoDataFilter, then applyBackwardNoDataFilter. | def applyGapFilter(image, bandNames):
filtered = applyForwardNoDataFilter(image, bandNames)
filtered = applyBackwardNoDataFilter(filtered, bandNames)
return filtered | [
"def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)",
"def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)",
"def each_band( imarr, funct, *args, **kwargs ):\n if type(imarr)==np.ma.MaskedArray:\n return each_band_masked( imarr, funct, *args, **kwargs )\n else:\n return each_band_unmasked( imarr, funct, *args, **kwargs )",
"def adjust_ranges(bands, freq):\n # Get the indices of the first and last non-zero elements.\n first = 0\n for k, v in freq.items():\n if v != 0:\n first = k\n break\n rev_keys = list(freq.keys())[::-1]\n last = rev_keys[0]\n for idx in list(freq.keys())[::-1]:\n if freq[idx] != 0:\n last = idx\n break\n # Now adjust the ranges.\n min_key = min(freq.keys())\n max_key = max(freq.keys())\n for idx in range(min_key, first):\n freq.pop(idx)\n bands.pop(idx)\n for idx in range(last + 1, max_key + 1):\n freq.popitem()\n bands.popitem()\n old_keys = freq.keys()\n adj_freq = dict()\n adj_bands = dict()\n\n for idx, k in enumerate(old_keys):\n adj_freq[idx] = freq[k]\n adj_bands[idx] = bands[k]\n\n return adj_bands, adj_freq",
"def each_band_unmasked( imarr, funct, *args, **kwargs ):\n outlist = []\n for i in range( imarr.shape[-1] ):\n outlist.append( funct( imarr[...,i], *args, **kwargs ) )\n return np.dstack( outlist )",
"def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = 
ee.Image('USDA/NASS/CDL/2017').select('cultivated'). \\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands",
"def bandpasscorrect(data):\n ret=[x for x in data]\n n=len(ret)\n ret[0]=1.083*ret[0]-0.083*ret[1]\n ret[n-1]=1.083*ret[n-1]-0.083*ret[n-2]\n for k in range(1,n-1):\n ret[k]=1.166*ret[k]-0.083*ret[k-1]-0.083*ret[k+1]\n return ret",
"def band_filter(self, bands) -> 'ImageCollection':\n\n process_id = 'filter_bands'\n args = {\n 'imagery': self.graph,\n 'bands': bands\n }\n return self.graph_add_process(process_id, args)",
"def each_band_masked( imarr, funct, *args, **kwargs ):\n outlist = []\n ismalist = []\n for i in range( imarr.shape[-1] ):\n newband = funct( imarr[:,:,i], *args, **kwargs )\n outlist.append( newband )\n ismalist.append( type(newband)==np.ma.MaskedArray )\n if False in ismalist:\n warnings.warn( \"A function returned an unmasked array when a masked array was expected. I'll try to copy the mask from the input array.\")\n outarr = np.ma.dstack( outlist )\n outarr.mask = imarr.mask\n outarr.set_fill_value( imarr.fill_value )\n return outarr\n else:\n return np.ma.dstack( outlist )",
"def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out",
"def band_df( imarr, bandnames=None, equalize_masks=True ):\n if equalize_masks and np.ma.isMaskedArray(imarr):\n imarr = equalize_band_masks( imarr )\n nbands = imarr.shape[-1]\n if not bandnames:\n bandnames = [ 'band'+str(i+1) for i in range( nbands ) ]\n ddict = {}\n for bn in range( nbands ):\n if np.ma.isMaskedArray(imarr):\n ddict[bandnames[bn]] = imarr[...,bn].compressed()\n else:\n ddict[bandnames[bn]] = imarr[...,bn].ravel()\n return pd.DataFrame( ddict )",
"def binary_fillhole_image_filter(*args, **kwargs):\n import itk\n instance = itk.BinaryFillholeImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()",
"def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask",
"def bandpass(array, freqmin, freqmax, df, corners=4, zerophase=True):\n from scipy.signal import sosfilt, zpk2sos, iirfilter\n fe = 0.5 * df\n low = freqmin / fe\n high = freqmax / fe\n # raise for some bad scenarios\n if high > 1:\n high = 1.0\n msg = \"Selected high corner frequency is above Nyquist. \" + \\\n \"Setting Nyquist as high corner.\"\n print(msg)\n if low > 1:\n msg = \"Selected low corner frequency is above Nyquist.\"\n raise ValueError(msg)\n z, p, k = iirfilter(corners, [low, high], btype='band',\n ftype='butter', output='zpk')\n sos = zpk2sos(z, p, k)\n if zerophase:\n firstpass = sosfilt(sos, array)\n return sosfilt(sos, firstpass[::-1])[::-1]\n else:\n return sosfilt(sos, array)",
"def forward_inverse_filling(self, input_data):\n output_data = np.array(input_data)\n\n # Gap indices\n gap_list = np.ravel(np.argwhere(output_data == self.gap_value))\n new_gap_list = self._parse_gap_ids(gap_list)\n\n # Iterately fill in the gaps in the time series\n for batch_index in range(len(new_gap_list)):\n\n preds = []\n weights = []\n # Two predictions are generated for each gap - forward and backward\n for direction_function in [self._forward, self._inverse]:\n weights_list, predicted_list = direction_function(output_data,\n batch_index,\n new_gap_list)\n weights.append(weights_list)\n preds.append(predicted_list)\n\n preds = np.array(preds)\n weights = np.array(weights)\n result = np.average(preds, axis=0, weights=weights)\n\n gap = new_gap_list[batch_index]\n # Replace gaps in an array with prediction values\n output_data[gap] = result\n\n return output_data",
"def fill_zero_pixels_by_interpolate(im):\n pass",
"def dwt(image_array, quantization_Array):\n # Create the high pass and low pass filters\n # both filters are non-causal\n # symmetric\n # [-2, -1, 0, 1, 2]\n LPF = [-0.125, 0.25, 0.75, 0.25, -0.125]\n LPF_center = 2\n\n # [ -2,-1, 0]\n HPF = [-0.5, 1, -0.5]\n HPF_center = 2\n\n nrow, ncol = image_array.shape\n\n # create an array that will contain the 4 different subbands of the image\n LL = np.zeros((nrow, ncol))\n LH = np.zeros((nrow, ncol))\n HL = np.zeros((nrow, ncol))\n HH = np.zeros((nrow, ncol))\n filtered_image = [LL, LH, HL, HH]\n\n # filtering the rows using a low pass and high pass filters\n LowPass_rows = np.zeros((nrow, ncol))\n HighPass_rows = np.zeros((nrow, ncol))\n for i in range(0, nrow):\n LowPass_rows[i, :] = lfilter(LPF, image_array[i, :], LPF_center)\n HighPass_rows[i, :] = lfilter(HPF, image_array[i, :], HPF_center)\n\n # down sample rows.\n # which means we will have half the number of columns\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][:, ::2]\n\n # apply filters accross columns\n for i in range(0, ncol):\n LL[:, i] = lfilter(LPF, LowPass_rows[:, i], LPF_center)\n LH[:, i] = lfilter(HPF, LowPass_rows[:, i], HPF_center)\n HL[:, i] = lfilter(LPF, HighPass_rows[:, i], LPF_center)\n HH[:, i] = lfilter(HPF, HighPass_rows[:, i], HPF_center)\n\n # down sample columns and quantize\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][::2, :]\n filtered_image[i] = np.round(\n filtered_image[i]/quantization_Array[i]).astype(int)\n\n return filtered_image",
"def forward_fill(marr, maxgap=None):\n # !!!: We should probably port that to C.\n # Initialization ..................\n if np.ndim(marr) > 1:\n raise ValueError,\"The input array should be 1D only!\"\n a = ma.array(marr, copy=True)\n amask = getmask(a)\n if amask is nomask or a.size == 0:\n return a\n #\n adata = getdata(a)\n # Get the indices of the masked values (except a[0])\n idxtofill = amask[1:].nonzero()[0] + 1\n currGap = 0\n if maxgap is not None:\n previdx = -1\n for i in idxtofill:\n if i != previdx + 1:\n currGap = 0\n currGap += 1\n if currGap <= maxgap and not amask[i-1]:\n adata[i] = adata[i-1]\n amask[i] = False\n previdx = i\n else:\n amask[i-maxgap:i] = True\n else:\n for i in idxtofill:\n if not amask[i-1]:\n adata[i] = adata[i-1]\n amask[i] = False\n return a",
"def bqa_fmask_func(qa):\n # Extracting cloud masks from BQA using np.right_shift() and np.bitwise_and()\n # Cloud (med & high confidence), then snow, then shadow, then fill\n # Low confidence clouds tend to be the FMask buffer\n fill_mask = np.bitwise_and(np.right_shift(qa, 0), 1) >= 1\n cloud_mask = np.bitwise_and(np.right_shift(qa, 4), 1) >= 1 # cloud bit\n cloud_mask &= np.bitwise_and(np.right_shift(qa, 5), 3) >= 2 # cloud conf.\n cloud_mask |= np.bitwise_and(np.right_shift(qa, 11), 3) >= 3 # cirrus\n shadow_mask = np.bitwise_and(np.right_shift(qa, 7), 3) >= 3\n snow_mask = np.bitwise_and(np.right_shift(qa, 9), 3) >= 3\n\n fmask = (fill_mask != True).astype(np.uint8)\n fmask[shadow_mask] = 2\n fmask[snow_mask] = 3\n fmask[cloud_mask] = 4\n\n return fmask"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
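A minimal usage sketch for the gap filter documented above, assuming an authenticated Earth Engine Python session; the asset ID and band names are hypothetical placeholders, and applyForwardNoDataFilter / applyBackwardNoDataFilter are assumed to be defined alongside applyGapFilter.

import ee

ee.Initialize()

#Hypothetical land cover stack with one classification band per year (placeholder asset ID and band names)
band_names = ['classification_2000', 'classification_2001', 'classification_2002']
lc_stack = ee.Image('users/example/land_cover_stack').select(band_names)

#Forward fill first, then backward fill, exactly as applyGapFilter does internally
gap_filled = applyGapFilter(lc_stack, band_names)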
Function to apply an incidence filter. The incidence filter finds all pixels that changed more than numChangesCutoff times and are connected to fewer than connectedPixelCutoff pixels, then replaces those pixels with the MODE value of that pixel position in the stack of years. | def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):
#Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff
num_changes = calculateNumberOfChanges(image, bandNames)
too_many_changes = num_changes.gt(numChangesCutoff)
#Get binary images of the land cover classifications for the current year
binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)
#Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number
#of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff
connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))
#Use bitwiseAnd to flag pixels where the number of connected pixels < connectedPixelCutoff and the number of changes > numChangesCutoff
incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)
#Get an image that represents the mode of the land cover classes in each pixel
mode_image = image.reduce(ee.Reducer.mode())
#Replace pixels of image where incidence_filter is True with mode_image
incidence_filtered = image.where(incidence_filter, mode_image)
return incidence_filtered | [
"def apply(self, src, dst):\n cv2.filter2D(src, -1, self._kernel, dst)",
"def applyInterpixCrosstalk(self):\n import copy\n ICTcoeff = np.array([[self.information['c1'], self.information['c2'],self.information['c3']],[self.information['c4'],self.information['c5'],self.information['c6']],[self.information['c7'],self.information['c8'],self.information['c9']]])\n \n # apply coefficients to the image\n copie=np.float32(copy.deepcopy(self.image))\n convolve = ndimage.convolve(copie, ICTcoeff, mode='constant', cval=0.0) \n self.image = copy.deepcopy(convolve) \n \n # Replace self.image in self.image_total\n if 'NrefPix_x' in self.config and 'NrefPix_y' in self.config :\n self.image_total[self.information['NrefPix_x']:-self.information['NrefPix_x'], self.information['NrefPix_y']:-self.information['NrefPix_y']]=self.image\n else :\n self.image_total=self.image",
"def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img",
"def applyInstrumIntrinsicNoise(self) :\n instrumIntrinsicNoise=fits.getdata(self.path+'/data/IntrinsicNoise/'+self.information['instrumIntrinsicNoiseFile'])\n gain_conv=fits.getdata(self.path+'/data/GainMap/'+self.information['GainMapFile'])\n #reference pixel case management\n if 'NrefPix_x' in self.config and 'NrefPix_y' in self.config :\n instrumIntrinsicNoise = instrumIntrinsicNoise[self.information['NrefPix_x']:-self.information['NrefPix_x'], self.information['NrefPix_y']:-self.information['NrefPix_y']]\n gain_conv=gain_conv[self.information['NrefPix_x']:-self.information['NrefPix_x'], self.information['NrefPix_y']:-self.information['NrefPix_y']]\n \n self.image+=instrumIntrinsicNoise*self.config['exptime']*gain_conv",
"def mask_incoherent(self):\n self.MaskPrefix = 'i' + self.MaskPrefix\n print('Masking pixel values where .msk value is less than {0}...'.format(threshold))\n for ig in self.Set:\n igram = self.load_ma(ig)\n mskFile = ig.Path[:-3] + 'msk'\n coherence = roipy.tools.load_half(ig, 2, mskFile)\n incoherent = ma.masked_less(coherence, self.Cothresh)\n igram[incoherent.mask] = ma.masked\n mskFile = self.MaskPrefix + 'Mask_' + ig.Name[:-4]\n np.save(os.path.join(self.ProcDir, mskFile), igram.mask)\n print(mskFile)\n\n print('Done')",
"def apply(self,src,dst):\n cv2.filter2D(src,-1,self._kernel,dst) #The second argument specifies the per-channel depth of the destination image\n #(such as cv2.CV_8U for 8 bits per channel). A negative value (as used here) means\n #that the destination image has the same depth as the source image.",
"def SetIntensityDifferenceThreshold(self, arg0: 'double') -> \"void\":\n return _itkLevelSetMotionRegistrationFilterPython.itkLevelSetMotionRegistrationFilterISS2ISS2IVF42_SetIntensityDifferenceThreshold(self, arg0)",
"def isolated_connected_image_filter(*args, **kwargs):\n import itk\n instance = itk.IsolatedConnectedImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()",
"def SetIntensityDifferenceThreshold(self, arg0: 'double') -> \"void\":\n return _itkLevelSetMotionRegistrationFilterPython.itkLevelSetMotionRegistrationFilterISS2ISS2IVF22_SetIntensityDifferenceThreshold(self, arg0)",
"def sepfirnd(input,filters,axes,output=None,mode='reflect',cval=0.0,origin=0):\n if output is None:\n output = np.empty_like(input)\n tmp = output\n if np.isscalar(filters[0]):\n filters = [np.asarray(filters)]\n if np.isscalar(axes):\n axes = [axes]\n if len(axes) > 1:\n tmp = np.empty_like(output)\n if len(filters) == 1:\n filters = [filters[0]]*len(axes)\n if len(axes) & 1 == 1: #pre-swap so that last write goes to output\n output,tmp = tmp,output \n for filt,ax in zip(filters,axes):\n output,tmp = tmp,output #swap buffers\n convolve1d(input,filt,ax,output,mode,cval,origin)\n input = output\n return output",
"def SetIntensityDifferenceThreshold(self, arg0: 'double') -> \"void\":\n return _itkLevelSetMotionRegistrationFilterPython.itkLevelSetMotionRegistrationFilterIUC2IUC2IVF42_SetIntensityDifferenceThreshold(self, arg0)",
"def SetIntensityDifferenceThreshold(self, arg0: 'double') -> \"void\":\n return _itkLevelSetMotionRegistrationFilterPython.itkLevelSetMotionRegistrationFilterIF2IF2IVF42_SetIntensityDifferenceThreshold(self, arg0)",
"def get_year_intensity_conflict_df(clean_conflict):\n columns = [\"Year\", \"Intensity1\", \"Intensity2\"]\n years = np.linspace(1945, 2016, num=2016-1945+1, dtype=int)\n years = np.reshape(years, (len(years), 1))\n intensity = np.zeros((len(years), 2), dtype=int)\n years_intensity = np.concatenate((years,intensity),axis=1)\n\n yr_conflict_df = pd.DataFrame(years_intensity, columns=columns)\n yr_conflict_df = yr_conflict_df.set_index('Year')\n\n for index in range(len(clean_conflict)):\n year = clean_conflict.get_value(index,'year')\n intensity = clean_conflict.get_value(index,'intensity')\n\n if int(intensity) == 1: yr_conflict_df.set_value(year, 'Intensity1', yr_conflict_df.get_value(year, 'Intensity1')+1)\n if int(intensity) == 2: yr_conflict_df.set_value(year, 'Intensity2', yr_conflict_df.get_value(year, 'Intensity2')+1)\n\n yr_conflict_df.to_csv(\"datasets/colonization_conflict_year_intensity.csv\")\n\n return yr_conflict_df",
"def filter(self, cutoff_value, input_list):\r\n output_list = []\r\n for tup in input_list:\r\n if tup[0] + tup[1] + tup[2] > 3*cutoff_value: # if the pixel is above a certain RGB value then its a bright pixel else dark\r\n tup1 = (255, 255, 255)\r\n else:\r\n tup1 = (0, 0, 0)\r\n output_list.append(tup1)\r\n return output_list",
"def filterSignal(self):\r\n if self.filtBool:\r\n signalPartSize = self.signalPart.size\r\n hammingWindow = np.hamming(signalPartSize)\r\n self.signalPart = np.multiply(hammingWindow,self.signalPart)",
"def equalize_exposure(image, iterations=1, kernel_size=None, min_object_size=500, dark_objects=True, stretch=False):\n\n # Housekeeping\n img = img_as_float(image.copy())\n\n if stretch is True:\n img = img/img.max()\n\n if dark_objects is False:\n img = 1-img # invert\n\n img_in = img.copy() # for use later\n\n if kernel_size is None:\n kernel_size = np.int(max(image.shape[0], image.shape[1])/10)\n\n # mean filter kernel\n kernel = morphology.disk(int(kernel_size/2))\n\n # identify objects to ignore\n if kernel_size % 2 is 0:\n block_size = kernel_size + 1\n else:\n block_size = kernel_size\n\n #objects = ~filters.threshold_adaptive(img, block_size, offset = 0.01*img.max()) # deprecated function\n objects = img > filters.threshold_local(img, block_size, offset = 0.01*img.max())\n objects = morphology.remove_small_objects(objects, min_size = min_object_size)\n\n # Correct Exposure x times\n i = 0\n while i < iterations:\n # Global mean\n img_mean = np.ma.masked_array(img, mask=objects).mean()\n\n # global means\n local_means = filters.rank.mean(img, selem=kernel, mask=~objects)\n local_means = filters.gaussian(local_means, kernel_size)\n\n # Correct Image\n img += (img_mean - local_means)\n img[img>1] = 1 # for compatibilty with img_as_float\n img[img<0] = 0 # for compatibilty with img_as_float\n i += 1\n\n out = img_as_float(img)\n\n return(out)",
"def _run_filter(self):\n result_lh = np.array([result[\"lh\"] for result in self.results])\n result_x = np.array([result[\"x\"] for result in self.results])\n lh_idx = np.where(result_lh >= self.lh_lim)[0]\n edge_idx = np.where(result_x <= self.starting_x_lim)[0]\n\n # Perform the CNN filtering only if cutoff != 0 and\n # the filter tools were loaded.\n if self.cutoff == 0 or self.filter_tools is None:\n stamp_idx = [i for i in range(len(self.stamps))]\n else:\n stamp_idx = self.filter_tools.cnn_filter(np.copy(self.stamps), cutoff=self.cutoff)\n\n # The indices to use are the ones that pass all three filters.\n self.good_idx = np.intersect1d(np.intersect1d(lh_idx, stamp_idx), edge_idx)",
"def SetIntensityDifferenceThreshold(self, arg0: 'double') -> \"void\":\n return _itkLevelSetMotionRegistrationFilterPython.itkLevelSetMotionRegistrationFilterIUC2IUC2IVF22_SetIntensityDifferenceThreshold(self, arg0)",
"def apply_filter(self, image):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
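A hedged usage sketch for the incidence filter above, continuing the hypothetical stack from the previous sketch; the class names and pixel values in class_dictionary are placeholders, and npv is assumed to be the helper module already imported where applyIncidenceFilter is defined.

#Hypothetical mapping of land cover class names to the pixel values used in the stack
class_dictionary = ee.Dictionary({'forest': 1, 'pasture': 2, 'agriculture': 3})

#Replace pixels that change more than 8 times and sit in patches of fewer than 6 connected same-class pixels with the per-pixel mode of the stack
incidence_filtered = applyIncidenceFilter(gap_filled, band_names, class_dictionary, numChangesCutoff=8, connectedPixelCutoff=6)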
Function to apply a frequency filter. This filter takes into consideration the occurrence frequency throughout the entire time series. Thus, all class occurrences with less than a given percentage of temporal persistence (e.g. 3 years or fewer out of 33) are replaced with the MODE value of that pixel position in the stack of years. | def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams):
#Grab land cover classes as a list of strings
lc_classes = classDictionary.keys().getInfo()
#Get binary images of the land cover classifications for the current year
binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)
#Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer
class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)
#Get an image that represents the mode of the land cover classes in each pixel
mode_image = image.reduce(ee.Reducer.mode())
#Define an image to add bands with frequency filter applied
out_img = ee.Image()
#Loop through years
for yearBand in yearBandNames:
#Select the target year from the image
yearImage = image.select(yearBand)
#Loop through land cover classes in filterParams
for lc_class in lc_classes:
#Get the minimum occurrence allowed in that land cover class
min_occurrence = filterParams.get(lc_class)
#Find if the land cover class had less than the number of min_occurrences in each pixel
change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurrence))
#If change_class==1, then replace that pixel with the mode of all the years in that pixel
#This filter is only applied to pixels of this land cover class
#First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,
#if both conditions are true, then the pixel is replaced with the mode
yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)
#Rename yearImage to bandName
yearImage = yearImage.rename(yearBand)
#Append to output image
out_img = out_img.addBands(yearImage)
return out_img | [
"def frequency_filter(fc, L, srf, KIND=2):\n\n if hasattr(KIND, \"__len__\"):\n PASS = KIND\n KIND = 2\n else:\n PASS = [2,3]\n KIND = [KIND]\n\n # fourier transform of lateral inhibitory function \n\n # tonotopic axis\n if issubclass(type(fc), str):\n fc = float(fc)\n R1 = np.arange(L).astype(np.float)/L*srf/2/np.abs(fc)\n\n if KIND == 1:\n # Gabor function\n C1 = 1./2/0.3/0.3\n H = np.exp(-C1*(R1-1)**2) + np.exp(-C1*(R1+1)**2)\n else:\n # Gaussian Function\n R1 = R1 ** 2\n H = R1 * np.exp(1-R1)\n\n # passband\n if PASS[0] == 1:\n #lowpass\n maxi = np.argmax(H)\n sumH = H.sum()\n H[0:maxi] = 1\n H = H / (H.sum() or 1) * sumH\n elif PASS[0] == PASS[1]:\n # highpass\n maxi = np.argmax(H)\n sumH = H.sum()\n H[maxi+1:L] = 1\n H = H / (H.sum() or 1) * sumH\n\n return H",
"def fir_filter(sig, sampling_freq, critical_freq, kernel_window = 'hamming', taps = 101, kind = 'band', **kwargs):\n\n kernel = make_fir_filter(sampling_freq, critical_freq, kernel_window, taps, kind, **kwargs) \n\n return np.roll(scipy.signal.lfilter(kernel, [1], sig), -taps/2+1)",
"def get_frequency(frame):\n frame = clip_centre(frame)\n frame = auto_correlate(frame)\n threshold: int = SAMPLE_RATE // 500\n lag = frame[threshold:].argmax()\n frequency = SAMPLE_RATE / lag\n return frequency",
"def freq_filter(signal_filt):\r\n signal_freq=np.fft.rfft(signal_filt)\r\n signal_freq[3000:]=0\r\n\r\n fftinverse=np.fft.irfft(signal_freq,len(signal_filt))\r\n fftinverse=np.array(fftinverse,dtype='int16')\r\n return(fftinverse)",
"def freq_filter_image(image, high=True):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n dft = cv2.dft(np.float32(image), flags=cv2.DFT_COMPLEX_OUTPUT)\n dft_shift = np.fft.fftshift(dft)\n\n rows, cols = image.shape\n crow, ccol = rows // 2, cols // 2\n if high:\n dft_shift[crow - 30:crow + 31, ccol - 30:ccol + 31] = 0\n else:\n mask = np.zeros((rows, cols, 2), np.uint8)\n mask[crow - 30:crow + 30, ccol - 30:ccol + 30] = 1\n dft_shift *= mask\n image = cv2.idft(np.fft.ifftshift(dft_shift))\n image = cv2.magnitude(image[:, :, 0], image[:, :, 1])\n return image",
"def smoothed_target_encoding(count_sources: pd.Series, global_count: pd.Series, reg_term: int = 100):\n categ_card = count_sources.shape[0]\n raw_TE = count_sources.value_counts() / categ_card\n smoothing = categ_card / (categ_card + reg_term)\n final_TE = (smoothing * raw_TE).add((1 - smoothing) *\n global_count, fill_value=0)\n# final_TE = (1 - smoothing) * raw_TE + smoothing * global_count\n\n return final_TE",
"def adapt_freq(\n ds: xr.Dataset,\n *,\n dim: str,\n thresh: float = 0,\n) -> xr.Dataset:\n # Compute the probability of finding a value <= thresh\n # This is the \"dry-day frequency\" in the precipitation case\n P0_sim = ecdf(ds.sim, thresh, dim=dim)\n P0_ref = ecdf(ds.ref, thresh, dim=dim)\n\n # The proportion of values <= thresh in sim that need to be corrected, compared to ref\n dP0 = (P0_sim - P0_ref) / P0_sim\n\n # Compute : ecdf_ref^-1( ecdf_sim( thresh ) )\n # The value in ref with the same rank as the first non zero value in sim.\n # pth is meaningless when freq. adaptation is not needed\n pth = nbu.vecquantiles(ds.ref, P0_sim, dim).where(dP0 > 0)\n\n if \"window\" in ds.sim.dims:\n # P0_sim was computed using the window, but only the original time series is corrected.\n sim = ds.sim.isel(window=(ds.sim.window.size - 1) // 2)\n dim = [dim[0]]\n else:\n sim = ds.sim\n\n # Get the percentile rank of each value in sim.\n rank = sim.rank(dim[0], pct=True)\n\n # Frequency-adapted sim\n sim_ad = sim.where(\n dP0 < 0, # dP0 < 0 means no-adaptation.\n sim.where(\n (rank < P0_ref) | (rank > P0_sim), # Preserve current values\n # Generate random numbers ~ U[T0, Pth]\n (pth.broadcast_like(sim) - thresh) * np.random.random_sample(size=sim.shape)\n + thresh,\n ),\n )\n\n # Set some metadata\n sim_ad.attrs.update(ds.sim.attrs)\n pth.attrs[\n \"long_name\"\n ] = \"Smallest value of the timeseries not corrected by frequency adaptation.\"\n dP0.attrs[\n \"long_name\"\n ] = \"Proportion of values smaller than {thresh} in the timeseries corrected by frequency adaptation\"\n\n # Tell group_apply that these will need reshaping (regrouping)\n # This is needed since if any variable comes out a groupby with the original group axis,\n # the whole output is broadcasted back to the original dims.\n pth.attrs[\"_group_apply_reshape\"] = True\n dP0.attrs[\"_group_apply_reshape\"] = True\n return xr.Dataset(data_vars={\"pth\": pth, \"dP0\": dP0, \"sim_ad\": sim_ad})",
"def freq_filt(orig_img: np.ndarray, transfer_func: np.ndarray) -> np.ndarray:\n # pad and center the input image\n M, N = orig_img.shape[:2]\n padded_img = np.pad(\n orig_img,\n (\n (int(np.floor(M / 2)), int(np.ceil(M / 2))),\n (int(np.floor(N / 2)), int(np.ceil(N / 2))),\n (0, 0),\n ),\n constant_values=0,\n )\n\n # take fft of image\n f_img = np.fft.fftshift(np.fft.fft2(padded_img.astype(np.float32)))\n\n # get product of image and transfer func\n f_filtered = np.empty_like(f_img)\n for channel_idx in range(f_img.shape[-1]):\n f_filtered[:, :, channel_idx] = f_img[:, :, channel_idx] * transfer_func\n\n # get image using ifft\n filtered_img = np.real(np.fft.ifft2(np.fft.fftshift(f_filtered)))\n\n # slice to remove padding\n filtered_img = filtered_img[\n int(M / 2) : int(3 * M / 2), int(N / 2) : int(3 * N / 2), :\n ]\n\n # scale and return filtered image\n return (\n 255\n * (filtered_img - np.min(filtered_img))\n / (np.max(filtered_img) - np.min(filtered_img))\n ).astype(np.uint8)",
"def fl_set_counter_filter(ptr_flobject, pyfn_ValFilter):\n #FL_VAL_FILTER = cty.CFUNCTYPE(xfdata.STRING, \\\n # cty.POINTER(xfdata.FL_OBJECT), cty.c_double, cty.c_int)\n _fl_set_counter_filter = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_counter_filter\",\n None, [cty.POINTER(xfdata.FL_OBJECT), xfdata.FL_VAL_FILTER],\n \"\"\"void fl_set_counter_filter(FL_OBJECT * ob,\n FL_VAL_FILTER filter)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.verify_function_type(pyfn_ValFilter)\n cfn_ValFilter = xfdata.FL_VAL_FILTER(pyfn_ValFilter)\n library.keep_cfunc_refs(cfn_ValFilter, pyfn_ValFilter)\n library.keep_elem_refs(ptr_flobject)\n _fl_set_counter_filter(ptr_flobject, cfn_ValFilter)",
"def filter_by_period(self, dataframe, epoch):\r\n #dataframe df_scoring filtered with only ratios\r\n df_one_period_only = dataframe[[x for x in dataframe.columns \\\r\n if str(x).count(\"souvenir_\" + epoch) or \\\r\n str(x).count(\"Sujet\")]]\r\n return df_one_period_only",
"def ISM_time_series_filter(dict_freq, threshold):\n res_dict = {'OTHER': [0, 0]}\n total = sum([int(dict_freq[ISM]) for ISM in dict_freq])\n for ISM in dict_freq:\n if int(dict_freq[ISM])/total < threshold:\n res_dict['OTHER'] = [0, res_dict['OTHER'][1] + int(dict_freq[ISM])]\n else:\n res_dict[ISM] = [dict_freq[ISM], int(dict_freq[ISM]) + res_dict.get(ISM, [0, 0])[1]]\n if res_dict['OTHER'][1] == 0:\n del res_dict['OTHER']\n return res_dict",
"def find_freq_fold(self, frequency):\n #self.homogen()\n \n data = self.data #/scipy.stats.nanstd(self.data)**2\n sample = self.sample\n time = self.time\n\n # Remove the DC Offset from the data\n data = data - np.median(data)\n\n # The folding period must be the inverse of the frequency\n # but we want the period in terms of bins, not of seconds\n # so need to multiply by the number of samples per second\n period = 1.0 / frequency\n period_s = int(np.floor(sample*period))\n\n # We now have the period, so need to work out how many periods\n # exist in the lightcurve\n number_p = int(np.floor(len(data)/period_s))\n \n folded = np.zeros(np.floor(period_s))\n folds = np.zeros(np.floor(period_s))\n\n # Let's reshape the array in preparation for folding\n pad_size = np.ceil(float(data.size)/period_s)*period_s - data.size\n b_padded = np.append(data, np.ones(pad_size)*np.nan)\n data_s = b_padded.reshape(-1,period_s) # The reshaped (thus stacked) data\n\n means = data_s.mean(axis=0)\n maxi = np.nanmax(means)\n\n # Try folding over a range of different periods close to\n # the guess frequency\n newperiod = period_s\n periods = np.arange(period_s-40, period_s+40)\n for period in periods:\n pad_size = np.ceil(float(data.size)/period_s)*period_s - data.size\n b_padded = np.append(data, np.ones(pad_size)*np.nan)\n data_s = b_padded.reshape(-1,period_s) # The reshaped (thus stacked) data\n\n means = data_s.mean(axis=0)\n if np.nanmax(means)>maxi:\n newperiod = period\n maxi = np.nanmax(means)\n\n print period_s, \",\", period\n # We also need to 'normalise' the data, so that it represents a set\n # of means rather than just a total.\n\n counted = np.ones(data_s.shape)\n counted[np.isnan(data_s)] = 0\n \n self.data_s = data_s\n self.fold_counter = counted\n\n # Need to recalculate the time axis\n self.phase = np.linspace(0, period, np.shape(folded)[0])",
"def _filtvar(self, which):\n\n # First step: Compute weights of filter by filtering a single\n # point in the middle of zero values (impulse response)\n\n if which == \"short\":\n cutoff = self.shortterm\n else:\n cutoff = self.longterm\n\n n0 = 4 * int(cutoff / 365.0 / self.dinterval)\n\n # z = 1\n # while pow(2, z) < n0:\n # z += 1\n # n0 = pow(2, z)\n\n ytemp = numpy.zeros((n0))\n ytemp[int(n0 / 2)] = 1.0\n\n # do fft\n fft = fftpack.rfft(ytemp)\n\n # do filter\n if self.debug:\n print(\" In filtvar, do filter, cutoff = \", cutoff, \"n0 is \", n0)\n\n a = self._freq_filter(fft, self.dinterval, cutoff)\n weights = fftpack.irfft(a)\n\n # Compute sum of squares of weights\n ssw = numpy.sum(weights * weights)\n if self.debug:\n print(\"ssw =\", ssw)\n\n # calculate residuals from smooth/trend curve\n if which == \"short\":\n f = interpolate.interp1d(self.xinterp, self.smooth, bounds_error=False)\n else:\n f = interpolate.interp1d(self.xinterp, self.trend, bounds_error=False)\n yp = f(self.xp)\n yy = self.resid - yp\n rmean = numpy.mean(yy)\n rsd = numpy.std(yy, ddof=1)\n n = yy.size\n\n # Compute lag 1 auto covariance\n # http://itl.nist.gov/div898/handbook/eda/section3/eda35c.htm\n sm = numpy.sum((yy[0:-1] - rmean) * (yy[1:] - rmean))\n cor = (\n sm / (n - 1) / (rsd * rsd)\n ) # equivalent to sm/numpy.sum(numpy.square(yy-rmean))\n\n if self.debug:\n print(\"cor is\", cor)\n\n # Compute auto covariances\n # r(k) = r(1)^k\n sm = 0.0\n for i in range(n0 - 1):\n for j in range(i + 1, n0):\n r = pow(cor, j - i)\n if r < 1e-5:\n break # speed things up by ignoring really small values\n sm += r * weights[i] * weights[j]\n\n var = rsd * rsd * (ssw + 2 * sm)\n\n if self.debug:\n print(\"sm is\", sm, \"var is \", var)\n\n return var",
"def _adapt_freq(\n ds: xr.Dataset,\n *,\n dim: Sequence[str],\n thresh: float = 0,\n) -> xr.Dataset:\n # Compute the probability of finding a value <= thresh\n # This is the \"dry-day frequency\" in the precipitation case\n P0_sim = ecdf(ds.sim, thresh, dim=dim)\n P0_ref = ecdf(ds.ref, thresh, dim=dim)\n\n # The proportion of values <= thresh in sim that need to be corrected, compared to ref\n dP0 = (P0_sim - P0_ref) / P0_sim\n\n if dP0.isnull().all():\n # All NaN slice.\n pth = dP0.copy()\n sim_ad = ds.sim.copy()\n else:\n # Compute : ecdf_ref^-1( ecdf_sim( thresh ) )\n # The value in ref with the same rank as the first non-zero value in sim.\n # pth is meaningless when freq. adaptation is not needed\n pth = nbu.vecquantiles(ds.ref, P0_sim, dim).where(dP0 > 0)\n\n # Probabilities and quantiles computed within all dims, but correction along the first one only.\n if \"window\" in dim:\n # P0_sim was computed using the window, but only the original time series is corrected.\n # Grouper.apply does this step, but if done here it makes the code faster.\n sim = ds.sim.isel(window=(ds.sim.window.size - 1) // 2)\n else:\n sim = ds.sim\n dim = dim[0]\n\n # Get the percentile rank of each value in sim.\n rank = sim.rank(dim, pct=True)\n\n # Frequency-adapted sim\n sim_ad = sim.where(\n dP0 < 0, # dP0 < 0 means no-adaptation.\n sim.where(\n (rank < P0_ref) | (rank > P0_sim), # Preserve current values\n # Generate random numbers ~ U[T0, Pth]\n (pth.broadcast_like(sim) - thresh)\n * np.random.random_sample(size=sim.shape)\n + thresh,\n ),\n )\n\n # Tell group_apply that these will need reshaping (regrouping)\n # This is needed since if any variable comes out a `groupby` with the original group axis,\n # the whole output is broadcasted back to the original dims.\n pth.attrs[\"_group_apply_reshape\"] = True\n dP0.attrs[\"_group_apply_reshape\"] = True\n return xr.Dataset(data_vars={\"pth\": pth, \"dP0\": dP0, \"sim_ad\": sim_ad})",
"def filter_ms2fits(stack, fit_data, channel=1, peakiness=4.5):\n \n fit_data = fit_data.copy()\n for t in range(0, len(fit_data)):\n frame_data = fit_data[t]\n frame_med = np.median(stack[channel, t])\n xy_width_means = np.mean(frame_data[:,5:7], axis=1)\n peak_heights = frame_data[:,3]\n spot_peakiness = np.log(peak_heights / xy_width_means)\n frame_data_filtered = frame_data[(peak_heights > frame_med) & (spot_peakiness > peakiness),:]\n fit_data[t] = frame_data_filtered\n return fit_data",
"def filter(data, n, order, fs, cutoff):\n # Filter requirements.\n\n # Get the filter coefficients so we can check its frequency response.\n b, a = butter_lowpass(cutoff, fs, order)\n # Plot the frequency response.\n w, h = freqz(b, a, worN=8000)\n t = np.linspace(0, n , n, endpoint=False)\n # Filter the data, and plot both the original and filtered signals.\n y = butter_lowpass_filter(data, cutoff, fs, order)\n return y",
"def apply_under_threshold(df, column, f, freq_threshold=.001):\n frequencies = df[column].value_counts(normalize=True)\n return apply_on(df, column,\n lambda x: f(x) if frequencies[x] < freq_threshold else x)",
"def FiltFreqResp(theTES, frange, fff, filt, dd, notch, FREQ_SAMPLING, nsamples, freq, spectrum, filtered_spec):\n # notch filter according to notch - must select TES\n\n sigfilt = dd[theTES, :]\n for i in range(len(notch)):\n sigfilt = ft.notch_filter(sigfilt, notch[i][0], notch[i][1], FREQ_SAMPLING)\n\n # get new spectrum with notch filter applied\n spectrum_f, freq_f = mlab.psd(sigfilt, Fs=FREQ_SAMPLING, NFFT=nsamples, window=mlab.window_hanning)\n\n # start plotting\n figure()\n xlim(frange[0], frange[1])\n rng = (freq > frange[0]) & (freq < frange[1])\n loglog(freq[rng], filtered_spec[rng], label='Data')\n loglog(freq[rng], f.gaussian_filter1d(spectrum_f, filt)[rng], label='Filt')\n title('Tes #{}'.format(theTES + 1))\n ylim(np.min(filtered_spec[rng]) * 0.8, np.max(filtered_spec[rng]) * 1.2)\n xlabel('Freq [Hz]')\n ylabel('Power Spectrum [$nA^2.Hz^{-1}$]')\n #### Show where the signal is expected\n for ii in range(10): plot(np.array([fff, fff]) * (ii + 1), [1e-20, 1e-10], 'r--', alpha=0.3)\n #### PT frequencies\n fpt = 1.724\n for ii in range(10): plot(np.array([fpt, fpt]) * (ii + 1), [1e-20, 1e-10], 'k--', alpha=0.3)\n\n return",
"def filter(self, counts):\n return self.filteration_func(counts, self.filter_size)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
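A minimal sketch of calling the frequency filter above, reusing the hypothetical band names and class dictionary from the previous sketches; the per-class minimum occurrence counts are illustrative only.

#Hypothetical minimum number of years each class must persist in a pixel; occurrences below the minimum are replaced by the per-pixel mode
filter_params = {'forest': 3, 'pasture': 3, 'agriculture': 3}

frequency_filtered = applyFrequencyFilter(incidence_filtered, band_names, class_dictionary, filter_params)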
Function to apply a probability filter to land cover probabilities in each image of imageCollection. The user defines which classes will be filtered and how to filter them in the params list. The params list is a list of dictionaries, one for each class the user wants to filter. | def applyProbabilityCutoffs(imageCollection, params):
#Define function to map across imageCollection
def probabilityFilter(image):
#Get the classifications from the class with the highest probability
classifications = npv.probabilityToClassification(image)
#Loop through parameters
for param in params:
#Load parameter values
class_name = param.get('class_name')
class_value = param.get('class_value')
filter_name = param.get('filter')
threshold = param.get('threshold')
if filter_name=='gt':
#Find where the class_name is greater than threshold
prob_mask = image.select(class_name).gt(ee.Image.constant(threshold))
#Replace those pixels with the class value
classifications = classifications.where(prob_mask,class_value)
elif filter_name=='gte':
#Find where the class_name is greater than or equal to threshold
prob_mask = image.select(class_name).gte(ee.Image.constant(threshold))
#Replace those pixels with the class value
classifications = classifications.where(prob_mask,class_value)
elif filter_name == 'lte':
#Find where the class_name is less than or equal to threshold
prob_mask = image.select(class_name).lte(ee.Image.constant(threshold))
#Find where classifications are equal to class value
class_mask = classifications.eq(class_value)
#We only want to replace pixels where the class probability<=threshold AND classification==class_value
reclass_mask = prob_mask.bitwiseAnd(class_mask)
#Define square kernel of surrounding pixels
kernel = ee.Kernel.square(1)
#Convert to a multiband image, one band for each neighbor
neighs = classifications.neighborhoodToBands(kernel)
#Reduce to find the majority class in neighborhood
majority = neighs.reduce(ee.Reducer.mode())
#Replace pixels where the class probability<=threshold AND classification==class_value with the neighborhood majority class
classifications = classifications.where(reclass_mask,majority)
else:
#Find where the class_name is less than threshold
prob_mask = image.select(class_name).lt(ee.Image.constant(threshold))
#Find where classifications are equal to class value
class_mask = classifications.eq(class_value)
#We only want to replace pixels where the class probability<threshold AND classification==class_value
reclass_mask = prob_mask.bitwiseAnd(class_mask)
#Define square kernel of surrounding pixels
kernel = ee.Kernel.square(1)
#Convert to a multiband image, one band for each neighbor
neighs = classifications.neighborhoodToBands(kernel)
#Reduce to find the majority class in neighborhood
majority = neighs.reduce(ee.Reducer.mode())
#Replace pixels where the class probability<=threshold AND classification==class_value with the neighborhood majority class
classifications = classifications.where(reclass_mask,majority)
return ee.Image(classifications)
return ee.ImageCollection(imageCollection.map(probabilityFilter)) | [
"def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img",
"def applyParamFilter(self, param_filter_function):\n self._constructInstances()\n\n newL = []\n for instD in self.instances:\n if param_filter_function( instD ):\n newL.append( instD )\n\n self.instances = newL",
"def filter_data(self):\n if(self.filter_classes == []):\n return\n \n filtered_idx = []\n for id in range(len(self.image_ids)):\n anns = self.load_annotations(id)\n found = False\n for ann in anns:\n if ann['label'] in self.filter_classes:\n found = True\n break\n if found:\n filtered_idx.append(id)\n \n self.filtered_ids = [self.image_ids[id] for id in filtered_idx]\n # self.image_ids = self.filtered_ids\n print(\"Number of filtered instances:\", len(self.filtered_ids))",
"def filter_classes(class_ints, class_list, class_filt):\n class_names = [class_list[int(c)] for c in class_ints]\n filter = [name in class_filt for name in class_names]\n return np.array(filter)",
"def applySpatialFilter(image,filterParams):\n #Loop through list of parameters and apply spatial filter using majorityFilter\n for params in filterParams:\n image = majorityFilter(ee.Image(image),params)\n return image",
"def apply_filter(self, src_img, slider1, slider2, slider3):\n pass",
"def img_filter(img, filterspecs):\n valid_filters = ['none','median','gaussian','white_tophat','rescale_intensity',\n 'denoise_wavelet', 'equalize_adapthist']\n filtering_sequence = 1\n\n for process_func in filterspecs.keys():\n if process_func == 'show_filtering':\n pass\n elif process_func not in valid_filters:\n img_filtered = None\n raise ValueError(\"{} is not a valid filter. Implemented so far are {}\".format(process_func, valid_filters))\n elif process_func == \"none\":\n img_filtered = img\n elif process_func == \"rescale_intensity\":\n args = filterspecs[process_func]['args']\n vmin, vmax = np.percentile(img, (args[0][0], args[0][1]))\n img_filtered = rescale_intensity(img, in_range=(vmin, vmax), out_range=args[1])\n elif process_func == \"equalize_adapthist\":\n vmin, vmax = np.percentile(img, (0, 100)) # store original range of values\n kwargs = filterspecs[process_func]['kwargs']\n img_filtered = equalize_adapthist(img, **kwargs)\n img_filtered = rescale_intensity(img_filtered, out_range=(vmin, vmax))\n else:\n func = eval(process_func)\n args = filterspecs[process_func]['args']\n if 'kwargs' in filterspecs[process_func].keys():\n kwargs = filterspecs[process_func]['kwargs']\n else:\n kwargs = {}\n\n img_filtered = img_apply_filter(img, func, *args, **kwargs)\n filtering_sequence += 1\n\n return(img_filtered)",
"def apply_filter(self, image):\n pass",
"def filter_detections(detections, arg_to_class, conf_thresh=0.5):\n num_classes = detections.shape[0]\n filtered_detections = []\n for class_arg in range(1, num_classes):\n class_detections = detections[class_arg, :]\n confidence_mask = np.squeeze(class_detections[:, -1] >= conf_thresh)\n confident_class_detections = class_detections[confidence_mask]\n if len(confident_class_detections) == 0:\n continue\n class_name = arg_to_class[class_arg]\n for confident_class_detection in confident_class_detections:\n coordinates = confident_class_detection[:4]\n score = confident_class_detection[4]\n detection = Box2D(coordinates, score, class_name)\n filtered_detections.append(detection)\n return filtered_detections",
"def filterp(th, ProbClass1):\n y = np.zeros(ProbClass1.shape[0])\n for i, v in enumerate(ProbClass1):\n if ProbClass1[i] > th:\n y[i] = 1\n return y",
"def classifyImages(self, testImages, probs = False):\n return [self.classifyImage(img, probs) for img in testImages]",
"def class_imgs(list_img):\n numberimg = len(list_img)\n resize(net, numberimg, cursize)\n i = 0\n for img in list_img:\n image = caffe.io.load_image(img)\n transformed_image = transformer.preprocess('data', image)\n net.blobs['data'].data[i] = transformed_image\n i = i + 1\n\n output = net.forward()\n\n results = []\n for n in range(0, numberimg):\n themax = output['prob'][n].argmax()\n results.append({'filename':list_img[n], 'class': themax, 'prob': output['prob'][n].tolist()})\n\n return results",
"def doFilterOnImgSetList(self, imgSetList):\n for imgset in imgSetList:\n self.doFilterOnOneImgSet(imgset)",
"def filter_probes(pacall, annotation, probes, threshold=0.5):\n\n threshold = np.clip(threshold, 0.0, 1.0)\n\n LGR.info(f'Filtering probes with intensity-based threshold of {threshold}')\n\n probes = io.read_probes(probes)\n signal, n_samp = np.zeros(len(probes), dtype=int), 0\n for donor, pa in pacall.items():\n annot = io.read_annotation(annotation[donor]).index\n data = io.read_pacall(pa).loc[probes.index, annot]\n n_samp += data.shape[-1]\n # sum binary expression indicator across samples for current subject\n signal += np.asarray(data.sum(axis=1))\n\n # calculate proportion of signal to noise for given probe across samples\n keep = (signal / n_samp) >= threshold\n\n LGR.info(f'{keep.sum()} probes survive intensity-based filtering')\n\n return probes[keep]",
"def majorityFilter(image, params):\n params = ee.Dictionary(params)\n minSize = ee.Number(params.get('minSize'))\n classValue = ee.Number(params.get('classValue'))\n \n #Generate a mask from the class value\n classMask = image.eq(classValue)\n \n #Labeling the group of pixels until 100 pixels connected\n labeled = classMask.mask(classMask).connectedPixelCount(minSize, True)\n \n #Select some groups of connected pixels\n region = labeled.lt(minSize)\n \n # Squared kernel with size shift 1\n # [[p(x-1,y+1), p(x,y+1), p(x+1,y+1)]\n # [ p(x-1, y), p( x,y ), p(x+1, y)]\n # [ p(x-1,y-1), p(x,y-1), p(x+1,y-1)]\n kernel = ee.Kernel.square(1)\n \n #Find neighborhood\n neighs = image.neighborhoodToBands(kernel).mask(region)\n\n #Reduce to majority pixel in neighborhood\n majority = neighs.reduce(ee.Reducer.mode())\n \n #Replace original values for new values\n filtered = image.where(region, majority)\n \n return ee.Image(filtered)",
"def filter(self, filters):",
"def filter_cascade(filters):\n def cascaded_image(image):\n for fil in filters:\n image = fil(image)\n return image\n return cascaded_image",
"def apply(self, image):\n for pre_processing_filter in self._filters:\n image = pre_processing_filter.apply(image)\n return image",
"def class_probabilities(parameters, features,class_prob):\n # create dictionary to store probability P(features|Y) for all values of Y\n final_class_prob = dict()\n # calculate numerator for P(y|all features)\n for class_value, class_parameters in parameters.items():\n # Initialize numerator = P(Y)\n if class_value == True:\n final_class_prob[class_value] = class_prob[0]\n else:\n final_class_prob[class_value] = class_prob[1]\n\n # Calculate P(feature|Y) for all features\n for i in range(len(class_parameters)):\n if class_parameters[i][0] == 'real':\n # if parameter is for real valued feature\n # use gaussian distribution with given mu and sigma to find probabilty\n mean = class_parameters[i][1]\n stdev = class_parameters[i][2]\n exponent = np.exp(-((features[i] - mean) ** 2 / (2 * stdev ** 2)))\n probability = min(1.0,((1 / (np.sqrt(2 * np.pi) * stdev)) * exponent))\n else:\n # if parameter is for discrete valued feature\n # get stored probabilty directly\n type, prob_true, prob_false = class_parameters[i]\n if features[i] == True:\n probability = min(1.0,prob_true)\n else:\n probability = min(1.0,prob_false)\n # multiply P(feature|Y) to numerator\n final_class_prob[class_value] *= probability\n\n\n denominator = 0\n # calculate denominator for P(y|all features)\n # summation of P(features|Y) for all Y\n for class_value,class_prob in final_class_prob.items():\n if class_value == True:\n denominator += (np.prod(class_prob))\n else:\n denominator += (np.prod(class_prob))\n\n # divide numerator with denominator to get final P(features|Y) for all Y\n for class_value, class_prob in final_class_prob.items():\n final_class_prob[class_value] /= denominator\n\n # return probability for each each class label\n return final_class_prob"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the number of features in the processed data. Returns int Feature size. | def get_num_features(self):
return len(self[0]['x']) | [
"def get_number_of_features(self):\n return len(self.__features)",
"def num_features(self) -> int:\n return self._num_features",
"def n_features(self):\n return len(self.features_list)",
"def number_of_features(self):\n return len(self.dv.feature_names_)",
"def count_num_features(self):\n return self.config.getint('Count', 'NumFeatures')",
"def getNrFeatures(self):\n return self.featureNames.size",
"def num_features(self, X=None):",
"def num_features(self):\n return len(self._traces[0].features)",
"def num_features(self) -> Dict[NodeType, int]:\n return self.num_node_features",
"def num_node_features(self):\n return self[0].num_node_features",
"def num_node_features(self) -> int:\n data, _, _ = self[0]\n if hasattr(data, 'num_node_features'):\n return data.num_node_features\n raise AttributeError(f\"'{data.__class__.__name__}' object has no \"\n f\"attribute 'num_node_features'\")",
"def num_flat_features(self, x):\n #print(x.size())\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features",
"def num_node_features(self) -> int:\n return self.get_num_dims('node_feature', as_label=False)",
"def num_feature_containers(self):\n return len(self.feature_containers)",
"def num_flat_features(self, x):\n\n size = x.size()[1:] # All dimensions except batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n\n return num_features",
"def feature_count(self):\n if self.selection.count == 0:\n return int(arcpy.GetCount_management(self.source).getOutput(0))\n return -1",
"def size(self):\r\n return len(self._train_datas)",
"def n_good_features_(self):\n return np.sum(self.important_features_)",
"def size(self):\n return _libsbml.ListOfSpeciesFeatures_size(self)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |