Columns:
  query: string, lengths 9 to 9.05k
  document: string, lengths 10 to 222k
  negatives: sequence, lengths 19 to 20
  metadata: dict
Gets the current explain_level
def _get_explain_level(self):
    self._validate_explain_level()
    if self.explain_level == "auto":
        if self._get_mode() == "Explain":
            return 2
        if self._get_mode() == "Perform":
            return 1
        if self._get_mode() == "Compete":
            return 0
        if self._get_mode() == "Optuna":
            return 0
    else:
        return deepcopy(self.explain_level)
[ "def help_explain(self):\n print(EXPLAIN)", "def expert_level(self) -> int:\n return self._expert_level", "def getLevel(self):\n return _libsbml.ASTBasePlugin_getLevel(self)", "def verbose_level(self):\n return self._verbose_level", "def ColorfulPyPrint_current_verbose_level():\r\n global O_VERBOSE_LEVEL\r\n return O_VERBOSE_LEVEL", "def get_level(self):\n return self.debug_level, self.verbosity", "def getVerboseLevel(self):\n return self.__verboseLevel", "def octree_level(self):\n return self._octree_level", "def getDebugLevel(self):\n return _fiasco_numpy.Optimizer_getDebugLevel(self)", "def explain_sql(self, raw_name):\n return self.is_active(raw_name)", "def level(self) -> int:\n return self.categorization.level(self)", "def current_trigger_level(self):\n return self._current_level", "def getLevel(self):\n return _libsbml.SBasePlugin_getLevel(self)", "def getResultLevel(self):\n return _libsbml.DefaultTerm_getResultLevel(self)", "def get_level(self):\r\n \r\n return self.level", "def getLevel(self):\n return self._level", "def explain(self, *, format=None, **options):\n return self.query.explain(using=self.db, format=format, **options)", "def get_level(self):\r\n \r\n return self.level", "def _get_redist_level(self):\n return self.__redist_level" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current features_selection
def _get_features_selection(self):
    self._validate_features_selection()
    if self.features_selection == "auto":
        if self._get_mode() == "Explain":
            return False
        if self._get_mode() == "Perform":
            return True
        if self._get_mode() == "Compete":
            return True
        if self._get_mode() == "Optuna":
            return False
    else:
        return deepcopy(self.features_selection)
[ "def selected_feature(self):\n return self._selected_feature", "def GetSelection(self):\r\n\r\n return self._current", "def getSelection(self): # real signature unknown; restored from __doc__\r\n pass", "def get_selection(self):\r\n return self.value", "def active_selection():\r\n\r\n om.MGlobal.getActiveSelectionList()", "def currentSelection():\n sel = bpy.context.selected_objects\n if sel:\n return sel\n else:\n col = activeCollection()\n if col is not None:\n # Filter only mesh objects.\n return collectionMeshes(col)", "def GetSelectionPoint(self):\n ...", "def selection_function(self) -> SelectionFunction:\n return self._selection_function", "def selected(self):\n return self.selection[0]", "def features_subset(self):\n return self._features_subset", "def feature(self):\n return self._feature", "def get_features(self):\n return self.features", "def current_choice(self):\n\t\treturn self.choice_data_list[self.select_index]", "def get_selected(self):\n return self.get_filtered_options()[self.index]", "def selection(self) -> str:\n return self._selection", "def get_with_selected(self):\n\n return getattr(self, 'with_selected', None)", "def getSelection(self):\n selection = []\n for itm in self.selectionList.selectedItems():\n selection.append(itm.text())\n \n return selection", "def get_features(self):\n if not self.exposes_features:\n return None\n\n return self._last_features", "def selection(self):\n #logging.debug(__name__ + \": selection\")\n select = self._selectedItem()\n if select != None:\n return select.object\n else:\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current start_random_models
def _get_start_random_models(self):
    self._validate_start_random_models()
    if self.start_random_models == "auto":
        if self._get_mode() == "Explain":
            return 1
        if self._get_mode() == "Perform":
            return 5
        if self._get_mode() == "Compete":
            return 10
        if self._get_mode() == "Optuna":
            return 1  # just 1, because it will be tuned by Optuna
    else:
        return deepcopy(self.start_random_models)
[ "def get_start_maybe_randomised(self):\n if self.randomise:\n return random.uniform(0, self.start)\n else:\n return self.start\n # return (random.uniform(0, self.start) if self.random else self.start)", "def generate_nnmodels(self):\n return []", "def get_random_model_ids():\n\twith open('checkpoints/model_store.json') as f:\n\t\tmodel_store = json.load(f)\n\n\tmodel_ids = list(model_store.keys())\n\n\tif len(model_ids) > 2:\n\t\treturn list(random.sample(model_ids, 3))\n\telse:\n\t\treturn model_ids", "def pick_random_starting_point(self):\n # Shuffle arrays so that our combination of parameters is random\n Vcmax = np.random.uniform(5.0, 350)\n Jmax = np.random.uniform(5.0, 550)\n Rd = np.random.uniform(0.0, 6.0)\n\n return Vcmax, Jmax, Rd", "def _random_start_position(self):\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)", "def reset_model(self):\n self._time = 0\n qpos_init, qvel_init = self.initialize_robot(randomize=self.randomize_reset)\n self.set_state(qpos_init, qvel_init)\n observation = self._get_obs()\n return observation", "def test_random_init_train():\n env = ML10(env_type='train')\n assert len(env._task_envs) == 10\n for task_env in env._task_envs:\n assert task_env.random_init", "def get_loaded_models():\n global loaded_models\n if loaded_models is None:\n loaded_models = get_modules(NETWORKS_DIR)\n\n return loaded_models", "def random_start(self, backwards=False):\n if backwards:\n return self.sentence_ends[random.randint(0, len(self.sentence_ends) - 1)]\n else:\n return self.sentence_starts[random.randint(0, len(self.sentence_starts) - 1)]", "def readFirst(self):\n return self.models[0].time_next", "def init_rnd(self):\n\n # query max number of threads\n gennum = apache.AP_MPMQ_MAX_SPARE_THREADS\n # make generators\n # this bit is from Python lib reference\n g = random.Random(time.time())\n result = [g]\n for i in range(gennum - 1):\n laststate = g.getstate()\n g = random.Random()\n g.setstate(laststate)\n g.jumpahead(1000000)\n result.append(g)\n return result", "def random_startup_node_iter(self):\n while True:\n yield random.choice(self.startup_nodes)", "def generate_random_start_state(self) -> State:\n part_states = []\n random.shuffle(self.blocks)\n placed = []\n t = 0\n\n for block in self.blocks:\n if 1 / (t + 1) >= random.random():\n part_states.append(PartState(f'on({block.arguments[0]},table)'))\n else:\n rand = random.randint(0, len(placed) - 1)\n part_states.append(PartState(f'on({block.arguments[0]},{placed[rand]})'))\n\n placed.append(block.arguments[0])\n t += 1\n\n return State(set(part_states))", "def starting_nodes(self):\r\n return self.start_node", "def get_random_start_state(self) -> State:\n if len(self.blocks) <= state_enumeration_limit:\n rnd = random.randint(0, len(self.allStates) - 1)\n return self.allStates[rnd]\n else:\n return self.generate_random_start_state()", "def get_model_count(self):\n return len(self._model_start_i)", "def _get_models():\n from . import models\n return models", "def get_available_models():\n modelpath = os.path.join(os.path.dirname(__file__), \"train\", \"model\")\n models = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(modelpath)\n if not item.name.startswith(\"_\")\n and item.name.endswith(\".py\"))\n return models", "def initLocalBestChoice(self):\n random.seed()\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current hill_climbing_steps
def _get_hill_climbing_steps(self):
    self._validate_hill_climbing_steps()
    if self.hill_climbing_steps == "auto":
        if self._get_mode() == "Explain":
            return 0
        if self._get_mode() == "Perform":
            return 2
        if self._get_mode() == "Compete":
            return 2
        if self._get_mode() == "Optuna":
            return 0  # all tuning is done in Optuna
    else:
        return deepcopy(self.hill_climbing_steps)
[ "def getSteps( self ):\n\n return self.adb.get( 'steps' )", "def start_steps(self):\n return self._start_steps[:]", "def bootstrap_steps(self):\n return self._bootstrap_steps", "def get_workflow_steps(self):\n return self._data_dict[self.KEY_WF_STEPS]", "def current_step(self):\n return self.dialog.current_step", "def raw_steps(self):\n return self.data[\"params\"][\"steps\"]", "def get_view_steps(self):\n return self._data_dict[self.KEY_VIEW_STEPS]", "def getWorkflowSteps(self):\n\n return self.dbase.getProcessSteps(self.scene)", "def listSteps(self):\n return self._registered.keys()", "def get_steps_num():\n return 0", "def current_time_step(self) -> int:\n return self._current_time_step", "def current_time_step(self) -> ts.TimeStep:\n return self._current_time_step", "def get_step_conf(self):\n return self.step_conf", "def workflow_step(self):\n return self._workflow_step", "def step_id(self):\n return self._step_id", "def get_all_steps(self):\n steps = []\n steps.extend(self.init_workspace_steps())\n steps.extend(self.repos_clone_steps())\n steps.extend(self.cli_steps())\n steps.extend(self.prepare_mobilespec_steps())\n steps.extend(self.deploy_steps())\n return steps", "def prev_step(self):\n prev_steps = list(self.prev_steps.values())\n return prev_steps[0] if prev_steps else None", "def get_last_step(self):\n return self.get_step_by_index(-1)", "def get_steps(self) -> list:\n ret_val = []\n for step_id in self:\n step_body = Steps.cache_step(step_id)\n if step_body is not None:\n ret_val.append(step_body)\n\n return ret_val" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current top_models_to_improve
def _get_top_models_to_improve(self):
    self._validate_top_models_to_improve()
    if self.top_models_to_improve == "auto":
        if self._get_mode() == "Explain":
            return 0
        if self._get_mode() == "Perform":
            return 2
        if self._get_mode() == "Compete":
            return 3
        if self._get_mode() == "Optuna":
            return 0
    else:
        return deepcopy(self.top_models_to_improve)
[ "def getTopModel(self):\n top = self.model\n while top.parent is not None:\n top = top.parent\n return top", "def best_model(self):\n return self.best_model_wts", "def top(self):", "def get_best_known_model(self) -> Tuple[Optional[Path], int]:\n return self._get_first_model(sort='total_score', desc=False)", "def get_top_features(self, model, num_features=9):\n\n self.feat_imp_argsort = np.argsort(list(model.feature_importances_)\n )[::-1]\n print(f\"All sorted indicies: {self.feat_imp_argsort}\")\n\n unordered_feature_names = self.X.columns.to_list()\n ordered_feature_names = [unordered_feature_names[idx]\n for idx in self.feat_imp_argsort]\n \n self.top_n_feature_indicies = self.feat_imp_argsort[:num_features]\n self.top_n_feature_names = ordered_feature_names[:num_features]\n\n imp_nums = model.feature_importances_\n imp_nums_sort = [imp_nums[idx] for idx in self.feat_imp_argsort][\n :num_features]\n self.imp_nums_std = (imp_nums_sort / max(imp_nums_sort)) * 100\n\n print(f\"\\nTop {num_features} features:\")\n print(f\"Indicies: {self.top_n_feature_indicies}\\n\")\n pprint(self.top_n_feature_names)", "def get_top(self, model, limit=10, inverted=False):\n content_type= ContentType.objects.get_for_model(model)\n\n #Get a queryset of all the objects of the model. Get their scores\n results = self.filter(content_type=content_type).values('object_id').annotate(score=Sum('vote'))\n if inverted:\n results = results.order_by('score')\n else:\n results = results.order_by('-score')\n\n #We have a iterable list of objects of the requested model and their respective scores\n # Use in_bulk() to avoid O(limit) db hits.\n class_name = content_type.model_class()\n objects = class_name.objects.in_bulk([item['object_id'] for item in results[:limit]])\n\n # Yield each object, score pair. Because of the lazy nature of generic\n # relations, missing objects are silently ignored.\n\n for item in results[:limit]:\n id, score = item['object_id'], item['score']\n\n if not score:\n continue\n\n if int(id) in objects:\n yield objects[int(id)], int(score)", "def find_best_model(self):\n self.gridSearch.fit(self.train_x, self.train_y)\n \n return self.gridSearch.best_estimator_, self.gridSearch.best_score_", "def get_latest_model():\n return get_models()[-1]", "def _select_best(self):\n rmse = float('Inf')\n for k, v in self._dict_models.items():\n if self._dict_models[k].model_fit is not None:\n if self._dict_models[k].rmse < rmse:\n rmse = self._dict_models[k].rmse\n self.best_model = self._dict_models[k]\n if self.best_model is not None:\n self._uvtsf_logger.info(\n \"The best model selected as: {}\".format(str(type(self.best_model)).split('\\'')[1].split('.')[2]))\n else:\n self._uvtsf_logger.warning(\"No model has been fitted! 
Please call ts_fit()...\")", "def personal_top(self):\n\n top_scores = list(self.scores)\n top_scores.sort(reverse=True)\n return top_scores[:3]", "def test_get_top(self):\n api = self.EXT_API['dev1']\n top = api.get_top()", "def AIC_optimal(self):\n return self.models[np.argmin(self.AIC)]", "def get_top_models(db, sr_db, equipment, data_type, no_models):\n\n for n in range(28):\n no_of_terms = n + 1\n all_score_data = sr_db.search((Q.n_terms == no_of_terms))\n\n if len(all_score_data) == 0:\n debug('Scoring results not available for %s %s with %d terms' % (equipment, data_type, no_of_terms))\n continue\n\n top_entries = nlargest(no_models, all_score_data,\n key=lambda e: e['kfold_score'])\n\n top_scores, top_mcodes = extractnames(top_entries, 'kfold_score', 'model_code')\n\n my_Q = ((Q.equipment_name == equipment) &\n (Q.data_type == data_type) &\n (Q.n_terms == no_of_terms))\n\n done = db.contains(my_Q)\n\n if not done:\n entry = {'equipment_name': equipment,\n 'data_type': data_type,\n 'n_terms': no_of_terms,\n 'top_scores': top_scores,\n 'top_mcodes': top_mcodes\n }\n db.insert(entry)\n else:\n db.update({'top_scores': top_scores, 'top_mcodes': top_mcodes}, my_Q)", "def best_models(db_connection):\n call = '''\n SELECT\n cause_id, locations_exclude, model_version_type_id, sex_id,\n age_start, age_end, pv_rmse_in, pv_rmse_out, pv_trend_in,\n pv_trend_out, pv_coverage_in, pv_coverage_out\n FROM\n cod.model_version\n WHERE\n model_version_id IN (\n SELECT\n child_id # the children of hybrids\n FROM\n cod.model_version_relation\n WHERE parent_id IN (SELECT\n model_version_id\n FROM\n cod.model_version\n WHERE\n model_version_type_id=3 # hybrid model\n AND is_best=1 # is best\n AND date_inserted > \"2015-6-11\" # past date\n )\n )\n OR\n model_version_id IN (SELECT model_version_id\n FROM cod.model_version\n WHERE\n model_version_type_id!=3 # not hybrid model\n AND is_best=1 # is best\n AND date_inserted > \"2015-6-11\" # past date\n )\n '''\n data = query(call, db_connection)\n return data", "def top_offer(self):\n return self._top_offer", "def get_model(self):\n if self.model == None:\n print(\"Sorry, No optimized model available, please optimize_model() first\")\n else:\n return self.model", "def pick_best_model(self, data):\n self.best_model_ = None\n self.best_performance_ = 0\n\n split_indexes = list(self.splitter_.split(data))\n for i in range(0, len(self.models_)):\n print 'Fitting model', self.models_[i].get_name(), '...'\n performance = self.estimate_model_performance(self.models_[i], data,\n split_indexes)\n if self.is_best_model(performance):\n self.best_model_ = self.models_[i]\n self.best_performance_ = performance\n\n print 'Best model:', self.best_model_.get_name(),\\\n 'with performance equals', self.best_performance_\n return self.best_model_", "def select_top_predictions(self, predictions):\n scores = predictions.get_field(\"scores\")\n keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)\n predictions = predictions[keep]\n scores = predictions.get_field(\"scores\")\n _, idx = scores.sort(0, descending=True)\n return predictions[idx]", "def get_top_pages(model=None):\n return get_page_children(page=None, model=model)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current boost_on_errors
def _get_boost_on_errors(self):
    self._validate_boost_on_errors()
    if self.boost_on_errors == "auto":
        val = self._get_validation_strategy()
        if val.get("validation_type", "") == "custom":
            return False
        if self._get_mode() == "Explain":
            return False
        if self._get_mode() == "Perform":
            return False
        if self._get_mode() == "Compete":
            return True
        if self._get_mode() == "Optuna":
            return False
    else:
        return deepcopy(self.boost_on_errors)
[ "def on_errors(self):\n return self._on_errors", "def get_errors(self):\n return {'loss': self.loss.data[0]}", "def import_errors(self):\n return self._import_errors", "def get_script_errors(self):\r\n return self._script_errors", "def error(self):\n try:\n return self.errors[0]\n except (AttributeError, IndexError):\n return DotDict()", "def error(self):\n\t\treturn self.rec_errors", "def error_status(self):\n return self._status", "def get_error_status(self):\n return self.error_status", "def errors(self):\n return self.__ehandlers.copy()", "def failed_on(self):\n return self._failed_on", "def in_block_errors(self) -> str:\n return self._in_block_errors", "def errors(self) -> Sequence['outputs.BatchAIErrorResponse']:\n return pulumi.get(self, \"errors\")", "def errorbars (self):\n return self._errorbars", "def get_error(self):\n response_dict = self.response_dict()\n return response_dict.error", "def get_error(self):\n return self.exc_info", "def getRuntimeErrors(self):\n return self.runtimeErrors", "def getErrorLog(self):\n return _libsbml.SBMLValidator_getErrorLog(self)", "def _get_error_counters(self):\n return self.__error_counters", "def last_error(self):\n return self._last_error" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current kmeans_features
def _get_kmeans_features(self):
    self._validate_kmeans_features()
    if self.kmeans_features == "auto":
        if self._get_mode() == "Explain":
            return False
        if self._get_mode() == "Perform":
            return False
        if self._get_mode() == "Compete":
            return True
        if self._get_mode() == "Optuna":
            return False
    else:
        return deepcopy(self.kmeans_features)
[ "def k_means(self):\n raise NotImplementedError()", "def get_features(self):\n return self.features", "def get_features(self):\n if self.strokes is False:\n print('Isolating strokes')\n self.isolate_strokes()\n # List of features to use (sm1 omitted because always nan)\n feature_names = ('zrc', 'centroid',\n 'cm0', 'cm1', 'cm2', 'cm3', 'cm4',\n 'sm0', 'sm2')\n features_list = []\n for istroke in self.strokes:\n if not self.isGoodFrame(istroke):\n continue\n ifeature_dic = self.extract_features_from_frame(istroke)\n ifeature_list = []\n for ifeature in feature_names:\n ifeature_list.append(ifeature_dic[ifeature])\n features_list.append(ifeature_list)\n return {'feature_names': feature_names,\n 'feature_table': np.array(features_list)}", "def feature_list(self):\n return self._dataset[\"features\"]", "def kpoints(self):\n return self._kpoints", "def feature_dict(self):\n return self._feature_dict", "def cluster_feature(feature_mat, k):\n whitened = whiten(feature_mat.transpose())\n centroid, distortion = kmeans(whitened, k)\n\n return centroid, distortion", "def getTweetFeatureDict(self):\n return self.featureDict", "def get_features(self):\n if not self.exposes_features:\n return None\n\n return self._last_features", "def extract_features(self):\n\t\tself.get_eyes_features()\n\t\tself.get_mouth_features()\n\t\treturn np.array(self.features)", "def node_features(self) -> np.ndarray:\n return self._node_features", "def kmeans(X, k, num_init):\n # Write your code here\n kmeans = KMeans(n_clusters=k, n_init=num_init,init='random',algorithm='full',random_state=5)\n kmeans.fit(X)\n return kmeans.inertia_, kmeans.cluster_centers_", "def matrix_features(self):\n return self._matrix_features", "def keys(self):\n return self.feature_dict.keys()", "def cluster_features(self):\n logger.info('Creating term-document matrix...')\n self._create_tdm()\n init_centroids = self.centroids_from_categories()\n\n # Cluster the features using specific centroids.\n logger.info('Clustering features...')\n self.kmeans = KMeans(init=init_centroids, n_init=1, max_iter=1, n_clusters=len(self.feature_categories))\n self.clusters = self.kmeans.fit_predict(self.tdm)\n\n # The feature vector maps key features (categories) to other features that occur in the same cluster.\n logger.info('Converting clusters to feature vectors...')\n feature_vectors = self.clusters_to_feature_vectors(category_features=list(self.feature_amenity_map.keys()))\n\n return feature_vectors", "def features(self):\n if self._classifier is None:\n return None\n\n return self._classifier.most_informative_features()", "def get_features(self):\n\n num_entries = len(self.data)\n # Choose a random data point\n entry_num = random.randint(1, num_entries) - 1\n target_entry = self.data[entry_num]\n return target_entry.keys()", "def feature_matrix(self):\n return self._feat_matrix", "def kmeans_clustering(self,k):\r\n \r\n print(colored(\"Performing K-means clustering with %d clusters\\n\"%k,color = 'yellow', attrs=['bold']))\r\n kmeans = KMeans(n_clusters=k, random_state=0, n_init=10, max_iter=100, n_jobs=-1, ).fit(self.X)\r\n self.labels = kmeans.labels_\r\n self.davies_bouldin_score()\r\n print()\r\n print(colored(\"The k-means inertia is %0.002f\\n\" %(kmeans.inertia_),color = 'red', attrs=['bold']))\r\n self.cluster_plot()\r\n return self.labels , kmeans.cluster_centers_,kmeans" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current max_single_prediction_time
def _get_max_single_prediction_time(self):
    self._validate_max_single_prediction_time()
    if self.max_single_prediction_time is None:
        if self._get_mode() == "Perform":
            return 0.5  # prediction time should be under 0.5 second
        return None
    else:
        return deepcopy(self.max_single_prediction_time)
[ "def max_time(self):\n #{{{ function to return time of last sample\n\n if self.maxtime == -1:\n return stock.now()\n\n return self.maxtime", "def max_time(self):\n return self.time[np.argmax(self.flux)]", "def max_time(self) -> float:\r\n if(len(self.operations_by_name) == 0):\r\n return -1\r\n return max(map(lambda x: x[\"time_step\"], self.operations_by_name.values()))", "def max_time(self):\n return self._ll_tree_sequence.get_max_time()", "def getMaxSimTime(self):\n return self.max_simsecs_value", "def _get_detection_time_multiplier(self):\n return self.__detection_time_multiplier", "def get_inference_time(self):\n return self._engine.get_inference_time()", "def last_detect_time(self):\n return self._last_detect_time", "def slowest(self) -> float:\n if len(self.requests) == 0:\n return 0\n return self.requests[-1][\"time\"]", "def getDefaultTime(self):\n return max(tvp[0] for tvp in self.timeValuePairs)", "def last_run_time(self) -> float:\n return self._last_run_time", "def last_tick_time(self):\n return self.last_tick_", "def time_to_target_training(self) -> str:\r\n # TODO: Figure out how to implement this.\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"time_to_target_training\"))\r\n return self._training_modes[0]", "def _get_last_meas_time(self):\n\n #if flag for whole data regeneration is set\n if self._process_type == 'full_gen':\n return datetime.datetime(1900, 1, 1, 0, 0, 0)\n \n \n res = self._db.Query(\"\"\"SELECT last_measurement_time\n FROM last_dashboard_element_segment_value\n WHERE\n element_id = %s\n AND segment_value_id = %s\n \"\"\",(self._id, self._segment_value_id))\n if not res:\n return datetime.datetime(1900, 1, 1, 0, 0, 0)\n item = self._db.record[0]\n if item['last_measurement_time']:\n return item['last_measurement_time']\n return datetime.datetime(1900, 1, 1, 0, 0, 0)", "def get_best_time():\n return best_reaction_time", "def max_retire_time(self):\n return self._max_retire_time", "def get_max_end_time(self):\n max_end_time = 1.\n file = h5py.File(self.filename, 'r')\n for idx in range(len(self)):\n label = self.labels[idx]\n timestamps_group = file['/'][self.mode + '_timestamps']\n timestamps_dset = timestamps_group[label]\n end_time = timestamps_dset[-1]\n if end_time > max_end_time: max_end_time = end_time\n file.close()\n return max_end_time", "def get_time(self):\n if not self.simulated:\n return datetime.now()\n else:\n return self.simulated_time", "def last_timestamp_nanoseconds(self):\n return max((self.last_timeseries_timestamp(), self.last_histogram_timestamp()))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current optuna_time_budget
def _get_optuna_time_budget(self):
    self._validate_optuna_time_budget()
    if self.optuna_time_budget is None:
        if self._get_mode() == "Optuna":
            return 3600
        return None
    else:
        if self._get_mode() != "Optuna":
            # use only for mode Optuna
            return None
        return deepcopy(self.optuna_time_budget)
[ "def budget(self):\n return self._budget", "def get_current_goal(self):\n if self.agenda is None or len(self.agenda) == 0:\n return None\n return self.agenda[0]", "def get_best_time():\n return best_reaction_time", "def lagtime(self):\n return self._tau", "def getSelectedShowtime(self):\n\n cur = self.current()\n if cur < 0:\n return None\n else:\n return self.theater.showtimes(self.showtimeIds[cur])", "def current_time_step(self) -> ts.TimeStep:\n return self._current_time_step", "def free_flight_time(self):\n return self._free_flight_time", "def active_time(self):\n return self._active_time", "def dep_time(self):\n return self._dep_time", "def project_budget_funding(self):\n return self._project_budget_funding", "def active_time(self):\n return self.details.get('active_time')", "def pending_time(self):\n return self.__pb.pending_time() / 1e6", "def get_budget(self, category: BudgetCategory) -> Budget:\n return self.budgets.get(category, None)", "def _get_auto_cost(self):\n return self.__auto_cost", "def value(self):\n return self.timespan", "def budget_details(self):\n print \"Our budget to buy bikes wholesale is now {}.\".format(str(self.budget))", "def _get_acquisition_time(self) -> float:\n return self._acquisition_time", "def get_last_solve_time(self):\n return self.solve_time", "def effective_time(self):\n return self._effective_time" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current optuna_init_params
def _get_optuna_init_params(self):
    self._validate_optuna_init_params()
    if self._get_mode() != "Optuna":
        # use only for mode Optuna
        return {}
    return deepcopy(self.optuna_init_params)
[ "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['mapping'] = self.mapping\n paramDict['values'] = self.values\n return paramDict", "def init_params(self):\n self.params = {p.title: p for p in\n chain(self.session.shared_params.values(), self._get_addl_params())}", "def init_params(self):\n GAOperator.init_params(self)", "def _init_parms():\n\n with open('json/_default.json') as json_data:\n parms = json.load(json_data)\n\n return parms", "def get_params(self):\n return {'threshold': self.threshold,\n 'subsample': self.subsample,\n 'estimator': self.estimator,\n 'n_folds': self.n_folds,\n 'stratify': self.stratify,\n 'random_state': self.random_state,\n 'n_jobs': self.n_jobs}", "def get_global_parameters(self):", "def _get_params_for_run(self):\n if self._optimizer is not None:\n return self._optimizer.get_next_params()\n else:\n return self._params", "def get_main_params(self):\n return self.get_section_config('main')", "def suggested_init(self):\n init = []\n for name in self.par_order:\n init = init + self.par_map[name]['paramset'].suggested_init\n return init", "def get_injected_params(self):\n if 'data_params' in self.all_params.keys():\n if self.all_params['data_params'] is not None:\n data_params = {}\n for pkey in self.all_params['data_params'].keys():\n data_params[pkey] = \\\n self.all_params['data_params'][pkey]['value']\n else:\n data_params = None\n else:\n data_params = None\n return data_params", "def _get_current_hyperparameters(self):", "def get_params(self):\n return {}", "def _core_init_params(self) :\n\t\ta_list,b_list = [],[]\n\t\tg_list,h_list = [],[]\n\t\t\n\t\t\n\t\tfor eqnid,eqn in enumerate(self.equations) : \n\t\t\treg_p = self.regressors[eqnid]['prod']\n\t\t\treg_d = self.regressors[eqnid]['degrad']\n\t\t\th_eqn = self.initsol['h'][eqn-1]\n\t\t\tg_eqn = self.initsol['g'][eqn-1]\n\n\n\t\t\ta_list.append(self.initsol['alpha'][eqn-1])\n\t\t\tb_list.append(self.initsol['beta'][eqn-1])\n\t\t\t\n\t\t\tg_eqn = np.array([g_eqn[reg-1] for reg in reg_p])\n\t\t\th_eqn = np.array([h_eqn[reg-1] for reg in reg_d])\n\t\t\th_list.append(h_eqn)\n\t\t\tg_list.append(g_eqn)\n\t\n\t\treturn (a_list,b_list,g_list,h_list)", "def get_params_internal(self):\n return self.params", "def form_parameters(self, opt_vars):\n\n simplex, cube = opt_vars\n\n full_params = np.zeros(self.shape)\n full_params.flat[self.active_set] = simplex * self.signs \n\n return full_params", "def init_params(self):\n self._elite_size = int(round(self.elite_proportion * self.population_size))\n self._mutant_size = int(round(self.mutant_proportion * self.population_size))\n self._elapsed_time = 0.0\n self._last_perf_count = time.perf_counter()\n self._init_pool()\n self.current_population = list()\n self.operator.init_params()", "def get_opts(self):\n return self.__options", "def get_preference_params(self):\n return self._param", "def _get_init_args(self):\n\n return dict(enum=self.enum, dflt=self._defname,\n base=self.base, shape=self.shape)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current optuna_verbose
def _get_optuna_verbose(self):
    self._validate_optuna_verbose()
    # use only for mode Optuna
    if self._get_mode() != "Optuna":
        return True
    return deepcopy(self.optuna_verbose)
[ "def isVerbose(self):\n return self.opts.verbose", "def is_verbose(self):\n return self._verbose", "def verbose():\n return Verbose.level()", "def getVerboseLevel(self):\n return self.__verboseLevel", "def verbose_level(self):\n return self._verbose_level", "def ColorfulPyPrint_current_verbose_level():\r\n global O_VERBOSE_LEVEL\r\n return O_VERBOSE_LEVEL", "def is_verbose() -> bool:\n return VERBOSE", "def verbosity(self):\n return self._verbosity", "def verbose(self, value):\n return value", "def verbosePref(self):\n # If the level of the object is below the Preference level,\n # recursively calls base (super) classes to get preference at specified level\n return self.get_pref_setting_for_level(VERBOSE_PREF, self._verbose_pref.level)[0]", "def enable_verbose(self):\n self.verbose = True", "def getVerbosity(self):\n\n return self.__verbosity;", "def get_short_opts(self):\n return self.__short_options", "def set_verbose(x):\n\tglobal verbose\n\tverbose = x", "def _crosstool_verbose(repository_ctx):\n name = \"CROSSTOOL_VERBOSE\"\n if name in repository_ctx.os.environ:\n return repository_ctx.os.environ[name].strip()\n return \"0\"", "def _set_verbose(value):\n global VERBOSE\n VERBOSE = value", "def verbosity_for_session(request):\n return request.config.getoption(\"--verbosity-project\")", "def option(self):\r\n return conf.lib.clang_getDiagnosticOption(self, None)", "def option(self):\n return conf.lib.clang_getDiagnosticOption(self, None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current n_jobs
def _get_n_jobs(self):
    self._validate_n_jobs()
    return deepcopy(self.n_jobs)
[ "def get_n_jobs(self):\n return self.n_jobs", "def get_num_jobs(self):\n return self._config.get_num_jobs()", "def effective_n_jobs(n_jobs=-1):\n if n_jobs == 1:\n return 1\n\n backend, backend_n_jobs = get_active_backend()\n if n_jobs is None:\n n_jobs = backend_n_jobs\n return backend.effective_n_jobs(n_jobs=n_jobs)", "def get_num_jobs(self):\n return str(self.num_jobs)", "def __len__(self):\n return self._num_of_jobs", "def num_jobs(self):\n return self.jobs.qsize()", "def num_jobs(self) -> int:\n return len(self.job_set)", "def _n_jobs_wrap(n_jobs):\n if not isinstance(n_jobs, int):\n raise ValueError(\n \"type(n_jobs) = %s, but n_jobs should be an int\" % type(n_jobs))\n\n if (n_jobs == 0) or (n_jobs < -1 * cpu_count()):\n msg = \"Must have -1 + cpu_count() <= n_jobs < 0 OR 1 <= n_jobs\"\n raise ValueError(\"n_jobs = %d, but %s\" % (n_jobs, msg))\n\n if n_jobs < 0:\n n_jobs = max(cpu_count() + 1 + n_jobs, 1)\n\n return n_jobs", "def check_n_jobs(n_jobs):\n # scikit-learn convention\n # https://scikit-learn.org/stable/glossary.html#term-n-jobs\n if n_jobs is None:\n return 1\n elif not is_int(n_jobs):\n raise ValueError(f\"`n_jobs` must be None or an integer, but found: {n_jobs}\")\n elif n_jobs < 0:\n return os.cpu_count()\n else:\n return n_jobs", "def __number_of_jobs__(self):\n # | - __number_of_jobs__\n num_jobs = 0\n\n # Regular jobs\n if self.job_var_lst is not None:\n num_jobs = len(self.job_var_lst)\n\n # Individual dir jobs\n if self.indiv_dir_lst is not None:\n num_jobs += len(self.indiv_dir_lst)\n\n\n return(num_jobs)\n # __|", "def get_job_count():\n return TxJob().count()", "def number_jobs(self):\n if self.array:\n return len(self.commands)\n else:\n return 1", "def max_concurrent_jobs(self) -> int:\n return pulumi.get(self, \"max_concurrent_jobs\")", "def get_make_jobs():\n import multiprocessing\n cpu_count = multiprocessing.cpu_count()\n if cpu_count == 1:\n return 1\n elif cpu_count <= 4:\n return cpu_count // 2\n else:\n return cpu_count - 1", "def numSubmitted(self):\n return len(self.__submittedJobs)", "def number_jobs_total(self):\n return self.submission_status.total_jobs", "def get_jobs(self):\r\n\r\n # TODO: add jobs as well..\r\n return list(JOBS.keys())", "def nworkers(self):\n return len(self._workers)", "def concurrent_jobs_limit(self) -> int:\n return pulumi.get(self, \"concurrent_jobs_limit\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current random_state
def _get_random_state(self):
    self._validate_random_state()
    return deepcopy(self.random_state)
[ "def random_state(self):\n return self._random.get_state()", "def random_state(self):\n return self.__random_state", "def random_state(self):\n return self._distribution.random_state", "def get_state():\n return torch.get_rng_state(), random.getstate(), np.random.get_state()", "def rand(self):\n return self.State.rand()", "def getstate(self):\n return (self.baseseed, self.counter, self.randbits_remaining)", "def get_random_state():\n numpy_state = np.random.get_state()\n torch_state = torch.get_rng_state()\n random_state = (numpy_state, torch_state)\n return random_state", "def rand(self):\n self.state = (self.a * self.state + self.c)\n return self.state", "def get_state(self):\r\n return self.get_global_state()", "def next_state(self):\r\n s = random.choice(self.states)\r\n self.states.remove(s)\r\n return s", "def _random_function(self, random_state):\n return random_state.rand", "def random_state(state):\n old_state = RandomState()\n state.set_global()\n yield\n old_state.set_global()", "def get_current_state(self):\n return self.robot.get_current_state()", "def get_current_state(self):\n return self.world.get_state()", "def get_state(self, state):\n return state", "def state_(game):\n return game.initial", "def random_agent(self, state):\n\t\trndint = random.randint\n\t\treturn self.state[state][rndint(0, len(self.state[state]))]", "def get_random_start_state(self) -> State:\n if len(self.blocks) <= state_enumeration_limit:\n rnd = random.randint(0, len(self.allStates) - 1)\n return self.allStates[rnd]\n else:\n return self.generate_random_start_state()", "def get_state(self):\n return self.run_cmd('get-state')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the fairness metric
def _get_fairness_metric(self):
    self._validate_fairness_metric()
    if self.fairness_metric == "auto":
        if self._get_ml_task() == BINARY_CLASSIFICATION:
            return "demographic_parity_ratio"
        if self._get_ml_task() == REGRESSION:
            return "group_loss_ratio"
        if self._get_ml_task() == MULTICLASS_CLASSIFICATION:
            return "demographic_parity_ratio"
    else:
        return deepcopy(self.fairness_metric)
[ "def _get_fairness_threshold(self):\n if self.fairness_threshold == \"auto\":\n if self._get_ml_task() in [\n BINARY_CLASSIFICATION,\n MULTICLASS_CLASSIFICATION,\n ]:\n thresholds = {\n \"demographic_parity_difference\": 0.1,\n \"demographic_parity_ratio\": 0.8,\n \"equalized_odds_difference\": 0.1,\n \"equalized_odds_ratio\": 0.8,\n }\n return thresholds.get(self._fairness_metric, 0.8)\n elif self._get_ml_task() == REGRESSION:\n thresholds = {\n \"group_loss_ratio\": 0.8,\n }\n if self._fairness_metric == \"group_loss_difference\":\n raise AutoMLException(\n \"We can't set default fairness threshold value. Please set `fairness_threshold` value in AutoML constructor.\"\n )\n return thresholds.get(self._fairness_metric, 0.8)\n else:\n return deepcopy(self.fairness_threshold)", "def test_fairness_returns(self):\n expected_dict = \\\n {\n 0 : 0.0,\n 25 : 0.5,\n 50 : 1.0,\n 75 : 0.5,\n 100 : 0.0,\n }\n for value, expected in expected_dict.iteritems():\n self.assertEqual(processFairness(value), expected)", "def StatisticalFairness(pickled_dataset, num_clusters, result, fairness_type):\n\n\tglobal NUM_SAMPLES, DELTA, NUM_NEAREST_NEIGHBOURS\n\n\tdataset, groups = LoadPickledData(pickled_dataset)\n\n\t# Subsample the dataset uniformly at random, assuming the rows in pkl file are randomly shuffled already\n\tdataset = dataset[: NUM_SAMPLES]\n\tgroups = groups[: NUM_SAMPLES]\n\n\tNUM_NEAREST_NEIGHBOURS = min(NUM_NEAREST_NEIGHBOURS, dataset.shape[0] - 1)\n\n\t# Convert groups to 2D in case it's 1D\n\tif(len(groups.shape) == 1):\n\t\tgroups = groups.reshape(-1, 1)\n\n\t# Vanilla clustering\n\t_, centres, labels = FindClusters(dataset, num_clusters, 0, 1)\n\n\t# KMeans hard assignment cost\n\thard_distribution = labelToOneHot(labels)\n\thard_cost = KMeansCost(dataset, centres, hard_distribution)\n\tnum_samples = len(dataset)\n\n\t# Uniform assignment cost - worst case\n\tuniform_distribution = np.zeros([num_samples, num_clusters]).astype(float)\n\tuniform_distribution[:,:] = 1 / num_clusters\n\tuniform_cost = KMeansCost(dataset, centres, uniform_distribution)\n\n\t# LP for statistical fairness clustering\n\toutput = stat_clustering(dataset, centres, groups, DELTA)\n\n\tfair_cost = output[\"objective\"] / num_samples\n\n\t# Print Simulation Environment Parameters\n\tPrintSimulationParameters(pickled_dataset, num_clusters, result)\n\n\tresult.AddCost(\"Hard KMeans\", round(hard_cost, 2))\n\tresult.AddCost(\"Statistical fairness\", round(fair_cost, 2))\n\tresult.AddCost(\"Uniform assignment\", round(uniform_cost, 2))\n\tresult.AddCost(\"Percentage decrease (hard vs alg)\", round(((fair_cost - hard_cost) / fair_cost) * 100, 2))\n\tresult.AddCost(\"Percentage increase (uniform vs alg)\", round(((uniform_cost - fair_cost) / fair_cost) * 100, 2))\n\n\t# Report the number of constraint violations\n\toptimal_assignment = output[\"assignment\"][:num_samples * num_clusters]\t\t\t\t\t\t# Extract P_i_j values alone and discard C_i_j_k\n\tdistribution = np.asarray(optimal_assignment).reshape(num_samples, num_clusters)\t\t\t# Convert matrix to 2D representation\n\tgroup_violations = GroupwisePercentageViolations(distribution, dataset, groups, NUM_NEAREST_NEIGHBOURS,\n\t\t\ttitle = \"Constraint Violations within protected groups\",\n\t\t\tverbose = not QUIET,\n\t\t\tstat_distance_metric = 0,\t\t\t# 0 = TV distance - LP doesn't support D_inf yet \n\t\t\teuclidean_distance_metric = 0, \t\t# 0 = L2 distance\n\t\t\tnormalise_stat = False,\t\t\t\t# TV distance is already normalised \n\t\t\tnormalise_euclidean = 
True,\t\t\t# Euclidean distance has to be normalised\n\t\t\tfairness_type = fairness_type\n\t\t)\n\n\tpercentage_violations = PercentageViolations(distribution, dataset, NUM_NEAREST_NEIGHBOURS,\n\t\t\ttitle = \"Constraint Violations over all pairs\",\n\t\t\tverbose = not QUIET,\n\t\t\tstat_distance_metric = 0,\t\t\t# 0 = TV distance - LP doesn't support D_inf yet \n\t\t\teuclidean_distance_metric = 0, \t\t# 0 = L2 distance\n\t\t\tnormalise_stat = False,\t\t\t\t# TV distance is already normalised \n\t\t\tnormalise_euclidean = True,\t\t\t# Euclidean distance has to be normalised\n\t\t\tfairness_type = fairness_type\n\t\t)\n\n\t# Compute statistical bias\n\tstat_bias, earthmover = Bias(distribution, dataset, groups, verbose = not QUIET)\n\n\tresult.AddCost(\"PercentageViolations\", round(percentage_violations, 2))\n\tresult.AddCost(\"GroupwiseViolations\", round(group_violations, 2))\n\tresult.AddCost(\"Statistical Bias\", round(stat_bias, 2))\n\tresult.AddCost(\"Earthmover Distance\", round(earthmover, 2))\n\tresult.AddSolution(centres, distribution)\n\t\n\tif(not QUIET):\n\t\t# Report solution cost\n\t\tprint(result.GetCost())", "def test_fairness(self):\n print(\"testing lottery fairness\")\n self.setup_normal(PARTICIPANTS_NO,TICKETS_NO)\n winner_tickets=self.lottery.multiple_draw(DRAWS_NO)\n winners_distribution = helpers.create_distribution(winner_tickets,self.participants)\n expected_distribution = helpers.create_distribution(self.tickets,self.participants)\n kl=helpers.KL_divergence(expected_distribution,winners_distribution)\n print (\"KL distance:{}\".format(kl))\n helpers.create_plot(expected_distribution,winners_distribution)\n self.assertLessEqual(kl,KL_MAX,\"the lottery draw is not fair enough, (KL ={})\".format(kl))", "def is_fair(self):\n fairness = Fairness(experience_weight=1)\n if fairness.is_fair(self):\n return 'This trade is fair!'\n return 'This trade is unfair!'", "def fairness_discrepancy(props, n_classes, norm=0):\n # unique, freq = np.unique(data, return_counts=True)\n # props = freq / len(data) #Proportion of data that belongs to that data\n \n # #------------------Modification to correct the zero support problem------------------------------------------------\n # temp=np.zeros(n_classes)\n # temp[unique]=props\n # props=temp\n # #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n #(Remove Normalisation)\n l2_fair_d = np.sqrt(((props - truth)**2).sum())\n l1_fair_d = abs(props - truth).sum()\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes) \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"L2\"), 
l1_fair_d/metric_max(n_classes,\"L1\"),info_spec/metric_max(n_classes,\"Is\"),specificity,wd/metric_max(n_classes,\"Wd\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity", "def FastConvergence(self):\n\t\treturn self._get_attribute('fastConvergence')", "def fairness_discrepancy(data, n_classes, norm=0):\n unique, freq = np.unique(data, return_counts=True)\n props = freq / len(data) #Proportion of data that belongs to that data\n \n #------------------Modification to correct the zero support problem------------------------------------------------\n temp=np.zeros(n_classes)\n temp[unique]=props\n props=temp\n #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n l2_fair_d = np.sqrt(((props - truth)**2).sum())/n_classes\n l1_fair_d = abs(props - truth).sum()/n_classes\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes)/n_classes \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n #Create array to populate proportions\n # props2=np.zeros(n_classes)\n # props2[unique]=props\n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n for i in props:\n f.write(\"%f \"%(i))\n f.write(\"\\n\")\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity,wd/metric_max(n_classes,\"wd\"),wds/metric_max(n_classes,\"wds\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity", "def test_fairness(self):\n # Games to run and number of decimal places to look at the win ratio\n # We can increase iterations and tolerance for more confidence, at the\n # expense of execution time\n iterations = 10000\n tolerance = 1\n\n game = Game(quiet=True)\n win_count = {game.player1: 0, game.player2: 0}\n\n for _ in range(iterations):\n winner = game.play()\n if winner is not None:\n win_count[winner] += 1\n\n win_ratio = win_count[game.player1] / win_count[game.player2]\n\n self.assertAlmostEqual(1.0, win_ratio, tolerance)\n self.assertNotEqual(win_count[game.player1], 0)\n self.assertNotEqual(win_count[game.player2], 0)", "def staleness(T, clock) -> float:\n return clock - T", "def test_estimate_statistics_priority(self):\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 2.0)\n s.process(\"b\", 3.0)\n self.assertEqual(s.estimate_statistics(), 5.0)", "def test_metric_is_scorer():\n trainer = DirectClassifier(\"LR\", metric=get_scorer(\"f1\"), random_state=1)\n trainer.run(bin_train, bin_test)\n assert trainer.metric == \"f1\"", "def 
checkClaimQuality(self, results):\n\n fairness = 0.0 \n robustness = 0.0\n uniqueness = 0.0\n total_parameters_nb = float(abs(len(results)))\n\n for result in results:\n a, b, u, v, r = result[0:5]\n sr = self.SR(r)\n sp = self.SP(a,b)\n fairness += sr * sp\n robustness += sp * (min(0, sr))**2\n ### Uniqueness ###\n one_indicator_function = lambda x: 1 if x<0 else 0\n uniqueness += 1/total_parameters_nb * one_indicator_function(sr)\n\n measures = {}\n measures[\"fairness\"] = fairness\n measures[\"robustness\"] = math.exp(-robustness)\n measures[\"uniqueness\"] = uniqueness\n\n return measures", "def compute_clustering_score():\n # TODO: Implement simple clustering\n raise NotImplementedError()", "def constant_score(self):\n return self._constant_score", "def staleness_scaled(T, clock) -> float:\n return (clock - T) / clock", "def coherence(self):\r\n coherence = np.abs(self.coherency ** 2)\r\n\r\n return coherence", "def sampling_priority(self):\n # type: () -> Optional[NumericType]\n return self._metrics.get(SAMPLING_PRIORITY_KEY)", "def readScore(self):\n return self.zmwMetric(\"ReadScore\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the fairness threshold
def _get_fairness_threshold(self):
    if self.fairness_threshold == "auto":
        if self._get_ml_task() in [
            BINARY_CLASSIFICATION,
            MULTICLASS_CLASSIFICATION,
        ]:
            thresholds = {
                "demographic_parity_difference": 0.1,
                "demographic_parity_ratio": 0.8,
                "equalized_odds_difference": 0.1,
                "equalized_odds_ratio": 0.8,
            }
            return thresholds.get(self._fairness_metric, 0.8)
        elif self._get_ml_task() == REGRESSION:
            thresholds = {
                "group_loss_ratio": 0.8,
            }
            if self._fairness_metric == "group_loss_difference":
                raise AutoMLException(
                    "We can't set default fairness threshold value. Please set `fairness_threshold` value in AutoML constructor."
                )
            return thresholds.get(self._fairness_metric, 0.8)
    else:
        return deepcopy(self.fairness_threshold)
[ "def threshold(self) -> float:\n return pulumi.get(self, \"threshold\")", "def get_best_threshold(ensemble_model):\n train_gen, val_gen = get_generators()\n\n # Add metrics\n ensemble_model.compile(loss='binary_crossentropy', optimizer=get_optimizer(), metrics=get_metric(scan=True))\n preds_test = ensemble_model.evaluate_generator(generator=val_gen, verbose=True)\n\n # Now, simply fetch the right threshold\n ts = np.linspace(0, 1, len(preds_test))\n\n return ts[np.argmax(preds_test)]", "def get_decision_thresh(self) -> Optional[float]:\n return self.readout.get_decision_thresh()", "def waitlist_threshold(self):\n return self._waitlist_threshold", "def test_threshold_brier_score_threshold(o_dask, f_prob_dask, threshold):\n actual = threshold_brier_score(o_dask, f_prob_dask, threshold)\n assert (actual.threshold == threshold).all()", "def reward_threshold(self) -> Optional[float]:", "def _get_fairness_metric(self):\n self._validate_fairness_metric()\n if self.fairness_metric == \"auto\":\n if self._get_ml_task() == BINARY_CLASSIFICATION:\n return \"demographic_parity_ratio\"\n if self._get_ml_task() == REGRESSION:\n return \"group_loss_ratio\"\n if self._get_ml_task() == MULTICLASS_CLASSIFICATION:\n return \"demographic_parity_ratio\"\n else:\n return deepcopy(self.fairness_metric)", "def test_fairness(self):\n print(\"testing lottery fairness\")\n self.setup_normal(PARTICIPANTS_NO,TICKETS_NO)\n winner_tickets=self.lottery.multiple_draw(DRAWS_NO)\n winners_distribution = helpers.create_distribution(winner_tickets,self.participants)\n expected_distribution = helpers.create_distribution(self.tickets,self.participants)\n kl=helpers.KL_divergence(expected_distribution,winners_distribution)\n print (\"KL distance:{}\".format(kl))\n helpers.create_plot(expected_distribution,winners_distribution)\n self.assertLessEqual(kl,KL_MAX,\"the lottery draw is not fair enough, (KL ={})\".format(kl))", "def _compute_threshold(self,z=2.0):\n scoretally = []\n for seq in self.seqs:\n matches,endpoints,scores = self.scan(seq,-100)\n scoretally.append(scores[0])\n ave,std = avestd(scoretally)\n self.threshold = ave - z *std\n #print '#%s: threshold %5.2f = %5.2f - %4.1f * %5.2f'%(\n # self, self.threshold, ave, z, std)", "def getThreshold(self): # real signature unknown; restored from __doc__\n pass", "def threshold(self, value):\r\n threshold = 0.5\r\n if value >= threshold:\r\n return 1\r\n else:\r\n return 0", "def FastConvergence(self):\n\t\treturn self._get_attribute('fastConvergence')", "def thread_priority(self) -> \"int\":\n return _beamforming_swig.randomsampler_sptr_thread_priority(self)", "def thresh(self) -> int:\n return self._thresh", "def thresholdfactor(self):\n return self.__thresholdfactor", "def find_metric_threshold(self):\n logger.info(\"compute metric threshold\")\n\n ### Beaucoup trop lent quand on a beaucoup de models ###\n\n df_results_not_aggregated = self.result_reader.load_all_results(aggregate=False)\n\n if len(df_results_not_aggregated) == 0:\n logger.info(\"threshold = None\")\n return None\n\n main_scorer = \"test_%s\" % self.job_config.main_scorer\n (df_results_not_aggregated[main_scorer].fillna(df_results_not_aggregated[main_scorer].min(), inplace=True))\n min_cv = df_results_not_aggregated.groupby(\"job_id\")[main_scorer].min().values\n delta_min_max_cv = np.median(\n df_results_not_aggregated.groupby(\"job_id\")[main_scorer].apply(lambda x: x.max() - x.min())\n )\n\n if len(min_cv) <= self.min_nb_of_models:\n logger.info(\"threshold = None\")\n return None\n\n min_cv = 
-np.sort(-min_cv)\n result = min_cv[self.min_nb_of_models] - delta_min_max_cv\n\n # result = np.percentile( min_cv, self._get_quantile(len(min_cv)) * 100)\n # TODO : ici peut etre faire une estimation parametric du quantile avec un Kernel, plus smooth et moins sensible quand peu de valeurs\n\n logger.info(\"threshold : %2.2f\" % result)\n return result", "def is_fair(self):\n fairness = Fairness(experience_weight=1)\n if fairness.is_fair(self):\n return 'This trade is fair!'\n return 'This trade is unfair!'", "def test_fairness_returns(self):\n expected_dict = \\\n {\n 0 : 0.0,\n 25 : 0.5,\n 50 : 1.0,\n 75 : 0.5,\n 100 : 0.0,\n }\n for value, expected in expected_dict.iteritems():\n self.assertEqual(processFairness(value), expected)", "def test_soft_threshold():\n assert snet.soft_threshold(10, 100) == 0\n assert snet.soft_threshold(-10, 100) == 0\n assert snet.soft_threshold(10, 3) == 7\n assert snet.soft_threshold(-10, 3) == -7" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets privileged groups for fair training
def _get_privileged_groups(self):
    if self.privileged_groups == "auto":
        return []
    else:
        return deepcopy(self.privileged_groups)
[ "def _get_underprivileged_groups(self):\n if self.underprivileged_groups == \"auto\":\n return []\n else:\n return deepcopy(self.underprivileged_groups)", "def test_aws_service_api_security_groups_get(self):\n pass", "def test_get_groups(self):\n pass", "def granted_groups(self):\n return [\n g\n for g in Group.objects.filter()\n if ManagedObject.objects.filter(GroupAccess.Q(g) & Q(id=self.id)).exists()\n ]", "def getGroupsForPrincipal(principalid):", "def test_get_with_private_group_no_access(self):\n group = self.create_review_group(invite_only=True)\n repository = self.create_repository(tool_name='Test')\n review_request = self.create_review_request(publish=True,\n repository=repository)\n review_request.target_groups.add(group)\n\n url, mimetype = self.setup_review_request_child_test(review_request)\n\n with override_feature_checks(self.override_features):\n rsp = self.api_get(url, expected_status=403)\n\n self.assertEqual(rsp['stat'], 'fail')\n self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)", "def allowed_groups(self) -> Sequence[str]:\n return pulumi.get(self, \"allowed_groups\")", "def list_groups(self):\n pass", "def get_groups(self):\n return list(self.groups.values())", "def security_groups(self):\n return self._security_groups", "def test_get_security_group_using_get1(self):\n pass", "def get_groups(self, sampler: Sampler):\n raise NotImplementedError()", "def test_get_with_private_group(self):\n group = self.create_review_group(invite_only=True)\n group.users.add(self.user)\n repository = self.create_repository(tool_name='Test')\n review_request = self.create_review_request(publish=True,\n repository=repository)\n review_request.target_groups.add(group)\n\n url, mimetype = self.setup_review_request_child_test(review_request)\n\n with override_feature_checks(self.override_features):\n self.api_get(url,\n expected_mimetype=mimetype,\n expected_json=self.basic_get_returns_json)", "def get_relevant_perm_groups(self):\n\n groups = Group.objects.filter(Q(name=\"everyone\") | Q(name=self.admin_group_name()) | Q(name=self.participants_group_name()))\n return groups", "def _get_admin_group_lists(self):\n return self.__admin_group_lists", "def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)", "def private_channels(self):\n return self.slack.groups.list().body['groups']", "def get_registered_groups():\n return registeredAccounts[\"groups\"]", "def get_sample_groups(self): \r\n return self.sample_groups" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets underprivileged groups for fair training
def _get_underprivileged_groups(self):
    if self.underprivileged_groups == "auto":
        return []
    else:
        return deepcopy(self.underprivileged_groups)
[ "def _get_privileged_groups(self):\n if self.privileged_groups == \"auto\":\n return []\n else:\n return deepcopy(self.privileged_groups)", "def test_aws_service_api_security_groups_get(self):\n pass", "def test_get_groups(self):\n pass", "def _identify_groups_for_user(user):\n groups = []\n for group in user.groups.all():\n if group.name == 'WMT16' \\\n or group.name.lower().startswith('wmt') \\\n or group.name.startswith('eng2') \\\n or group.name.endswith('2eng'):\n continue\n \n if not group in groups:\n groups.append(group)\n \n return groups", "def test_get_device_groups(self):\n pass", "def test_get_security_group_using_get1(self):\n pass", "def list_groups(self):\n pass", "def test_get_resource_group_by_moid(self):\n pass", "def test_groups_get(self):\n pass", "def getGroupsForPrincipal(principalid):", "def test_users_groups_get(self):\n pass", "def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)", "def get_free_standins(group):", "def get_sample_groups(self): \r\n return self.sample_groups", "def list_secgroups(self, name=None):", "def get_groups(self, sampler: Sampler):\n raise NotImplementedError()", "def test_get_group(self):\n pass", "def get_registered_groups():\n return registeredAccounts[\"groups\"]", "def get_gadm_list():\n cur = g.db.execute('select id_user from user_group where gadm == 1', [uid])\n gadm = [row[0] for row in cur.fetchall()]\n return gadm" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download log files from remote machines on local machine via ssh
def __download_via_ssh(cls, request, local_path): hosts = request.POST.getlist('hosts[]') logs = request.POST.getlist('logs[]') if not os.path.exists(local_path): os.makedirs(local_path) for host_name in hosts: host_object = Host.objects.get(host_name=host_name) host_path = os.path.join(local_path, host_name) if not os.path.exists(host_path): os.makedirs(host_path) for log_name in logs: log_object = Log.objects.get(log_name=log_name) help_methods.get_file_via_ssh( getattr(log_object, 'log_path'), host_path, getattr(host_object, 'host_name'), getattr(host_object, 'host_root_user'), getattr(host_object, 'host_root_password') )
[ "def get_logs():\n get(remote_path=\"/tmp/log_extracts.tar.gz\",\n local_path=\"/logs/new_log.tar.gz\")", "def PullLogs(ssh, log_files, download_folder):\n for log_file in log_files:\n target_file = os.path.join(download_folder, os.path.basename(log_file))\n ssh.ScpPullFile(log_file, target_file)\n _DisplayPullResult(download_folder)", "def remote_get_log(user, remote_host):\n\n try:\n # set up dir that we'll move the remote .tar into\n if not os.path.isdir('./deploy.logs/'+remote_host):\n os.mkdir('./deploy.logs/'+remote_host)\n \n # download the tar file from remote host\n out, err, returncode = remote_download_file(remote_host+'.tgz', \n './deploy.logs/'+remote_host+'/'+remote_host+'.tgz', user, remote_host)\n\n deploy_logging.log('Downloading logs', 'Logs downloaded from '+remote_host)\n # now try to untar the files\n\n # build up a command list to execute\n command_list = []\n\n # tar is picky about where it'll unzip to (CWD), so we'll just Cd there\n command_list.append('cd ./deploy.logs/'+remote_host+'/')\n\n # now untar. if deploy_main.verbosity >=1 then we'll be verbose\n if deploy_main.verbosity >=1:\n command_list.append('tar -xvvf '+remote_host+'.tgz')\n else:\n command_list.append('tar -xf '+remote_host+'.tgz')\n\n # not make command string by joining the list elements with '; ' \n command_string = '; '.join(command_list)\n\n # execute string\n out, err, retvalue = deploy_helper.shellexec2(command_string)\n\n deploy_logging.log('Downloading logs', 'Logs from '+remote_host+' are ready')\n\n # we no longer need the tar file, just hogging up space\n os.remove('./deploy.logs/'+remote_host+'/'+remote_host+'.tgz')\n\n except Exception, e:\n if deploy_main.verbosity == 2:\n # Only log if we error and need to narrow this down. otherwise,\n # it gets really spammy. \n deploy_logging.logerror(remote_host+\": Some kind of err in remote_get_log. 
(\"+\\\n remote_host+\") , error:\"+str(e)+\")\")\n return", "def download_logs(log_dir=\"/var/log/jmeter\"):\n _setup_host()\n local('mkdir %s' % env.host)\n run('gzip -9 %s/*.jtl' % log_dir)\n get('%s/*.gz' % log_dir, '%s/' % env.host)\n run('rm %s/*.gz' % log_dir)", "def FetchLogs(opener, api_host):\r\n fetch_dir = os.path.join(options.options.fetch_dir or os.getcwd(), 'logs')\r\n logging.info('writing logs for user %s to %s' %\r\n (options.options.user_id, fetch_dir))\r\n _InitDir(fetch_dir)\r\n\r\n io_loop = ioloop.IOLoop.instance()\r\n http_client = httpclient.AsyncHTTPClient(io_loop)\r\n log_urls = GetLogUrls(opener, api_host)\r\n with util.Barrier(io_loop.stop) as b:\r\n for log_record in log_urls:\r\n output_file = os.path.join(fetch_dir, log_record['filename'])\r\n\r\n # Check whether the output file already exists; skip if yes\r\n # and --cache_logs is true.\r\n if os.path.exists(output_file) and options.options.cache_logs:\r\n log_record['local_filename'] = output_file\r\n continue\r\n\r\n _InitDir(os.path.dirname(output_file))\r\n try:\r\n _FetchLog(http_client, log_record['url'], output_file, b.Callback())\r\n log_record['local_filename'] = output_file\r\n except:\r\n logging.exception('failed to fetch %s' % log_record['url'])\r\n log_record['local_filename'] = None\r\n\r\n io_loop.start()\r\n logging.info('fetched %d log files for user %s' %\r\n (len(log_urls), options.options.user_id))\r\n return log_urls", "def download_files(args):\n with get_ssh_client(args) as c:\n found = remote_find_files(c, args)\n with c.open_sftp() as sftp:\n for filename in found:\n remotepath = os.path.join(args.remote_dir, filename)\n localpath = os.path.join(args.src, filename)\n silentremove(localpath)\n sftp.get(remotepath, localpath)\n logger.info(\"Download backup files successed\")", "def logs():\n puts(yellow(\"[Reading log-file]\"))\n run(\"cat %s\" % REMOTE_ERR_FILE)\n run(\"cat %s\" % REMOTE_LOG_FILE)", "def _download(self, url, rel_path):\n \n tmp_dir = \"TMP_DIR=`mktemp -d`;\"\n wget_cmd = [ tmp_dir, \"wget\", \"-nv\", \"-O\", \"$TMP_DIR/archive.tgz\", url, \";\" ]\n wget_cmd = ' '.join(wget_cmd)\n \n mkdir_cmd = \"mkdir -p %s ;\" % (\"./remote_resources/\" + rel_path)\n \n cleandir_cmd = \"rm -Rf %s/* ;\" % (\"./remote_resources/\" + rel_path)\n \n untar_cmd = [ \"tar\", \"xf\", \"$TMP_DIR/archive.tgz\", \"-C\", \"./remote_resources/%s\" % rel_path, \";\" ]\n untar_cmd = ' '.join(untar_cmd)\n \n remove_cmd = \"rm -Rf $TMP_DIR;\"\n \n return self._ssh(' '.join([ wget_cmd, mkdir_cmd, cleandir_cmd, untar_cmd, remove_cmd ]))", "def sftp_download(host, port, username, password, local_file_dir, host_file_name, host_dir):\r\n sf = Sftp(host, port, username, password)\r\n try:\r\n if '.*' in host_file_name:\r\n sf.download_dir(host_dir, local_file_dir, host_file_name)\r\n else:\r\n sf.download_file(local_file_dir, '%s/%s' % (host_dir, host_file_name))\r\n finally:\r\n sf.close()", "def sync_log(self):\r\n print('Synchronizing log files...')\r\n\r\n # Connect with SSH-PubKey and synchronize files\r\n subprocess.run(\r\n ['scp',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}:/home/robot/.bin/*_tmux.log'.format(self.settings['ip']),\r\n self.log_path\r\n ])\r\n\r\n print('Done.')", "def remote_fetch(ip_addr, username, cmd):\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n ssh.connect(ip_addr, username=username)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n return stdout.readlines()", "def download(self, remotepath, 
localpath):\n sftp = self.connection.open_sftp()\n if isinstance(remotepath, str):\n sftp.get(remotepath, localpath)\n else:\n for path in remotepath:\n filename = os.path.split(path)[-1]\n sftp.get(path, localpath + \"/\" + filename)\n sftp.close()", "def _fetch_remote_data(remote, download_dir, data_home):\n\n file_path = '{}.zip'.format(download_dir)\n if not Path(file_path).exists():\n urllib.request.urlretrieve(remote.url, file_path)\n _unzip_dataset( file_path, data_home)", "def sftp_download_latest_file(self, host, port, usr, pwd, remote, local=None, **kwargs):\n filefilter = kwargs.get('filter')\n with pysftp.Connection(host, username=usr, password=pwd, port=int(port)) as self.sftp:\n try:\n self.sftp.chdir(remote)\n self._log.debug('sftp walking to %s', remote)\n except (IOError, OSError):\n self._log.debug(\"sftp cd to dir '%s' failed!\", remote)\n\n sftp_curr_dir = self.sftp.getcwd()\n\n statfiles = list(\"%s/%s\" % (sftp_curr_dir, filename) for filename in self.sftp.listdir(sftp_curr_dir) if re.search(filefilter, filename))\n sorted_statfiles = list(sorted([filename for filename in statfiles], key=self.mtime))\n try:\n target_file = sorted_statfiles[-1]\n except (IndexError, NameError):\n self._log.debug(\"'%s' not found!\", filefilter)\n\n if local is None:\n local = os.getcwd()\n if '.' not in os.path.basename(local):\n local = os.path.join(local, target_file.split('/')[-1])\n if os.path.exists(os.path.split(local)[0]) is False:\n os.makedirs(os.path.split(local)[0])\n\n self.sftp.get(target_file, local)\n self.sftp.close()", "def download_lftp_dat_files():\n lftp_access = {\n 'ftp_address' : 'smuc.st-and.ac.uk',\n 'ftp_subdir' : '/pub/bodc',\n 'ftp_user' : '',\n 'ftp_password' : '',\n 'ftp_exclude_dir' : ['corrections', 'exports', 'test'],\n 'lftp_options' : '--only-newer --exclude-glob *.dmp.gz',\n 'output_dir' : OUTPUT_DIR,\n }\n\n global lftp\n lftp = LFTPSync()\n\n if os.path.exists(os.path.join(OUTPUT_DIR, 'lftp_mirror.log')):\n return lftp.list_new_files_path_previous_log(lftp_access)\n\n lftp.lftp_sync(lftp_access)\n return lftp.list_new_files_path(check_file_exist=True)", "def list_remote_files(remote_path='/home/dfci/media/ssd/Conditionnement/'):\n ls = subprocess.Popen(['ssh', 'dfci@dfci', 'ls', remote_path ], \n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n universal_newlines=True) # deals with Python3 string\n out, err = ls.communicate()\n remote_file_list = out.split(sep='\\n')\n remote_file_list.pop() # The last one is a dummy ''\n remote_file_list = sorted(remote_file_list, reverse=True) # Most recent first\n return remote_file_list", "def collect_log(self):\n path = 'cluster_test_%d/*.log' % self.address[1]\n src = \"%s@%s:%s\" % (self.user_name, self.address[0], path)\n dest = console_config._log_path\n self._rsync(src, dest)", "def list_remote_files():\n url = options.root_url + '?method=list&depth=0'\n response = urllib2.urlopen(AuthRequest(url))\n options.listing = json.loads(response.read(), object_hook=as_datetime)", "def get_metrics_file_remote(host, HBase_loc, filename):\n HOST = host\n COMMAND = \"\"\"cd %s ; export JAVA_HOME=/usr ; ./hbase shell ./scan > ~/%s\n \"\"\" % (HBase_loc, filename)\n ssh = subprocess.Popen([\"ssh\", \"%s\" % HOST, COMMAND],\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n result = ssh.stdout.readlines()\n os.system(\"scp \" + os.environ[\"USER\"] + \"@\" + host + \":\" + filename + \" .\")\n return str(os.environ[\"PWD\"] + \"/\" + filename)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make an hashable representation of an object for hashlib
def hashable(obj): return bytes(str(obj), "utf-8")
[ "def hashable(obj):\n if not obj.__hash__:\n return str(obj)\n return obj", "def hash(obj):\n \n import hashlib\n import pickle\n \n sha = hashlib.sha256()\n sha.update(pickle.dumps(obj))\n \n return sha.hexdigest()", "def hash_object(cls, serialized_obj):\n hash_bytes = sha256(serialized_obj).digest()\n hexdigest = hexlify(hash_bytes[:Sha256DictStore.HASH_SIZE_BYTES])\n return hexdigest.decode('utf-8')", "def make_hash(o):\n if isinstance(o, (set, tuple, list)):\n return hash(tuple([make_hash(e) for e in o]))\n elif not isinstance(o, dict) and o.__class__.__module__ == 'builtins':\n return hash(o)\n elif not isinstance(o, dict):\n return make_hash(o.__dict__)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n return hash(tuple(frozenset(sorted(new_o.items()))))", "def deep_hash(obj):\n pass", "def pickle_and_hash(obj: Any) -> str:\n try:\n s = dill.dumps(obj)\n except:\n raise UnpickleableError()\n\n return hashlib.sha512(s).hexdigest()", "def make_hash(o):\n\n if isinstance(o, (set, tuple, list)):\n\n return hash( tuple([make_hash(e) for e in o]) )\n\n elif not isinstance(o, dict):\n\n return hash(o)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n\n return hash(tuple(frozenset(sorted(new_o.items()))))", "def compute_obj_hash(obj) -> str:\n str_obj = json.dumps(obj, cls=BestEffortJSONEncoder, sort_keys=True)\n return hashlib.sha256(str_obj.encode('utf-8')).hexdigest()", "def calc_hash_digest(obj):\r\n # Get string representation\r\n string = repr(obj).encode()\r\n # Create hash digest\r\n hash = hashlib.md5()\r\n hash.update(string)\r\n digest = hash.hexdigest()\r\n return digest", "def object_sha256(obj):\n\n return hashlib.sha256(json.dumps(obj).encode()).hexdigest()", "def make_hash(o):\n if type(o) == DictProxyType:\n o2 = {}\n for k, v in o.items():\n if not k.startswith(\"__\"):\n o2[k] = v\n o = o2\n if isinstance(o, (set, tuple, list)):\n return tuple([make_hash(e) for e in o])\n elif not isinstance(o, dict):\n return hash(o)\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n return hash(tuple(frozenset(sorted(new_o.items()))))", "def structural_hash(obj: object) -> bytes:\n hasher = hashlib.blake2b()\n if isinstance(obj, (int, str, float, PurePath)):\n hasher.update(bytes(\"P\" + str(obj), \"utf-8\"))\n elif dataclasses.is_dataclass(obj):\n fields = dataclasses.fields(obj)\n hasher.update(bytes(f\"O{len(fields)}\\x20\", \"utf-8\"))\n for field in sorted(fields, key=lambda x: x.name):\n if not field.metadata.get(\"nohash\"):\n hasher.update(bytes(f\"F{len(field.name)}\\x20{field.name}\", \"utf-8\"))\n hasher.update(structural_hash(getattr(obj, field.name)))\n elif isinstance(obj, (collections.abc.Sequence, collections.abc.Set)):\n hasher.update(bytes(f\"L{len(obj)}\\x20\", \"utf-8\"))\n for member in obj:\n child_hash = structural_hash(member)\n hasher.update(bytes(f\"E{len(child_hash)}\\x20\", \"utf-8\"))\n hasher.update(child_hash)\n elif isinstance(obj, collections.abc.Mapping):\n hasher.update(bytes(f\"M{len(obj)}\\x20\", \"utf-8\"))\n for key, member in obj.items():\n child_hash = structural_hash(member)\n hasher.update(\n bytes(f\"E{len(key)}\\x20{key}\\x20{len(child_hash)}\\x20\", \"utf-8\")\n )\n hasher.update(child_hash)\n elif isinstance(obj, enum.Enum):\n hasher.update(bytes(str(obj), \"utf-8\"))\n elif obj is None:\n hasher.update(b\"N\")\n else:\n raise TypeError(\"Unhashable type\", obj)\n\n return hasher.digest()", "def get_object_fingerprint(obj, 
hash_method='md5'):\n method = getattr(hashlib, hash_method)\n return method(pickle.dumps(obj)).hexdigest()", "def toHashable(self) -> str:\r\n\r\n return self.toHashBase().encode('utf-8')", "def __hash__(self):\n hashable = tuple(self.pandas_object.values.tobytes())\n if isinstance(self.pandas_object, pd.DataFrame):\n hashable += tuple(self.pandas_object.columns)\n else:\n hashable += tuple(self.pandas_object.name)\n return hash(hashable)", "def object_hash(obj):\n try:\n code = obj.__code__.co_code\n except AttributeError:\n attrlist = [getattr(obj, name) for name in dir(obj)\n if not name.startswith('__')]\n codelist = [attr.__code__.co_code for attr in attrlist\n if hasattr(attr, '__code__')]\n code = b','.join(codelist)\n digest = hashlib.md5(code).hexdigest()\n return digest", "def get_hash(objects):\n digest = hashlib.md5()\n\n for some_object in objects:\n digest.update(str(some_object))\n\n return digest.hexdigest()", "def object_sha1(obj):\n\n return hashlib.sha1(json.dumps(obj).encode()).hexdigest()", "def __hash__(self):\n return hash(self.serialize(\"glycoct\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure row data is valid This currently just checks that 2D arrays match the variable components.
def validate_row(row): subkeys = [INDEP, DEP] for subkey in subkeys: for k, v in row[subkey].items(): if v is None: continue if np.ndim(v) > 1: assert np.ndim(v) == 2 if 1 not in np.shape(v): assert isinstance(k, variable.Variable) assert k.components is not None assert len(k.components) in np.shape(v)
[ "def valid_data(self):\n return len(self.input_row_positions) > 1", "def validate(self, row):\n raise NotImplementedError", "def _check_input_data(self, data):\n\n if not isinstance(data, np.ndarray):\n raise TypeError('Input data must be a numpy array.')", "def validate_matrix(self, data, **kwargs):\n validate_matrix(data.get(\"params\"))", "def test_validate_row():\n chunksize, raw_puzzle = get_valid_incomplete()\n puzzle = solver.Puzzle(chunksize, raw_puzzle)\n assert puzzle.validate_row(0) == True\n chunksize, raw_puzzle = get_valid_complete()\n puzzle = solver.Puzzle(chunksize, raw_puzzle)\n assert puzzle.validate_row(0) == True\n chunksize, raw_puzzle = get_invalid_incomplete()\n puzzle = solver.Puzzle(chunksize, raw_puzzle)\n assert puzzle.validate_row(0) == False", "def verify_vertex_values(self):\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] not in range(1, 10) and self.grid[line][row] is not None:\n raise VertexValueError()", "def test_is_data_valid(self):\r\n \r\n valid_array = np.full((12,), 1.0)\r\n invalid_array = np.full((12,), np.NaN)\r\n self.assertTrue(utils.is_data_valid(valid_array))\r\n self.assertFalse(utils.is_data_valid(invalid_array))\r\n self.assertFalse(utils.is_data_valid(['bad', 'data']))\r\n self.assertTrue(utils.is_data_valid(np.ma.masked_array(valid_array)))", "def test_validate_sudoku_violating_row_uniqueness():\n sudoku_values = load_given_sudoku_answer()\n\n # # randomly generate a row index and two column indices\n row = np.random.randint(9)\n column1 = np.random.randint(9)\n column2 = np.random.randint(9)\n while column2 == column1: # make column1 != column2\n column2 = np.random.randint(9)\n sudoku_values[[row], [column2]] = sudoku_values[[row], [column1]]\n\n assert validate_sudoku(sudoku_values) == False", "def verify_grid_row_data(self, row_data):\n return self.verify_grid_row_details(self.vendors_div_id, row_data)", "def _check_multiple_input(self, data, strict=True):\n if (not strict or self.system.num_arrays == 1) \\\n and not isinstance(data, tuple):\n return\n if strict and not isinstance(data, tuple):\n raise TypeError(\"Input must be a tuple of length \"\n f\"{self.system.num_arrays}, \"\n f\"got {type(data).__name__}.\")\n if len(data) != self.system.num_arrays:\n raise ValueError(\"Input must be same length as number of Arrays \"\n f\"in system. 
Expected {self.system.num_arrays}, \"\n f\"got {len(data)}.\")\n _all_same_index(data)", "def validate_data(self):\n if self.type == 'grid':\n for layout in self.data:\n grid = layout.get('grid')\n if not grid:\n raise ChartError(\n \"Layout grid setting must be set \"\n \"if layout type is 'grid'\")\n\n if not grid.get('location'):\n raise ChartError(\n \"Layout grid location must be set \"\n \"if layout type is 'grid'\")\n\n if len(grid['location']) != 2:\n raise ChartError(\"Layout grid location length must be 2\")", "def _is_valid_row(self, row_index):\n row = self._data[row_index, :]\n return Sudoku._is_valid_data(row)", "def verify(self) :\n \n if self.data_set :\n self.dims = sp.shape(self.data)\n else :\n raise RunTimeError('Data needs to be set before running verify()')\n\n # Will delete these keys if they are found in 'field', then see if any\n # are left over.\n axes_keys = self.field_axes.keys()\n format_keys = self.field_formats.keys()\n for field_name in self.field.iterkeys() :\n # Check for keys in fields and not in field_axes, the oposite is\n # done outside this loop.\n if ((not self.field_axes.has_key(field_name)) or \n (not self.field_formats.has_key(field_name))) :\n raise ce.DataError(\"Dictionaries 'field', 'field_axes' and \"\n \"field_formats must have the same keys.\")\n axes_keys.remove(field_name)\n format_keys.remove(field_name)\n # Check all the axes\n axes = self.field_axes[field_name] # for saving keystrokes only\n self._verify_single_axis_names(axes)\n # Check the shape.\n field_data_shape = sp.shape(self.field[field_name])\n for ii in range(len(axes)) :\n axis_ind = list(self.axes).index(axes[ii])\n if field_data_shape[ii] != self.dims[axis_ind] :\n raise ce.DataError(\"The shape of the data in one of the \"\n \"fields is incompatible with the shape \"\n \"of the main data. field: \"+field_name)\n # Check the format string.\n # TODO: This should do something better than just check that there\n # is a string.\n if not type(self.field_formats[field_name]) is str :\n raise ce.DataError(\"The field_format must be type str. 
field: \"\n + field_name )\n # The opposite of the first check in the loop.\n if len(axes_keys) or len(format_keys) :\n raise ce.DataError(\"Dictionaries 'field', 'field_axes' and \"\n \"field_formats must have the same keys.\")", "def _isvalid(self, data):\n if data is None:\n return False\n elif isinstance(data, (list,tuple)):\n if len(data) <= 0:\n return False\n else:\n return True\n elif isinstance(data, (np.ndarray)):\n if data.size <= 0:\n return False\n else:\n return True\n elif not data:\n return False\n else:\n return True", "def check_data(data, fields):\n\n # First check if all the fields (including ID) are found in all entries\n for row in data:\n row_cols = list(row.keys())\n missing_fields = [\n field for field, col_name in fields.items() if col_name not in row_cols\n ]\n if missing_fields:\n raise ValueError(f\"One or more mandatory fields missing in {row}\")\n\n # Collect all ids and make sure they are unique\n row_ids = {row[fields[\"id\"]] for row in data}\n if len(row_ids) != len(data):\n raise ValueError(\"Data has non-unique IDs.\")", "def _check_input(data):\n T = len(data)\n data = np.array(data)\n dim = data[0].size if not np.isscalar(data[0]) else 1\n return data, T, dim", "def _check_data_valid(self):\n\n is_valid = (sum(~np.isnan(self.data).flatten()) > 0 and self.data.flatten().sum() != 0)\n if not is_valid:\n raise FITSException(f\"No data in {self.survey}\")", "def _is_valid_row_num(self, rowNum: int) -> bool:\r\n return 0 <= rowNum < len(self.field)", "def __is_row_valid(self, number, row):\n row_of_numbers = self.player_input[row]\n if number in row_of_numbers and number != 0:\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a dictionary of dependent data
def add_dict(self, indep, dep): dfull = {IND: len(self), INDEP: indep.copy(), DEP: dep} validate_row(dfull) check_objects(dfull) if settings.CONVERT_SCALAR_ARRAYS: scalarise(dfull) if settings.PRINT_UPDATES: print(self.show([dfull])) self.append(dfull) self._combine(dfull)
[ "def add_dict(dictionary, context_name):", "def addDependent(location):", "def __add__(self, dt:DictTensor)->DictTensor:\n assert dt.device()==self.device()\n for k in dt.keys():\n assert not k in self.variables, (\n \"variable \" + k + \" already exists in the DictTensor\"\n )\n v = {**self.variables, **dt}\n return DictTensor(v)", "def add_data_paths(self, path_dict: dict):\n self.data_dict.update(path_dict)", "def setup_data_branch(data_dict, directives_dict):\n for key in directives_dict:\n if key not in data_dict:\n data_dict[key] = {}\n return data_dict", "def _initDependancies(self):\n table = CataInfo.keyword_dependancies()\n for key, value in table.items():\n item = self.findItemByPath(key)\n if item is not None:\n deps = value if isinstance(value, list) else [value]\n for dep in deps:\n depitem = self.findItemByPath(dep)\n if depitem is not None:\n item.appendDependItem(depitem)", "def add_dep(self, attr_set):\n for attr in attr_set:\n self._dic[attr].add(attr_set)", "def add_dependency(self, gov_num, dep_num, label=None):\n _idx_gov = self.idx[gov_num]\n _idx_dep = self.idx[dep_num]\n self.heads[_idx_dep] = _idx_gov\n self.labels[_idx_dep] = label\n self.deps[_idx_gov].append((label, _idx_dep))", "def add_dependency(self, dep):\n \n if dep == OrderedDict(): return False\n dep_key, dep_dict = dep.popitem()\n graph_list = self.get_dependencies(dep_key, self.graph, list())\n if graph_list != None:\n if graph_list != list():\n for graph in graph_list:\n graph[dep_key] = dep_dict\n else:\n self.graph[dep_key] = dep_dict\n return True\n return False", "def addData(self, series_dict):\n \n for series_name, series_data in series_dict.items():\n self.series.append(series_name)\n self.data[series_name] = series_data", "def add_feature(num_feat, den_feat, new_feat):\n for name in data_dict:\n \n data_point = data_dict[name]\n num = data_point[num_feat]\n den = data_point[den_feat]\n ratio = compute_ratio(num, den)\n data_point[new_feat] = ratio", "def _add(self, example: Dict[str, Any]) -> Dict[str, Any]:\n raise NotImplementedError", "def append_dependency(current_unit, keyword, header_tag):\n if current_unit == []:\n if keyword == 'excludearch':\n current_unit = {keyword:[header_tag['content']]}\n else: \n current_unit = {keyword:{'dependencies':[{'name':header_tag['content']}]}}\n else:\n found = False\n for single_record in current_unit:\n if keyword in single_record:\n found = True\n if keyword == 'excludearch':\n single_record[keyword].append(header_tag['content'])\n else:\n if 'dependencies' in single_record[keyword]:\n single_record[keyword]['dependencies'].append({'name':header_tag['content']})\n else:\n single_record[keyword]['dependencies'] = [{'name':header_tag['content']}]\n\n if found is False:\n if keyword == 'excludearch':\n current_unit.append({keyword:[header_tag['content']]})\n else:\n current_unit.append({keyword:{'dependencies':[{'name':header_tag['content']}]}})\n\n return current_unit", "def Add_Variables(self, variable_dict : dict):\n self.variable_bin = {**self.variable_bin, **variable_dict}", "def createDependencyRule(self, label=\"dependency\", dict={}, depends=\"\"):\n self.rule_data += '{\"label\":\"%s\", \"rule\":\"dependency\", \"dict\":%s, \"depends\":\"%s\"},\\n' %(label,str(dict).replace(\"'\",\"\\\"\"), depends)", "def add_to_aux_data(self, new_data=None):\n self.aux_data.update(new_data or {})", "def add(self, curve_name, x, y, z):\n if curve_name not in self.data.keys():\n self.data[curve_name] = dict()\n if z not in self.data[curve_name].keys():\n 
self.data[curve_name][z] = dict()\n if y not in self.data[curve_name][z]:\n self.data[curve_name][z][y] = list()\n self.data[curve_name][z][y].append(x)", "def add(self,x,y):\n # assert that independent variable is as long as each of the\n # dependent variables\n for ch in self.chs:\n assert len(x) == len(y[ch-1])\n apply(Storage.add, (self,[x,y]))", "def _add_dependency(self, dep):\n self.dependency.append(dep)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of dictionaries that only contain values for keys
def exclusively(self, keys, lst=None): minimal = self.minimal() if lst is None else lst def make_exclusive(d, keys): dct = {} for k in keys: if k in d: dct[k] = d[k] else: dct[k] = -999 return dct lst = [] for d in minimal: dct = make_exclusive(d, keys) if len(dct) > 0: lst.append(dct) return lst
[ "def filter_dictionary(dictionary, keys):\n return {key: dictionary[key] for key in dictionary if key in keys}", "def minidict(self):\n return {k: v for k, v in self.dict().items() if v is not None}", "def filter_keys_out(items, keys):\n for key, value in items.items():\n if key in keys:\n continue\n yield key, value", "def _pick(d, keys):\n return {k: v for k, v in d.items() if k in keys}", "def _filter_dict(\n param_dict: TParameterization, subset_keys: List[str]\n) -> TParameterization:\n return {k: v for k, v in param_dict.items() if k in subset_keys}", "def dicts(self, value=None):\n if value is None:\n return [dict(zip(self.keys, line)) for line in self.data]\n return [dict(zip(self.keys, line)) for line in self.data if value in line]", "def select_keys(my_dict: Dict, keys: Sequence) -> Dict:\n keyset = set(keys)\n return {k: v for k, v in my_dict.items() if k in keyset}", "def filter_dict(iterable, keys):\n if type(keys) is not list:\n keys = [keys]\n for i in iterable:\n try:\n d = {}\n for a in keys:\n try:\n d[a] = i[a]\n except KeyError:\n pass\n if d != {}:\n yield d\n except Exception:\n pass", "def filter_dict(dict, filter=[]):\n return {key: dict[key] for key in filter}", "def filter_dic_by_keys(dic,allowed_keys):\n new_dic = {}\n for key in dic:\n if key in allowed_keys:\n new_dic[key] = dic[key]\n return new_dic", "def filter_keys_in_set(ds, keys):\n logger.info(\"For each element in the dataset, keeping only values with keys: %s.\", ', '.join(keys))\n\n def filter_keys(x):\n return {k: v for k, v in x.items() if k in keys}\n\n return ds.map(filter_keys, num_parallel_calls=TF_AUTOTUNE)", "def _filter_dict(d, keys):\n if keys is None:\n return d\n else:\n keys = set(keys)\n present_keys = keys.intersection(d.keys())\n missing_keys = keys.difference(d.keys())\n res = {k: d[k] for k in present_keys}\n if len(missing_keys) != 0:\n warnings.warn(\"Missing expected keys: {}\".format(missing_keys), stacklevel=2)\n return res", "def filtering(d, all_keys=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]):\n keys = list(d.values())\n\n # check which of `all_keys` are absent in `keys` and return them as a list (but always include 0)\n l = [k for k in all_keys if ((k not in keys) | (k == 0))]\n return l", "def without_empty_values(input_dict):\n empty_values = ([], (), {}, \"\", None)\n\n return {key: value for key, value in input_dict.items() if value not in empty_values}", "def filter_dict_by_keys(d, reject_keys):\n return dict((k, v) for (k, v) in d.iteritems() if k not in reject_keys)", "def filter_values(function, dictionary):\n return {k: v for k, v in dictionary.items() if function(v)}", "def subset(self, keys, *args):\n return dict_util.subset(self, keys, *args)", "def dict_filter(d, keep):\n\tassert type(keep) is list\n\tif isinstance(d, dict):\n\t\t#recursively call for nested dicts\n\t\treturn { key:dict_filter(value, keep) for key,value in d.iteritems() if key in keep }\n\treturn d", "def remove_empty_list(dictionary):\n\n return {k: v for k, v in dictionary.items() if v != []}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge this Box with one or more other Box instances
def merge(self, box, in_place=True): if in_place: self._merge(box) else: base = self.copy() base._merge(box) return base
[ "def merge(self, other_box_list):\n self.boxes.append(other_box_list.boxes)\n self.filter()", "def merge(self, other):\n # merge containers with the same name\n for container in self._container_child_containers:\n lookup = {obj.name: obj for obj in getattr(self, container)}\n ids = [id(obj) for obj in getattr(self, container)]\n for obj in getattr(other, container):\n if id(obj) in ids:\n continue\n if obj.name in lookup:\n lookup[obj.name].merge(obj)\n else:\n lookup[obj.name] = obj\n ids.append(id(obj))\n getattr(self, container).append(obj)\n\n # for data objects, ignore the name and just add them\n for container in self._data_child_containers:\n objs = getattr(self, container)\n lookup = {obj.name: i for i, obj in enumerate(objs)}\n ids = [id(obj) for obj in objs]\n for obj in getattr(other, container):\n if id(obj) in ids:\n pass\n elif hasattr(obj, 'merge') and obj.name is not None and obj.name in lookup:\n ind = lookup[obj.name]\n try:\n newobj = getattr(self, container)[ind].merge(obj)\n getattr(self, container)[ind] = newobj\n except NotImplementedError:\n getattr(self, container).append(obj)\n ids.append(id(obj))\n else:\n lookup[obj.name] = obj\n ids.append(id(obj))\n getattr(self, container).append(obj)\n obj.set_parent(self)\n\n # use the BaseNeo merge as well\n super().merge(other)", "def append(self, other):\n for i in other.blocks:\n self.blocks.append(i)", "def union(self, other) -> 'BBox':\n\n\t\tif isinstance(other, Point):\n\t\t\tself.pMin.x = min(self.pMin.x, other.x)\n\t\t\tself.pMin.y = min(self.pMin.y, other.y)\n\t\t\tself.pMin.z = min(self.pMin.z, other.z)\n\t\t\tself.pMax.x = max(self.pMax.x, other.x)\n\t\t\tself.pMax.y = max(self.pMax.y, other.y)\n\t\t\tself.pMax.z = max(self.pMax.z, other.z)\n\n\t\telif isinstance(other, BBox):\n\t\t\tself.pMin.x = min(self.pMin.x, other.pMin.x)\n\t\t\tself.pMin.y = min(self.pMin.y, other.pMin.y)\n\t\t\tself.pMin.z = min(self.pMin.z, other.pMin.z)\n\t\t\tself.pMax.x = max(self.pMax.x, other.pMax.x)\n\t\t\tself.pMax.y = max(self.pMax.y, other.pMax.y)\n\t\t\tself.pMax.z = max(self.pMax.z, other.pMax.z)\n\n\t\telse:\n\t\t\traise TypeError('unsupported union operation between\\\n\t\t\t\t\t\t\t{} and {}'.format(self.__class__, type(other)))\n\t\treturn self", "def merge(self, other):\n raise Exception()", "def merge(self, other):\n self.add_nodes( other.nodeList )\n \n for node in other.nodeList:\n self.add_edges( other.edgesFrom(node) )", "def combine_boxes(*args: Box) -> Box:\n assert all(list(space.shape) == [1] for space in args)\n lows = np.asarray([space.low[0] for space in args])\n highs = np.asarray([space.high[0] for space in args])\n return Box(lows, highs)", "def merge(self, other):\n self._moments = merge_pqc([self, other])._moments\n self._parameters = sp.symarray(self.parameter_symbol, len(self.symbols))\n if self.flatten_circuit:\n self.flatten()", "def multiadd(self, other):\n\t\tself.points.update(other.points)\n\t\tself.updMean()\n\t\tself.updVariance()\n\t\tself.updIB()\n\t\tself.updOB()", "def merge(self, other: 'Group'):\n self.ids.extend(other.ids)\n self.params.update(other.params)\n self.data.update(other.data)\n l0, l1 = len(self.results), len(other.results)\n self.results.update(other.results)\n assert len(self.results) == l0+l1, \"Some results were overwritten by merging!\"", "def __add__(self, other):\n if type(other) == type(self):\n clone = Vector2(*self)\n clone[0] += other[0]\n clone[1] += other[1]\n \n return clone\n \n elif type(other) == Rect:\n clone = Rect(other)\n clone.left += self[0]\n 
clone.top += self[1]\n \n return clone", "def extend(self, *objects):\n self.insert(len(self.nets), objects)", "def add_box(self, box):\n mz_from = box.from_mz\n mz_to = box.to_mz\n rt_from = box.from_rt\n rt_to = box.to_rt\n self.boxes_mz.addi(mz_from, mz_to, box)\n self.boxes_rt.addi(rt_from, rt_to, box)", "def aggregate(self):\n for sub_box_label, geotweet_set in self.sub_box_dict.iteritems():\n for up_box_label in self.upscale_boxes(sub_box_label):\n try:\n self.box_dict[up_box_label].update(geotweet_set)\n except KeyError:\n self.box_dict[up_box_label] = set(geotweet_set)\n try:\n self.box_unique_days[up_box_label].update(\n self.minor_box_unique_days[sub_box_label]\n )\n except KeyError:\n self.box_unique_days[up_box_label] = set(\n self.minor_box_unique_days[sub_box_label]\n )\n self.box_dict_out_of_sync = False\n # box_dict has many candidate boxes with overlaps.", "def _merge(self, other):\n self.plots += other.plots\n return self.plots", "def __add__(self, other):\n mesh = deepcopy(self)\n mesh.MergeWith(other)\n return mesh", "def build_boxes(self):\n for index in self.box_space.points:\n if self.rank_of_box[index] == self.my_rank:\n self.my_boxes.append(Box(self, index))", "def append(self, *args, **kwargs):\n # ๆ”ฏๆŒ็ฌฌไธ€ไธชๅ‚ๆ•ฐ่งฃๆž\n arg = args[0]\n # ไธ้œ€่ฆๅค„็†\n if not arg:\n return\n\n # ๅฆ‚ๆžœ็ฑปๅž‹ไธๅŒน้…๏ผŒๅผบๅˆถ่ฝฌๆข\n if not isinstance(arg, BBox):\n # logging.warning(\"{}ไธๆ˜ฏBBox็ฑปๅž‹๏ผŒๅฐ่ฏ•ๅผบๅˆถ่ฝฌๆข๏ผŒๅฏ่ƒฝๅ‡บ้”™\".format(arg))\n arg = BBox(arg, iou_thresh=self.iou_thresh, intersection_thresh=self.intersection_thresh, s_thresh=self.s_thresh)\n\n super(BBoxes, self).append(arg)", "def mergeBboxes(bboxes, bboxes_prev):\n bboxes_merged = deepcopy(bboxes)\n for bbox in bboxes_prev:\n is_exist = False\n for bbox_merged in bboxes_merged:\n if bbox.object_id == bbox_merged.object_id:\n is_exist = True\n bbox_merged.visible = bbox.visible\n break\n if not is_exist:\n bboxes_merged.append(bbox)\n return bboxes_merged" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return unique key values
def unique(self, key, lst=None): d = self.find(key, lst) vals = set(d.values()) return sorted(list(vals))
[ "def unique_values(self):\n for key in self.metadb.unique_values():\n yield key, self.datadb[key]", "def distinct(self, key):\n return self.database.command({'distinct': self.name,\n 'key': key})['values']", "def unique_keys(self) -> List[str]:\n match = re.search(r'/\\*.*unique key\\s*\\(([^\\()]*)\\).*\\**/', self.query,\n re.DOTALL | re.IGNORECASE)\n if match is not None:\n unique_keys = [k.strip() for k in match.group(1).split(',')]\n else:\n unique_keys = []\n return unique_keys", "def unique_val():\n try: \n data = [{\"V\":\"S001\"}, {\"V\": \"S002\"}, {\"VI\": \"S001\"}, {\"VI\": \"S005\"},\n {\"VII\":\"S005\"}, {\"V\":\"S009\"},{\"VIII\":\"S007\"}]\n print(\"Original List: \",data)\n #loop itrate here\n u_value = set( val for dic in data for val in dic.values())\n print(\"Unique Values: \",u_value) \n except ValueError as e:\n logger.error(\"Not find the dictnary\"+str(e))", "def _findUniqueMappingKeys(mapping):\n\n uniqueMappingKeys = set()\n for key, entries in viewitems(mapping):\n if len(entries) == 1:\n uniqueMappingKeys.add(key)\n return uniqueMappingKeys", "def unique(self):\n dict_util.unique(self)", "def unique_vals(client, proj, dataset, table, col_name):\n if not client.check_table(dataset, table):\n return []\n res = run_bq_query(client, \"SELECT %s FROM [%s:%s.%s] GROUP BY %s ORDER BY %s\" % (col_name, proj, dataset, table, col_name, col_name), 120)\n return [rec[col_name] for rec in res]", "def _findUniqueMappingValues(mapping):\n uniqueMappingValues = set()\n for entries in viewvalues(mapping):\n if len(entries) == 1:\n uniqueMappingValues.update(entries)\n return uniqueMappingValues", "def unique(self, data: list, primary_key) -> list:\n unique_ids = set()\n unique_items = []\n for item in data:\n if item[primary_key] not in unique_ids:\n unique_ids.add(item[primary_key])\n unique_items.append(item)\n return unique_items", "def unique_keys(self):\n keys = set()\n for d in self:\n for key in d.keys():\n keys.add(key)\n return keys", "def _make_data_unique(collected_data: List[Dict]) -> List[Dict]:\n return list(dict(x) for x in set(tuple(x.items()) for x in collected_data))", "def _make_key(args, kwargs):\n set_vals = []\n\n for index, value in enumerate(args):\n set_vals.append(_make_unique(index, value))\n for key, value in kwargs.items():\n set_vals.append(_make_unique(key, value))\n\n return frozenset(set_vals)", "def get_vals(key, source=None):\n if source is None:\n source = full\n return np.unique(source[key])", "def uniqueify(seq, key):\n keys = set([])\n nodups = []\n for elem in seq:\n if getattr(elem, key) not in keys:\n nodups.append(elem)\n keys.add(getattr(elem, key))\n return nodups", "def _unique_values(self):\n fields = {}\n for key in self._data.keys():\n if self._has_field(key):\n field = self._get_field(key)\n if field.unique:\n fields[key] = self._get_value(key)\n for key in (field.unique_with or []):\n fields[key.replace('.', '__')] = self._get_value(key)\n # else there were changes in model, continue\n return fields", "def uniq(val, key=None):\n if not isinstance(val, list):\n return val\n if key is None:\n try:\n return list(set(val))\n except TypeError:\n pass\n keys = []\n values = []\n for value in val:\n try:\n thiskey = value[key]\n except:\n thiskey = repr(value)\n if thiskey not in keys:\n keys.append(thiskey)\n values.append(value)\n return values", "def _calculate_unique_key(self, key):\n annotations = self._results.GetAnnotations()\n counter = 1\n while(key in annotations):\n key = \"%s_%s\" % (key, counter)\n counter += 1\n return 
key", "def _collectIdenticalKeys(self, gene_list):\n return(list(set(self.keys()) & set(gene_list.keys())))", "def arcpy_get_unique_field_values(self, arg_field_name):\r\n\t\t# next line uses python 'set comprehension'... basically an inline for-loop which defines the elements of the set.\r\n\t\t# Sets automatically only retain one of each unique item (as youmight expect)\r\n\t\t# A Set is like a dictionary (hence the curley braces), except instead of {key:value,key:value,...} it is just {key,key,...}\r\n\t\tif __thou_shalt__.do_a_dry_run:\r\n\t\t\treturn [1]\r\n\t\treturn thou_shalt(\"Extract sorted list of unique values from \\n\\t\\tfield %s of \\n\\t\\t%s\"%(arg_field_name,self.shortened_name_with_context()),\r\n\t\t\tlambda: sorted(list({cursor.getValue(arg_field_name) for cursor in arcpy.SearchCursor(str(self))}))\r\n\t\t)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The set methods must raise a ComponentsErrorEx in case of wrong mode
def test_wrong_mode(self): self.assertRaises(ComponentErrorsEx, self.dp.setRewindingMode, 'FOO')
[ "def setControlMode(self,mode,*args,**kwargs):\n raise NotImplementedError()", "def magic_xmode(self,parameter_s = ''):\n\n new_mode = parameter_s.strip().capitalize()\n try:\n self.InteractiveTB.set_mode(mode = new_mode)\n print 'Exception reporting mode:',self.InteractiveTB.mode\n except:\n warn('Error changing exception modes.\\n' + str(sys.exc_info()[1]))", "def check_set_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def test_set_fails_when_setting_non_primitive_type(self):\n with pytest.raises(\n ClickException, match=\"Attribute `behaviours` is not allowed to be updated!\"\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.behaviours\", \"value\"],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_invalid_mode(*args):\n with pytest.raises(ValueError, match=\"Invalid mode\"):\n MultiAgentManager(\"test_dir\", mode=\"invalid_mode\")", "def test_set_property_invalid(self):\r\n try:\r\n initial_value = self.config.values['option1']\r\n self.config.option1 = 'invalid'\r\n except Exception as e:\r\n self.assertIsInstance(e, InvalidOptionValueError)\r\n self.assertEqual(self.config.values['option1'], initial_value)", "def _check(self, mode=None):\n if self.closed:\n raise OSError(\"%s is closed\" % self.__class__.__name__)\n if mode is not None and self.mode not in mode:\n raise OSError(\"bad operation for mode %r\" % self.mode)", "def modifyComponentsNotPreferableOnServer(self):\n # Nothing to do\n pass", "def test_handling_wrong_context(member, mode, arg, msg):\n with pytest.raises(TypeError) as excinfo:\n member.set_validate_mode(getattr(Validate, mode), arg)\n assert msg in excinfo.exconly()", "def test_setattr_property(self) -> None:\n obj = SampleConfigComponentDefinition.parse_obj({\"name\": \"test\"})\n with pytest.raises(AttributeError):\n obj.no_set = \"new value\" # type: ignore\n assert obj.can_set == \"test.can_set\"\n obj.can_set = \"new\"\n assert obj.can_set == \"new.can_set\"", "def validate(self, mode): # pragma: no cover\n pass", "def check_for_setup_error(self):", "def testProtocolSetBadType(self):\n def setProtocol():\n self.mr.protocol = 12345\n\n self.assertRaises(\n TypeError,\n setProtocol\n )", "def set_operation_mode(self, operation_mode):", "def _check_mutating_imethod(self):\n if self._properties(_props.ERROR, True) == _props.ERROR:\n raise RuntimeError(\"Operation failed\")", "async def test_set_operation_bad_attr_and_state(opp):\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"operation_mode\") == \"eco\"\n assert state.state == \"eco\"\n with pytest.raises(vol.Invalid):\n await common.async_set_operation_mode(opp, None, ENTITY_WATER_HEATER)\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"operation_mode\") == \"eco\"\n assert state.state == \"eco\"", "def setMolSystemInfo(self):\n \n pass", "def setValueTest_SetInValidValues_1 (self):\r\n self.assertRaises (ATOM3BadAssignmentValue, self.atc.setValue, 1)", "def _set_sensor_mode(self) -> None:\n raise NotImplementedError(\"This method should be implemented by a child class\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
collect docker logs from servers $ command is $ log_collector.py
def main(): global tar_file_descr help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>' hosts = [] if len(sys.argv) == 2: if '-h' == sys.argv[1] or '--help' == sys.argv[1]: print(help_msg) sys.exit(0) elif 'all' == sys.argv[1]: # get logs from all hosts hosts = [] host_objs = CLIENT.host_get_all() for host_obj in host_objs: hosts.append(host_obj.name) else: # get logs from specified hosts hostnames = sys.argv[1].split(',') for host in hostnames: if host not in hosts: hosts.append(host) else: print(help_msg) sys.exit(1) # open tar file for storing logs fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_', suffix='.tgz') os.close(fd) # avoid fd leak with tarfile.open(tar_path, 'w:gz') as tar_file_descr: # clear out old logs if os.path.exists(LOGDIR): shutil.rmtree(LOGDIR) os.mkdir(LOGDIR) # gather logs from selected hosts try: for host in hosts: get_logs_from_host(host) # tar up all the container logs tar_file_descr.add(LOGDIR, arcname='container_logs') finally: # remove uncompressed logs if os.path.exists(LOGDIR): shutil.rmtree(LOGDIR) # gather dump output from kolla-cli dump_kolla_info() print('Log collection complete. Logs are at %s' % tar_path)
[ "def collect():\n try:\n return subprocess.check_output([\n 'sudo', '/opt/tinypilot-privileged/scripts/collect-debug-logs', '-q'\n ])\n except subprocess.CalledProcessError as e:\n raise LogCollectionScriptFailedError(str(e)) from e", "def __get_docker_logs(self, containers):\n\n result = []\n\n attributes = None\n try:\n attributes = JsonObject({\"monitor\": \"agentDocker\"})\n if self.__host_hostname:\n attributes[\"serverHost\"] = self.__host_hostname\n\n except Exception:\n self._logger.error(\"Error setting monitor attribute in DockerMonitor\")\n raise\n\n prefix = self.__log_prefix + \"-\"\n\n for cid, info in six.iteritems(containers):\n container_attributes = attributes.copy()\n container_attributes[\"containerName\"] = info[\"name\"]\n container_attributes[\"containerId\"] = cid\n\n # get the attributes and config items from the labels\n attrs, base_config = get_attributes_and_config_from_labels(\n info.get(\"labels\", None), self.__docker_options\n )\n\n attrs.update(container_attributes)\n\n labels = info.get(\"labels\", []) or []\n self._logger.log(\n scalyr_logging.DEBUG_LEVEL_1,\n 'Found labels \"%s\" for container %s. Using attributes: %s.'\n % (\", \".join(labels), info[\"name\"], str(attrs)),\n )\n\n if self._use_raw_logs and \"log_path\" in info and info[\"log_path\"]:\n stream_count = 1\n log_config = self.__create_log_config(\n default_parser=\"docker\",\n path=info[\"log_path\"],\n attributes=attrs,\n base_config=base_config,\n parse_as_json=True,\n )\n if \"rename_logfile\" not in log_config:\n log_config[\"rename_logfile\"] = \"/docker/%s.log\" % info[\"name\"]\n\n result.append({\"cid\": cid, \"stream\": \"raw\", \"log_config\": log_config})\n else:\n stream_count = 2\n path = prefix + info[\"name\"] + \"-stdout.log\"\n log_config = self.__create_log_config(\n default_parser=\"dockerStdout\",\n path=path,\n attributes=attrs,\n base_config=base_config,\n )\n result.append(\n {\"cid\": cid, \"stream\": \"stdout\", \"log_config\": log_config}\n )\n\n path = prefix + info[\"name\"] + \"-stderr.log\"\n log_config = self.__create_log_config(\n default_parser=\"dockerStderr\",\n path=path,\n attributes=attrs,\n base_config=base_config,\n )\n result.append(\n {\"cid\": cid, \"stream\": \"stderr\", \"log_config\": log_config}\n )\n\n self._logger.log(\n scalyr_logging.DEBUG_LEVEL_1,\n \"Using log config %s for container %s\"\n % (str(result[-1]), info[\"name\"]),\n )\n\n if stream_count == 2:\n self._logger.log(\n scalyr_logging.DEBUG_LEVEL_1,\n \"Using log config %s for container %s\"\n % (str(result[-2]), info[\"name\"]),\n )\n\n return result", "def start_analysis_containers(client, stream_list, image, logs, uid):\n\n for stream in stream_list:\n print(f'Testing: {stream}')\n mc_ip = stream.split(':')[0]\n port = stream.split(':')[1]\n cmd = f'bash -c \"tsp -I ip {stream} -P continuity -O drop > {logs}/{mc_ip}.log\"'\n # cmd = f'bash -c \"while true; do sleep 1; echo thing; done > {logs}/{mc_ip}.log\"'\n client.containers.run(\n image,\n detach=True,\n command=cmd,\n network_mode='host',\n auto_remove=True,\n name=mc_ip,\n tty=True,\n volumes={logs: {'bind': logs, 'mode': 'rw'}},\n user=uid\n )", "def get_logs(self):\n result = []\n cname = self.build_conf.container_name\n for name in self.test_conf.img_logs:\n try:\n info = subprocess.check_output([\n 'docker', 'exec', '-i', cname, 'cat', name])\n info = info.decode('utf8')\n except Exception as problem: # pylint: disable=broad-except\n info = 'Exception: in trying to get log file %s:\\n%s\\n' % (\n name, 
str(problem))\n result.append((name, info))\n return result", "def docker_logger(client, container_id):\n log = logging.getLogger(__name__)\n for cur_log in client.logs(container_id, stream=True, follow=True):\n log.debug(cur_log.decode(\"utf-8\").strip())", "def logs(self, container: Container) -> str:", "def main():\n args = parse_args(\"Send results of a Jenkins build to a LogCollector instance over TCP.\")\n store(args)", "def _logs(self):\n try:\n container = self.dock.containers.get(self.name)\n except docker.errors.DockerException:\n return b\"\"\n\n try:\n return container.logs(tail=100, stdout=True, stderr=True)\n except docker.errors.DockerException as err:\n _LOGGER.warning(\"Can't grap logs from %s -> %s\", self.image, err)", "def stream_container_logs(container: Container) -> None:\n logs = container.logs(stream=True, follow=True)\n for log in logs:\n for line in log.splitlines():\n print(f'[Container {container.id[:5]}] {line.decode()}')", "def on_server_start(self):\n self._container = self._docker_client.containers.run(self.docker_image_name, detach=True, **self.docker_params)\n self.signal_ready()\n\n for log_line in self.get_lines():\n try:\n alert_dict = self.parse_line(log_line)\n if alert_dict:\n self.add_alert_to_queue(alert_dict)\n except Exception:\n self.logger.exception(None)", "def collect_logfiles(self):\n pass", "def collect_pods_logs():\n logger.info(\"Collecting pod logs:\")\n logs_dir = posixpath.join(OPT.output_dir, \"pod_logs\")\n os.makedirs(logs_dir)\n\n pods = get_pods()\n if not pods:\n logger.warning(\"Could not get pods list - skipping pods logs collection\")\n return\n\n for pod in pods:\n containers = get_containers(pod)\n for cont in containers:\n container = cont.rstrip()\n cmd = OPT.kube_cli + \" logs {} {} -c {}\". 
\\\n format(get_namespace_argument(), pod, container)\n with open(\"{}/{}_{}.log\".format(logs_dir, pod, container), \"wb\") as file_pointer:\n handle = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, \\\n stderr=subprocess.STDOUT)\n while True:\n line = handle.stdout.readline()\n if line:\n file_pointer.write(line)\n else:\n break\n logger.info(\" + pod:%s, container:%s\", pod, container)", "def _dump_docker_log(container_name: str, dir: Path) -> None:\n destination = dir / f\"{container_name}.log\"\n with open(destination, \"wb\") as out_stream:\n popen = subprocess.Popen(\n [\n \"docker\",\n \"logs\",\n \"--timestamps\",\n container_name,\n ],\n stdout=out_stream,\n )\n popen.wait()", "def main():\n lines = read_syslog()\n if len(sys.argv) > 1:\n lines = filter_logs(sys.argv[1], lines)\n for line in lines:\n print(line)", "def pulllog():\n\tpass", "def collect_log(self):\n path = 'cluster_test_%d/*.log' % self.address[1]\n src = \"%s@%s:%s\" % (self.user_name, self.address[0], path)\n dest = console_config._log_path\n self._rsync(src, dest)", "def show_follow_logs_command(self) -> None:\n logger.info(f\"Run the following command to show ({self.container_name}) containers logs.\")\n logger.info(f\"docker container logs -f {self.container_name}\")", "def collect():\n date_format = \"%Y-%m-%dT%H:%M:%S\"\n root_dir = self.config['rsnap_log_home']\n metrics = {}\n for log in sorted(os.listdir(root_dir)):\n for line in reversed(open(os.path.join(root_dir, log))\n .readlines()):\n if '.pid' in line and 'rm' in line:\n end_date = line.split()[0].strip(\"[\").strip(\"]\")\n endd = datetime.strptime(end_date, date_format)\n if endd:\n metric_value = endd\n metrics[os.path.splitext(log)[0]] = metric_value\n for metric_name, metric_value in metrics.iteritems():\n self.publish(metric_name, metric_value)", "def process_request(self, run_state):\n try:\n # random delay to prevent all requests from starting at the same time\n delay = random.randint(500, 5000) / 1000\n run_state.sleep_but_awaken_if_stopped(delay)\n\n self.__logger.log(\n scalyr_logging.DEBUG_LEVEL_3,\n \"Starting to retrieve logs for cid=%s\" % six.text_type(self.cid),\n )\n self.__client = DockerClient(\n base_url=(\"unix:/%s\" % self.__socket_file),\n version=self.__docker_api_version,\n )\n\n epoch = datetime.datetime.utcfromtimestamp(0)\n while run_state.is_running():\n self.__logger.log(\n scalyr_logging.DEBUG_LEVEL_3,\n \"Attempting to retrieve logs for cid=%s\" % six.text_type(self.cid),\n )\n sout = False\n serr = False\n if self.stream == \"stdout\":\n sout = True\n else:\n serr = True\n\n self.__logs = self.__client.logs(\n container=self.cid,\n stdout=sout,\n stderr=serr,\n stream=True,\n timestamps=True,\n tail=self.__max_previous_lines,\n follow=True,\n )\n\n # self.__logs is a generator so don't call len( self.__logs )\n self.__logger.log(\n scalyr_logging.DEBUG_LEVEL_3,\n \"Found log lines for cid=%s\" % (six.text_type(self.cid)),\n )\n try:\n for line in self.__logs:\n line = six.ensure_text(line)\n # split the docker timestamp from the frest of the line\n dt, log_line = _split_datetime_from_line(line)\n if not dt:\n # Under some edge cases this message can be logged a lot (which can\n # exhaust the CPU) so we need to make sure rate limit is in place.\n global_log.error(\n \"No timestamp found on line: '%s'\",\n line,\n limit_once_per_x_secs=300,\n limit_key=\"docker-monitor-line-missing-ts\",\n )\n else:\n timestamp = scalyr_util.seconds_since_epoch(dt, epoch)\n\n # see if we log the entire line including 
timestamps\n if self.__log_timestamps:\n log_line = line\n\n # check to make sure timestamp is >= to the last request\n # Note: we can safely read last_request here because we are the only writer\n if timestamp >= self.__last_request:\n self.__logger.info(log_line.strip())\n\n # but we need to lock for writing\n self.__last_request_lock.acquire()\n self.__last_request = timestamp\n self.__last_request_lock.release()\n else:\n # TODO: We should probably log under debug level 5 here to make\n # troubleshooting easier.\n pass\n\n if not run_state.is_running():\n self.__logger.log(\n scalyr_logging.DEBUG_LEVEL_3,\n \"Exiting out of container log for cid=%s\"\n % six.text_type(self.cid),\n )\n break\n except ProtocolError as e:\n if run_state.is_running():\n global_log.warning(\n \"Stream closed due to protocol error: %s\" % six.text_type(e)\n )\n\n if run_state.is_running():\n global_log.warning(\n \"Log stream has been closed for '%s'. Check docker.log on the host for possible errors. Attempting to reconnect, some logs may be lost\"\n % (self.name),\n limit_once_per_x_secs=300,\n limit_key=\"stream-closed-%s\" % self.name,\n )\n delay = random.randint(500, 3000) / 1000\n run_state.sleep_but_awaken_if_stopped(delay)\n\n # we are shutting down, so update our last request to be slightly later than it's current\n # value to prevent duplicate logs when starting up again.\n self.__last_request_lock.acquire()\n\n # can't be any smaller than 0.01 because the time value is only saved to 2 decimal places\n # on disk\n self.__last_request += 0.01\n\n self.__last_request_lock.release()\n except docker.errors.NotFound as e:\n # This simply represents the container has been stopped / killed before the client has\n # been able to cleanly close the connection. This error is non-fatal and simply means we\n # will clean up / remove the log on next iteration.\n global_log.info(\n 'Container with id \"%s\" and name \"%s\" has been removed or deleted. Log file '\n \"will be removed on next loop iteration. Original error: %s.\"\n % (self.cid, self.name, str(e))\n )\n except Exception as e:\n # Those errors are not fatal so we simply ignore dont dont log them under warning.\n # They usually appear on agent restart when using log consumption via API since\n # long running streaming API connection will be closed.\n if \"readinto of closed file\" in str(e) or \"operation on closed file\" in str(\n e\n ):\n global_log.log(\n scalyr_logging.DEBUG_LEVEL_1,\n \"Unhandled non-fatal exception in DockerLogger.process_request for %s:\\n\\t%s.\\n\\n%s\"\n % (self.name, six.text_type(e), traceback.format_exc()),\n )\n return\n\n global_log.warn(\n \"Unhandled exception in DockerLogger.process_request for %s:\\n\\t%s.\\n\\n%s\"\n % (self.name, six.text_type(e), traceback.format_exc())\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read in labels from digitStruct.mat file to create a dict of image file name and corresponding labels
def read_labels(digitstruct_file): labels = dict() for dsObj in tdqm(yieldNextDigitStruct(digitstruct_file), ncols=50): image_labels = [] for bbox in dsObj.bboxList: image_labels.append(bbox.label) labels[dsObj.name] = image_labels return labels
[ "def _get_imagenet_as_dict(self):\n real_file_path = os.path.realpath(self.map_file)\n if not os.path.exists(real_file_path):\n raise IOError(\"map file {} not exists\".format(self.map_file))\n\n label_dict = {}\n with open(real_file_path) as fp:\n line = fp.readline()\n while line:\n labels = line.split(\" \")\n label_dict[labels[1]] = labels[0]\n line = fp.readline()\n\n # get all the dir which are n02087046, n02094114, n02109525\n dir_paths = {}\n for item in label_dict:\n real_path = os.path.join(self.image_dir, label_dict[item])\n if not os.path.isdir(real_path):\n logger.warning(\"{} dir is not exist\".format(real_path))\n continue\n dir_paths[item] = real_path\n\n if not dir_paths:\n raise PathNotExistsError(\"not valid image dir in {}\".format(self.image_dir))\n\n # get the filename, label and image binary as a dict\n for label in dir_paths:\n for item in os.listdir(dir_paths[label]):\n file_name = os.path.join(dir_paths[label], item)\n if not item.endswith(\"JPEG\") and not item.endswith(\"jpg\"):\n logger.warning(\"{} file is not suffix with JPEG/jpg, skip it.\".format(file_name))\n continue\n data = {}\n data[\"file_name\"] = str(file_name)\n data[\"label\"] = int(label)\n\n # get the image data\n real_file_path = os.path.realpath(file_name)\n image_file = open(real_file_path, \"rb\")\n image_bytes = image_file.read()\n image_file.close()\n if not image_bytes:\n logger.warning(\"The image file: {} is invalid.\".format(file_name))\n continue\n data[\"image\"] = image_bytes\n yield data", "def load_imagenet1k_label_darknet():\r\n imagenet_label_dict = {}\r\n file_path = '../data/imageNetLabel.txt'\r\n with open(file_path, 'r') as f:\r\n for idx, line in enumerate(f):\r\n names = line.rstrip()[10:]\r\n imagenet_label_dict[line.rstrip()[:9]] = names\r\n\r\n label_dict = {}\r\n file_path = '../data/imagenet.labels.list'\r\n with open(file_path, 'r') as f:\r\n for idx, line in enumerate(f):\r\n if idx >= 1000:\r\n break\r\n label_dict[idx] = imagenet_label_dict[line.rstrip('\\n')]\r\n return label_dict", "def load_matlab_file(ipath, out_file_1):\n f = h5py.File(ipath+'digitStruct.mat','r')\n\n count = 1\n data = dict()\n\n # Loop over elements in bbox dataset and create python dictionary from them.\n for bbox in f['digitStruct/bbox']:\n obj = f[bbox[0]]\n temp_dict = dict()\n temp_dict['height'] = extract_value(f, obj, 'height', count=count)\n temp_dict['label'] = extract_value(f, obj, 'label')\n temp_dict['left'] = extract_value(f, obj, 'left')\n temp_dict['top'] = extract_value(f, obj, 'top')\n temp_dict['width'] = extract_value(f, obj, 'width')\n # Check that we are getting equal amounts of data for all 5 attributes on each image\n if not(len(temp_dict['height']) == len(temp_dict['label']) == len(temp_dict['left']) == len(temp_dict['top']) == len(temp_dict['width'])):\n raise Exception('Not all sizes for png #' + count + \" are equal, look into it\")\n data[count] = temp_dict\n count += 1\n if count % 5000 == 0:\n print 'Translated ' + str(count) + ' bounding box entries for file ' + out_file_1\n\n # Write to pickle file so we have a python dictionary ready to load later, and don't have to deal with this .mat file\n try:\n with open(out_file_1,'wb') as out_file:\n pickle.dump(data, out_file)\n except Exception as e:\n print 'Save pickle data exception to file: ' + str(out_file) + ' - ' + str(e)\n raise e", "def read_stanford_labels():\n # First get the hardi data\n fetch_stanford_hardi()\n hard_img, gtab = read_stanford_hardi()\n # Fetch and load\n files, folder = 
fetch_stanford_labels()\n labels_file = pjoin(folder, \"aparc-reduced.nii.gz\")\n labels_img = nib.load(labels_file)\n return hard_img, gtab, labels_img", "def _read_label_data(self):\n label_dict = {}\n\n for tar_file in self.tar_files:\n tar_content = tarfile.open(tar_file, \"r:gz\")\n members = tar_content.getmembers()\n h5file = tar_content.extractfile(members[1].name)\n f = h5py.File(h5file, 'r')\n keys = list(f.keys())\n for key in keys:\n if key in label_dict:\n label_dict[key].append(np.array(f[key]))\n else:\n label_dict[key] = []\n label_dict[key].append(np.array(f[key]))\n\n f.close()\n tar_content.close()\n return label_dict", "def read_stanford_labels():\n # First get the hardi data\n fetch_stanford_hardi()\n hard_img, gtab = read_stanford_hardi()\n\n # Fetch and load\n files, folder = fetch_stanford_labels()\n labels_file = pjoin(folder, \"aparc-reduced.nii.gz\")\n labels_img = nib.load(labels_file)\n return hard_img, gtab, labels_img", "def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')", "def load_label(self, idx):\n mat = loadmat('{}/info_resize_1000/{}/{:0>6}.mat'.format(self.data_dir, self.split, idx))\n label = mat['mask'][...].astype(np.uint8)\n label = label[np.newaxis, ...]\n return label", "def get_image_label_map(image_filename, label_filename):\n\n # load keys\n image_keys = get_h5_sorted_keys(filename=image_filename)\n label_keys = get_h5_sorted_keys(filename=label_filename)\n\n # build dictionary\n image_label_map = dict() # map image key to label key\n for label_key in label_keys:\n image_key = label_key.split(\"_bin\")[0]\n if image_key not in SKIPPED_KEYS:\n assert image_key in image_keys\n if image_key not in image_label_map.keys():\n image_label_map[image_key] = []\n image_label_map[image_key].append(label_key) # no need to sort afterwards as label_keys are sorted\n\n # sanity check\n # all samples have labels\n assert sorted(image_label_map.keys()) == image_keys\n\n for image_key in image_keys:\n image_label_map[image_key] = sorted(image_label_map[image_key])\n\n return image_label_map", "def get_pet_labels(images_dir):\r\n \r\n # Creates a list of files in directory from pet images directory\r\n in_files = listdir(images_dir)\r\n \r\n # Process each of the files such that the created dictionary would have\r\n # key = filename and the value = picture label\r\n \r\n # Create an empty dictionary to hold pet labels\r\n petlabels_dic = dict()\r\n \r\n \r\n \r\n for idx in range(0, len(in_files), 1): \r\n if in_files[idx][0] != \".\":\r\n pet_image_name = in_files[idx].split(\"_\")\r\n # Check if the first character is uppercase letter. 
If it is, then lowercase that first character\r\n if pet_image_name[0].isupper() : \r\n pet_image_name = pet_image_name.lower()\r\n # Create a temporary label variable to hold pet label name\r\n pet_label = \" \"\r\n \r\n # Process each of the character strings(words) split by '_' in \r\n # the list pet_image_name\r\n for word in pet_image_name: \r\n if word.isalpha():\r\n pet_label += word + \" \"\r\n pet_label = pet_label.strip()\r\n if in_files[idx] not in petlabels_dic:\r\n petlabels_dic[in_files[idx]] = [pet_label]\r\n else: \r\n print(\" Warning: Duplicate files exist in dictionary\", in_files[idx])\r\n \r\n \r\n # Return dictionary of pet lables\r\n return(petlabels_dic)", "def get_image_labels_mapping(images_fp, labels_fp):\n name_map = {}\n\n for f in images_fp():\n image_name = f[0]['file']\n vars = {k.upper():v for k,v in f[0].items() if k!='file' }\n label_name = labels_fp.get_matching(**vars)[0]['file']\n name_map[image_name] = label_name\n return name_map", "def get_images_and_labels_nc():\n refs = get_ref_df()\n images = {}\n for _, data in refs.iterrows():\n if data['ProbeFileName'] in images:\n continue\n im = data['ProbeFileName']\n images[im] = 1 if data['IsTarget'] == 'Y' else 0\n return images", "def extract_labels(pdbbind_label_file):\n assert os.path.isfile(pdbbind_label_file)\n labels = {}\n with open(pdbbind_label_file) as f:\n content = f.readlines()\n for line in content:\n if line[0] == \"#\":\n continue\n line = line.split()\n # lines in the label file have format\n # PDB-code Resolution Release-Year -logKd Kd reference ligand-name\n #print line[0], line[3]\n labels[line[0]] = line[3]\n return labels", "def image_label(imglist):\n img_dict={}\n for i in range(len(imglist)):\n y = imglist[i].rsplit(\"/\")[-2]\n if y not in img_dict.keys():\n img_dict[y] = 1\n else:\n img_dict[y] += 1\n return img_dict", "def create_labelmapDict_patch(list_all_images, path_dataset):\n list_all_classes = []\n for idx, name_image_ in enumerate(list_all_images):\n _, tail = os.path.split(name_image_)\n temp_obj = []\n name_file_xml_all = os.path.join(path_dataset, 'LABELS', tail[0:-3] + 'xml')\n if os.path.exists(name_file_xml_all):\n with tf.gfile.GFile(name_file_xml_all, 'rb') as fid:\n xml_str = fid.read()\n xml = etree.fromstring(xml_str)\n data = tfrecord_util.recursive_parse_xml_to_dict(xml)['annotation']\n if 'object' in data:\n for obj in data['object']:\n name_in_obj_ = obj['name'].replace(' ', '').strip()\n if name_in_obj_ != 'INCOMPLETAS':\n list_all_classes.append(name_in_obj_)\n temp_obj.append(obj)\n # list_all_classes = unique_list(list_all_classes)\n list_all_classes = list(set(list_all_classes))\n list_all_classes.sort()\n list_all_classes.insert(0, 'background')\n labelmap_ = {el: k for k, el in enumerate(list_all_classes)}\n return labelmap_", "def read_idx_2_label():\n with open('../Data/imagenet_class_index.json') as f:\n dictionary = json.load(f)\n return dictionary", "def read_labels(label_paths, use):\n paths_to_use = label_paths[use]\n lbls_raw = [scipy.io.loadmat(path)['labels'] for path in paths_to_use]\n lbls = []\n for lbl_raw in lbls_raw:\n lbl = np.zeros((lbl_raw.shape[0], lbl_raw.shape[1], 3), dtype=np.uint8)\n bmask = lbl_raw == 1 \n gmask = ~bmask\n lbl[:,:,1] = gmask * 255\n lbl[:,:,2] = bmask * 255\n lbls.append(np.copy(lbl))\n return np.array(lbls)", "def load_label(self, file, variable_name=\"group\"):\n data = scipy.io.loadmat(file)\n self.logger.info(\"loading mat file %s\", file)\n label = data[variable_name].todense().astype(np.int)\n label 
= np.array(label)\n print(label.shape, type(label), label.min(), label.max())\n return label", "def get_pet_labels(image_dir):\n filename_list = listdir(image_dir) # files in the pet folder\n\n # define the dictionary with filenamne as keys and labels as values\n petlabels_dic = {f: f[0:f.rfind('_')].lower().replace('_', ' ').strip()\n for f in filename_list if f[0] != '.'}\n # or\n \"\"\"\n petlabels_dic = {}\n for pet_file in filename_list:\n\n if(pet_file[0] != '.'): # if it's not a hidden file (mac/linux)\n pet_name = pet_file.lower() # change it to all lower case\n pet_name_words = pet_name.split('_') # get all individual words\n pet_label = \"\"\n\n for pet_word in pet_name_words:\n if pet_word.isalpha(): # if word only has letters\n pet_label += pet_word + ' ' # add word to the label\n\n pet_label = pet_label.strip() # remove spaces from the start/end\n\n if pet_file not in petlabels_dic.keys(): # if key is a new one\n petlabels_dic[pet_file] = pet_label\n else: # impossible to have duplicated filenames here...\n print(\"Duplicated file found in the directory!\")\n \"\"\"\n return petlabels_dic" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct a heap from a list of elements with priorities. Each element of the list must be in the form (Item, Priority).
def construct_heap(self, elems):
    for e in elems:
        self.n += 1
        self.A.append(e)
        self.pos[e[0]] = self.n
    for i in range(self.n // 2, 0, -1):
        self.combine(i)
[ "def priority_queue():\n class PriorityElement:\n def __init__(self, value, weight):\n self.value = value\n self.weight = weight\n\n def __lt__(self, other):\n return self.weight < other.weight\n\n # The following will produce the exact same results\n priority_1 = []\n priority_2 = []\n heapq.heappush(priority_1, PriorityElement(0, 15))\n heapq.heappush(priority_1, PriorityElement(0, 6))\n\n heapq.heappush(priority_2, (0, 15))\n heapq.heappush(priority_2, (0, 6))", "def build_heap(aList):\n iEnd = len(aList)\n iStart = iEnd / 2 - 1 # Root of a tree is @ size/2-1\n for iIndex in range(iStart, -1, -1):\n heapify(aList, iEnd, iIndex)", "def build_heap(self, items):\n for key in items:\n self.insert(key)", "def _heapify_priorities(update_priority: np.array) -> List:\n\n # Use numpy vectorization to efficiently compute the list of\n # (priority, random nonce, page, offset) tuples to be heapified.\n pages, offsets = update_priority.nonzero()\n priorities = [tuple(data) for data in np.stack((\n -update_priority[pages, offsets],\n # Don't use deterministic order for page, offset. Otherwise,\n # we get the \"venetian blind\" effect when filling large blocks of\n # colour.\n np.random.randint(0, 2 ** 8, size=pages.shape[0]),\n pages,\n offsets)\n ).T.tolist()]\n\n heapq.heapify(priorities)\n return priorities", "def build_heap(lst, lst_size):\n\n for index in range((lst_size//2) - 1, -1, -1):\n heapify(lst, index, lst_size)", "def heapify(self):\n return HeapItem(prio=-self.ll_upperbound, p=self)", "def heapify(x):\n pass", "def build_heap(arr):\n for i in range((len(arr)//2), -1, -1):\n heapify(arr,index=i, size=len(arr)-1)", "def test_heap_item():\n import heapq\n pq = []\n heapq.heappush(pq, Item('c', 3))\n heapq.heappush(pq, Item('a', 1))\n heapq.heappush(pq, Item('b', 2))\n while pq:\n print(heapq.heappop(pq))", "def build_heap(arr):\n for i in range(len(arr)-1, -1, -1):\n down_heapify(arr, len(arr), i)", "def buildHeap(A):\n n = len(A)\n for i in range(n/2-1, -1, -1):\n heapify(A, i, n)", "def construct_max_heap(self, lst):\n self.heap_list = lst\n #start compare node\n node = (len(self.heap_list)-2)/2\n while node >= 0:\n self.sift_down(node, len(self.heap_list)-1)\n node -= 1", "def build_max_heap(lst):\n heap = Heap(lst)\n for i in range(len(lst) // 2, -1, -1):\n heap.max_heapify(i)\n return heap", "def create_heap_heapify(array):\n for i in range(len(array)//2-1, -1, -1):\n heapify(array, i)\n return array", "def heapify(self, l):\n if not l:\n return\n self.h = [None]\n for i in xrange(0, len(l)):\n self.push(l[i])", "def test_heapq_tuples():\n h = [(1, 'a'), (2, 'b'), (3, 'c')]\n heapify(h)\n\n assert heappop(h) == (1, 'a')\n assert heappop(h) == (2, 'b')", "def heapSort(liste):\n liste,siraliListe=liste.copy(),[]\n minHeapYap(liste)\n for k in range(len(liste)):\n liste[0],liste[-1]=liste[-1],liste[0]\n siraliListe.append(liste.pop())\n minHeapify(liste,0)\n return siraliListe", "def _create_priorities(self, pri):\n heaps = self.priorities\n heaps[pri] = MinBinaryHeap()", "def a_heap():\n # ไบง็”Ÿไธ€ไธช100ไธชๅ…ƒ็ด ็š„ๅˆ—่กจ๏ผŒๅˆ—่กจๅ…ƒ็ด ็š„ๅ€ผ่Œƒๅ›ดๅœจ1000ๅ†…\n li = [random.randrange(1000) for _ in range(100)]\n assert heapq.nlargest(1, li)[0] == max(li)\n assert heapq.nsmallest(1, li)[0] == min(li)\n\n # heapy็š„ๆŽ’ๅบๅบ•ๅฑ‚ๆ˜ฏ่ฐƒ็”จไบ†heapifyๆ–นๆณ•๏ผŒไฝฟไธ€ไธชๅบๅˆ—ๅ˜ไธบๅ †ๆŽ’ๅบ็š„็ป“ๆžœ\n # ๆญคๆ—ถๅฏไปฅ้€š่ฟ‡heappopๆ–นๆณ•ๅ–ๅ‡บๆœ€ๅฐๅ€ผ\n heapq.heapify(li)\n assert min(li) == heapq.heappop(li)\n assert min(li) == heapq.heappop(li)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts the element elem with priority prio.
def insert(self, elem, prio):
    self.n += 1
    self.A.append((elem, prio))
    self.pos[elem] = self.n
    i = self.n
    p = i // 2
    self.insert_loop(i, p)
[ "def change_priority(self, elem, prio):\n pos = self.pos[elem]\n currPrio = self.A[pos][1]\n self.A[pos] = (elem, prio)\n if self.cmpFn(prio, currPrio):\n self.insert_loop(pos, pos // 2) # Up heapify\n else:\n self.combine(pos) # Down heapify", "def insert(self, priority: int, item: str) -> None:\r\n thing = (item, priority)\r\n self._heap.append(thing)\r\n self._perc_up(len(self._heap) - 1)", "def push(self, elt):\n if len(self._queue) == 0: self._queue.append(elt); return\n for i in range(len(self._queue)):\n if self._queue[i].priority < elt.priority:\n self._queue.insert(i, elt)\n return\n #if we get here, elt is lower than all the other procs in the queue, so\n #just append it\n self._queue.append(elt)", "def enqueue(self, element, priority=0, maximum=True):\r\n self.heap.insert((priority, self.help, element), maximum)\r\n self.help += 1\r\n return", "def reprioritize(self, priority, element):\r\n if element not in self.element_finder:\r\n raise ValueError(\"No such element in the priority queue.\")\r\n entry = self.element_finder[element]\r\n self.add_element(priority, element, entry[1])\r\n entry[1] = self.INVALID", "def add_element(self, priority, element, count=None):\r\n if count is None:\r\n count = next(self.counter)\r\n entry = [priority, count, element]\r\n self.element_finder[element] = entry\r\n heapq.heappush(self.pq, entry)", "def put(self, item, priority):\n heapq.heappush(self.elements, (priority, item))", "def insertElement(self, element , i ):\n\n self.heap[i] = element\n # Parent of ith position\n parenti = i // 2\n\n # Inserting element into the heap\n try:\n # Bubbling up\n if parenti != 0 and self.heap[i].dijkstraCriterion < self.heap[parenti].dijkstraCriterion:\n self.heap[i], self.heap[parenti] = self.heap[parenti], self.heap[i]\n self.insertElement(element, parenti)\n # Incrementing self.i position\n else:\n self.i += 1\n return\n\n except:\n # Bubbling up\n self.heap[i] = 'NaN'\n self.insertElement(element, parenti)\n return", "def append(self,data,priority):\r\n\t\tbisect.insort(self.queue,(priority,data))", "def insert(self, pri):\n heaps = self.priorities\n if pri > 10 or pri < 1:\n raise ValueError(\n 'Priority must be between 1 (high) - 10 (low)'\n )\n if pri not in heaps.keys():\n self._create_priorities(pri)\n\n priority = heaps.get(pri)\n priority.push(self._order)\n self._order += 1", "def enqueue(self, item, priority):\n # TODO: Insert given item into heap\n ...", "def insert(self, value, priority=2):\n if not isinstance(priority, int):\n raise TypeError(\"Priority must be an integer\")\n if priority in self.priority_queue:\n self.priority_queue[priority].append(value)\n else:\n self.priority_queue[priority] = [value]\n print(self.priority_queue)", "def insertElement(self, element , i ):\r\n\r\n self.heap[i] = element\r\n # Parent of ith position\r\n parenti = i // 2\r\n\r\n # Inserting element into the heap\r\n try:\r\n # Bubbling up\r\n if parenti != 0 and self.heap[i] < self.heap[parenti]:\r\n self.heap[i], self.heap[parenti] = self.heap[parenti], self.heap[i]\r\n self.insertElement(element, parenti)\r\n # Incrementing self.i position\r\n else:\r\n self.i += 1\r\n return\r\n\r\n except:\r\n # Bubbling up\r\n self.heap[i] = 'NaN'\r\n self.insertElement(element, parenti)\r\n return", "def push( self , elem ):\n self.append( elem )", "def add(self, elem):\n if len(self) < self.size:\n self.data.append(elem)\n self._upheap(len(self.data) - 1)\n else:\n min_so_far = self.min()\n if elem > min_so_far:\n self.remove_min()\n self.data.append(elem)\n 
self._upheap(len(self.data) - 1)", "def insert(self, p, elem):\n node = self._validate(p)\n new_node = self._Node(elem, idx=self._curr_idx, parent=node._parent)\n self._curr_idx += 1\n node._parent = new_node\n new_node._children.append(node)\n self._size += 1\n\n # Invalidate depths and heights after modifying the tree.\n self._depths, self._heights = None, None\n return self._make_position(new_node)", "def insertChildBefore(new_elem, elem):\n parent = DOM.getParent(elem)\n id = DOM.getChildIndex(parent, elem)\n DOM.insertChild(parent, new_elem, id)", "def push(self, priority: float, item):\n heappush(self._heap, (-1 * priority, item))", "def insert(self: 'LinkedList', elem: object) -> None:\r\n self.first = _Node(elem, self.first)\r\n self.size += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Changes the priority of the element elem to prio.
def change_priority(self, elem, prio):
    pos = self.pos[elem]
    currPrio = self.A[pos][1]
    self.A[pos] = (elem, prio)
    if self.cmpFn(prio, currPrio):
        self.insert_loop(pos, pos // 2)  # Up heapify
    else:
        self.combine(pos)  # Down heapify
[ "def reprioritize(self, priority, element):\r\n if element not in self.element_finder:\r\n raise ValueError(\"No such element in the priority queue.\")\r\n entry = self.element_finder[element]\r\n self.add_element(priority, element, entry[1])\r\n entry[1] = self.INVALID", "def setPriority(self, p):\n self.priority = p", "def _update_priority(self, task, prio, worker):\n task.priority = prio = max(prio, task.priority)\n for dep in task.deps or []:\n t = self._state.get_task(dep)\n if t is not None and prio > t.priority:\n self._update_priority(t, prio, worker)", "def increase_priority(self):\n if self._priority > 0:\n self._priority -= 1", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..254']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(100), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..254']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(100), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_priority(self, v, load=False):\n try:\n t = YANGDynClass(v,base=np.uint8, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"priority must be of a type compatible with base=np.uint8, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def setPriority(self, priority):\n\t\traise Exception(\"Abstract method IOperator.setPriority not implemented in: \" + str(self))", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name=\"priority\", rest_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set router priority within virtual router'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with uint8\"\"\",\n 
'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name=\"priority\", rest_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set router priority within virtual router'}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def update_priority(self, index: int, priority: float) -> None:\n self.priorities.set(priority, index)", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..254']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(100), is_leaf=True, yang_name=\"priority\", rest_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set router priority within virtual router', u'cli-show-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'1..254']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(100), is_leaf=True, yang_name=\"priority\", rest_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set router priority within virtual router', u'cli-show-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def setPriority(self, *args):\n return _libsbml.Event_setPriority(self, *args)", "def get_priority(self, elem):\n pos = self.pos[elem]\n return self.A[pos][1]", "def _set_priority(self, v, load=False):\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/mpls', defining_module='openconfig-mpls', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, 
register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/mpls', defining_module='openconfig-mpls', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def setpriority(self, pid=None, priority=5):\n\t \n\t import win32api,win32process,win32con\n\t \n\t priorityclasses = [win32process.IDLE_PRIORITY_CLASS,\n\t win32process.BELOW_NORMAL_PRIORITY_CLASS,\n\t win32process.NORMAL_PRIORITY_CLASS,\n\t win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n\t win32process.HIGH_PRIORITY_CLASS,\n\t win32process.REALTIME_PRIORITY_CLASS]\n\t if pid == None:\n\t pid = win32api.GetCurrentProcessId()\n\t handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n\t win32process.SetPriorityClass(handle, priorityclasses[priority])", "def insert(self, elem, prio):\n self.n += 1\n self.A.append( (e,w) )\n self.pos[e] = self.n\n i = self.n\n p = i // 2\n self.insert_loop(i, p)", "def setPriority(self, *args):\n return _yarp.Thread_setPriority(self, *args)", "def change_priority(self, priority, key):\n index = self.__position[key]\n current = self.__heap[index][0]\n self.__heap[index][0] = priority\n\n if priority > current:\n self.__bubble_down(index)\n else:\n self.__bubble_up(index)", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/mpls', defining_module='openconfig-mpls', yang_type='uint16', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with uint16\"\"\",\n 'defined-type': \"uint16\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/mpls', defining_module='openconfig-mpls', yang_type='uint16', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def policy_priority(self, policy_priority):\n self._policy_priority = policy_priority" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the priority of an element.
def get_priority(self, elem):
    pos = self.pos[elem]
    return self.A[pos][1]
[ "def getpriority(self, name):\n\t\tif name not in self:\n\t\t\treturn None\n\t\treturn self.attributes[name].priority", "def priority(node):\n return node.priority", "def get_priority(self, item):\n try:\n return self.set[item][0]\n except KeyError:\n print(\"Can't get priority of non-existing item\")", "def priority(self):\n return self._priority", "def priority( self ):\n\t\ttry:\n\t\t\treturn float(self._root.get( 'priority', -1 ))\n\t\texcept ValueError:\n\t\t\tRESOURCES.warn( 'No valid number type for property \"priority\": %s' % self._root.get('priority') )\n\t\treturn None", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def top_priority( self ):\n return self[0][0]", "def getPriority(self, *args):\n return _libsbml.Event_getPriority(self, *args)", "def getPriority(self):", "def get_priority(self):\n return self.DEFAULT_PRIORITY", "def getPriority(self):\n return _yarp.Thread_getPriority(self)", "def get_priority(self):\n return str(self.priority)", "def btm_priority( self ):\n return self[-1][0]", "def policy_priority(self):\n return self._policy_priority", "def get_priority(cls, key):\r\n event = cls._events.get(key) or cls._events['default']\r\n return event[0]", "def getPriority(self):\n\t\traise Exception(\"Abstract method IOperator.getPriority not implemented in: \" + str(self))", "def get_priority(self):\n return (ResourceInfo.RES_PRIORITY_CHECK \n + self.res_dict['disk'] * ResourceInfo.RES_PRIORITY_DISK \n + self.res_dict['network'] * ResourceInfo.RES_PRIORITY_NETWORK\n + self.res_dict['cpu'] * ResourceInfo.RES_PRIORITY_CPU\n + self.res_dict['visit'] * ResourceInfo.RES_PRIORITY_VISIT)", "def get_priority_cache_policy(self):\n\n try:\n out = self.filer.invoke('priority-list-info-volume',\n 'volume', self.name)\n except OntapApiException as e:\n # If volume doesn't have a priority schedule, it is default:\n if e.reason == 'unable to find volume' and e.errno == '2':\n return 'default'\n else:\n raise\n\n pri_vol = out.child_get('priority-volume').child_get(\n 'priority-volume-info')\n return pri_vol.child_get_string('cache-policy')", "def get_priority(issue):\n\tpriority = ''\n\tif 'priority' in issue['fields']:\n\t\tpriority = issue['fields']['priority'].get('name', '')\n\treturn priority" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transcodes a file src to a file dest.
def transcode(self, src: Path, dest: Path) -> None:
    pass
[ "def copyFile( src, dest ):\n\tinFile = open( src, 'r' )\n\toutFile = open( dest, 'w' )\n\tfor line in inFile:\n\t\toutFile.write( line )\n\toutFile.close()\n\tinFile.close()", "def compressFile(source, target):\n data = cake.filesys.readFile(source)\n try:\n data = zlib.compress(data, 1)\n except zlib.error, e:\n raise EnvironmentError(str(e))\n cake.filesys.writeFile(target, data)", "def case_convert_file_to_file(source_path: str, dest_path: str, style: CaseStyleEnum) -> None:\n with open(source_path, 'r') as f:\n contents = f.read()\n new_contents = case_convert_stream(contents, style)\n with open(dest_path, 'w') as f:\n f.write(new_contents)", "def _copy_file ( self, source, dest ):\n return", "def unify(src, dst):\n\n # NOTE: at this point it is assumed files are unifiable\n\n # get a temp file name\n dir = os.path.split(src)[0]\n tmp_handle, tmp_path = tempfile.mkstemp(dir=dir)\n os.close(tmp_handle)\n\n # rename the destination, in case we need to back out\n os.rename(dst, tmp_path)\n\n # link source to destination\n try:\n os.link(src, dst)\n except:\n # back out\n print 'Could not link %s -> %s, backing out' % (src, dst)\n try:\n if os.path.exists(dst):\n os.unlink(dst)\n os.rename(tmp_path, dst)\n except:\n print 'Could not back out!!! the destination file is still there as', tmp_file\n raise exceptions.OSError\n\n # done, remove the temp file\n os.unlink(tmp_path)", "def copy_file_from_node_to_node(src_file, src_node, dest_node, dest_file):\n log.info(f\"copying {src_file} from {src_node.ip_address} to {dest_node.ip_address}\")\n src_file_obj = read_file_from_node(src_file, src_node)\n write_file_to_node(src_file_obj, dest_file, dest_node)", "def copy_file(src, dest):\n with open_local_or_gcs(src, 'r') as h_src:\n with open_local_or_gcs(dest, 'w') as h_dest:\n shutil.copyfileobj(h_src, h_dest)", "def copy_file(src, dest):\r\n # Check if destination file exist or not, if not, create a new file\r\n if not os.path.exists(dest):\r\n create_new_file(dest)\r\n else:\r\n if os.path.isdir(dest):\r\n dest = os.path.join(dest, os.path.basename(src))\r\n create_new_file(dest)\r\n write_chunks(src, dest)\r\n change_same_permission(src, dest)\r\n change_same_time(src, dest)", "def cp_file(src, dest):\n if not os.path.exists(dest):\n shutil.copyfile(src, dest)", "def compress(src,dstfile):\n\tafile = zipfile.ZipFile(dstfile,\"w\",zipfile.ZIP_DEFLATED)\n\tfor root,dirs,files in os.walk(src):\n\t\tfor filename in files:\n\t\t\tabspath = osp.join(root,filename)\n\t\t\trelpath = osp.relpath(abspath,src)\n\t\t\tafile.write(abspath, relpath)\n\tafile.close();", "def copy_file(src, dst):\n return shutil.copy2(src, dst)", "def cp(self,origen,destino):\n global DIRARCHIVOS\n origen = DIRARCHIVOS+origen\n destino = DIRARCHIVOS+destino\n if os.path.exists(origen):\n with open(origen, 'rb') as forigen:\n with open(destino, 'wb') as fdestino:\n shutil.copyfileobj(forigen, fdestino)\n print(\"Archivo copiado\")", "def copy_file(fromf,tof, fromapp, toapp):\n f2w=open(tof,\"w\")\n with open(fromf) as f:\n for line in f:\n newline=line.replace(fromapp,toapp)\n f2w.write(newline.replace(fromapp.upper(),toapp.upper()))\n f2w.close()", "def convert_tmpfile(src_file_name:str, dest_path:str):\n src_path = os.path.join(\n current_app.config['UPLOAD_FOLDER'],\n src_file_name\n )\n if not os.path.exists(src_path):\n abort(http.HTTPStatus.BAD_REQUEST, message='raw file not exist')\n pathlib.Path(os.path.dirname(dest_path)).mkdir(parents=True, exist_ok=True)\n shutil.move(src_path, dest_path)", "def copyfile_ctf(src, 
dest):\n _copytree(src, dest)\n # list of file types to rename\n file_types = (\n \".acq\",\n \".eeg\",\n \".dat\",\n \".hc\",\n \".hist\",\n \".infods\",\n \".bak\",\n \".meg4\",\n \".newds\",\n \".res4\",\n )\n # Rename files in dest with the name of the dest directory\n fnames = [f for f in os.listdir(dest) if f.endswith(file_types)]\n bids_folder_name = op.splitext(op.split(dest)[-1])[0]\n for fname in fnames:\n ext = op.splitext(fname)[-1]\n os.replace(op.join(dest, fname), op.join(dest, bids_folder_name + ext))", "def convertFile(infile, outfile):\n # we are opening the file\n f = open(infile, \"rb\")\n # reading the file\n data = f.read()\n # decoding\n decodedData = data.decode('ISO-8859-1')\n encodedData = decodedData.encode('utf-8')\n open(outfile, \"wb\").write(encodedData)\n # closing the file\n f.close()\n print(\"Success: File conversion complete.\")", "def process_file(src_file, dest_file):\n # read data\n with open(src_file) as fil:\n new_data = fil.read()\n # generate a chain of templates\n parent_template = None\n current_template = dest_file\n cursor = 1\n if EXTEND_FLAG in new_data:\n new_data = new_data.replace(EXTEND_FLAG, \"\")\n while exists(current_template):\n parent_template = current_template\n current_template = \"%s%s%d\" % (dest_file, CHILD_TPL_FLAG, cursor)\n cursor += 1\n # write data\n with open(current_template, \"w\") as fil:\n if parent_template:\n # in the chain of templates each has to extend one another\n new_data = \"\\n\".join([\n \"{%% extends \\\"%s\\\" %%}\" % parent_template,\n new_data\n ])\n fil.write(new_data)", "def rotator(self, source, dest):\n with open(source, \"rb\") as sf:\n data = sf.read()\n compressed = zlib.compress(data, 9)\n with open(dest, \"wb\") as df:\n df.write(compressed)\n os.remove(source)", "def copy_file(target_path, dest_path):\n copy2(target_path, dest_path)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes an integer below 1001 and converts it into English text. Ignore spaces and hyphens as the instructions require.
def int2text(integer):
    # Numbers 1-99 are handled by simply looking up words in the special_case
    # dictionary.
    if integer < 100:
        return digit2text(integer)
    elif integer < 1000:
        # If exactly some hundred, then just return the word for the hundred's
        # place and the word 'hundred'
        if integer % 100 == 0:
            return digit2text(integer // 100) + 'hundred'
        # Otherwise return the word for the hundred's place, the word
        # 'hundredand' and do some composition to make the rest of the words.
        else:
            return digit2text(integer // 100) + 'hundredand' + \
                digit2text(integer % 100)
    # Special case for 1000.
    elif integer == 1000:
        return "onethousand"
[ "def english(number):\r\n if number == 0:\r\n return 'zero'\r\n word = ''\r\n for step in itertools.count():\r\n number, rest = divmod(number, 1000)\r\n word = format_num(en3(rest), step) + word\r\n if number == 0:\r\n return word.strip()", "def num_to_thaiword(number: int) -> str:\n\n output = \"\"\n number_temp = number\n if number is None:\n return \"\"\n elif number == 0:\n output = \"เธจเธนเธ™เธขเนŒ\"\n\n number = str(abs(number))\n for place, value in enumerate(list(number[::-1])):\n if place % 6 == 0 and place > 0:\n output = _PLACES[6] + output\n\n if value != \"0\":\n output = _VALUES[int(value)] + _PLACES[place % 6] + output\n\n for search, replac in _EXCEPTIONS.items():\n output = output.replace(search, replac)\n\n if number_temp < 0:\n output = \"เธฅเธš\" + output\n\n return output", "def hundreds_word(number):\n converted = ''\n if not (0 < number < 1000):\n return 'No es posible convertir el numero a letras'\n\n number_str = str(number).zfill(9)\n cientos = number_str[6:]\n\n\n if(cientos):\n if(cientos == '001'):\n converted += 'UN '\n elif(int(cientos) > 0):\n converted += '%s ' % __convert_group(cientos)\n\n\n return converted.title().strip()", "def convert_to_words(num: int):\n number_dict = {\n 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five',\n 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten',\n 11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen',\n 16: 'sixteen', 17: 'seventeen', 18: 'eighteen', 19: 'nineteen'\n }\n if num == 0:\n return ''\n \n elif num == 1000:\n return 'one thousand'\n \n elif num in number_dict:\n return number_dict.get(num)\n \n elif num < 100:\n if (20 <= num) and (num < 30):\n return 'twenty-' + convert_to_words(num % 10)\n elif (30 <= num) and (num < 40):\n return 'thirty-' + convert_to_words(num % 10)\n elif (40 <= num) and (num < 50):\n return 'forty-' + convert_to_words(num % 10)\n elif (50 <= num) and (num < 60):\n return 'fifty-' + convert_to_words(num % 10)\n elif (60 <= num) and (num < 70):\n return 'sixty-' + convert_to_words(num % 10)\n elif (70 <= num) and (num < 80):\n return 'seventy-' + convert_to_words(num % 10)\n elif (80 <= num) and (num < 90):\n return 'eighty-' + convert_to_words(num % 10)\n elif (90 <= num) and (num < 100):\n return 'ninety-' + convert_to_words(num % 10)\n \n elif (num % 100 == 0):\n return convert_to_words(num//100) + ' hundred'\n \n else: #if 100 <= num < 1000\n if (100 < num) and (num < 200):\n return 'one hundred and ' + convert_to_words(num % 100)\n elif (200 < num) and (num < 300):\n return 'two hundred and ' + convert_to_words(num % 100)\n elif (300 < num) and (num < 400):\n return 'three hundred and ' + convert_to_words(num % 100)\n elif (400 < num) and (num < 500):\n return 'four hundred and ' + convert_to_words(num % 100)\n elif (500 < num) and (num < 600):\n return 'five hundred and ' + convert_to_words(num % 100)\n elif (600 < num) and (num < 700):\n return 'six hundred and ' + convert_to_words(num % 100)\n elif (700 < num) and (num < 800):\n return 'seven hundred and ' + convert_to_words(num % 100)\n elif (800 < num) and (num < 900):\n return 'eight hundred and ' + convert_to_words(num % 100)\n elif (900 < num) and (num < 1000):\n return 'nine hundred and ' + convert_to_words(num % 100)", "def numtowords(number, lang='sw', use_lakh=False):\n click.echo(num_to_words(number, lang=lang, use_lakh=use_lakh))", "def translateNumber(n):\r\n if type(n) != str:\r\n return None\r\n else:\r\n translation = \"\"\r\n word = \"\"\r\n for c in n:\r\n if c != ' ':\r\n word += 
c\r\n elif word in Numbers:\r\n translation += Numbers[word] + \" \"\r\n else:\r\n translation += word + \" \"\r\n return translation", "def num2words(num):\n # Create a dictionary of all unique numbers from 1 to 1,000\n num2words = {0:'', 1:'one', 2:'two', 3:'three', 4:'four', 5:'five', 6:'six', 7:'seven',\\\n 8:'eight', 9:'nine', 10:'ten', 11:'eleven', 12:'twelve', 13:'thirteen', 14:'fourteen',\\\n 15:'fifteen', 16:'sixteen', 17:'seventeen', 18:'eighteen', 19:'nineteen', 20:'twenty',\\\n 30:'thirty', 40:'forty', 50:'fifty', 60:'sixty', 70:'seventy', 80:'eighty',\\\n 90:'ninety', 1000:'onethousand'}\n result = ''\n while True:\n try:\n result += num2words[num]\n return result\n except:\n pass\n try:\n result += num2words[num-num%10] + num2words[num%10]\n return result\n except:\n result += num2words[(num - num%100)//100] + 'hundred'\n num = num%100\n if num == 0:\n return result\n else:\n result += 'and'", "def _convert_greater_4000(self, number):\n div = self.convert_to_roman(int(number/1000))\n rest = self.convert_to_roman(number % 1000)\n return \"(\"+div+\")\" + rest", "def number_to_words(n):\n if n < 20:\n return WORDS[n]\n elif n < 100:\n return WORDS[18 + n // 10] + (\"\" if n % 10 == 0 else \"-\" + WORDS[n % 10])\n elif n < 1_000:\n return (\n number_to_words(n // 100)\n + \" hundred\"\n + (\" \" + number_to_words(n % 100) if n % 100 > 0 else \"\")\n )\n elif n < 1_000_000:\n return (\n number_to_words(n // 1_000)\n + \" thousand\"\n + (\" \" + number_to_words(n % 1_000) if n % 1_000 > 0 else \"\")\n )\n elif n < 1_000_000_000:\n return (\n number_to_words(n // 1_000_000)\n + \" million\"\n + (\" \" + number_to_words(n % 1_000_000) if n % 1_000_000 > 0 else \"\")\n )\n elif n < 1_000_000_000_000:\n return (\n number_to_words(n // 1_000_000_000)\n + \" billion\"\n + (\n \" \" + number_to_words(n % 1_000_000_000)\n if n % 1_000_000_000 > 0\n else \"\"\n )\n )\n elif n < 1_000_000_000_000_000:\n return (\n number_to_words(n // 1_000_000_000_000)\n + \" trillion\"\n + (\n \" \" + number_to_words(n % 1_000_000_000_000)\n if n % 1_000_000_000_000 > 0\n else \"\"\n )\n )\n else:\n return \"Support upto 999,999,999,999,999 (1 less than a quadrillion)\"", "def _convert_greater_1000_less_4000(self, number):\n return int(str(number)[0]) * self.values_indo_to_roman[1000] + self.convert_to_roman(number%1000)", "def hundreds_text(num):\n hundreds_digit = num // 100\n tens_digit = num % 100\n hundreds_text = singles[hundreds_digit] + ' ' + \"Hundred\"\n return hundreds_text + ' ' + tens_text(tens_digit)", "def _cardinal2word(strNumber):\n return Number.convertNumberIntoLetters(strNumber)", "def translateNum(self, num: int) -> int:\n mapping = {str(i): string.ascii_lowercase[i] for i in range(26)}\n num = str(num)\n\n def dp(s: str):\n print(s)\n if len(s) == 1:\n return 1\n if len(s) == 2:\n # i.e. 23ๅฏไปฅๅ˜ๆˆ 2๏ผŒ3ๅ’Œ23\n if s in mapping:\n return 2\n # i.e. 
59ๅช่ƒฝๅ˜ๆˆ5ๅ’Œ9\n else:\n return 1\n if s[:2] in mapping:\n return dp(s[1:]) + dp(s[2:])\n else:\n return dp(s[1:])\n return dp(num)", "def convert(number):\n result = \"\"\n while number != 0:\n result = ALPHANUM[number % len(ALPHANUM)] + result\n number //= len(ALPHANUM)\n return result or \"0\"", "def intRender(self, number):\n\n data = unicode(number)\n bites = list()\n\n while data:\n bites.append(data[-3:])\n data = data[:-3]\n\n return \" \".join(reversed(bites))", "def __int2words(n, scale='short', sep='', hyphen=' '):\n if scale == 'short':\n (g, p, k) = (3, 1000, 2)\n elif scale == 'long':\n (g, p, k) = (6, 1000000, 1)\n else:\n raise ValueError('Unsupported scale type: ' + scale)\n i = (len(str(n)) - 1) // g\n (d, r) = divmod(n, p**i)\n w = _illions[i - k] + 'illion'\n x = _int2words(d, scale, sep, hyphen) + ' ' + w\n if r == 0: return x\n if r < 100: return x + ' and ' + _int2words(r, scale, sep, hyphen)\n return x + sep + ' ' + _int2words(r, scale, sep, hyphen)", "def int_to_str(number):\n rb = RuleBasedNumberFormat(URBNFRuleSetTag.SPELLOUT, Locale('pl_PL'))\n verbalized = rb.format(int(number))\n return verbalized", "def int2words(n, scale='short', sep='', hyphen=' ', lang='en'):\n if lang != 'en': raise ValueError(str.format(\"int2words: lang='{lang}' not implemented\", lang=lang))\n return _int2words(int(n), scale, sep, hyphen)", "def int_to_roman(integer: int) -> str:\n result = ''\n\n for i in lookup.keys():\n while integer >= lookup[i]:\n result += i\n integer -= lookup[i]\n\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Solves [a]{b} = {x} by Gauss elimination.
def gaussElimin(a,b):
    a=float64(a)
    b=float64(b)
    n=len(b)
    x=zeros((n,1),dtype=float)
    for k in range(n-1):
        for i in range(k+1,n):
            l=float(a[i][k])/a[k][k]
            a[i][k]=0
            for j in range(k+1,n):
                a[i][j]=a[i][j]-l*a[k][j]
            b[i]=b[i]-l*b[k]
    x[n-1]=float(b[n-1])/a[n-1][n-1]
    for i in range(n-2,-1,-1):
        sum=b[i]
        for j in range(i+1,n):
            sum=sum-a[i][j]*x[j]
        x[i]=float(sum)/a[i][i]
    return x
[ "def gauss(A, b):\n n = len(A)\n\n A, b = forward_elimination(A, b, n)\n return back_substitution(A, b, n)", "def gaussian_elimination(A, b):\n n = len(b)\n # Join A and b\n ab = np.c_[A,b]\n # Gaussian Elimination\n for i in range(n-1):\n if ab[i,i] == 0:\n raise ZeroDivisionError('Zero value in matrix..')\n\n for j in range(i+1, n):\n ratio = ab[j,i] / ab[i,i]\n\n for k in range(i, n+1):\n ab[j,k] = ab[j,k] - ratio * ab[i,k]\n\n # Backward Substitution\n X = np.zeros((n,1))\n X[n-1,0] = ab[n-1,n] / ab[n-1,n-1]\n\n for i in range(n-2,-1,-1):\n knowns = ab[i, n]\n for j in range(i+1, n):\n knowns -= ab[i,j] * X[j,0]\n X[i,0] = knowns / ab[i,i]\n return X", "def solve_aux_program(A, b):\n\n (m, n) = A.shape # m = |b| = |y|, n = |x|\n\n c_ = np.zeros(n + m, dtype=\"float64\")\n c_[n:] = -1 # g = -โˆ‘y\n\n A_ = np.hstack((A, np.eye(m))) # = Ax + y\n b_ = b.copy()\n for i, b_i in enumerate(b):\n if b_i < 0:\n b_[i] *= -1\n A_[i] *= -1\n\n initial_basis = list(range(n + 1, n + m + 1))\n\n basis, Q = solve_canonical_impl(initial_basis, c_, A_, b_)\n if Q[0][0] < 0:\n raise NoSolutionError(\"The original system is inconsistent\")\n\n redundant = {}\n for no, s in enumerate(basis):\n if s <= n:\n continue\n r = next(i for i in range(1, m + 1) if Q[i][s] == 1)\n k = next((j for j in range(1, n + 1) if Q[r][j] != 0), None)\n if k is None:\n del A[r - 1]\n del Q[r]\n redundant.add(s)\n else:\n basis[no] = k\n gauss_elimination(Q, basis)\n basis = [s for s in basis if s not in redundant]\n\n return basis", "def gauss(A, b):\n n = A.shape[0]\n\n # Comprobamos si hay elementos diagonales cero\n if any(diag(A) == 0):\n raise ZeroDivisionError(('Se producirรก la divisiรณn por cero ; '\n 'pivotar actualmente no es compatible '))\n\n A, b = eliminacion_adelante(A, b, n)\n return sustitucion_regresiva(A, b, n)", "def Gauss_Seidel_Solve(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n #only change from before is that I use x_new in the update\n x_new[row] -= A[row,column]*x_new[column]\n x_new[row] /= A[row,row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new", "def gauss(self, b):\r\n n = self.matrix.shape[0]\r\n m = self.matrix.shape[1]\r\n not_unique = []\r\n for i in range(0, m):\r\n maxi = i\r\n # find non-zero element in column i, starting in row i\r\n for k in range(i, n): # k index for rows\r\n if self.matrix[k, i] == 1:\r\n maxi = k\r\n if self.matrix[maxi, i] == 1:\r\n # swap rows i and maxi in matrix\r\n # k is column index, start with i because columns < i are zero\r\n for k in range(i, m):\r\n tmp = self.matrix[maxi, k]\r\n self.matrix[maxi, k] = self.matrix[i, k]\r\n self.matrix[i, k] = tmp\r\n # swap rows i and maxi in vector b\r\n tmp = b[maxi]\r\n b[maxi] = b[i]\r\n b[i] = tmp\r\n else:\r\n not_unique.append(i)\r\n\r\n # iterate all rows and add maxi row to current row such that the\r\n # leading element is 0\r\n for u in range(i+1, n):\r\n if self.matrix[u, i] == 1:\r\n for v in range(i, m):\r\n self.matrix[u, v] = (self.matrix[u, v] + self.matrix[i, v]) % 2\r\n 
b[u] = (b[u] + b[i]) % 2\r\n # iterate all rows backwards and solve the linear equation\r\n # If several solutions are possible to solve the equation system, \r\n # list all solutions in an array\r\n solutions = []\r\n not_unique_combinations = list(itertools.product([0, 1], repeat=len(not_unique)))\r\n for combination in not_unique_combinations:\r\n x = [0] * m\r\n for i in reversed(range(0, n)):\r\n if self.matrix[i, i % m] == 1:\r\n x[i] = b[i]\r\n if i < m-1:\r\n for k in range(i+1, m):\r\n x[i] = (x[i] - self.matrix[i, k] * x[k]) % 2\r\n else:\r\n if i in not_unique:\r\n position = not_unique.index(i)\r\n x[i] = combination[position]\r\n solutions.append(x) \r\n return solutions", "def gaussian_solve(a, b):\n g = np.zeros((len(a), len(a[0]) + len(b[0])))\n for i in range(len(a)):\n for j in range(len(a[0])):\n g[i][j] = a[i][j]\n for i in range(len(b)):\n for j in range(len(b[0])):\n g[i][j + len(a[0])] = b[i][j]\n for i in range(len(a)):\n for j in range(i+1, len(a)):\n row1 = g[i]\n row2 = g[j]\n if row1[i] != 0:\n q = row2[i] / row1[i]\n g[j] = row2 - q * row1\n for i in range(len(a)):\n i = len(a) - i - 1\n for j in range(i):\n j = i - j - 1\n row1 = g[i]\n row2 = g[j]\n if row1[i] != 0:\n q = row2[i] / row1[i]\n g[j] = row2 - q * row1\n if g[i][i] != 0:\n g[i] /= g[i][i]\n else:\n return 'error: matrix is not linearly independent'\n out = np.zeros((len(b), len(b[0])))\n for i in range(len(b)):\n for j in range(len(b[0])):\n out[i][j] = g[i][j + len(a[0])]\n return out", "def test_gauss_1(self, a, b, dete, xe):\n det, x = gauss(a, b)\n\n assert np.isclose(det, dete)\n assert np.allclose(x, xe)", "def solve(self,b):\n nrows = self.nrows\n ncols = self.ncols\n newmatrix = Matrix(nrows,ncols+b.ncols) #Account for b not being just a column vector\n for i in range(nrows):\n for j in range(ncols):\n newmatrix[i,j]= self[i,j]\n for j in range(b.ncols):\n newmatrix[i,ncols+j] = b[i,j]\n newmatrix.gaussianelimination()\n x = Matrix(nrows,b.ncols)\n for i in range(x.nrows):\n for j in range(b.ncols):\n x[i,j] = newmatrix[i,j+ncols]\n return x", "def solve_gauss(A, F):\n\n At, Ft = triangularise(A, F)\n n = len(Ft)\n x = np.zeros(len(Ft))\n x[n-1] = Ft[n-1]/At[n-1, n-1]\n for i in range(n-2, -1, -1):\n s = 0\n for j in range(i+1, n):\n s = s + At[i, j]*x[j]\n x[i] = (Ft[i] - s) / At[i, i]\n return x, 'Always'", "def gauss_seidel(a, b, n=None, x=None, delta=None, actual=np.array([]), max_iterations=default_max_iterations):\n # Make sure that both delta and actual are passed in\n if (delta and not actual.any()) or (actual.any() and not delta):\n raise SyntaxError(\"Must pass in both delta and actual if one is passed in\")\n # Make sure that only N or delta is passed in\n if delta and n:\n raise SyntaxError(\"Can only pass delta or N option\")\n\n # Create an initial guess if needed\n if x is None:\n x = np.zeros(len(a[0]))\n\n # Iterate for N times if N is passed in\n if n:\n L = np.tril(a)\n U = a - L\n for i in range(n):\n x = np.dot(np.linalg.inv(L), b - np.dot(U, x))\n\n # Iterate until error is found or max_iterations is exceeded if delta and actual are passed in\n elif delta and actual.any():\n n = 0\n actual_norm = np.linalg.norm(actual)\n L = np.tril(a)\n U = a - L\n\n while True:\n x = np.dot(np.linalg.inv(L), b - np.dot(U, x))\n x_norm = np.linalg.norm(x)\n n += 1\n # Compare norms of actual matrix with Jacobian-calculated matrix and if difference is within error, return\n # the number of iterations it took to get within the error\n if abs(Decimal(actual_norm) - Decimal(x_norm)) <= 
delta or n >= max_iterations:\n break\n # If neither N or delta was passed in\n else:\n raise SyntaxError(\"Must pass in either N or delta options to function\")\n\n # Return the result and the number of iterations taken to find it\n return [x, n]", "def gauss_seidel_step(D, L, U, b, xk):\n #Solve Ax=b\n A = D + U\n b = b - L.dot(xk)\n xnext = solve_triangular(A,b)\n\n return xnext", "def gauss_seidel_iteration(A, b, x0, epsilon=1e-8):\n\n #Initialize parameters\n D, L, U = decompose(A)\n xnext = gauss_seidel_step(D, L, U, b, x0)\n iterationcount = 0\n\n #Perform Gauss-Seidel iteration to solve Ax=b\n while norm(xnext - x0) > epsilon:\n x0 = xnext\n xnext = gauss_seidel_step(D, L, U, b, x0)\n\n xsol = xnext\n\n return xsol", "def solve_triangular(a, b, lower=False):\n # TODO maybe commit this to gvar.linalg\n # TODO can I raise a LinAlgError if a[i,i] is 0, and still return the\n # result and have it assigned to a variable using try...finally inside this\n # function?\n x = np.copy(b)\n a = a.reshape(a.shape + (1,) * len(x.shape[1:]))\n if lower:\n x[0] /= a[0, 0]\n for i in range(1, len(x)):\n x[i:] -= x[i - 1] * a[i:, i - 1]\n x[i] /= a[i, i]\n else:\n x[-1] /= a[-1, -1]\n for i in range(len(x) - 1, 0, -1):\n x[:i] -= x[i] * a[:i, i]\n x[i - 1] /= a[i - 1, i - 1]\n return x", "def sparse_gauss_seidel(A, b, tol=1e-8, maxiters=29):\n \n\n def iter(xi):\n xj=np.zeros((m,))\n for i in xrange(m): \n rowstart = A.indptr[i]\n rowend = A.indptr[i+1]\n aii=A[i,i]\n xj[i]=(b[i]-(np.dot(A.data[rowstart:rowend], xi[A.indices[rowstart:rowend]])-aii*xi[i]))/(aii)\n xi[i]=xj[i]\n return xj\n \n #Aix = np.dot(A.data[rowstart:rowend], x[A.indices[rowstart:rowend]])\n\n m=len(b)\n xk=np.zeros((m,))\n for i in xrange(0,maxiters):\n xk=iter(xk)\n if (la.norm(A.dot(xk)-b,ord=np.inf)<tol) or (i==maxiters-1):\n return xk", "def legendreGauss (func, deg, a, b, ind, bsp, ind2=0):\n\n\tx, w = np.polynomial.legendre.leggauss(deg)\n\tt = 0.5*(x+1)*(b-a)+ a\n\t\n\tgauss = sum(w + func(t, bsp, ind, ind2))*( 0.5*(b-a))\n\n\treturn gauss", "def gaussxwab(N,a,b):\n x,w = gaussxw(N)\n return 0.5*(b-a)*x+0.5*(b+a),0.5*(b-a)*w", "def gaussian_elimination(A, b, type=None):\n # augmented matrix - matrix A augmented with vector b\n if b is not None:\n A = np.column_stack((A, b))\n \n print A\n i_rows, j_cols = np.shape(A)\n\n\n col_indx = np.arange(0, j_cols)\n row_indx = np.arange(0, i_rows)\n\n\n for i in range(0, i_rows - 1):\n # i-th row of matrix A\n i_row = A[i, :]\n\n # sub column of diagonal element\n abs_sub_col = abs(A[i + 1:, i])\n \n # max element of sub column\n a_max_sub_col = max(abs_sub_col)\n pos_max_sub_col = abs_sub_col.argmax()\n\n if a_max_sub_col == 0:\n print A\n print \"i-th row =\", i\n print \"row =\", i_row\n print \"sub col =\", A[i + 1:, i]\n raise ValueError(\"Matrix is singular.\")\n\n \n # row multiplier\n m = A[i, i] / a_max_sub_col\n \n # copy rows to replace the in the next two steps\n _i_row_indx = row_indx[i]\n _max_row_indx = i + pos_max_sub_col + 1\n \n _A_i_row = copy.copy(i_row)\n _A_max_row = A[i + pos_max_sub_col + 1, :]\n \n \n # replace rows\n row_indx[i] = _max_row_indx\n row_indx[i + pos_max_sub_col + 1] = _i_row_indx\n \n A[i, :] = _A_max_row\n A[i + pos_max_sub_col + 1, :] = _A_i_row\n\n for i_sub_row in range(i + 1, i_rows):\n # row k - row i\n A[i_sub_row, :] = A[i_sub_row, :] - m * i_row\n print \"GE-start\"\n print A \n print row_indx\n print \"GE-end\"\n return None", "def gauss(x,p):\r\n return np.exp((-(x - p[0])**2) / (2 * p[1]**2))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Solves [L][U]{x} = b, where [a] = [L\U] is the matrix returned from LUdecomp.
def LUsolve(a,b):
    b=float64(b)
    n=len(b)
    LU=LUdecomp(a)
    y=zeros((n,1))
    x=zeros((n,1))
    y[0]=b[0]
    for i in range(1,n):
        sum=b[i]
        for j in range(i):
            sum=sum-LU[i][j]*y[j]
        y[i]=sum
    x[n-1]=float(y[n-1])/LU[n-1][n-1]
    for i in range(n-2,-1,-1):
        sum=y[i]
        for j in range(i+1,n):
            sum=sum-LU[i][j]*x[j]
        x[i]=float(sum)/LU[i][i]
    return x
[ "def LUsolve(B, Y):\n\n n = len(B)\n U = np.zeros_like(B)\n L = np.identity(n)\n\n for i in range(n):\n for j in range(i):\n L[i,j] = B[i,j]\n \n for k in range(i, n):\n U[i,k] = B[i,k]\n\n LL = np.concatenate((L, Y), axis=1)\n Z = forward_substitute(LL)\n\n UU = np.concatenate((U, np.array([Z]).T), axis=1)\n X = back_substitute(UU)\n\n return X", "def lu_decom(A,b):\n # init\n n = len(b)\n L = np.eye(n)\n U = np.zeros((n,n))\n x = np.zeros(n)\n y = np.zeros(n)\n\n # decomposition A = LU\n\n U[0,:] = A[0,:]\n L[1:,0] = A[1:,0] / U[0,0]\n\n for i in range(1,n):\n for j in range(i,n):\n\n U[i,j] = A[i,j] - np.dot(L[i,:i],U[:i,j])\n\n if j != n-1:\n L[j+1,i] = (A[j+1,i] - np.dot(L[j+1,:i],U[:i,i])) / U[i,i]\n\n # solve Ly=b\n y[0] = b[0]\n\n for k in range(1,n):\n y[k] = b[k] - np.dot(L[k,:k],y[:k])\n\n # solve Ux=y\n x[-1] = y[-1] / U[-1,-1]\n\n for k in range(n-2,-1,-1):\n x[k] = (y[k] - np.dot(U[k,k+1:],x[k+1:])) / U[k,k]\n\n return x,L,U", "def luDecomposition(matrix,b):\r\n # copy\r\n a=[matrix[i][:] for i in range(len(matrix))]\r\n n = len(b)\r\n x = [0]*n\r\n y = [0]*n\r\n # a[i][j] = L + U - I\r\n # Normally, you use two separate matrices, L and U\r\n # Use one for memory saving (uper part is U, lower part is L)\r\n for i in range(0,n):\r\n for j in range(0,i):\r\n alpha = a[i][j]\r\n for k in range(0,j):\r\n alpha = alpha - a[i][k]*a[k][j]\r\n a[i][j] = alpha/a[j][j]\r\n for j in range(i,n):\r\n alpha = a[i][j]\r\n for k in range(0,i):\r\n alpha = alpha - a[i][k]*a[k][j]\r\n a[i][j] = alpha\r\n # After you have A = LU, then Ax = b -> Ux = y, Ly = b. Due to their nature,\r\n # these two systems are easily solved via forward and backwards pass.\r\n # Ly = b\r\n for i in range(n):\r\n alpha = 0\r\n for k in range(i+1):\r\n alpha = alpha + a[i][k]*y[k]\r\n y[i] = b[i] - alpha\r\n # Ux = y\r\n for i in range(n-1,-1,-1):\r\n alpha = 0\r\n for k in range(i+1,n):\r\n alpha = alpha + a[i][k]*x[k]\r\n x[i] = (y[i] - alpha)/a[i][i]\r\n return x", "def lu_solve(A, b):\n return A.from_ddm(A.to_ddm().lu_solve(b.to_ddm()))", "def solve(A_or_plu_or_pluq, b, pivoting='partial'):\n if isinstance(A_or_plu_or_pluq, tuple):\n solver = LU(np.eye(A_or_plu_or_pluq[0].shape[0]), pivoting=pivoting)\n\n solver.set_P(A_or_plu_or_pluq[0])\n solver.set_L(A_or_plu_or_pluq[1])\n solver.set_U(A_or_plu_or_pluq[2])\n if len(A_or_plu_or_pluq) > 3:\n solver.set_Q(A_or_plu_or_pluq[3])\n else:\n M, N = A_or_plu_or_pluq.shape\n Z = len(b)\n\n error_msg = \"[!] A must be square.\"\n assert (M == N), error_msg\n\n error_msg = \"[!] 
b must be {}-D\".format(M)\n assert (Z == N), error_msg\n\n solver = LU(A_or_plu_or_pluq, pivoting=pivoting)\n solver.decompose()\n\n x = solver.solve(b)\n\n return x", "def _solveX(L, U, b):\n m, n = L.shape\n # Forward Substitution\n y = list()\n y.insert(0, b[0]/L[0][0])\n for i in range(1, m):\n summ = 0\n for k in range(0, i):\n summ += L[i][k]*y[k]\n y.insert(i, (b[i]-summ)/(L[i][i]))\n\n # Backwards Substitution\n x = [0]*m\n x[m-1] = y[m-1] / U[m-1][m-1]\n for i in range(m - 2, -1, -1):\n summ = 0\n for k in range(i+1, n):\n summ += U[i][k]*x[k]\n x[i] = (y[i] - summ)/U[i][i]\n\n return x", "def LUsolve_csr(sLU, iLU, jLU, b):\n b = np.array(b)\n N = len(b)\n\n # Solves Lower triangular system Ly = b\n y = np.zeros(N, dtype=complex)\n for i in range(N):\n # Only does the scalar product for non-zero elements of L\n idx = iLU[i] + np.where(jLU[iLU[i]:iLU[i+1]] < i+1)[0]\n y[i] = b[i] - np.dot(sLU[idx], y[jLU[idx]])\n\n # Solves Upper triangular system Ux = y\n x = np.zeros(N, dtype=complex)\n for i in range(N - 1, -1, -1):\n # Only does the scalar product for non-zero elements of U\n idx = iLU[i] + np.where(jLU[iLU[i]:iLU[i + 1]] > i)[0]\n x[i] = (y[i] - np.dot(sLU[idx], x[jLU[idx]])) / (sLU[iLU[i] + np.where(jLU[iLU[i]:iLU[i+1]] == i)[0]])\n return x", "def solve(matrix, b):\n lu_matrix = decompose_to_LU(matrix)\n # get supporting vector y\n y = np.matrix(np.zeros([lu_matrix.shape[0], 1]), dtype=np.float64)\n for i in range(y.shape[0]):\n y[i, 0] = b[i] - lu_matrix[i, :i] * y[:i]\n\n # get vector of answers x\n x = np.matrix(np.zeros([lu_matrix.shape[0], 1]))\n for i in range(1, x.shape[0] + 1):\n x[-i, 0] = (y[-i] - lu_matrix[-i, -i:] * x[-i:, 0]) / lu_matrix[-i, -i]\n\n return np.array(x.transpose()[0], dtype=np.float64)[0]", "def solve_U(U, b):\n \n raise NotImplementedError", "def solve_L(L, b):\n\n raise NotImplementedError", "def LUsolve_csr_slow(sLU, iLU, jLU, b):\n b = np.array(b)\n N = len(b)\n\n # Solves Lower triangular system Ly = b\n y = np.zeros(N, dtype=complex)\n for i in range(N):\n y[i] = b[i]\n for j in range(iLU[i], iLU[i+1]):\n if jLU[j] < i: # Only does the scalar product for non-zero elements of L\n y[i] -= sLU[j] * y[jLU[j]]\n\n # Solves Upper triangular system Ux = y\n x = np.zeros(N, dtype=complex)\n for i in range(N - 1, -1, -1):\n u_ii = 0\n x[i] = y[i]\n for j in range(iLU[i], iLU[i + 1]):\n if jLU[j] == i:\n u_ii = sLU[j]\n if jLU[j] > i: # Only does the scalar product for non-zero elements of U\n x[i] -= sLU[j] * x[jLU[j]]\n x[i] /= u_ii # Divides by diagonal element of U\n return x", "def LUSolve(self, lu_piv , b):\n SpSub = self.NSpin*self.NSublat\n lu,piv=lu_piv\n self.Data = self.Data.reshape([SpSub, SpSub, self.__SpaceTimeVol])\n b = b.reshape([SpSub, SpSub, self.__SpaceTimeVol])\n self.Data = solver.lu_solve(lu,piv,b)\n self.Data = self.Data.reshape(self.__OriginShape)", "def descompLU(A, b):\n # Obtenemos el numero de filas\n n = len(A)\n\n # Llenamos la diagonal de la matriz L con 1\n L = [[0 if i!=j else 1 for j in range(n)] for i in range(n)]\n \n # Igualamos U a la matriz A\n # No copiamos directamente con U = A por que cualquier alteracion\n # en U tambien alterarรก las componentes de A\n U = [[A[i][j] for j in range(n)] for i in range(n)]\n \n for i in range(0, n):\n # Para cada fila i, accedemos a la columna i+1 hasta el final\n # y dividimos por el coefiente diagonal A(k ,k)\n for k in range(i+1, n):\n L[k][i] = U[k][i] / U[i][i]\n \n # Para cada fila desde i+i hasta el final, realizamos\n # eliminacion Gausiana. 
Al final preservaremos solo \n # la diagonal superior de A.\n for l in range(i+1, n):\n U[k][l] = U[k][l] - L[k][i] * U[i][l]\n \n U = [[U[i][j] if i<=j else 0 for j in range(n)] for i in range(n)]\n \n return L, U", "def decomposeLU(self):\n self.check_square()\n\n N = self.rows\n L = make_matrix(N, N)\n U = make_matrix(N, N)\n A = self #for more math friendly notation\n\n\n for j in range(N):\n L[j, j] = 1.0 #Doolittle factorization\n\n #e.g., if you are in column = 5, you go down 6 rows\n for i in range(j+1):\n U[i, j] = A[i, j] - sum(L[i, k] * U[k, j] for k in range(i))\n #e.g., if you are in column = 5,\n # you start at row 5 and go down for the lower triangular matrix\n for i in range(j, N):\n L[i, j] = (A[i, j] - sum(L[i, k] * U[k, j] for k in range(j))) / U[j, j]\n\n self.L = L\n self.U = U\n return L, U", "def solveU(U, b):\n # validate input\n if np.allclose(U,np.triu(U))==False or np.linalg.det == 0:\n raise TypeError(\"U is not an upper regular triangular matrix\")\n \n elif len(U.shape) != 2 or len(b.shape) != 1:\n raise TypeError(\"unsuitable object\")\n \n else:\n un, um = U.shape\n n, = b.shape\n if un != um or un != n:\n raise TypeError((\"dimensions do not fullfill requirements\"))\n\n # solve \n x = np.zeros(n, dtype=complex)\n x[-1] = (b[-1]) / U[n - 1, n - 1]\n for i in range(1, n):\n t = U[(n - (i + 1)):(n - i)] @ x\n x[-(i + 1)] = (b[-(i + 1)] - t) / U[n - (i + 1), n - (i + 1)]\n\n return x", "def cholesky_solve(a, bb):\n b = bb.copy()\n n = b.shape[0] - a.shape[0]\n kn = a.shape[0] - 1\n\n spot = np.arange(kn, dtype=int) + 1\n for j in range(n):\n b[j] /= a[0,j]\n b[j+spot] -= b[j]*a[spot,j]\n\n spot = spot[::-1]\n for j in range(n-1, -1, -1):\n b[j] = (b[j] - np.sum(a[spot,j] * b[j+spot]))/a[0,j]\n\n return -1, b", "def decompose_to_LU(a):\n # create emtpy LU-matrix\n lu_matrix = np.matrix(np.zeros([a.shape[0], a.shape[1]]))\n n = a.shape[0]\n\n for k in range(n):\n # calculate all residual k-row elements\n for j in range(k, n):\n lu_matrix[k, j] = a[k, j] - lu_matrix[k, :k] * lu_matrix[:k, j]\n # calculate all residual k-column elemetns\n for i in range(k + 1, n):\n lu_matrix[i, k] = (a[i, k] - lu_matrix[i, : k] * lu_matrix[: k, k]) / lu_matrix[k, k]\n\n return lu_matrix", "def LUdecomposition(self, iszerofunc=_iszero):\n combined, p = self.LUdecomposition_Simple(iszerofunc=_iszero)\n L = self.zeros(self.rows)\n U = self.zeros(self.rows)\n for i in range(self.rows):\n for j in range(self.rows):\n if i > j:\n L[i,j] = combined[i,j]\n else:\n if i == j:\n L[i,i] = 1\n U[i,j] = combined[i,j]\n return L, U, p", "def test_LU(self):\n A = np.random.rand(10, 10)\n MA = to_matrix(A)\n ML, MU = MA.decomposeLU()\n self.assertEqual(ML*MU, MA)\n self.assertTrue(ML.is_lower_triangular())\n self.assertTrue(MU.is_upper_triangular())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructs an Octave ResNet26 model.
def pre_act_oct_resnet26(pretrained=False, **kwargs):
    model = PreActOctResNet(Bottleneck, [2, 2, 2, 2], **kwargs)
    return model
[ "def create_embedding_net():\n # This is a resnet50 base model\n resnet_model = models.resnet50(pretrained=True)\n\n # Now modify the network layers\n resnet_model.fc = Identity()\n resnet_model.avgpool = Identity() \n #print(resnet_model)\n\n return resnet_model", "def resnet10(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [1, 1, 1, 1], shortcut_type, num_classes, in_channels)\n return model", "def resnet18():\n model = ResNet18(BasicBlock, [2, 2, 2, 2])\n #if pretrained:\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet():\n return models.resnet152(pretrained=True)", "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "def preresnet56(num_classes=10):\n model = CifarPreResNet(ResNetBasicblock, 56, num_classes)\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnext18( **kwargs):\n model = ResNeXt(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def build_regnet():\n regnety_160 = {\n 'stem_width': 32,\n 'bottle_ratio': 1.0,\n 'w0': 200,\n 'wa': 106.23,\n 'wm': 2.48,\n 'group_w': 112,\n 'depth': 18,\n 'se_ratio': 0.25,\n 'num_classes': 1000,\n 'pool_size': (7, 7),\n 'crop_pct': 0.875,\n }\n model = RegNet(regnety_160)\n return model", "def initialize_model():\n model = models.squeezenet1_0(pretrained=False)\n model.features[0] = nn.Conv2d(1, 96, kernel_size=7, stride=2)\n model.classifier[1] = nn.Conv2d(512, 25, kernel_size=(1, 1), stride=(1, 1))\n\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n None\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model", "def preresnet20(num_classes=10):\n model = CifarPreResNet(ResNetBasicblock, 20, num_classes)\n return model", "def resnext101(**kwargs):\n model = ResNeXt(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def get_model():\n # Load the pretrained model.\n model = torchvision.models.resnet34(pretrained=True)\n\n # Resize model for our task.\n model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1,\n bias=False)\n model.avgpool = torch.nn.AvgPool2d(2)\n model.fc = torch.nn.Linear(in_features=512, out_features=10, bias=True)\n\n return model", "def resnext152(**kwargs):\n model = ResNeXt(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "def generate_model(**kwargs):\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)\n return model" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructs an Octave ResNet200 model.
def pre_act_oct_resnet200(pretrained=False, **kwargs):
    model = PreActOctResNet(Bottleneck, [3, 24, 36, 3], **kwargs)
    return model
[ "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "def create_embedding_net():\n # This is a resnet50 base model\n resnet_model = models.resnet50(pretrained=True)\n\n # Now modify the network layers\n resnet_model.fc = Identity()\n resnet_model.avgpool = Identity() \n #print(resnet_model)\n\n return resnet_model", "def resnet200(**kwargs):\n model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model", "def resnet18():\n model = ResNet18(BasicBlock, [2, 2, 2, 2])\n #if pretrained:\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnext18( **kwargs):\n model = ResNeXt(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def resnet():\n return models.resnet152(pretrained=True)", "def pre_act_oct_resnet26(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [2, 2, 2, 2], **kwargs)\n return model", "def resnet10(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [1, 1, 1, 1], shortcut_type, num_classes, in_channels)\n return model", "def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model", "def generate_model(**kwargs):\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)\n return model", "def resnet101(config, **kwargs):\n model = ResNet(config, Bottleneck, [3, 4, 23, 3], **kwargs)\n\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n None\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model", "def preresnet20(num_classes=10):\n model = CifarPreResNet(ResNetBasicblock, 20, num_classes)\n return model", "def build_lenet_model():\n model = build_preprocess_layers()\n model.add(Convolution2D(6, 5, 5, activation=\"relu\"))\n model.add(MaxPooling2D())\n model.add(Convolution2D(6, 5, 5, activation=\"relu\"))\n model.add(MaxPooling2D())\n model.add(Flatten())\n model.add(Dense(120))\n model.add(Dense(84))\n model.add(Dense(1))\n\n return model", "def ResNet20(inputShape):\n inputs = Input(shape=inputShape)\n x = resLayer(inputs) # resLayer1\n\n # resBlocks\n for nStage in range(3):\n for nBlock in range(3):\n x = resBlock(x, nStage, nBlock)\n\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(10, activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Generate model\n model = Model(inputs=inputs, outputs=outputs)\n return model", "def resnet18():\n return ResNet(Basic_Block, [2, 2, 2, 2])", "def resnext101(**kwargs):\n model = ResNeXt(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def oct_resnet152(**kwargs):\n return _oct_resnet(Bottleneck, [3, 8, 36, 3], **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The standard size of a tile sprite in 2D screen space.
def tile_size_2d(self):
    return 32.0, 32.0
[ "def tile_size(self):\n return self._tile_size", "def tile_size(self) -> int:\n return self._tile_size", "def get_tile_size(self) -> int:\n return self.tile_size.spin.value()", "def _get_tile_size() -> int:\n return octree_config['octree']['tile_size'] if octree_config else 256", "def get_sprite_size(self):\n return len(self.state)", "def GetTileScale(self):\n ...", "def cellsize_2d(self):\t\r\n return self.dx * self.dy", "def tileWidth(self):\n return self._tileWidth", "def super_cell_size(self):\n return np.array([self.x_param(), 2.0 * self.y_param(), ])", "def calc_image_size(spr):\n return int(max(spr.label_safe_width(), 1)), \\\n int(max(spr.label_safe_height(), 1))", "def tileHeight(self):\n return self._tileHeight", "def square_width(self):\n\n return (min(self.contentsRect().height(), self.contentsRect().width()) //\n min(len(self.layers), self.max_nodes))", "def square_height(self):\n\n return (min(self.contentsRect().height(), self.contentsRect().width()) //\n min(len(self.layers), self.max_nodes))", "def calculate_screen_size():\n return pygame.display.Info().current_w, pygame.display.Info().current_h", "def _rect_size(self):\n bnd = self._bounds\n return (bnd[1][0] - bnd[0][0], bnd[1][1] - bnd[0][1])", "def get_tilesize(self, sampling):\n xsize = {\n 'T6': 600000,\n 'T3': 300000,\n 'T1': 100000\n }[self.get_tiletype(sampling)]\n ysize = {\n 'T6': 600000,\n 'T3': 300000,\n 'T1': 100000\n }[self.get_tiletype(sampling)]\n return xsize, ysize", "def size(self):\n return self.surface.get_size()", "def to_im_size(self):\n return ImageSize(self.x2 - self.x1 + 1, self.y2 - self.y1 + 1)", "def get_map_size(self, map_major_dim=None):\n w, h = self.img_size\n mmd = map_major_dim\n if w >= h:\n x_tiles = mmd\n y_tiles = round(h / w * mmd)\n else:\n x_tiles = round(w / h * mmd)\n y_tiles = mmd\n\n return (x_tiles, y_tiles)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Using the Command_Handler from the command module to handle a command.
def usingHandler(self, cmd):
    self.command_handler.handle_command(cmd)
    while msg_queue.empty() is False:
        self.writeresponse(msg_queue.get())
[ "def mod_command_handler(self, cmd, args):\n self.command_handler_params = (cmd, args) # for inspection\n return bundy.config.create_answer(0)", "def handle(self, command):\n fn = \"handle_{}\".format(command)\n try:\n self.commands[fn](self)\n except KeyError:\n if settings.DEBUG:\n print(\"Could not find command: {}\".format(command))", "def on_command(self, cmd_event):\n # If the command has already been handled, there is nothing left to do.\n if cmd_event.handled:\n return\n\n command = cmd_event.command.split(' ')[0]\n\n if command in self.handlers:\n self.handlers[command][1](cmd_event)\n else:\n cmd_event.response = 'Invalid command: %s\\nTry using \"help\" for a list of commands' % cmd_event.command", "def _mod_command_handler(self, cmd, args):\n return bundy.config.create_answer(1, \"Unknown command: \" + str(cmd))", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "def _handler(self, bot, update, *args, **kwargs):\n raise NotImplementedError('Not implemented command handler method.')", "def handle_command(self, command):\n\n cmd = json.loads(command)\n cmd_type = cmd.get('type')\n\n handler = self._command_handlers.get(cmd_type)\n if not handler:\n raise ValueError(f'Unknown command {cmd_type}')\n event = handler(cmd)\n return eventd", "def command():\n pass", "def _handle_command(self, command: str, params: list):\n if command in self.cli_controller.global_commands:\n self.cli_controller.global_commands[command].execute(params)\n elif self.cli_controller.target:\n self._handle_category_command(command, params)\n else:\n self._print_command_not_found(command)\n print(\n \"\"\"\\tMaybe the command you were trying to execute is a target specific command,\n\\tuse 'select' to select a target.\"\"\"\n )", "def on_command(server, user, command, args):", "def __command_handler__(self, commands, handler):\n message_set = self.event.text.split(u' ')\n for command in commands:\n if command in message_set:\n handler(self.event, self.vk)\n break", "def get_command(self,command):\n\t\treturn self.command_handlers[command]", "async def handle_command(self, command_message, user, chatroom):\n command_message = command_message.strip()\n if not self.is_command(command_message):\n # occurs when command is invalid after trimming (may have technically been valid before e.x. '! 
')\n resp = Response(f\"\\\"{command_message}\\\" is not a valid command (Syntax)\", Origin.SERVER)\n await chatroom.send(resp, user.websocket)\n return\n\n command_name, command_args = self._parse_command(command_message)\n if command_name not in self.registered_commands:\n resp = Response(f\"\\\"{command_message}\\\" is not a valid command (Doesn't exist)\", Origin.SERVER)\n await chatroom.send(resp, user.websocket)\n return\n\n # Invoke the command, passing calling user, chatroom, and arguments\n command_func = self.registered_commands[command_name]\n await command_func(self, user, chatroom, command_args)", "def __init__(self, command_handler_name):\n\n # Set the command handler attributes\n self.name = command_handler_name", "def map_handler(command):\n\ttry:\n\t\treturn COMMAND_HANDLERS[command]\n\texcept KeyError:\n\t\terr_msg = f\"Unimplemented command '{command}'\"\n\t\tlogging.error(err_msg)\n\t\traise CommandException(err_msg)", "def handle(self, *args, **options):\n if not self.server:\n print 'Error : %s' % self.init_error\n return\n\n handler_choice = {\n 'proxy': self.proxy_handle,\n 'server': self.server_handle,\n }\n\n sub_command = options['sub_command']\n handler_choice.get(sub_command)(options)", "def process_command(self,cmd,*args,**kwargs):\n \t#if (self.IsServerOn()==False): sys.exit(); \n \tif self._closed: return\n \tself._last_response=\"\"; method=getattr(self,'ftp_'+cmd.replace(' ','_'))\n \tmethod(*args,**kwargs)\n \tif self._last_response: code=int(self._last_response[:3]); resp=self._last_response[4:]; self.log_cmd(cmd,args[0],code,resp)", "def handle(self, line):\n if not line.startswith(self.CMD):\n return\n try:\n command, _, args = line[1:].partition(\" \")\n args = args.split(None)\n getattr(self, 'fx_{}'.format(command)).handle(self, args)\n except:\n log.msg(\"generic error after handle\")\n log.err()", "def handle_command(command, channel):\n #Default respons is help text for the user\n default_response = \"This don't exist m8. Try *{}*.\".format(\"!price trx\")\n #Finds and executes the given command, filling in response\n response = None\n \n if command.lower() in name_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + name_id_map[command.lower()] + '/')\n coin = req.json()\n text =format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command.lower() in symbol_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + symbol_id_map[command.lower()] + '/')\n coin = req.json()\n text = format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!top':\n text = top_coins()\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!exit':\n text = \":wasssap3::wasssap3:ABANDON SHIP!!!:wasssap3::wasssap3:\\n :rotating_light:EXIT ALL MARKETS:rotating_light:\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!ping':\n text = \"Still scavaging the moon.\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n else:\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=default_response,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Target method used by monitor thread, which polls vbmc status every 3s. If vbmc stops, ipmiconsole will stop.
def monitor(instance="default"):
    global logger_ic
    while True:
        try:
            with open("{}/{}/.{}-bmc.pid".format(
                    config.infrasim_home, instance, instance), "r") as f:
                pid = f.readline().strip()
                if not os.path.exists("/proc/{}".format(pid)):
                    logger_ic.warning("Node {} vBMC {} is not running, "
                                      "ipmi-console is ready to quit".
                                      format(instance, pid))
                    break
            time.sleep(3)
        except IOError:
            logger_ic.warning("Node {} workspace is possibly destroyed, "
                              "ipmi-console is ready to quit".format(instance))
            break
    stop(instance)
[ "def bitmessage_monitor(self):\n while True:\n now = time.time()\n if self.timer_check_bm_alive < now:\n while self.timer_check_bm_alive < now:\n self.timer_check_bm_alive += 10\n lf = LF()\n if (not self.is_restarting_bitmessage and\n lf.lock_acquire(config.LOCKFILE_API, to=60)):\n try:\n self._api.add(2, 3)\n time.sleep(0.1)\n except socket.timeout:\n logger.error(\"Timeout during BM monitor API query: restarting bitmessage\")\n self.restart_bitmessage()\n except Exception as err:\n logger.error(\"Exception during BM monitor API query: {}\".format(err))\n finally:\n lf.lock_release(config.LOCKFILE_API)\n time.sleep(2)", "def _StatusUpdateThreadMain(self):\n while self._status_update_active:\n self._UpdateStatus()\n time.sleep(self._status_update_interval)", "def __init__(self, interval, project, vcs, parent=None):\n super(VcsStatusMonitorThread, self).__init__(parent)\n self.setObjectName(\"VcsStatusMonitorThread\")\n \n self.setTerminationEnabled(True)\n \n self.projectDir = project.getProjectPath()\n self.project = project\n self.vcs = vcs\n \n self.interval = interval\n self.autoUpdate = False\n \n self.statusList = []\n self.reportedStates = {}\n self.shouldUpdate = False\n \n self.monitorMutex = QMutex()\n self.monitorCondition = QWaitCondition()\n self.__stopIt = False", "def updateStatus(self):\n done = False\n if not self.pg.is_alive():\n done = True\n while not self.pg.msgQueue.empty():\n msg = str(self.pg.msgQueue.get(False))\n self.monitorTextBox.append(msg)\n if done:\n self.timer.stop()\n self.pg.join()\n self.runButton.setEnabled(True)\n self.stopButton.setEnabled(False)\n if self.pg.ex:\n etype, evalue, etrace = self.pg.ex\n el = traceback.format_exception(etype, evalue, etrace)\n for line in el:\n self.monitorTextBox.append(line)\n self.setStatusBar.emit(\n \"Surrogate Failed Elapsed Time: {0}\".format(\n hhmmss(math.floor(time.time() - self.timeRunning))\n )\n )\n else:\n self.setStatusBar.emit(\n \"Surrogate Finished, Elapsed Time: {0}\".format(\n hhmmss(math.floor(time.time() - self.timeRunning))\n )\n )\n if self.pg.driverFile != \"\":\n try:\n df = os.path.abspath(self.pg.driverFile)\n except:\n pass\n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"Driver File Location\")\n msgBox.setText(\n \"The surrogate model driver file path is: {0}\".format(\n os.path.abspath(df)\n )\n )\n msgBox.exec_()\n else:\n self.refreshContents()\n self.setStatusBar.emit(\n \"Surrogate Model Generation, Elapsed Time: {0}s\".format(\n math.floor(time.time() - self.timeRunning)\n )\n )", "def run(self):\n while self.running:\n self.__update_battery()\n self.__update_signal()\n time.sleep(5)", "def on_timer(self):\n self.read_serial_data()\n # self.update_monitor()", "def monitor(self):\n import curses\n import inspect\n\n stdscr = curses.initscr()\n curses.curs_set(0)\n curses.noecho()\n curses.cbreak()\n width_split = curses.COLS//3-1\n win_done = curses.newwin(curses.LINES-1, width_split, 0, 0)\n win_running = curses.newwin(curses.LINES-1, width_split,\n 0, width_split+1)\n win_pending = curses.newwin(curses.LINES-1, width_split,\n 0, 2*width_split+1)\n stdscr.addstr(curses.LINES-1, 0,\n 'Monitoring started. 
Press Ctrl+C to stop.')\n stdscr.refresh()\n win_done.addstr(0, 0, 'DONE')\n win_pending.addstr(0, 0, 'PENDING')\n while True:\n try:\n win_done.addstr(1, 0,\n f'{len(self.done)} jobs done')\n list_done = list(self.done)[:curses.LINES-3]\n for idx, fut in enumerate(list_done, start=2):\n fmt_str = f'{id(fut):x} {fut._state}'\n win_done.addstr(idx, 0, fmt_str)\n win_done.refresh()\n\n win_running.clear()\n win_running.addstr(0, 0, 'RUNNING')\n win_running.addstr(1, 0,\n f'{self.running.qsize()} jobs running')\n list_running = list(self.running.items())[:curses.LINES-3]\n for idx, (fut, coro) in enumerate(list_running, start=2):\n coro_state = inspect.getcoroutinestate(coro)\n fmt_str = f'{id(fut):x} {coro_state}'\n win_running.addstr(idx, 0, fmt_str)\n win_running.refresh()\n\n win_pending.clrtoeol()\n win_pending.addstr(1, 0,\n f'{self.pending.qsize()} jobs pending')\n win_pending.refresh()\n time.sleep(.1)\n except KeyboardInterrupt:\n break\n\n curses.nocbreak()\n curses.echo()\n curses.endwin()", "def run(self):\n self.cncLock.acquire()\n self.running = True\n\n # Initialize\n try:\n self.cnc = serial.Serial(self.deviceFile,BAUD_RATE)\n\n self.updaterThread = threading.Thread(target=self.periodic_timer)\n self.updaterThread.start()\n\n # Wake up grbl\n log.info(\"Initializing Grbl...\")\n cmd = \"\\r\\n\\r\\n\"\n self.cnc.write(cmd.encode())\n\n # Wait for grbl to initialize and flush startup text in serial input\n time.sleep(2)\n self.cnc.flushInput()\n self.cncLock.release()\n\n while self.running :\n cmd = self.commandQueue.get().strip() + EOLStr\n if self.running == False:\n break\n self.cncLock.acquire()\n self.cnc.write(cmd.encode())\n\n out = str(self.cnc.readline().strip()) # Wait for grbl response\n if out.find('ok') >= 0 :\n log.debug(f'MSG: {out}') # Debug response\n elif out.find('error') >= 0 :\n log.error(f'ERROR: {out}')\n else:\n log.info(out)\n self.cncLock.release()\n except:\n raise\n finally:\n log.debug(\"CNC main loop left\")\n self.cnc.close()", "def monitor(self):\n\n if self._running:\n # Current time\n ctime = time.time()\n # A float representing the current system-wide CPU utilization as a percentage.\n cpu = psutil.cpu_percent()\n # System memory usage percent = (total - available) * 100.0 / total\n memData = dict(psutil.virtual_memory()._asdict())\n vmem = memData['percent']\n self.readings.append((ctime, cpu, vmem))\n t = threading.Timer(self.interval, self.monitor)\n t.start()", "def stop_polling(self):\r\n self.KCube.CC_StopPolling(self.serial)", "def run(self):\n self.monitor.start()", "def longPoll(self):\n self.heartbeat()\n pass", "def run():\n logger.verbose(\"bwmon: Thread started\")\n while True:\n lock.wait()\n logger.verbose(\"bwmon: Event received. 
Running.\")\n database.db_lock.acquire()\n nmdbcopy = copy.deepcopy(database.db)\n database.db_lock.release()\n try:\n if getDefaults(nmdbcopy) and len(bwlimit.tc(\"class show dev %s\" % dev_default)) > 0:\n # class show to check if net:InitNodeLimit:bwlimit.init has run.\n sync(nmdbcopy)\n else: logger.log(\"bwmon: BW limits DISABLED.\")\n except: logger.log_exc(\"bwmon failed\")\n lock.clear()", "def _checkStatus(self):\n \n if self.last_messages == self.messages_received:\n self.channel.stop_consuming()\n else:\n self.last_messages = self.messages_received\n self.timer_id = self.conn.call_later(300, self._checkStatus)", "def remote_busy(self):\n self.ringer.play_busy()", "def control_c(self) -> None:\n time.sleep(0.1) # sometimes it's better to wait a bit\n send_control_c(self.proc, True)", "def _stopped(self):", "async def check_status(self):\n while True:\n async with self._loop_lock:\n new_monitor_processes = {}\n for class_name in self.monitor_processes:\n monitor = self.monitor_processes[class_name][\"process\"]\n if monitor.poll() is not None:\n log = f\"Monitor {class_name} has stopped with code: {monitor.returncode}\"\n if monitor.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Monitor \" + class_name,\n monitor.returncode,\n monitor.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_monitor_processes[class_name] = self.monitor_processes[\n class_name\n ]\n self.monitor_processes = new_monitor_processes\n\n new_scraper_processes = {}\n for class_name in self.scraper_processes:\n scraper = self.scraper_processes[class_name][\"process\"]\n if scraper.poll() is not None:\n log = f\"Scraper {class_name} has stopped with code: {scraper.returncode}\"\n if scraper.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Scraper \" + class_name,\n scraper.returncode,\n scraper.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_scraper_processes[class_name] = self.scraper_processes[\n class_name\n ]\n self.scraper_processes = new_scraper_processes\n await asyncio.sleep(1)", "def poll(self):\n\tself.met = self.button.poll()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop ipmiconsole of target instance specified by its name
def stop(instance="default"):
    global logger_ic
    logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance)
    try:
        file_ipmi_console_pid = "{}/{}/.ipmi_console.pid".\
            format(config.infrasim_home, instance)
        with open(file_ipmi_console_pid, "r") as f:
            pid = f.readline().strip()
        os.kill(int(pid), signal.SIGTERM)
        logger_ic.info("SIGTERM is sent to pid: {}".format(pid))
        os.remove(file_ipmi_console_pid)
    except IOError:
        # When pid file is missing, by e.g., node destroy,
        # find process id by instance name
        if instance == "default":
            process_name = "ipmi-console start$"
        else:
            process_name = "ipmi-console start {}".format(instance)
        ps_cmd = r"ps ax | grep '{}' | grep Sl | awk '{{print $1}}' | head -n1".format(process_name)
        logger_ic.warning("Fail to find ipmi console pid file, check by:")
        logger_ic.warning("> {}".format(ps_cmd))
        _, pid = run_command(cmd=ps_cmd)
        logger_ic.warning("ipmi console pid got: {}".format(pid))
        if not pid:
            logger_ic.warning("ipmi console for instance {} is not running".format(instance))
            return
        os.kill(int(pid), signal.SIGTERM)
        logger_ic.info("SIGTERM is sent to pid: {}".format(pid))
    except Exception:
        logger_ic.warning(traceback.format_exc())
        pass
[ "def stop(self):\n self.scion_sh('stop')", "def stop_single(self, name):\n logger.debug('Stopping module \"%s\"', name)\n self.modules[name].stop()\n logger.debug('Module \"%s\" stopped', name)", "def stop_notebook_instance(NotebookInstanceName=None):\n pass", "def _stop_instance(self, instance):\n self.log.info(\"Stopping instance %s\", instance.id)\n instance.stop()", "def ec2_stop(resource, metadata):\n instances = resource.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']},\n {'Name': 'tag:Name', 'Values': [metadata['fqdn']]}, ])\n\n for instance in instances:\n print(\"Terminating vm id {0} name {1}\".format(instance.id, instance.tags[0]['Value']))\n # resource.instances.filter(InstanceIds=[instance.id]).stop()\n resource.instances.filter(InstanceIds=[instance.id]).terminate()", "def stop(self, **kwargs):\n instance_name = getOptionFrom(kwargs, 'instance-name')\n oauth = self.Server\n status = oauth.get_status(instance_name)\n if status['status'] == 'running':\n oauth.stop(instance_name)\n else:\n padded_success(\"Already stopped\")\n if status['status'] == 'stopped':\n print '\\nAlready stopped\\n'", "def stop_test_instance(test_name=None):\n env.warn_only = True\n if test_name is not None:\n instances = [test_name]\n else:\n output = run('ls -1 %s' % env.site_root)\n instances = [x.strip() for x in output.split(\"\\n\")]\n for item in instances:\n sudo(\"stop %s\" % item.strip())", "def stop_instance(self):\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Stopping the instance \"%s\"' % instance_id\n self.compute.stop_instance(instance_id)\n print 'The instance has been stopped'", "def stop(self):\n result = mpc_command(['stop'])", "def stop(self):\n self._kill_process()", "def stop(account):\n with settings(hide('everything', 'commands'), warn_only=True):\n if is_host_up(env.host):\n run ('cd /falkia/%s ; mongrel_rails stop -P /tmp/%s.pid' % (account, account))\n print(yellow(\"* falkia instance stopped on %s\") % account)", "def runner_stop():\n RunnerInstance.instance().stop()", "def stop(self, name):\n self.buttons[name] = False", "def stop(self):\n\n print utilities.run(os.path.join(\"source \" + self.params.INSTALLATION_DIRECTORY, \"chorus_path.sh\") + \" && chorus_control.sh stop\", communicate=\"\", user=self.user())", "def terminate_instance(self, name):\n server = self.get_server(name)\n if server:\n self.nova_client.servers.delete(server)\n time.sleep(60)\n logger.info(\"Terminated Instance '%s'\", name)\n else:\n logger.error(\"Instance '%s' does not exist\", name)", "def kill(self):\n self._screen_commands('quit')", "def stop_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Stopping a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"stop\"])\n popdir()", "def stop_sample(self, sample_name):\n self.sampleObjects[sample_name].stop()", "def stop(self):\n if self.running and self.save_output:\n if not self.name:\n self.name = self.container.get('Id')\n out_path = self.output_dir + \"/output-\" + self.name + \".txt\"\n with open(out_path, 'w') as f:\n print(d.attach(container=self.container.get('Id'), stream=False, logs=True), file=f)\n f.closed\n if self.container:\n self.logger.debug(\"Removing container '%s'\" % self.container['Id'])\n d.kill(container=self.container)\n self.running = False\n d.remove_container(self.container)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a handle to the Ceph Cluster.
def connect(ceph_config_file, timeout = CEPH_TIMEOUT):
    handle = rados.Rados(conffile = ceph_config_file)
    LOGGER.info("librados version: " + str(handle.version()))
    LOGGER.info("Attempting to connect to: " + str(handle.conf_get('mon initial members')))
    handle.connect()  # timeout should be specified
    LOGGER.info("Cluster ID" + handle.get_fsid())
    return handle
[ "def host_cluster_create(context, values):\n return IMPL.host_cluster_create(context, values)", "def create_cluster():\n config = get_kube_config()\n command = CLUSTER_CREATE_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n machine_type=config['machine_type'],\n disk_size=config['disk_size'],\n nodes=config['nodes'],\n zone=config['zone'])\n print \"Creating cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))\n command = AUTH_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n zone=config['zone'])\n print \"Authenticating with cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))", "def createcluster(self):\n for hostitem in OTHER_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n alive = str(REMAINING_NODES)[1:-1]\n print \"{}\\nThe following nodes are alive in cluster:{}\\n {}\".format(\n RED, WHITE, alive)\n print \"\\n\\nTo boostrap a new cluster you need to switch them off\\n\"\n os.sys.exit(1)\n else:\n if self.mode == \"new\" and not self.force:\n ask('\\nThis operation will destroy the local data')\n clean_dir(self.datadir)\n initialize_mysql(self.datadir)\n bootstrap_mysql(self.mode)\n if self.mode == \"new\":\n create_monitor_table()\n ALL_NODES.append(\"localhost\")\n for creditem in CREDENTIALS:\n create_users(creditem)\n print \"\"\n drop_anonymous()", "def _setup_test_cluster(self, return_cluster, name, create_args):\n stack_name = '{0}_stack'.format(name)\n templ, self.stack = self._setup_test_stack(stack_name, TEMPLATE)\n cluster_instance = cbd.CloudBigData('%s_name' % name,\n templ.resource_definitions(\n self.stack)['cbd_cluster'],\n self.stack)\n self._stubout_create(return_cluster)\n return cluster_instance", "async def open(cls, loop, *, aliases=None, configfile=None, **config):\n cluster = cls(loop, aliases=aliases, **config)\n if configfile:\n cluster.config_from_file(configfile)\n await cluster.establish_hosts()\n return cluster", "def create_cluster(self, cluster_spec):\n raise Exception(\"Unimplemented\")", "def ceph_ha(self):\n if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:\n raise SkipTest()\n\n self.env.revert_snapshot(\"ready\")\n self.env.bootstrap_nodes(self.env.nodes().slaves[:6])\n\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=settings.DEPLOYMENT_MODE_HA,\n settings={\n 'volumes_ceph': True,\n 'images_ceph': True,\n 'volumes_lvm': False\n }\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['controller', 'ceph-osd'],\n 'slave-02': ['controller', 'ceph-osd'],\n 'slave-03': ['controller', 'ceph-osd'],\n 'slave-04': ['compute', 'ceph-osd'],\n 'slave-05': ['compute', 'ceph-osd'],\n 'slave-06': ['ceph-osd']\n }\n )\n # Depoy cluster\n self.fuel_web.deploy_cluster_wait(cluster_id)\n check_ceph_health(self.env.get_ssh_to_remote_by_name('slave-01'))\n\n # Run ostf\n self.fuel_web.run_ostf(cluster_id=cluster_id)\n\n self.env.make_snapshot(\"ceph_ha\")", "def __create(self):\n pass\n\n # create at cluster-provider\n # get kubeconfig\n # wait for api\n # ^ could be async and seperate steps?", "def create_cluster(self):\n # TODO: check available resources\n # self.check_available_resources()\n\n # TEMP\n # self.cleanup_cluster()\n\n self.logger.info(\"Creating new cluster for project '%s'...\", self.project_name)\n\n 
self.create_security_group()\n self.create_network()\n self.create_ssh_key_pair()\n self.create_vms()\n\n self.process_cloud_vars()\n\n self.logger.info(\"Cluster setup for project '%s' complete...\", self.project_name)", "def cluster(self) -> Cluster:", "def create(create_deployment: dict):\n logger.info(\"Creating cluster\")\n\n # Get standardized cluster_details\n cluster_details = GrassOnPremisesExecutor._standardize_cluster_details(create_deployment=create_deployment)\n cluster_name = cluster_details[\"name\"]\n if os.path.isdir(f\"{GlobalPaths.ABS_MARO_CLUSTERS}/{cluster_name}\"):\n raise BadRequestError(f\"Cluster '{cluster_name}' is exist\")\n\n # Start creating\n try:\n GrassOnPremisesExecutor._init_master(cluster_details=cluster_details)\n GrassOnPremisesExecutor._create_user(cluster_details=cluster_details)\n\n # Remote create master, cluster after initialization\n master_api_client = MasterApiClientV1(\n master_hostname=cluster_details[\"master\"][\"public_ip_address\"],\n master_api_server_port=cluster_details[\"master\"][\"api_server\"][\"port\"],\n user_id=cluster_details[\"user\"][\"id\"],\n master_to_dev_encryption_private_key=cluster_details[\"user\"][\"master_to_dev_encryption_private_key\"],\n dev_to_master_encryption_public_key=cluster_details[\"user\"][\"dev_to_master_encryption_public_key\"],\n dev_to_master_signing_private_key=cluster_details[\"user\"][\"dev_to_master_signing_private_key\"],\n )\n master_api_client.create_master(master_details=cluster_details[\"master\"])\n master_api_client.create_cluster(cluster_details=cluster_details)\n except Exception as e:\n # If failed, remove details folder, then raise\n shutil.rmtree(path=f\"{GlobalPaths.ABS_MARO_CLUSTERS}/{cluster_name}\")\n logger.error_red(f\"Failed to create cluster '{cluster_name}'\")\n raise e\n\n logger.info_green(f\"Cluster {cluster_name} has been created.\")", "def initialize_cluster(cluster):\n logger.info('Creating a new cluster for %s...', cluster)\n\n configuration = ClusterConfiguration(version=__version__)\n ztransaction = cluster.zookeeper.transaction()\n ztransaction.create(cluster.path, BinaryCodec(ClusterConfiguration).encode(configuration))\n ztransaction.create(cluster.get_set_path())\n commit(ztransaction)", "def initialize_cluster():\n ch_core.hookenv.log(\"Initializing InnoDB cluster.\", \"DEBUG\")\n with charm.provide_charm_instance() as instance:\n instance.configure_instance(instance.cluster_address)\n instance.create_cluster()\n instance.assess_status()", "def cassandra_connection():\n cluster = Cluster()\n session = cluster.connect()\n session.execute(\"\"\"\n CREATE KEYSPACE IF NOT EXISTS test\n WITH REPLICATION =\n { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }\n \"\"\")\n session.set_keyspace('test')\n print(\"Connected\")\n return session, cluster", "def Create(ctx,\n mvip,\n svip,\n rep_count,\n username,\n password,\n nodes,\n accept_eula = None,\n attributes = None):\n \"\"\"\"\"\"\n \"\"\"Note: You need to log into the node that is used as the master node for the cluster. Once logged in, run the GetBootstrapConfig method on the node to get the IP addresses for the rest of the nodes that you want to include in the cluster. 
Then run the CreateCluster method.\"\"\"\n if ctx.element is None:\n ctx.logger.error(\"You must establish at least one connection and specify which you intend to use.\")\n exit()\n\n\n\n nodes = parser.parse_array(nodes)\n if(attributes is not None):\n kwargsDict = simplejson.loads(attributes)\n attributes = dict(**kwargsDict)\n\n ctx.logger.info(\"\"\"accept_eula = \"\"\"+str(accept_eula)+\"\"\";\"\"\"+\"\"\"mvip = \"\"\"+str(mvip)+\"\"\";\"\"\"+\"\"\"svip = \"\"\"+str(svip)+\"\"\";\"\"\"+\"\"\"rep_count = \"\"\"+str(rep_count)+\"\"\";\"\"\"+\"\"\"username = \"\"\"+str(username)+\"\"\";\"\"\"+\"\"\"password = \"\"\"+str(password)+\"\"\";\"\"\"+\"\"\"nodes = \"\"\"+str(nodes)+\"\"\";\"\"\"+\"\"\"attributes = \"\"\"+str(attributes)+\"\"\";\"\"\"+\"\")\n try:\n CreateClusterResult = ctx.element.create_cluster(mvip=mvip, svip=svip, rep_count=rep_count, username=username, password=password, nodes=nodes, accept_eula=accept_eula, attributes=attributes)\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n\n cli_utils.print_result(CreateClusterResult, ctx.logger, as_json=ctx.json, depth=ctx.depth, filter_tree=ctx.filter_tree)", "def create(self):\n print(\"+ Creating cluster: {}. This may take a few minutes ...\".format(self.name_hyphenated))\n if self.num_gpus == 0:\n out = util.syscall(\"gcloud container clusters create {} -m {} --disk-size {} --num-nodes {} {}\".\n format(self.name_hyphenated, self.machine_type, self.disk_size, self.num_nodes,\n \"--zone \" + self.location if self.location else \"\"), return_outputs=\"as_str\")\n else:\n out = util.syscall(\"gcloud container clusters create {} --enable-cloud-logging --enable-cloud-monitoring \"\n \"--accelerator type={},count={} {} -m {} --disk-size {} --enable-kubernetes-alpha \"\n \"--image-type UBUNTU --num-nodes {} --cluster-version 1.9.2-gke.1 --quiet\".\n format(self.name_hyphenated, self.gpu_type, self.gpus_per_node,\n \"--zone \"+self.location if self.location else \"\", self.machine_type, self.disk_size,\n self.num_nodes), return_outputs=\"as_str\")\n # check output of cluster generating code\n if re.search(r'error', out, re.IGNORECASE):\n raise util.TFCliError(out)\n else:\n print(\"+ Successfully created cluster.\")\n self.instances, self.primary_name = util.get_compute_instance_specs(self.name_hyphenated)\n self.started = True\n\n # install NVIDIA drivers on machines per local kubectl\n if self.num_gpus > 0:\n print(\"+ Installing NVIDIA GPU drivers and k8s device plugins ...\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/GoogleCloudPlatform/\"\n \"container-engine-accelerators/k8s-1.9/daemonset.yaml\")\n util.syscall(\"kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n\n print(\"+ Done. 
Cluster: {} created.\".format(self.name_hyphenated))", "def set_cluster():\n print(\"Setting cluster\")\n \n cluster = Cluster()\n session = cluster.connect()\n try:\n session.execute(\n \"\"\"\n CREATE KEYSPACE IF NOT EXISTS udacity \n WITH REPLICATION = \n { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }\"\"\"\n )\n except Exception as e:\n print(e)\n try:\n session.set_keyspace(\"udacity\")\n except Exception as e:\n print(e)\n\n return cluster, session", "def createServer():\n cd('/')\n srv = cmo.createServer(managedServername) \n srv.setCluster(getMBean('/Clusters/%s' % cluster_name))\n srv.setListenPort(managedServerPort)\n return srv", "def __cassandra_connect(self):\n self.cluster = Cluster()\n self.session = self.cluster.connect('demo')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gather ceph monitor information
def get_monitor_info(handle, timeout):
    mon_info = dict()
    mon_info['stat']     = ceph_mon_command(handle, 'mon stat'    , timeout)
    mon_info['dump']     = ceph_mon_command(handle, 'mon dump'    , timeout)
    mon_info['map']      = ceph_mon_command(handle, 'mon getmap'  , timeout)
    mon_info['metadata'] = ceph_mon_command(handle, 'mon metadata', timeout)
    return mon_info
[ "async def get_monitor_data(self):\n json = await self._api_call(\"app/monitors/%s/overview\" % self.sense_monitor_id)\n if \"monitor_overview\" in json and \"monitor\" in json[\"monitor_overview\"]:\n self._monitor = json[\"monitor_overview\"][\"monitor\"]\n return self._monitor", "def monitor(self, **kwargs):\n self.show_info(monitor=True, **kwargs)", "def get_stats(self):\n\t\n\tceph_cluster = \"%s-%s\" % (self.prefix, self.cluster)\n\n\tdata = { ceph_cluster: { } }\n\tadmin_folder=\"/var/run/ceph/\"\n\tif(os.path.isdir(admin_folder)):\n\t\tfiles=os.walk(admin_folder).next()[2]\n else:\n\t\tprint \"No folder exists \"+admin_folder\n\t\treturn -1\n\tabs_path=[admin_folder+x for x in files]\n\tadmin_socket = max(abs_path, key=os.path.getmtime)\n\tcmd = \"ceph --admin-daemon \"+admin_socket +\" perf dump -f json\"\n\ttry:\n\t\toutput = subprocess.check_output(cmd, shell=True)\n\texcept Exception as exc:\n\t\tcollectd.error(\"ceph-osd: failed to ceph osd perf dump :: %s :: %s\" % (exc, traceback.format_exc()))\n\t\treturn\n\n\tif output is None:\n\t\tcollectd.error('ceph-osd: failed to ceph osd perf dump :: output was None')\n\n\tjson_data = json.loads(output)\n\tmatch=(re.search(r'([\\w.-]+)(\\d)([\\w.-]+)',admin_socket))\n\tif match:\n\t\tosd_id=match.group(2)\n\telse:\n\t\treturn\n\tdata[ceph_cluster][osd_id]={}\n\tdata[ceph_cluster][osd_id]['op_latency']={}\n\tdata[ceph_cluster][osd_id]['op_w_latency']={}\n\tdata[ceph_cluster][osd_id]['op_r_latency']={}\n\tdata[ceph_cluster][osd_id]['op_latency']['sum']=json_data['osd']['op_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_latency']['avgcount']=json_data['osd']['op_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['sum']=json_data['osd']['op_w_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['avgcount']=json_data['osd']['op_w_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['sum']=json_data['osd']['op_r_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['avgcount']=json_data['osd']['op_r_latency']['avgcount']\n\n\t#print data\t\n\treturn data", "def report_system_status():\n return {\n 'start': session.start_time.strftime('%Y/%m/%d/ %H:%M:%S'),\n 'time': int(\n (datetime.datetime.now() - session.start_time).total_seconds()),\n ## Constant monitor variables\n 'temp1': session.cmd.gpio.ntc_read(0),\n 'temp2': session.cmd.gpio.rtd_read(1),\n 'volt1': session.cmd.gpio.adc_read(2),\n 'volt2': session.cmd.gpio.adc_read(3),\n 'coord':\n [session.cmd.gcoder.cx, session.cmd.gcoder.cy, session.cmd.gcoder.cz],\n }", "def monitor(self):\n procdata = self.collect_userprocs_info()\n now = int(time.time())\n #-------------------\n proclist = []\n for name in procdata:\n mem = procdata[name]['rss']\n pcode = self.DB.get_code(name)\n proclist.append((now, pcode, mem))\n self.DB.add_proc_info(proclist)\n #-------------------\n totmem = psutil.virtual_memory()\n self.DB.add_total_mem_info(now, totmem.used, totmem.available, totmem.free)\n #-------------------\n disk = psutil.disk_usage('/')\n dinfo = {\n \"utime\" : now,\n \"total\" : disk.total,\n \"used\" : disk.used,\n \"free\" : disk.free,\n \"percent\" : disk.percent\n }\n self.DB.add_diskuse_info(dinfo)\n #-------------------\n cpu = json.dumps(psutil.cpu_percent(None, True))\n self.DB.add_total_cpu(now, cpu)\n #-------------------\n net = psutil.net_io_counters()\n ninfo = {\n \"utime\" : now,\n \"brecv\" : net.bytes_recv,\n \"bsent\" : net.bytes_sent,\n \"precv\" : net.packets_recv,\n \"psent\" : net.packets_sent,\n \"errin\" : 
net.errin,\n \"errin\" : net.errout\n }\n self.DB.add_net_info(ninfo)", "def net_monitor_list(cs, args):\n cls_print.print_monitor_list(cs.net_monitor.list().json())", "def monitor():\n format = request.args.get('format', 'text')\n if format == 'json':\n return json(_monitor.export())\n else:\n return Response('\\n'.join('%s=%r' % (k, v) for k, v\n in _monitor.export().items()))", "def clusterMonitor():\n node = os.environ['DIM_DNS_NODE']\n xml = XMLTaskList.TransformXmlToObjects()\n xml.load('../xml/TaskInventory.xml') # loads the Task Inventory\n xml.load('../xml/HLTD01.xml') # loads the Node List\n xml.load('../xml/HLTD02.xml') # loads the Node List\n xml.load('../xml/HLTD03.xml') # loads the Node List\n xml.load('../xml/HLTD04.xml') # loads the Node List\n xml.load('../xml/HLTD06.xml') # loads the Node List\n xml.load('../xml/HLTD07.xml') # loads the Node List\n xml.load('../xml/HLTD08.xml') # loads the Node List\n xml.load('../xml/HLTD09.xml') # loads the Node List\n xml.load('../xml/HLTD10.xml') # loads the Node List\n xml.load('../xml/HLTD11.xml') # loads the Node List\n xml.load('../xml/HLTE04.xml') # loads the Node List\n xml.load('../xml/HLTE06.xml') # loads the Node List\n xml.load('../xml/'+node.upper()+'.xml') # loads the Node List\n collector = ClusterCollector(xml)\n collector.startx()\n collector.run()", "def show_monitors(self):\n try:\n OUTPUT.info('Loading and displaying monitors')\n\n outlines = []\n outlines.append(['name', 'thresholdtype', 'warn_thresh', 'alert_thresh', 'enabled'])\n for monitor in self.monitorjobs:\n outlines.append(monitor.show())\n\n col_width = max(len(word) for row in outlines for word in row) + 2\n # padding\n for row in outlines:\n OUTPUT.echo(\"\".join(word.ljust(col_width) for word in row))\n\n except Exception as monitorexception:\n OUTPUT.error('Unable to print monitor output')\n raise monitorexception", "def print_monitors(self):\n for m in self.monitors: \n print \"%-30s %s\" % (m.name, m.get_status())", "def watch():\n\n try:\n headers = ('CONTAINER ID', 'NAME', 'CPU %', 'MEM USAGE / LIMIT',\n 'MEM %', 'NET I/O', 'BLOCK I/O', 'PIDS')\n column_width = 20\n for element in headers:\n print(element.ljust(column_width)),\n print('')\n\n for container in CLIENT.containers.list():\n column_width = 20\n stats = container.stats(stream=False)\n\n # Block I/O stats\n blkio = stats.get('blkio_stats').get('io_service_bytes_recursive')\n # in case blkio is empty --> IndexError: list index out of range\n if not blkio:\n blkio_read = '0'\n blkio_write = '0'\n else:\n blkio_read = size(blkio[0].get('value'), system=si)\n blkio_write = size(blkio[1].get('value'), system=si)\n\n # Network stats\n rx_stats = size(stats.get('networks').get('eth0').get('rx_bytes'), system=si)\n tx_stats = size(stats.get('networks').get('eth0').get('tx_bytes'), system=si)\n\n # Memory stats\n mem = stats.get('memory_stats')\n mem_usage = mem.get('stats').get('active_anon')\n mem_limit = mem.get('limit')\n mem_percent = (\"%.2f\"%((mem_usage / mem_limit)*100))\n\n # CPU stats\n # this is taken directly from docker CLIENT:\n # https://github.com/docker/docker/blob/28a7577a029780e4533faf3d057ec9f6c7a10948/api/CLIENT/stats.go#L309\n cpu_percent = 0.0\n cpu = stats.get('cpu_stats')\n pre_cpu = stats.get('precpu_stats')\n cpu_total = cpu.get('cpu_usage').get('total_usage')\n pre_cpu_total = pre_cpu.get('cpu_usage').get('total_usage')\n cpu_count = cpu.get('online_cpus')\n\n cpu_delta = cpu_total - pre_cpu_total\n system_delta = cpu.get('system_cpu_usage') - 
pre_cpu.get('system_cpu_usage')\n\n if system_delta > 0.0 and cpu_delta > 0.0:\n cpu_percent = (\"%.2f\"%(cpu_delta / system_delta * 100.0 * cpu_count))\n\n # container attributes\n attrs = [(str(container.short_id), str(container.name), str(cpu_percent),\n str(size((mem_usage), system=si) + \" / \" + size((mem_limit), system=si)),\n str(mem_percent), str(rx_stats + \" / \" + tx_stats),\n str(blkio_read + \" / \" + blkio_write),\n str(stats.get('pids_stats').get('current')))]\n\n for row in attrs:\n for element in row:\n print(element.ljust(column_width)),\n print('')\n\n except (docker.errors.NotFound, KeyError, AttributeError):\n print('No such container or container not running!')", "def getCurrentMetrics(self):\n self.notifyPut('Obtaining Current Display Metrics')\n try:\n data = []\n data = win32api.EnumDisplayMonitors(None, None)\n screens = {}\n scrNum = 0\n for screen in data:\n screens[scrNum] = screen[2]\n scrNum += 1\n return screens \n except Exception, e:\n self.logQ.put('{0} - Unable to capture current metrics'.format(e))", "def get_info():\n i = celery.control.inspect()\n stats = i.stats()\n result = {\n 'stats': stats,\n 'registered': i.registered(),\n 'active': i.active(),\n 'scheduled': i.scheduled()\n }\n return result", "def get_monitor_info_a(h_monitor):\n return __get_monitor_info(WINDLL.user32.GetMonitorInfoA, h_monitor)", "def parseMonitor(self, monitor):\r\n param = \"\"\r\n if monitor[\"opcode\"] == \"data_variable\":\r\n cmd = \"getVar:\"\r\n param = monitor[\"params\"][\"VARIABLE\"]\r\n color = self.monitorColors[\"data\"]\r\n elif monitor[\"opcode\"] == \"data_listcontents\":\r\n return {\r\n \"listName\": monitor[\"params\"][\"LIST\"],\r\n \"contents\": (\"value\" in monitor and monitor[\"value\"] or []),\r\n \"isPersistent\": False,\r\n \"x\": monitor[\"x\"],\r\n \"y\": monitor[\"y\"],\r\n \"width\": monitor[\"width\"],\r\n \"height\": monitor[\"height\"],\r\n \"visible\": monitor[\"visible\"]\r\n }\r\n elif monitor[\"opcode\"] == \"looks_costumenumbername\":\r\n if monitor[\"params\"][\"NUMBER_NAME\"] == \"number\":\r\n cmd = \"costumeIndex\"\r\n color = self.monitorColors[\"looks\"]\r\n elif monitor[\"params\"][\"NUMBER_NAME\"] == \"name\":\r\n log.warning(\"Monitor costume name not supported.\")\r\n elif monitor[\"opcode\"] == \"looks_backdropnumbername\":\r\n if monitor[\"params\"][\"NUMBER_NAME\"] == \"number\":\r\n cmd = \"backgroundIndex\"\r\n elif monitor[\"params\"][\"NUMBER_NAME\"] == \"name\":\r\n cmd = \"sceneName\"\r\n color = self.monitorColors[\"looks\"]\r\n elif monitor[\"opcode\"] == \"sensing_current\":\r\n cmd = \"timeAndDate\"\r\n param = monitor[\"params\"][\"CURRENTMENU\"].lower()\r\n color = self.monitorColors[\"sensing\"]\r\n elif monitor[\"opcode\"] in self.monitorOpcodes:\r\n cmd = self.monitorOpcodes[monitor[\"opcode\"]]\r\n color = self.monitorColors[monitor[\"opcode\"].split(\"_\")[0]]\r\n else:\r\n log.warning(\"Unkown monitor '%s'\" % monitor[\"opcode\"])\r\n return None\r\n \r\n if monitor[\"spriteName\"]:\r\n label = monitor[\"spriteName\"] + \": \" + param\r\n else:\r\n label = param\r\n \r\n return {\r\n \"target\": monitor[\"spriteName\"] or \"Stage\",\r\n \"cmd\": cmd,\r\n \"param\": param or None,\r\n \"color\": color,\r\n \"label\": label,\r\n \"mode\": self.monitorModes[monitor[\"mode\"]],\r\n \"sliderMin\": (\"min\" in monitor and monitor[\"min\"] or 0),\r\n \"sliderMax\": (\"max\" in monitor and monitor[\"max\"] or 100),\r\n \"isDiscrete\": True,\r\n \"x\": monitor[\"x\"],\r\n \"y\": monitor[\"y\"],\r\n \"visible\": 
monitor[\"visible\"]\r\n }", "def get_manager_info(handle, timeout):\n mgr_info = dict()\n mgr_info['ls-modules'] = ceph_mon_command(handle, 'mgr module ls', timeout)\n mgr_info['dump'] = ceph_mon_command(handle, 'mgr dump' , timeout)\n mgr_info['metadata'] = ceph_mon_command(handle, 'mgr metadata' , timeout)\n return mgr_info", "def get_monitor_info_w(h_monitor):\n return __get_monitor_info(WINDLL.user32.GetMonitorInfoW, h_monitor)", "def gather_metric(self):\n device_dict = {}\n # Delete first and last line of output of adb.\n output = self._shell.run(self.COMMAND).stdout\n\n # Example Line, Device Serial Num TAB Phone Status\n # 00bd977c7f504caf\toffline\n if output:\n for line in output.split('\\n'):\n spl_line = line.split('\\t')\n # spl_line[0] is serial, [1] is status. See example line.\n device_dict[spl_line[0]] = spl_line[1]\n\n return {self.DEVICES: device_dict}", "async def start_monitor(self):\n self._logger.info(\"Starting monitor...\")\n org1_admin = self.fabric_client.get_user(org_name='org1.example.com', name='Admin')\n\n self._logger.info(\"Starting monitor...\")\n cmd = \"/home/martijn/go/bin/go run \" \\\n \"/home/martijn/fabric-examples/fabric-cli/cmd/fabric-cli/fabric-cli.go event listenblock \" \\\n \"--cid mychannel --peer localhost:8001 \" \\\n \"--config /home/martijn/fabric-examples/fabric-cli/cmd/fabric-cli/config.yaml\"\n out_file = open(\"transactions.txt\", \"w\")\n my_env = os.environ.copy()\n my_env[\"GOPATH\"] = \"/home/martijn/gocode\"\n self.monitor_process = subprocess.Popen(cmd.split(\" \"), env=my_env, stdout=out_file,\n cwd=\"/home/martijn/fabric-examples/fabric-cli/cmd/fabric-cli/\")\n\n async def get_latest_block_num():\n self._logger.info(\"Getting latest block nr...\")\n response = await self.fabric_client.query_info(\n requestor=org1_admin,\n channel_name='mychannel',\n peers=['peer0.org1.example.com'],\n decode=True\n )\n print(response)\n\n latest_block = response.height\n if latest_block > self.latest_block_num:\n self._logger.info(\"Updating to block nr %d\", latest_block)\n old_latest_block_num = self.latest_block_num\n self.latest_block_num = latest_block\n confirm_time = int(round(time.time() * 1000))\n for confirmed_block_num in range(old_latest_block_num + 1, latest_block + 1):\n self.block_confirm_times[confirmed_block_num] = confirm_time\n\n self.monitor_lc = run_task(get_latest_block_num, interval=0.1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gather ceph device information
def get_device_info(handle, timeout):
    device_info = dict()
    device_info['ls'] = ceph_mon_command(handle, 'device ls', timeout)

    return device_info
[ "def device_info(self) -> dict[str, any]:\n return system_info(self.hacs)", "def device_info(self) -> dict:\n return self._device_info", "def DeviceInfo(self): \n \n self.is_attached = self.spatial.isAttached()\n self.device_name = self.spatial.getDeviceName()\n self.seriel_number = self.spatial.getSerialNum()\n self.device_version= self.spatial.getDeviceVersion()\n self.accel_axis_count = self.spatial.getAccelerationAxisCount()\n self.gyro_axis_count = self.spatial.getGyroAxisCount()\n self.comp_axis_count = self.spatial.getCompassAxisCount()\n \n print(\"Start of Device Info\")\n print self.is_attached\n print self.device_name\n print self.seriel_number\n print self.device_version\n print self.accel_axis_count\n print self.gyro_axis_count\n print self.comp_axis_count\n print(\"End of Device Info\")", "def get_device_information(self):\n return self.mycam.devicemgmt.GetDeviceInformation()", "def print_device_info(device):\n assert(isinstance(device, Device))\n print(\" Device Name : %s\" % device.name)\n print(\" OS Type : %s\" % device.os_type)\n print(\" IP Address : %s\" % device.ip_addr)\n print(\" Interfaces : %s\" % \", \".join(device.iflist))", "def device_info(self) -> dict:\n return {\n \"connections\": {(DOMAIN, self._unique_id)},\n \"name\": self._host,\n \"manufacturer\": \"IMAP E-Mail\",\n \"sw_version\": VERSION,\n }", "def get_ceph_drv_info():\n disks_info = []\n stat = psutil.disk_io_counters(perdisk=True)\n for drv in get_ceph_disk():\n info = CEPHDiskInfo(drv)\n disk = basename(drv)\n if disk in stat:\n info.rd_cnt = stat[disk].read_count\n info.wr_cnt = stat[disk].write_count\n info.rd_bytes = stat[disk].read_bytes\n info.wr_bytes = stat[disk].write_bytes\n info.rd_time = stat[disk].read_time\n info.wr_time = stat[disk].write_time\n\n disks_info.append(info)\n\n return disks_info", "def zha_device_info(self) -> dict[str, Any]:\n device_info: dict[str, Any] = {}\n device_info.update(self.device_info)\n device_info[ATTR_ACTIVE_COORDINATOR] = self.is_active_coordinator\n device_info[\"entities\"] = [\n {\n \"entity_id\": entity_ref.reference_id,\n ATTR_NAME: entity_ref.device_info[ATTR_NAME],\n }\n for entity_ref in self.gateway.device_registry[self.ieee]\n ]\n\n topology = self.gateway.application_controller.topology\n device_info[ATTR_NEIGHBORS] = [\n {\n \"device_type\": neighbor.device_type.name,\n \"rx_on_when_idle\": neighbor.rx_on_when_idle.name,\n \"relationship\": neighbor.relationship.name,\n \"extended_pan_id\": str(neighbor.extended_pan_id),\n \"ieee\": str(neighbor.ieee),\n \"nwk\": str(neighbor.nwk),\n \"permit_joining\": neighbor.permit_joining.name,\n \"depth\": str(neighbor.depth),\n \"lqi\": str(neighbor.lqi),\n }\n for neighbor in topology.neighbors[self.ieee]\n ]\n\n device_info[ATTR_ROUTES] = [\n {\n \"dest_nwk\": str(route.DstNWK),\n \"route_status\": str(route.RouteStatus.name),\n \"memory_constrained\": bool(route.MemoryConstrained),\n \"many_to_one\": bool(route.ManyToOne),\n \"route_record_required\": bool(route.RouteRecordRequired),\n \"next_hop\": str(route.NextHop),\n }\n for route in topology.routes[self.ieee]\n ]\n\n # Return endpoint device type Names\n names: list[dict[str, str]] = []\n for endpoint in (ep for epid, ep in self.device.endpoints.items() if epid):\n profile = PROFILES.get(endpoint.profile_id)\n if profile and endpoint.device_type is not None:\n # DeviceType provides undefined enums\n names.append({ATTR_NAME: profile.DeviceType(endpoint.device_type).name})\n else:\n names.append(\n {\n ATTR_NAME: (\n f\"unknown 
{endpoint.device_type} device_type \"\n f\"of 0x{(endpoint.profile_id or 0xFFFF):04x} profile id\"\n )\n }\n )\n device_info[ATTR_ENDPOINT_NAMES] = names\n\n reg_device = self.gateway.ha_device_registry.async_get(self.device_id)\n if reg_device is not None:\n device_info[\"user_given_name\"] = reg_device.name_by_user\n device_info[\"device_reg_id\"] = reg_device.id\n device_info[\"area_id\"] = reg_device.area_id\n return device_info", "def gather_metric(self):\n device_dict = {}\n # Delete first and last line of output of adb.\n output = self._shell.run(self.COMMAND).stdout\n\n # Example Line, Device Serial Num TAB Phone Status\n # 00bd977c7f504caf\toffline\n if output:\n for line in output.split('\\n'):\n spl_line = line.split('\\t')\n # spl_line[0] is serial, [1] is status. See example line.\n device_dict[spl_line[0]] = spl_line[1]\n\n return {self.DEVICES: device_dict}", "def device_info(self) -> dict[str, any]:\n if self.repository.data.full_name == HacsGitHubRepo.INTEGRATION:\n return system_info(self.hacs)\n\n return {\n \"identifiers\": {(DOMAIN, str(self.repository.data.id))},\n \"name\": self.repository.display_name,\n \"model\": self.repository.data.category,\n \"manufacturer\": \", \".join(\n author.replace(\"@\", \"\") for author in self.repository.data.authors\n ),\n \"configuration_url\": \"homeassistant://hacs\",\n \"entry_type\": DeviceEntryType.SERVICE,\n }", "def get_device(self):\r\n device_list = dict()\r\n # device_list['Cisco-IOS-XE-native:native'] = {'device':[]}\r\n api_data = self._execute_call('Cisco-IOS-XE-native:native')\r\n device = DictQuery(api_data.json).get(\r\n 'Cisco-IOS-XE-native:native')\r\n\r\n # print(system)\r\n\r\n hostname = device.get('hostname')\r\n version = device.get('version')\r\n\r\n dict_temp = dict()\r\n dict_temp['hostname'] = hostname\r\n dict_temp['version'] = version\r\n device_list['Cisco-IOS-XE-native:native'] = dict()\r\n device_list['Cisco-IOS-XE-native:native']['device'] = dict_temp\r\n\r\n\r\n return json.dumps(device_list, sort_keys=False, indent=4)", "def get_compute_host_info(self):\n volume_dash_volume_hex = (\"volume-%s\" % self.volume_hex)\n if not self._check_dev_mapper_exists(volume_dash_volume_hex):\n self._check_dev_mapper_exists(self.simple_BV_ID)\n self._get_fuser_dev_mapper()\n self._get_qemu_xml_file()\n if self.instance_hex is None:\n self._get_instance_from_fuser()\n self._check_running()\n self._get_dmsetup_info()", "def device_info(self):\n info = {\n \"identifiers\": {\n (\n DOMAIN,\n \"serial-number\",\n self._ctrl.data[\"routerboard\"][\"serial-number\"],\n \"switch\",\n \"NAT\",\n )\n },\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} NAT\",\n }\n return info", "def device():\n pass", "def test_device_info_properties(device: Device) -> None:\n info = device.device_info\n\n assert info.manufacturer == \"IKEA of Sweden\"\n assert info.model_number == \"STARKVIND Air purifier\"\n assert info.firmware_version == \"1.0.033\"\n assert info.serial == \"\"", "def _print_device_info(self):\n \n if self.verbose > 0:\n print(\"=================================================================================\")\n print(\"Device id: \" + str(self.device_id))\n print(\"Device name: \" + str(self.device.name()))\n print(\"---------------------------------------------------------------------------------\")\n print(\"Attributes:\\n\")\n attributes = self.device.get_attributes()\n for (key, value) in 
attributes.iteritems():\n print(\"\\t%s:%s\" % (str(key), str(value))) \n print(\"=================================================================================\")", "def test_device_info_properties(device):\n info = device.device_info\n\n assert info.manufacturer == \"IKEA of Sweden\"\n assert info.model_number == \"STARKVIND Air purifier\"\n assert info.firmware_version == \"1.0.033\"\n assert info.serial == \"\"", "def device(self):\n return self._vars[0].device", "def test_get_device(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gather ceph manager information
def get_manager_info(handle, timeout):
    mgr_info = dict()
    mgr_info['ls-modules'] = ceph_mon_command(handle, 'mgr module ls', timeout)
    mgr_info['dump'] = ceph_mon_command(handle, 'mgr dump' , timeout)
    mgr_info['metadata'] = ceph_mon_command(handle, 'mgr metadata' , timeout)
    return mgr_info
[ "def manager_info(self, manager):\n _, body = self.request('/v1.1/managers/active/%s' % manager, 'GET')\n return body", "def get_monitor_info(handle, timeout):\n mon_info = dict()\n mon_info['stat'] = ceph_mon_command(handle, 'mon stat' , timeout)\n mon_info['dump'] = ceph_mon_command(handle, 'mon dump' , timeout)\n mon_info['map'] = ceph_mon_command(handle, 'mon getmap' , timeout)\n mon_info['metadata'] = ceph_mon_command(handle, 'mon metadata', timeout)\n return mon_info", "def get_stats(self):\n\t\n\tceph_cluster = \"%s-%s\" % (self.prefix, self.cluster)\n\n\tdata = { ceph_cluster: { } }\n\tadmin_folder=\"/var/run/ceph/\"\n\tif(os.path.isdir(admin_folder)):\n\t\tfiles=os.walk(admin_folder).next()[2]\n else:\n\t\tprint \"No folder exists \"+admin_folder\n\t\treturn -1\n\tabs_path=[admin_folder+x for x in files]\n\tadmin_socket = max(abs_path, key=os.path.getmtime)\n\tcmd = \"ceph --admin-daemon \"+admin_socket +\" perf dump -f json\"\n\ttry:\n\t\toutput = subprocess.check_output(cmd, shell=True)\n\texcept Exception as exc:\n\t\tcollectd.error(\"ceph-osd: failed to ceph osd perf dump :: %s :: %s\" % (exc, traceback.format_exc()))\n\t\treturn\n\n\tif output is None:\n\t\tcollectd.error('ceph-osd: failed to ceph osd perf dump :: output was None')\n\n\tjson_data = json.loads(output)\n\tmatch=(re.search(r'([\\w.-]+)(\\d)([\\w.-]+)',admin_socket))\n\tif match:\n\t\tosd_id=match.group(2)\n\telse:\n\t\treturn\n\tdata[ceph_cluster][osd_id]={}\n\tdata[ceph_cluster][osd_id]['op_latency']={}\n\tdata[ceph_cluster][osd_id]['op_w_latency']={}\n\tdata[ceph_cluster][osd_id]['op_r_latency']={}\n\tdata[ceph_cluster][osd_id]['op_latency']['sum']=json_data['osd']['op_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_latency']['avgcount']=json_data['osd']['op_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['sum']=json_data['osd']['op_w_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['avgcount']=json_data['osd']['op_w_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['sum']=json_data['osd']['op_r_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['avgcount']=json_data['osd']['op_r_latency']['avgcount']\n\n\t#print data\t\n\treturn data", "async def get_system_info(self) -> Dict[str, Any]:\n assert self._client is not None\n return await self._client.invoke_method(\"system.info\")", "def get_compute_host_info(self):\n volume_dash_volume_hex = (\"volume-%s\" % self.volume_hex)\n if not self._check_dev_mapper_exists(volume_dash_volume_hex):\n self._check_dev_mapper_exists(self.simple_BV_ID)\n self._get_fuser_dev_mapper()\n self._get_qemu_xml_file()\n if self.instance_hex is None:\n self._get_instance_from_fuser()\n self._check_running()\n self._get_dmsetup_info()", "def get_device_info(handle, timeout):\n device_info = dict()\n device_info['ls'] = ceph_mon_command(handle, 'device ls', timeout)\n\n return device_info", "def get_ceph_srv_info():\n services = []\n for name, pid in get_ceph_pids():\n process = psutil.Process(pid)\n services.append(CEPHSrvInfo(name, pid, process.get_cpu_percent(),\\\n process.memory_info().rss))\n return services", "def status(client):\n managers = client.manager.get_managers().items\n updated_columns = CLUSTER_COLUMNS\n # After we get the managers list from either of the managers in the profile\n # we retrieve each manager's status directly\n for manager in managers:\n direct_rest_client = env.get_rest_client(\n rest_host=manager.public_ip, cluster=[])\n try:\n services = 
direct_rest_client.manager.get_status()['services']\n updated_columns += [\n service['display_name'] for service in services\n if service['display_name'] not in updated_columns\n ]\n manager.update({'status': 'Active'})\n except (ConnectionError, CloudifyClientError) as e:\n if isinstance(e, CloudifyClientError) and e.status_code != 502:\n raise\n manager.update({'status': 'Offline'})\n continue\n for service in services:\n state = service['instances'][0]['state'] \\\n if 'instances' in service and \\\n len(service['instances']) > 0 else 'unknown'\n manager.update({service['display_name']: state})\n print_data(updated_columns, managers, 'HA Cluster nodes')", "def get_manager(self) -> \"ManagerAPI\":\n ...", "def stateshmgr(shmgr_name):", "def get_hmc_info(module):\n global OUTPUT\n\n info_hash = {}\n\n (ret, std_out, std_err) = exec_cmd('LC_ALL=C lsnim -t hmc -l',\n module, shell=True)\n if ret != 0:\n OUTPUT.append('Failed to get NIM HMC info: {}'\n .format(std_err))\n logging.error('Failed to get NIM HMC info: {}'\n .format(std_err))\n return info_hash\n\n obj_key = ''\n for line in std_out.split('\\n'):\n line = line.rstrip()\n match_key = re.match(r\"^(\\S+):\", line)\n # HMC name\n if match_key:\n obj_key = match_key.group(1)\n info_hash[obj_key] = {}\n continue\n\n match_cstate = re.match(r\"^\\s+Cstate\\s+=\\s+(.*)$\", line)\n if match_cstate:\n cstate = match_cstate.group(1)\n info_hash[obj_key]['cstate'] = cstate\n continue\n\n match_key = re.match(r\"^\\s+passwd_file\\s+=\\s+(.*)$\", line)\n if match_key:\n info_hash[obj_key]['passwd_file'] = match_key.group(1)\n continue\n\n match_key = re.match(r\"^\\s+login\\s+=\\s+(.*)$\", line)\n if match_key:\n info_hash[obj_key]['login'] = match_key.group(1)\n continue\n\n match_key = re.match(r\"^\\s+if1\\s*=\\s*\\S+\\s*(\\S*)\\s*.*$\", line)\n if match_key:\n info_hash[obj_key]['ip'] = match_key.group(1)\n continue\n\n return info_hash", "def get_cmginfo(cls):\n try:\n cmg_home = os.environ['CMG_HOME']\n simulator = list(Path(cmg_home).rglob('mx*.exe'))\n sim_exe = sorted(simulator)[-1]\n report = list(Path(cmg_home).rglob('report*.exe'))\n report_exe = sorted(report)[-1]\n return cls.__cmginfo(cmg_home, sim_exe, report_exe)\n except KeyError as error:\n raise KeyError(\n 'Verifique se a variรกvel de ambiente CMG_HOME existe!') from error", "def query_usr_manager(self, manager_name):", "def _get_conf():\n configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in (conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder", "def bdev_nvme_get_transport_statistics(client):\n return client.call('bdev_nvme_get_transport_statistics')", "def get_ceph_drv_info():\n disks_info = []\n stat = psutil.disk_io_counters(perdisk=True)\n for drv in get_ceph_disk():\n info = CEPHDiskInfo(drv)\n disk = basename(drv)\n if disk in stat:\n info.rd_cnt = stat[disk].read_count\n info.wr_cnt = stat[disk].write_count\n info.rd_bytes = stat[disk].read_bytes\n info.wr_bytes = stat[disk].write_bytes\n info.rd_time = stat[disk].read_time\n info.wr_time = stat[disk].write_time\n\n disks_info.append(info)\n\n return disks_info", "def get_cluster_info(self) -> Dict[str, Any]:\n pass", "def get_info():\n i = celery.control.inspect()\n stats = 
i.stats()\n result = {\n 'stats': stats,\n 'registered': i.registered(),\n 'active': i.active(),\n 'scheduled': i.scheduled()\n }\n return result", "def device_info(self) -> dict[str, any]:\n return system_info(self.hacs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that graveyard_removal.py correctly removes the graveyard from an h5m file.
def test_default_graveyard_removal():
    os.system("python svalinn_tools/graveyard_removal.py " + test_file_path + test_file)
    size = os.path.getsize(test_file[:-4] + "_no_grave.h5m")
    assert size == 5748780
[ "def remove_group(self):\n try:\n with open_hdf5(self.file_name, mode=\"a\") as hdf_file:\n del hdf_file[self.h5_path]\n except KeyError:\n pass", "def test_print_graveyard_removal(capfd):\n os.system(\"python svalinn_tools/graveyard_removal.py \" + test_file_path + test_file + \" -p\")\n out, err = capfd.readouterr()\n assert (\"12682136550675318127\" in out) == True", "def remove_data_from_h5(h5path, keys, ):\n with h5py.File(h5path, mode='a') as f:\n for key in keys:\n del f[key]", "def __delitem__(self, key):\n if self.file_exists:\n try:\n with open_hdf5(self.file_name, mode=\"a\") as store:\n del store[self._get_h5_path(key)]\n except (AttributeError, KeyError):\n pass", "def test_fs_removal_mgt_unformatted(self):\n self.create_simple_filesystem(self.host, start=False)\n\n self.assertState(self.mgt, \"unformatted\")\n self.fs = self.set_and_assert_state(self.fs, \"removed\")\n self.assertState(self.mgt, \"unformatted\")\n with self.assertRaises(ManagedFilesystem.DoesNotExist):\n ManagedFilesystem.objects.get(pk=self.fs.pk)", "def export_h5m(\n self,\n filename: Optional[str] = 'dagmc.h5m',\n skip_graveyard: Optional[bool] = False,\n tolerance: Optional[float] = 0.001,\n graveyard_offset: Optional[float] = 100) -> str:\n\n path_filename = Path(filename)\n\n if path_filename.suffix != \".h5m\":\n path_filename = path_filename.with_suffix(\".h5m\")\n\n path_filename.parents[0].mkdir(parents=True, exist_ok=True)\n\n moab_core, moab_tags = define_moab_core_and_tags()\n\n surface_id = 1\n volume_id = 1\n\n for item in self.shapes_and_components:\n\n item.export_stl(item.stl_filename, tolerance=tolerance)\n moab_core = add_stl_to_moab_core(\n moab_core,\n surface_id,\n volume_id,\n item.material_tag,\n moab_tags,\n item.stl_filename)\n volume_id += 1\n surface_id += 1\n\n if skip_graveyard is False:\n self.make_graveyard(graveyard_offset=graveyard_offset)\n self.graveyard.export_stl(self.graveyard.stl_filename)\n volume_id = 2\n surface_id = 2\n moab_core = add_stl_to_moab_core(\n moab_core,\n surface_id,\n volume_id,\n self.graveyard.material_tag,\n moab_tags,\n self.graveyard.stl_filename\n )\n\n all_sets = moab_core.get_entities_by_handle(0)\n\n file_set = moab_core.create_meshset()\n\n moab_core.add_entities(file_set, all_sets)\n\n moab_core.write_file(str(path_filename))\n\n return str(path_filename)", "def cleanup(session):\n\n print(bcolors.BOLD + '\\nCleaning up' + bcolors.ENDC)\n if os.path.isfile(MAPFILE_PATH):\n if check_postgres(session, DATASET_NAME_VERSION):\n print 'Unpublishing...',\n cmd = ['esgunpublish', '--project', 'test', '--database-delete', '--map', MAPFILE_PATH]\n success, err_msg = execute_cmd(cmd)\n print_res(success, True, session, final_run=True)\n print 'Deleting mapfile...',\n try:\n os.remove(MAPFILE_PATH)\n success = True\n except:\n success = False\n print_res(success, True, session, final_run=True)\n else:\n print 'Nothing to do...',\n print_res(True, True, session, final_run=True)", "def clean_useless_gradle_catch_snapshot():\n if check_user_gradle_path():\n snapshot_path_list = find_out_snapshot_path_list()\n for snapshot_path in snapshot_path_list:\n for walk_dir, walk_folder, walk_file in os.walk(snapshot_path):\n now_folder = {}\n for d in walk_folder:\n snapshot_hash_path = os.path.join(walk_dir, d)\n m_time = os.path.getmtime(snapshot_hash_path)\n now_folder[m_time] = snapshot_hash_path\n if len(now_folder) > 0:\n new_sort = sorted_dict_values_to_list(now_folder)\n for path in new_sort[0: -1]:\n shutil.rmtree(path)\n log_printer('remove useless 
snapshot at path\\n-> %s' % path, 'i', True)\n else:\n log_printer('can not found gradle catch folder exit', 'e', True)\n exit(1)", "def test_delete_namespaced_virtual_machine_snapshot_content(self):\n pass", "def test_delete_namespaced_virtual_machine_snapshot(self):\n pass", "def remove_file_from_cache(self, md5_hash):\n self.used_space -= len(self.storage[md5_hash])\n self.storage.pop(md5_hash)\n self.remove_from_usage_queue(md5_hash)", "def test_delete_collection_namespaced_virtual_machine_snapshot(self):\n pass", "def test_delete_collection_namespaced_virtual_machine_snapshot_content(self):\n pass", "def test_unmanage_snapshot(self):\n self.assertRaises(\n NotImplementedError,\n self.driver.unmanage_snapshot,\n TEST_SNAPSHOT[0])", "def test_exc(self):\n g = h5g.open(self.fid, '/')\n g._close()\n self.assertEqual(h5i.get_type(g), h5i.BADID)", "def drop_riffdata_db():\n do_drop_riffdata_db()", "def test_fs_removal_mgt_offline(self):\n self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, \"unmounted\")\n self.fs = self.set_and_assert_state(self.fs, \"removed\")\n self.assertState(self.mgt.managedtarget_ptr, \"mounted\")\n with self.assertRaises(ManagedFilesystem.DoesNotExist):\n ManagedFilesystem.objects.get(pk=self.fs.pk)", "def clear_empty_groups(pth):\n import h5py\n f = h5py.File(pth,'a')\n deleted = []\n for k in f.keys():\n if len(f[k].keys()) == 0:\n deleted.append(k)\n del f[k]\n \n f.close()\n return deleted", "def clean_database(databasePathname):\n print '# loading database ' + databasePathname\n try:\n db = gdbm.open(databasePathname, 'w')\n except:\n print \"# \" + databasePathname + \" could not be loaded\"\n sys.exit(-1)\n\n # even though gdbm supports memory efficient iteration over\n # all keys, I want to order my traversal across similar\n # paths to leverage caching of directory files:\n allKeys=db.keys()\n print '# finished loaded keys from ' + databasePathname\n allKeys.sort()\n print '# finished sorting keys from ' + databasePathname\n print '# deleting dead nodes'\n count=0\n for currKey in allKeys:\n try:\n os.stat(currKey)\n sys.stdout.write('.')\n except OSError:\n del db[currKey]\n sys.stdout.write('*')\n count=count+1\n sys.stdout.flush()\n print \"\\n# reorganizing \" + databasePathname\n db.reorganize()\n db.sync()\n db.close()\n print '# done cleaning ' + databasePathname + ', removed ' + str(count) + ' dead nodes!'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that graveyard_removal.py prints the correct entity handle for the graveyard volume.
def test_print_graveyard_removal(capfd):
    os.system("python svalinn_tools/graveyard_removal.py " + test_file_path + test_file + " -p")
    out, err = capfd.readouterr()
    assert ("12682136550675318127" in out) == True
[ "def test_delete_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.driver.delete_volume(volume)\n expected = {'name': 'volume10'}\n self.assertDictMatch(expected, self.deleted)", "def test_delete_snapshot(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n snapshot = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'snap10'}\n self.driver.delete_snapshot(snapshot)\n expected = {'name': 'snap10'}\n self.assertDictMatch(expected, self.deleted)", "def drop(self):\n Game.instance.area_map.entities.append(self.owner)\n Game.instance.inventory.remove(self.owner)\n self.owner.x = Game.instance.player.x\n self.owner.y = Game.instance.player.y\n message('You dropped a ' + self.owner.name + '.', palette.yellow)", "def test_ghost(self):\n factory = self.root.manage_addProduct['Silva']\n factory.manage_addGhost('ghost', None, haunted=self.root.document)\n\n version = self.root.ghost.get_editable()\n self.assertTrue(verifyObject(IGhost, self.root.ghost))\n self.assertTrue(verifyObject(IGhostVersion, version))\n\n self.assertEqual(version.get_link_status(), None)\n self.assertEqual(version.get_haunted(), self.root.document)\n self.assertEqual(\n aq_chain(version.get_haunted()),\n aq_chain(self.root.document))\n\n manager = IContainerManager(self.root)\n with manager.deleter() as deleter:\n deleter(self.root.document)\n\n self.assertEqual(version.get_haunted(), None)\n self.assertEqual(version.get_link_status(), errors.EmptyInvalidTarget())", "def test_default_graveyard_removal():\n\tos.system(\"python svalinn_tools/graveyard_removal.py \" + test_file_path + test_file)\n\tsize = os.path.getsize(test_file[:-4] + \"_no_grave.h5m\")\n\tassert size == 5748780", "def test_delete_volume_failure_modes(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self._fail_space_delete = True\n # This should not throw an exception, space-delete failure not problem\n self.driver.delete_volume(volume)\n self._fail_space_delete = False\n volume['provider_id'] = None\n # This should also not throw an exception\n self.driver.delete_volume(volume)", "def test_09_delete_detached_volume(self):\n\n self.debug(\"Delete Volume ID: %s\" % self.volume.id)\n\n self.volume_1 = Volume.create(\n self.apiclient,\n self.services,\n account=self.account.name,\n domainid=self.account.domainid\n )\n self.cleanup.append(self.volume_1)\n\n self.virtual_machine.attach_volume(self.apiClient, self.volume_1)\n self.virtual_machine.detach_volume(self.apiClient, self.volume_1)\n\n cmd = deleteVolume.deleteVolumeCmd()\n cmd.id = self.volume_1.id\n self.apiClient.deleteVolume(cmd)\n self.cleanup.remove(self.volume_1)\n\n list_volume_response = Volume.list(\n self.apiClient,\n id=self.volume_1.id,\n type='DATADISK'\n )\n self.assertEqual(\n list_volume_response,\n None,\n \"Check if volume exists in ListVolumes\"\n )\n return", "def dropObject(player):\n for treasure in Treasure.List:\n if player.treasureCaptured:\n player.treasureCaptured = False\n treasure.x = 
player.x\n treasure.y = player.y\n treasure.img = pygame.image.load(Treasure.treasure_img[0])", "def test_v1vmi_removevolume(self):\n pass", "def find_graveyard_inner_box():\n volumes = get_volume_list()\n graveyard = 0\n for v in volumes:\n if volume_is_graveyard( v ): \n graveyard = v\n break\n if graveyard == 0:\n raise DagmcError( 'Could not find a graveyard volume' )\n\n xyz_lo, xyz_hi = volume_boundary( graveyard )\n xyz_mid = numpy.array( [ (hi+lo)/2.0 for (hi,lo) in zip( xyz_hi, xyz_lo) ], dtype=numpy.float64 )\n\n result_lo = numpy.array( [0]*3, dtype=numpy.float64 )\n result_hi = numpy.array( [0]*3, dtype=numpy.float64 )\n\n for i in range(0,3):\n uvw = [0,0,0]\n uvw[i] = 1\n lo_mid = xyz_mid.copy()\n lo_mid[i] = xyz_lo[i]\n _, dist = fire_one_ray( graveyard, lo_mid, uvw )\n result_lo[i] = lo_mid[i] + dist\n uvw[i] = -1\n hi_mid = xyz_mid.copy()\n hi_mid[i] = xyz_hi[i]\n _, dist = fire_one_ray( graveyard, hi_mid, uvw )\n result_hi[i] = hi_mid[i] - dist\n \n return result_lo, result_hi", "def test_volume_snapshot_create_get_list_delete(self):\n volume = self.create_volume()\n self.addCleanup(self.delete_volume, volume['id'])\n\n s_name = data_utils.rand_name(self.__class__.__name__ + '-Snapshot')\n # Create snapshot\n snapshot = self.snapshots_client.create_snapshot(\n volume_id=volume['id'],\n display_name=s_name)['snapshot']\n\n def delete_snapshot(snapshot_id):\n waiters.wait_for_volume_resource_status(self.snapshots_client,\n snapshot_id,\n 'available')\n # Delete snapshot\n self.snapshots_client.delete_snapshot(snapshot_id)\n self.snapshots_client.wait_for_resource_deletion(snapshot_id)\n\n self.addCleanup(delete_snapshot, snapshot['id'])\n self.assertEqual(volume['id'], snapshot['volumeId'])\n # Get snapshot\n fetched_snapshot = self.snapshots_client.show_snapshot(\n snapshot['id'])['snapshot']\n self.assertEqual(s_name, fetched_snapshot['displayName'])\n self.assertEqual(volume['id'], fetched_snapshot['volumeId'])\n # Fetch all snapshots\n snapshots = self.snapshots_client.list_snapshots()['snapshots']\n self.assertIn(snapshot['id'], map(lambda x: x['id'], snapshots))", "def test_create_volume_from_snapshot(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n snap = {'id': '1', 'name': 'volume1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10,\n 'provider_id': 'space_orig'}\n volume = {'id': '2', 'name': 'volume2', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10}\n pid = self.driver.create_volume_from_snapshot(volume, snap)\n # We must copy entier underlying storage, ~12GB, not just 10GB\n self.assertEqual(11444 * units.Mi, self.dd_count)\n self.assertEqual('1M', self.bs)\n # Check space-create command\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume2', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'volume2'}\n self.assertDictMatch(expected_pid, pid)", "def test_bad_uuid_blockdev(self):\n command_line = [\"blockdev\", \"debug\", \"get-object-path\", \"--uuid=not\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def test_hotplug_storage(self):\n target_vols = [\n {\n \"type\": \"FCP\",\n \"volume_id\": \"1024400000000000\",\n \"boot_device\": True,\n \"specs\": {\n \"multipath\": True,\n \"adapters\": 
[{\n \"devno\": \"0.0.1800\",\n \"wwpns\": ['300607630503c1ae', '300607630503c1af']\n }, {\n \"devno\": \"0.0.1801\",\n \"wwpns\": ['300607630503c1ae', '300607630503c1af']\n }]\n }\n }\n ]\n # set response from storage pool object\n pool_resp = {target_vols[0]['volume_id']: '/dev/mapper/mpatha'}\n self._mock_pool.return_value.activate.return_value = pool_resp\n\n guest_obj = self._check_init()\n guest_obj.login()\n # validate response\n self.assertEqual(guest_obj.hotplug(vols=target_vols),\n {'vols': pool_resp})", "def test_removal_mount_dependency(self):\n from chroma_core.models import ManagedMgs\n\n self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, \"mounted\")\n try:\n # Make it so that the mount unconfigure operations will fail\n MockAgentRpc.succeed = False\n\n # -> the TargetMount removal parts of this operation will fail, we\n # want to make sure that this means that Target deletion part\n # fails as well\n self.set_and_assert_state(self.mgt.managedtarget_ptr, \"removed\", check=False)\n\n ManagedMgs.objects.get(pk=self.mgt.pk)\n self.assertNotEqual(ManagedMgs._base_manager.get(pk=self.mgt.pk).state, \"removed\")\n finally:\n MockAgentRpc.succeed = True\n\n # Now let the op go through successfully\n self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, \"removed\")\n with self.assertRaises(ManagedMgs.DoesNotExist):\n ManagedMgs.objects.get(pk=self.mgt.pk)\n self.assertEqual(ManagedMgs._base_manager.get(pk=self.mgt.pk).state, \"removed\")", "def _detach_volume_fcd(self, connection_info, instance):\n vm_ref = vm_util.get_vm_ref(self._session, instance)\n data = connection_info['data']\n adapter_type = data['adapter_type']\n\n if adapter_type == constants.ADAPTER_TYPE_IDE:\n state = vm_util.get_vm_state(self._session, instance)\n if state != power_state.SHUTDOWN:\n raise exception.Invalid(_('%s does not support disk '\n 'hotplug.') % adapter_type)\n\n vm_util.detach_fcd(self._session, vm_ref, data['id'])", "def database_volume_snapshot_delete(volume_snapshot_uuid):\n db = database_get()\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n query.filter(model.VolumeSnapshot.uuid == volume_snapshot_uuid).delete()\n session.commit()", "def dispense_full_plate(self, ref, reagent, volume):\n columns = []\n for col in range(0,ref.container_type.col_count):\n columns.append({\"column\": col, \"volume\": volume})\n self.instructions.append(Dispense(ref, reagent, columns))", "def test_drop_disk():\n gc.drop_disk(0, 0, 1)\n assert gc.board[0][0] == 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate the regex patterns, but only partially while the user is still typing. Because the 'from' pattern will be where the user specifies captures, changing it also requires revalidating the substitution pattern. However, if the user is still typing (as opposed to hitting enter to complete the input), we do the minimal amount of work necessary, i.e. we just set the colors back to neutral and disable the Apply button.
def validateRegexFields(self, complete=False):
        # Assume the patterns aren't valid.
        self.m_validFromRe = False
        self.m_validPatterns = False

        ### Validate the 'from' pattern
        #
        regexCtl = self.m_reFromCtl
        subsCtl = self.m_reToCtl
        regex, subs = regexCtl.Value, subsCtl.Value
        regColor, subColor = wx.NullColour, wx.NullColour

        if complete and regex:
            regColor = subColor = wx.BLUE
            try:
                re.sub(regex, subs, '')
            except re.error as e:
                subColor = wx.RED
                try:
                    re.compile(regex)
                except re.error as e:
                    regColor = wx.RED
                else:
                    self.m_validFromRe = True
            else:
                self.m_validFromRe = True
                self.m_validPatterns = bool(subs)

        self.setTextColor(regexCtl, regColor)
        self.setTextColor(subsCtl, subColor)

        if complete:
            self.populateFileList()
        else:
            self.m_applyBtn.Enabled = False
[ "def onTextChange(self, event):\n\n self.validateRegexFields(complete=False)\n event.Skip()", "def onHitEnterInFrom(self, event):\n\n self.validateRegexFields(complete=True)\n if self.m_validFromRe:\n self.m_reToCtl.SetFocus()", "def validate_data(self):\n for pattern in self.patterns:\n if pattern == \"\":\n self.patterns.remove(\"\")\n\n if not self.patterns:\n print(\"WARNING! Missing pattern or empty string!\")\n sys.exit()", "def applyPattern(self):\n self.cube.__init__()\n identifier = self.sender().text() #get string from sender button\n patterns = {\n \"Checkerboard\": \"U2 D2 F2 B2 L2 R2\",\n \"Superflip\": \"U R2 F B R B2 R U2 L B2 R U' D' R2 F R' L B2 U2 F2\",\n \"Cube2nd\": \"F L F U' R U F2 L2 U' L' B D' B' L2 U\",\n \"Cube3rd\": \"U' L' U' F' R2 B' R F U B2 U B' L U' F U R F'\",\n \"CrossOne\": \"U F B' L2 U2 L2 F' B U2 L2 U\",\n \"CrossTwo\": \"R2 L' D F2 R' D' R' L U' D R D B2 R' U D2\",\n \"Python\": \"F2 R' B' U R' L F' L F' B D' R B L2\",\n \"GreenMamba\": \"R D R F R' F' B D R' U' B' U D2\",\n \"6Centers\": \"U D' R L' F B' U D'\",\n \"CEC\": \"U' R2 L2 F2 B2 U' R L F B' U F2 D2 R2 L2 F2 U2 F2 U' F2\",\n \"Tetris\": \"L R F B U' D' L' R'\",\n \"4H\": \"L2 R2 U2 L2 R2 U2 y L2 R2 U2 L2 R2 U2 y'\" \n }\n self.cube.scramble(patterns[identifier]) #manipulate cubestring according to input\n self.stringToCubemap()\n self.lineEdit_InOut.setText(patterns[identifier])\n self.label_CurrentString.setText(\"Pattern:\")\n self.statusbar.showMessage(\"Applied Pattern\")\n msg = \"Applied Pattern: [{}] {}\".format(identifier, patterns[identifier])\n self.addLogEntry(msg)\n pass", "def search_regex_pattern(tasks_dict):\n # clear the screen\n os.system(\"cls\" if os.name == \"nt\" else \"clear\")\n user_regex = input(\"Please enter your regex pattern:\\n\")\n\n selected_tasks = []\n for i, tasks in tasks_dict.items():\n for task in tasks:\n if re.search(user_regex, task.name) or re.search(user_regex,\n task.notes):\n selected_tasks.append(task)\n display_tasks(selected_tasks)\n return # go back to Search menu", "def on_patternEdit_textChanged(self, txt):\n self.__updateOK()", "def test_grammar_rules_regex(self) -> None:\n for rule in self.rules.grammar_regex:\n positions: List[Tuple[int, int]] = self.report.get_regex_postions(\n rule[\"regex\"], ignore_case=True\n )\n for position in positions:\n self.add_error(rule[\"message\"], position=position)", "def regex_validation(dataset):\n # This list contains some common pre-constructed regex to allow easy reuse.\n prep_regex = {\"EmptyOrJustSpaces\": \"\\s\", \"AllNumbers-S\": \"^\\d[\\d]*$\", \"AnyCharOrNum+S\": \".+\",\n \"UnitPrice\": \"^ยฃ{0,1}([0-9]+\\.[0-9]*[1-9]+)|([1-9][0-9]*)\", \"TitleCaseWords\": \"[A-Z][a-z]+(\\s[A-Z][a-z]+)*\",\n \"Date\": \"(^([0-2]\\d|[3][0-1])\\/([0]\\d|[1][0-2])\\/([2][01]|[1][6-9])\\d{2}(\\s{1,4}([0-1]\\d|[2][0-3])(\\:[0-5]\\d){1,2})?$)\"\n }\n\n # The index of this list represents the column index in the dataset.\n prep_str = [[\"\\d{6}\"], [\"\\d{5}\"], [prep_regex[\"AnyCharOrNum+S\"]], [prep_regex[\"AllNumbers-S\"]],\n [prep_regex[\"Date\"]], [prep_regex[\"UnitPrice\"]], [\"\\d{5}\"], [prep_regex[\"TitleCaseWords\"]]]\n\n num_columns = len(dataset[0])\n # \"compromised_rows\" contains a list of compromised entries for each column.\n # Format => {columnID0: {rowID0: data0}, columnID1: {rowID1: data1}, ...}\n # Example Structure: {0: {50:\"0\"}, 1: {8:\"85099C\"}, 2:{}}\n compromised_rows = {count: {} for count in range(num_columns)}\n\n print(\"Scanning for erroneous data..\")\n # Look through the 
dataset cell by cell and apply the regex validation to each cell/value.\n for row_count in range(len(dataset)):\n row = dataset[row_count]\n\n for column_count in range(len(row)):\n column = row[column_count]\n\n # Fetch the all the validation strings for the column\n curr_regex = None\n for regex in prep_str[column_count]:\n curr_regex = regex\n\n # Extract the data that does not match the set format by applying regex validation on it.\n if not (re.fullmatch(curr_regex, column)):\n compromised_rows[column_count].update({row_count: column})\n\n # region Display the information extracted from the pre-processing.\n print(\"\\n\\tPotentially Erroneous Data:\")\n for column_id in compromised_rows:\n print(\"\\t- ColumnID:\", column_id, \", Num Rows:\", len(compromised_rows[column_id]))\n # print(\"\\n Frequency & Recency Calculations for the Period (01/12/2010 to 09/12/2011): \")\n\n inspect = input(\"\\nWould you like to inspect the abnormalities in the data? (Y/N)\\n\")\n\n while inspect.upper() == \"Y\":\n selected_column = input(\"Please enter the ColumnID of the column you are interested in here: \")\n\n # Check that the entered input is valid ColumnID\n if selected_column.isdigit() and (0 <= int(selected_column) < num_columns):\n for key, value in compromised_rows[int(selected_column)].items():\n print(\"Value --> \", value, \" found at row --> \", key)\n\n print(\"\\n\\tPotentially Erroneous Data:\")\n for column_id in compromised_rows:\n print(\"ColumnID:\", column_id, \", Num Rows:\", len(compromised_rows[column_id]))\n\n inspect = input(\"\\nWould you like to inspect another column? (Y/N)\\n\")\n else:\n print(\"Invalid Input, the ColumnID ranges from 0 to\", num_columns - 1, \"!\")\n # endregion", "def validateInput(self):\n palette = QPalette()\n validInput = self.sender().hasAcceptableInput()\n if validInput:\n palette.setColor(QPalette.Text, Qt.black)\n else:\n palette.setColor(QPalette.Text, Qt.blue)\n self.sender().setPalette(palette)\n self.hasValidInput.emit(validInput)", "def apply(self, options):\n # Which date token in the pattern are we on?\n arg_index = 0\n # How many tokens have we looked over since the last one that's part of the pattern?\n counter = 0\n # What are the token possibilities we've run into so far that fit the pattern?\n ordered_tokens = []\n ordered_tokens_current = []\n # Iterate through the lists of token possibilities\n for token_list in options.allowed:\n # Check if we've passed over the allowed number of in-between tokens yet,\n # if so then reset the pattern search\n if ordered_tokens_current:\n counter += 1\n if counter > self.max_distance:\n arg_index = 0\n counter = 0\n ordered_tokens_current = []\n # Does the token here match the pattern?\n # (Only consider directives with scores greater than or equal to self.min_match_score,\n # and decorators of any score)\n found_token = 0\n for tok in token_list:\n if (tok.score >= self.min_match_score or tok.is_decorator()) and tok.text in self.sequence[arg_index]:\n ordered_tokens_current.append(tok)\n found_token += 1\n # One or more possibilities here match the pattern!\n # On to the next expected possibility in the pattern sequence.\n if found_token:\n arg_index += 1\n counter = 0\n # Did we hit the end of the pattern sequence?\n # If so, let's reset so we can see if there's any more occurrences.\n if arg_index == len(self.sequence):\n arg_index = 0\n ordered_tokens.extend(ordered_tokens_current)\n # Positive reinforcement\n if self.pos_score:\n for tok in ordered_tokens:\n tok.score += self.pos_score\n 
# Negative reinforcement\n if self.neg_score:\n # Iterate through all possibilities for all tokens\n for token_list in options.allowed:\n for tok in token_list:\n # Is the possibility a directive?\n if not tok.is_decorator():\n # Does the possibility exist anywhere in the pattern?\n for match_text in self.sequence:\n if tok.text in match_text:\n # Was it not a part of any found instances of the pattern? If so, whack the score.\n if tok not in ordered_tokens:\n tok.score += self.neg_score", "def on_validateButton_clicked(self):\n regex = self.regexpLineEdit.text()\n if regex:\n re = QRegExp(regex)\n if self.caseSensitiveCheckBox.isChecked():\n re.setCaseSensitivity(Qt.CaseSensitive)\n else:\n re.setCaseSensitivity(Qt.CaseInsensitive)\n re.setMinimal(self.minimalCheckBox.isChecked())\n re.setPatternSyntax(\n self.syntaxCombo.itemData(self.syntaxCombo.currentIndex()))\n if re.isValid():\n E5MessageBox.information(\n self,\n self.tr(\"Validation\"),\n self.tr(\"\"\"The regular expression is valid.\"\"\"))\n else:\n E5MessageBox.critical(\n self,\n self.tr(\"Error\"),\n self.tr(\"\"\"Invalid regular expression: {0}\"\"\")\n .format(re.errorString()))\n return\n else:\n E5MessageBox.critical(\n self,\n self.tr(\"Error\"),\n self.tr(\"\"\"A regular expression must be given.\"\"\"))", "def _source_matchpattern_field_string_is_valid_as_regex(self):\n if self.source_matchpattern is None:\n raise RuleError(\"'source_matchpattern' must be a valid regex.\")\n if not regex_is_valid(self.source_matchpattern):\n # print(f\"{self}\")\n raise SourceMatchpatternError(\n \"Value for 'source_matchpattern' must be a valid regex.\"\n )\n return True", "def on_regexpLineEdit_textChanged(self, txt):\n self.nextButton.setEnabled(False)", "def check_regex():\n if input_cfg.option == \"-r\":\n try:\n re.compile(input_cfg.searchstr)\n except re.error:\n input_cfg.errmsg = \"bad regular expression\"", "def validate_search_inputs(self):\r\n\r\n debug(\"validate\")\r\n fail = False\r\n msg = \"\"\r\n if self.m_regex_search_checkbox.GetValue():\r\n if self.m_searchfor_textbox.GetValue() == \"\" or self.validate_search_regex():\r\n msg = _(\"Please enter a valid search regex!\")\r\n fail = True\r\n elif self.m_searchfor_textbox.GetValue() == \"\":\r\n msg = _(\"Please enter a valid search!\")\r\n fail = True\r\n if not fail and self.m_fileregex_checkbox.GetValue():\r\n if self.m_filematch_textbox.GetValue().strip() == \"\" or self.validate_regex(self.m_filematch_textbox.Value):\r\n msg = \"Please enter a valid file regex!\"\r\n fail = True\r\n elif self.m_filematch_textbox.GetValue().strip() == \"\":\r\n msg = _(\"Please enter a valid file pattern!\")\r\n fail = True\r\n if not fail and self.m_dirregex_checkbox.GetValue():\r\n if self.validate_regex(self.m_exclude_textbox.Value):\r\n msg = _(\"Please enter a valid exlcude directory regex!\")\r\n fail = True\r\n if not fail and not exists(self.m_searchin_text.GetValue()):\r\n msg = _(\"Please enter a valid search path!\")\r\n fail = True\r\n if (\r\n not fail and\r\n self.m_logic_choice.GetStringSelection() != \"any\" and\r\n re.match(r\"[1-9]+[\\d]*\", self.m_size_text.GetValue()) is None\r\n ):\r\n msg = _(\"Please enter a valid size!\")\r\n fail = True\r\n if not fail:\r\n try:\r\n self.m_modified_date_picker.GetValue().Format(\"%m/%d/%Y\")\r\n except:\r\n msg = _(\"Please enter a modified date!\")\r\n fail = True\r\n if not fail:\r\n try:\r\n self.m_created_date_picker.GetValue().Format(\"%m/%d/%Y\")\r\n except:\r\n msg = _(\"Please enter a created date!\")\r\n fail = 
True\r\n if fail:\r\n errormsg(msg)\r\n return fail", "def validate_all_patterns_consumed(self):\n try:\n pattern = next(self._patterns)\n except StopIteration:\n pass\n else:\n raise AssertionError(\n 'Not all the I/O patterns are consumed. '\n 'Remaining patterns starts from: %s' % repr(pattern)\n ) from None", "def _line_fits_pattern(self, logline):\n for (fieldname, pattern) in self._excludepatterns:\n try:\n m = pattern.search(str(logline.__dict__[fieldname]))\n except AttributeError:\n warn(\"Exclude patterns must be tuples of a field name and a compiled regex.\")\n warn(\"The object that you provided as a regex seems not to have a 'search' method\")\n exit(-1)\n except KeyError:\n warn(\"You tried to filter for a field that doesn't exist\")\n m = False\n if m:\n return False\n if len(self._includepatterns) == 0:\n return True # no includepatterns means 'accept everything'\n for (fieldname, pattern) in self._includepatterns:\n try:\n m = pattern.search(str(logline.__dict__[fieldname]))\n except AttributeError:\n warn(\"Exclude patterns must be tuples of a field name and a compiled regex.\")\n warn(\"The object that you provided as a regex seems not to have a 'search' method\")\n exit(-1)\n except KeyError:\n warn(\"You tried to filter for a field that doesn't exist\")\n m = False\n if m:\n return True\n return False", "def __collect_replacement__(self, s, where, orgpat, newpat, precond,\n postcond, existcond, startcond):\n\n vowels = 'aeiouy'\n tmpstr = s\n changesstr = ''\n\n start_search = 0 # Position from where to start the search\n pat_len = len(orgpat)\n stop = False\n\n # As long as pattern is in string\n #\n while ((orgpat in tmpstr[start_search:]) and (stop == False)):\n\n pat_start = tmpstr.find(orgpat, start_search)\n str_len = len(tmpstr)\n\n # Check conditions of previous and following character\n #\n OKpre = False # Previous character condition\n OKpre1 = False # Previous character1 condition\n OKpre2 = False # Previous character2 condition\n\n OKpost = False # Following character condition\n OKpost1 = False # Following character1 condition\n OKpost2 = False # Following character2 condition\n\n OKexist = False # Existing pattern condition\n OKstart = False # Existing start pattern condition\n\n index = 0\n\n if (precond == 'None'):\n OKpre = True\n\n elif (pat_start > 0):\n if (((precond == 'V') and (tmpstr[pat_start-1] in vowels)) or\n ((precond == 'C') and (tmpstr[pat_start-1] not in vowels))):\n OKpre = True\n\n elif ((precond.find(';')) > -1):\n if (precond.find('|') > -1):\n rls = precond.split('|')\n rl1 = rls[0].split(';')\n\n if (int(rl1[1]) < 0):\n index = pat_start+int(rl1[1])\n else:\n index = pat_start+(len(orgpat)-1)+int(rl1[1])\n\n i = 2\n if (rl1[0] == 'n'):\n while (i < (len(rl1))):\n if (tmpstr[index:(index+len(rl1[i]))] == rl1[i]):\n OKpre1 = False\n break\n else:\n OKpre1 = True\n i += 1\n else:\n while (i < (len(rl1))):\n if (tmpstr[index:(index+len(rl1[i]))] == rl1[i]):\n OKpre1 = True\n break\n i += 1\n\n rl2 = rls[1].split(';')\n\n if (int(rl2[1]) < 0):\n index = pat_start+int(rl2[1])\n else:\n index = pat_start+(len(orgpat)-1)+int(rl2[1])\n\n i = 2\n if (rl2[0] == 'n'):\n while (i < (len(rl2))):\n if (tmpstr[index:(index+len(rl2[i]))] == rl2[i]):\n OKpre2 = False\n break\n else:\n OKpre2 = True\n i += 1\n else:\n while (i < (len(rl2))):\n if (tmpstr[index:(index+len(rl2[i]))] == rl2[i]):\n OKpre2 = True\n break\n i += 1\n\n OKpre = OKpre1 and OKpre2\n\n else:\n rl = precond.split(';')\n # -\n if (int(rl[1]) < 0):\n index = pat_start+int(rl[1])\n 
else:\n index = pat_start+(len(orgpat)-1)+int(rl[1])\n\n i = 2\n if (rl[0] == 'n'):\n while (i < (len(rl))):\n if (tmpstr[index:(index+len(rl[i]))] == rl[i]):\n OKpre = False\n break\n else:\n OKpre = True\n i += 1\n else:\n while (i < (len(rl))):\n if (tmpstr[index:(index+len(rl[i]))] == rl[i]):\n OKpre = True\n break\n i += 1\n\n if (postcond == 'None'):\n OKpost = True\n\n else:\n pat_end = pat_start+pat_len\n\n if (pat_end < str_len):\n if (((postcond == 'V') and (tmpstr[pat_end] in vowels)) or\n ((postcond == 'C') and (tmpstr[pat_end] not in vowels))):\n OKpost = True\n elif ((postcond.find(';')) > -1):\n if (postcond.find('|') > -1):\n rls = postcond.split('|')\n\n rl1 = rls[0].split(';')\n\n if (int(rl1[1]) < 0):\n index = pat_start+int(rl1[1])\n else:\n index = pat_start+(len(orgpat)-1)+int(rl1[1])\n\n i = 2\n if (rl1[0] == 'n'):\n while (i < (len(rl1))):\n if (tmpstr[index:(index+len(rl1[i]))] == rl1[i]):\n OKpost1 = False\n break\n else:\n OKpost1 = True\n i += 1\n else:\n while (i < (len(rl1))):\n if (tmpstr[index:(index+len(rl1[i]))] == rl1[i]):\n OKpost1 = True\n break\n i += 1\n\n rl2 = rls[1].split(';')\n\n if (int(rl2[1]) < 0):\n index = pat_start+int(rl2[1])\n else:\n index = pat_start+(len(orgpat)-1)+int(rl2[1])\n\n i = 2\n if (rl2[0] == 'n'):\n while (i < (len(rl2))):\n if (tmpstr[index:(index+len(rl2[i]))] == rl2[i]):\n OKpost2 = False\n break\n else:\n OKpost2 = True\n i += 1\n else:\n while (i < (len(rl2))):\n if (tmpstr[index:(index+len(rl2[i]))] == rl2[i]):\n OKpost2 = True\n break\n i += 1\n\n OKpost = OKpost1 and OKpost2\n\n else:\n rl = postcond.split(';')\n\n if (int(rl[1]) < 0):\n index = pat_start+int(rl[1])\n else:\n index = pat_start+(len(orgpat)-1)+int(rl[1])\n\n i = 2\n if (rl[0] == 'n'):\n while (i < (len(rl))):\n if (tmpstr[index:(index+len(rl[i]))] == rl[i]):\n OKpost = False\n break\n else:\n OKpost = True\n i += 1\n else:\n while (i < (len(rl))):\n if (tmpstr[index:(index+len(rl[i]))] == rl[i]):\n OKpost = True\n break\n i += 1\n\n if (existcond == 'None'):\n OKexist = True\n\n else:\n rl = existcond.split(';')\n if (rl[1] == 'slavo'):\n r = self.__slavo_germanic__(s)\n if (rl[0] == 'n'):\n if (r == 0):\n OKexist = True\n else:\n if (r == 1):\n OKexist = True\n else:\n i = 1\n if (rl[0] == 'n'):\n while (i < (len(rl))):\n if (s.find(rl[i]) > -1):\n OKexist = False\n break\n else:\n OKexist = True\n i += i\n else:\n while (i < (len(rl))):\n if (s.find(rl[i]) > -1):\n OKexist = True\n break\n i += i\n\n if (startcond == 'None'):\n OKstart = True\n\n else:\n rl = startcond.split(';')\n i = 1\n if (rl[0] == 'n'):\n while (i < (len(rl))):\n if (s.find(rl[i]) > -1):\n OKstart = False\n break\n else:\n OKstart = True\n i += i\n else:\n while (i < (len(rl))):\n if (s.find(rl[i]) == 0):\n OKstart = True\n break\n i += i\n\n # Replace pattern if conditions and position OK\n #\n if ((OKpre == True) and (OKpost == True) and (OKexist == True) and\n (OKstart == True)) and (((where == 'START') and (pat_start == 0))\n or ((where == 'MIDDLE') and (pat_start > 0) and\n (pat_start+pat_len < str_len)) or ((where == 'END') and\n (pat_start+pat_len == str_len)) or (where == 'ALL')):\n tmpstr = tmpstr[:pat_start]+newpat+tmpstr[pat_start+pat_len:]\n changesstr += ',' + orgpat + '>' + newpat + '>' + where.lower()\n start_search = pat_start + len(newpat)\n\n else:\n start_search = pat_start+1\n\n if (start_search >= (len(tmpstr)-1)):\n stop = True\n\n tmpstr += changesstr\n\n return tmpstr", "def validate_inputs(self):\r\n\r\n\t\t# Initialise the validation flag and set the 
change variable\r\n\t\tvalidate = False\r\n\t\tcharacters_exceeded = False\r\n\t\tchange = self.change_type.get()\r\n\r\n\t\t# Set the validation criteria\r\n\t\tself.subassembly_vaidation = bool(self.subassembly.get())\r\n\t\tself.users_validation = bool(\r\n\t\t\tself.requester.get() and self.creator.get())\r\n\t\tself.comment_validation = self.comment.get(\"1.0\", \"end-1c\")\r\n\t\tself.limit_warning = \"Maximum comment length is 85 characters\"\r\n\r\n\t\t# Warning message\r\n\t\tself.message = \"Please provide all the inputs\"\r\n\r\n\t\t# Evaluation of change type, subassembly and part input fields\r\n\t\tif change in range(1, 5):\r\n\t\t\tif self.subassembly_vaidation:\r\n\t\t\t\tvalidate = True\r\n\t\t\telse:\r\n\t\t\t\tvalidate = False\r\n\t\telse:\r\n\t\t\tvalidate = False\r\n\r\n\t\t# Evaluation of requester and creator fields\r\n\t\tif not self.users_validation:\r\n\t\t\tvalidate = False\r\n\r\n\t\tif len(self.comment_validation) > 85:\r\n\t\t\tcharacters_exceeded = True\r\n\r\n\t\t# Trigger workflow or raise warning based on evaluation critera\r\n\t\tif validate and not characters_exceeded:\r\n\t\t\tMainWindow.workflow(self)\r\n\t\telif characters_exceeded:\r\n\t\t\tmessagebox.showwarning(\"Limit Exceeded\", self.limit_warning)\r\n\t\telse:\r\n\t\t\tmessagebox.showwarning(\"Insufficient Inputs\", self.message)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Uses the list of files on disk and the regex patterns to build a list of what the directory will look like if we renamed the files. Because we're just using a simple text list, we use symbols to show the user which filenames would change and whether they would produce any duplicates, substituting "." with "\1.txt".
def populateFileList(self):
        self.m_fileList.SetForegroundColour(wx.NullColour)

        # We'll need to track which file names are modified and which
        # file names duped.
        applicable, dupes = set(), set()

        if not self.m_validPatterns:
            # Regex's don't compile yet, just use the raw filename list.
            newNames = self.m_diskNames
        else:
            # Apply the substitution to the filename list to produce a
            # destination-name list, and identify whether the patterns
            # actually affect anything.
            #
            newNames, modifiedIndexes = [], []
            matcher = re.compile(self.m_reFromCtl.Value).subn
            subs = self.m_reToCtl.Value
            for filename in self.m_diskNames:
                # Perform the sub
                (filename, numChanges) = matcher(subs, filename)
                # Was there a modification?
                if numChanges:
                    # Record the affected name.
                    applicable.add(filename)
                    if filename in newNames:
                        dupes.add(filename)
                # Add to the primary list
                newNames.append(filename)

        # Does this produce a different list than we already had? If so,
        # clear the file list and replace it with the new one.
        #
        if newNames != self.m_newNames:
            self.m_fileList.Clear()

            # Figure out the longest name so we can create a cleanly-formatted
            # set of prefix/suffix characters for the modified/duped annotation.
            #
            maxLen = max(map(len, newNames))
            decorate = '{m} {fn:<{ml}} {m}'.format

            # Now build a list of display elements.
            for filename in newNames:
                mark = ' ' if filename not in applicable else '|'
                if filename in dupes:
                    mark = '*'
                self.m_fileList.Append(decorate(m=mark, fn=filename, ml=maxLen))

            # Keep the list.
            self.m_newNames[:] = newNames

        # Update the apply button, we only want it enabled when the user
        # has a valid set of patterns that affect any files and have no
        # dupes produced as a result.
        #
        self.m_applyBtn.Enabled = bool(applicable) and not dupes

        if dupes:
            # Emphasize the presence of dupes.
            self.m_fileList.SetForegroundColour(wx.RED)

        # Draw the list.
        self.m_fileList.Refresh()
[ "def mark_files(self,postfix):\n dk = self.cand.keys()\n dk.sort() # sort by date...\n dk.reverse() # .. and reverse so latest comes first\n res = \"\"\n for k in dk:\n fn = self.cand[k].name\n pfn = os.path.join(self.dir,fn)\n fnnew = fn + postfix\n pfnnew = os.path.join(self.dir,fnnew)\n os.rename(pfn,pfnnew)\n res += \"mv %s %s\\n\" % (pfn,pfnnew)\n return res", "def rename_files(file_list, src_dir, pattern, rename=False):\n i = 0\n renamed = regex_group_split(file_list, pattern, False)\n renamed_w_path = [src_dir + fn for fn in renamed]\n orig_fp_list = orig_filepath_list(file_list, src_dir)\n\n for filename in file_list:\n if not (orig_fp_list[i] == renamed_w_path[i]):\n print (colors.BLUE + \"_ORIGINAL_: \" + orig_fp_list[i].replace(src_dir, \"\") + colors.ENDC)\n print (colors.RED + \"__UPDATE__: \" + renamed_w_path[i].replace(src_dir, \"\") + colors.ENDC)\n\n if rename:\n os.rename(orig_fp_list[i], renamed_w_path[i])\n i += 1", "def file_names(files, directory, extension):\n # return the paths of the files, do not remove duplicates\n return [f.make_path(directory, extension) for f in files]", "def findDuplicateReleaseFiles(self, initialList, workingTowerName, newInfix):\n Release_Tower_Name = self.getReleaseVersion(workingTowerName, newInfix)\n Duplicate_List = []\n for fname in initialList:\n prefixStream, postfixStream = string.split(fname, workingTowerName)\n A_File_Name = prefixStream + Release_Tower_Name + postfixStream\n if (os.path.exists(A_File_Name)):\n Duplicate_List = Duplicate_List + [A_File_Name]\n \n return Duplicate_List", "def main(root, filelist):\n #print \"got %s: %s\" % (root, filelist)\n rename(root, filelist)", "def modifier(lst, mod):\n if not isinstance(lst, list):\n raise TypeError('Argument must be a list!')\n names = []\n # remove '.dcm' from mod if there is one\n if str(mod).find('.dcm') > 0:\n mod = str(mod).split('.')[:-1]\n for pt in lst:\n p, name = os.path.split(pt) # split into path and file name\n # if there is a dot in the file name\n if name.find('.') > 0: # if there is a dot in the file name\n # remove suffix and replace it with mod.dcm\n name = ''.join(name.split('.')[:-1]) + str(mod) + '.' 
+ name.split('.')[-1]\n else: # if there is no .dcm we'll add it ourselves\n name = name + str(mod) + '.dcm'\n names.append(os.path.join(p, name)) # join the path with the name\n return names", "def rename(root, filelist):\n if not filelist:\n return\n def apply_rules(filename):\n rulez = [('_+' , ' '), # One or more underscores to spaces\n ('-{2,}' , '-'), # Two or more hyphens to single hyphen\n ('&' , 'And'), # An ampersand to 'And'\n ('(-)(\\w*)' ,r' \\1 \\2')]# Spaces around hyphen seperated words\n \n for look_for, replacement in rulez:\n filename = re.sub(look_for, replacement, filename)\n # Capitalize first letter of every word\n filename = \" \".join([ word.capitalize() for word in filename.split() ])\n return filename\n \n names = []\n for filename in filelist:\n basename = os.path.basename(filename)\n names.append(os.path.join(root, apply_rules(filename)))\n try:\n dest = os.tmpnam()\n fl = open(dest, 'w')\n fl.write(\"\\n\".join(names))\n fl.close()\n os.system(\"%s %s\" % (EDITOR, dest))\n ans = 'no'\n for oldname, newname in zip(filelist, open(dest).readlines()):\n oldname = os.path.join(root, oldname)\n newname = newname.strip()\n if oldname == newname:\n print \"No change from %s to %s ...skipping\" % (oldname, newname)\n else:\n print \"Changing %s to %s\" % (oldname, newname)\n if not ans[0].lower == 'a':\n ans = raw_input(\"Contine (Yes/No/All) ? [N] \") or 'no'\n if ans[0].lower() in ('a', 'y'):\n os.rename(oldname, newname)\n else:\n os.rename(oldname, newname)\n finally:\n os.remove(dest)", "def seperate_path_filename(paths_files, paths, files):\r\n for i in paths_files:\r\n temp = i[0].split('\\\\')\r\n paths.append(i[0].replace(temp[-1], ''))\r\n files.append(temp[-1])", "def rename_date_formats(files_list):\n\n count_renamed = 0\n count_skipped = 0\n\n for file in files_list:\n\n # finding DD-DD-DDDD matches\n if date_regex.search(file):\n date_format = date_regex.search(file).group()\n date_split = date_format.split(\"-\")\n\n # detecting MM-DD-YYYY format and renaming to DD-MM-YYYY format\n if 1 <= int(date_split[0]) <= 12 and 1 <= int(date_split[1]) <= 31:\n european_format_date = \"-\".join([date_split[1], date_split[0], date_split[2]])\n new_file_name = file.replace(date_format, european_format_date)\n\n # checking that newly renamed file won't be a duplicate\n if new_file_name not in files_list:\n shutil.move(file, new_file_name)\n print(f\"<{file}> renamed to <{new_file_name}>\")\n count_renamed += 1\n else:\n print(f\"Cannot rename <{file}> because file <{new_file_name}> already exists\")\n count_skipped += 1\n\n # for files with DD-DD-DDDD format, but not MM-DD-YYYY like 89-77-3445\n else:\n print(f\"<{file}> has no MM-DD-YYYY date in name\")\n count_skipped += 1\n\n # for files with no MM-DD-YYYY format like 12-1221.txt or text.pdf\n else:\n print(f\"<{file}> has no MM-DD-YYYY date in name\")\n count_skipped += 1\n\n print(f\"\\nSUMMARY:\\nRenamed files count - {count_renamed}, not affected files count - {count_skipped}.\")", "def fix_renamed_files(files):\n new_files = []\n for file in files:\n if \"=>\" in file:\n if \"{\" and \"}\" in file:\n # file moved\n src, dst = file.split(\"{\")[1].split(\"}\")[0].split(\"=>\")\n fix = lambda repl: re.sub(r\"{[\\.a-zA-Z_/\\-0-9]* => [\\.a-zA-Z_/\\-0-9]*}\", repl.strip(), file)\n new_files.extend(map(fix, [src, dst]))\n else:\n # full path changed\n new_files.extend(map(lambda x: x.strip(), file.split(\"=>\")))\n pass\n else:\n new_files.append(file)\n return new_files", "def findDuplicateWorkingFiles(self, 
initialList, curInfix, newInfix):\n Duplicate_List = []\n for fname in initialList:\n infixStream = iccs_apex.whatInfixIsStream(fname)\n if (infixStream == curInfix):\n prefixStream, postfixStream = string.split(fname, infixStream)\n A_File_Name = prefixStream + newInfix + postfixStream\n if (os.path.exists(A_File_Name)):\n Duplicate_List = Duplicate_List + [A_File_Name]\n \n return Duplicate_List", "def convert_names(root_directory, replace_from_list, replace_to, warning = False):\n # Initialize variables\n files_not_converted = []\n directories_not_converted = []\n\n # For each file and directory under the root directory\n for curr_dir, dirnames, filenames in os.walk(root_directory, topdown=False):\n # For each file\n for fname in filenames:\n # split the extension\n split_fname = os.path.splitext(fname)\n # Rename\n new_name = reduce(lambda temp_name, curr_replace: temp_name.replace(curr_replace, REPLACE_TO), REPLACE_FROM, split_fname[0])\n # Apply\n try:\n os.rename(os.path.join(curr_dir, fname), os.path.join(curr_dir, new_name + split_fname[1]))\n except WindowsError, error:\n if warning:\n print (fname, \"- problem converting\")\n files_not_converted.append(os.path.join(curr_dir, fname))\n for dname in dirnames:\n try:\n # Rename & Apply\n os.rename(os.path.join(curr_dir, dname), os.path.join(curr_dir, reduce(lambda temp_name, curr_replace: temp_name.replace(curr_replace, REPLACE_TO), REPLACE_FROM, dname)))\n except WindowsError, error:\n if warning:\n print (dname, \"- problem converting\")\n directories_not_converted.append(os.path.join(curr_dir, dname))\n\n return (files_not_converted, directories_not_converted)", "def main(filenames, cut=\":\", paste=\"\"):\n if isinstance(filenames, list):\n for fname in filenames:\n new_name = char_replace(fname, cut, paste)\n os.rename(fname, new_name)\n\n elif isinstance(filenames, str):\n new_name = char_replace(filenames, cut, paste)\n os.rename(filenames, new_name)\n return new_name", "def build_quick_note_list(file_list):\n quick_list = []\n for name in file_list:\n _, ext = os.path.splitext(name)\n if not (ext.lower() in ['.txt', '.md']):\n continue\n if re.search(\"^[0-9]{2,4}-.*$\", name):\n quick_list.append(name)\n return quick_list", "def test_paths_to_plates():\n output = filelister_yoko.paths_to_plates(TEST_PATH_YOKO)\n prefix = os.path.abspath(TEST_PATH_YOKO)\n plate_names = [\"screen-name-batch1_20190213_095340/A000002-PC\"]\n make_own = [os.path.join(prefix, name) for name in plate_names]\n assert len(output) == len(plate_names)\n for ans in output:\n assert ans in make_own", "def _glob(filenames):\n if isinstance(filenames, string_types):\n filenames = [filenames]\n for name in filenames:\n matched_names = glob(name)\n if not matched_names:\n # use the original string\n matches.append(name)\n else:\n matches.extend(matched_names)\n return matches", "def test_file_patterns(self):\r\n file_patterns = []\r\n self.replace_case(\r\n self.path, 'find ', False, file_patterns, 12, 3, 12, 6)\r\n self.reset()\r\n file_patterns = ['*.php']\r\n self.replace_case(self.path, 'find ', False, file_patterns, 4, 1, 4, 2)\r\n self.reset()\r\n file_patterns = ['*.php', '*.html']\r\n self.replace_case(self.path, 'find ', False, file_patterns, 8, 2, 8, 4)", "def file_matches(self, text):\n \n #print 'Completer->file_matches: <%s>' % text # dbg\n\n # chars that require escaping with backslash - i.e. 
chars\n # that readline treats incorrectly as delimiters, but we\n # don't want to treat as delimiters in filename matching\n # when escaped with backslash\n \n protectables = ' ()[]{}'\n\n def protect_filename(s):\n return \"\".join([(ch in protectables and '\\\\' + ch or ch)\n for ch in s])\n\n lbuf = self.get_line_buffer()[:self.readline.get_endidx()]\n open_quotes = 0 # track strings with open quotes\n try:\n lsplit = shlex_split(lbuf)[-1]\n except ValueError:\n # typically an unmatched \", or backslash without escaped char.\n if lbuf.count('\"')==1:\n open_quotes = 1\n lsplit = lbuf.split('\"')[-1]\n elif lbuf.count(\"'\")==1:\n open_quotes = 1\n lsplit = lbuf.split(\"'\")[-1]\n else:\n return None\n except IndexError:\n # tab pressed on empty line\n lsplit = \"\"\n\n if lsplit != protect_filename(lsplit):\n # if protectables are found, do matching on the whole escaped\n # name\n has_protectables = 1\n text0,text = text,lsplit\n else:\n has_protectables = 0\n text = os.path.expanduser(text)\n \n if text == \"\":\n return [protect_filename(f) for f in self.glob(\"*\")]\n\n m0 = self.clean_glob(text.replace('\\\\',''))\n if has_protectables:\n # If we had protectables, we need to revert our changes to the\n # beginning of filename so that we don't double-write the part\n # of the filename we have so far\n len_lsplit = len(lsplit)\n matches = [text0 + protect_filename(f[len_lsplit:]) for f in m0]\n else:\n if open_quotes:\n # if we have a string with an open quote, we don't need to\n # protect the names at all (and we _shouldn't_, as it\n # would cause bugs when the filesystem call is made).\n matches = m0\n else:\n matches = [protect_filename(f) for f in m0]\n if len(matches) == 1 and os.path.isdir(matches[0]):\n # Takes care of links to directories also. Use '/'\n # explicitly, even under Windows, so that name completions\n # don't end up escaped.\n matches[0] += '/'\n return matches", "def get_file_names(path, postfix):\n flist = list()\n for i in os.listdir(path):\n if os.path.splitext(i)[1]==postfix:\n flist.append(i)\n return flist" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When the user hits 'enter' in the 'from' field.
def onHitEnterInFrom(self, event): self.validateRegexFields(complete=True) if self.m_validFromRe: self.m_reToCtl.SetFocus()
[ "def pressed_enter(self):\n pass", "def hit_enter():\n keyboard.press_and_release('Enter')", "def enter(self):\n\t\tself.actionObject().key_down(Keys.ENTER).key_up(Keys.ENTER).perform()", "def enterKey_cb(widget, dialog):\n dialog.response(gtk.RESPONSE_ACCEPT)", "def enter_press_log_watcher(self, event): # makes it so you can use enter instead of having to press the button\r\n if event.keycode == 13:\r\n self.choose_watcher_num()", "def message_key_handler(self, e):\n if (e.key() == QtCore.Qt.Key_Return) or \\\n (e.key() == QtCore.Qt.Key_Enter):\n text = self.message.toPlainText()\n if text:\n self.message_pipe_send.send(text)\n self.earlier_messages.append(\"yo: \" + text)\n self.message.clear()\n else:\n QtGui.QTextEdit.keyPressEvent(self.message, e)", "def enter(self):\n self.__keyboard.press(Key.enter)\n self.__keyboard.release(Key.enter)", "def enterToContinue(self):\n self.accept(\"Press any key to continue\")", "def _OnPressEnter1(self):\n\t self.epsg1.set( self.epsg1.get() )\n\t self.epsg1_entry.focus_set()\n\t self.epsg1_entry.selection_range(0, Tkinter.END)\n\t print('epsg code set to %s ' % (str(self.epsg1.get())))", "def enter(self):\n\t\tself._translate(True)\n\t\tinputCore.manager.emulateGesture(keyboardHandler.KeyboardInputGesture.fromName(\"enter\"))", "def press_enter():\n raw_input(\"\\n\\nPress Enter\")", "def on_enter(self, event=None):\n if len(self.text):\n eventtypes.post_chatmsg(self, self.nickname+': '+self.text)\n self.text = \"\"\n self.caretpos = 0", "def get_anykey(self):\n return input('Press --> ENTER<-- to move forward.')", "def press_enter_to_continue():\n # Wait for user input\n input(f\"{helper.blue_text}Press Enter to go back to the main menu.\\n\")\n ui()", "def ask_user_for_keypress():\n return HumanInputAgent.ask_user_for_input(\"Press Enter to continue.\")", "def handle_user_enter_number(self, event, number):\n pass", "def respond_to_keypress(self, key):", "def _OnPressEnter4(self):\n\t self.epsg4.set( self.epsg4.get() )\n\t self.epsg4_entry.focus_set()\n\t self.epsg4_entry.selection_range(0, Tkinter.END)\n\t print('epsg code set to %s ' % (str(self.epsg4.get())))", "def enter_press_log_show(self, event): # makes it so you can use enter instead of having to press the button\r\n if event.keycode == 13:\r\n self.show_game(self.game_number.get())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When the user modifies the content of either regex field.
def onTextChange(self, event): self.validateRegexFields(complete=False) event.Skip()
[ "def to_field(self, **kwargs):\n if self.regex:\n if not 'regex' in self.field_args:\n self.field_args = self.field_args + ('regex', )\n self.field_klass = forms.RegexField\n return super(StringSetting, self).to_field(**kwargs)", "def get_regex_mismatch_error_text(field_name, source_regex):\n\n\treturn(\"Value entered for '{0}' does not match regex '{1}'\"\n\t\t .format(field_name, source_regex.pattern))", "def on_regex_search_toggle(self, event):\r\n\r\n if self.m_regex_search_checkbox.GetValue():\r\n update_autocomplete(self.m_searchfor_textbox, \"regex_search\")\r\n else:\r\n update_autocomplete(self.m_searchfor_textbox, \"literal_search\")\r\n event.Skip()", "def validate(self,NewRegex,sentence):\n\n #6\n if re.search(NewRegex,sentence):\n if len(self.memory) > 0:\n for x in self.memory:\n if x != NewRegex:\n self.NewRegex = NewRegex\n self.sentence = sentence\n else:\n self.NewRegex = NewRegex\n self.sentence = sentence\n\n else: return", "def on_patternEdit_textChanged(self, txt):\n self.__updateOK()", "def _regex_operation(self, left, right):\n if self.dialect == 'mysql':\n return literal(left).op('REGEXP', is_comparison=True)(right)\n elif self.dialect == 'postgresql':\n return literal(left).op('~', is_comparison=True)(right)\n elif self.dialect == 'oracle':\n return func.REGEXP_LIKE(left, right)\n return None", "def on_regexpLineEdit_textChanged(self, txt):\n self.nextButton.setEnabled(False)", "def apply_regex(self):\n if self.regex:\n if self.data.dtype is np.dtype('O'):\n # filter by regex\n logger.info(\"Applying regex filter with the following regex:\")\n logger.info(self.regex)\n self.data = self.data[self.data.str.contains(self.regex)]", "def code_line_edit_changed(self, text):\n if self.updating_code_lineEdit:\n return\n\n self.updating_code_lineEdit = True\n\n if re.findall(r'[^a-zA-Z0-9_ ]+', text):\n self.code_line_edit.set_invalid('Invalid character')\n else:\n if text == '':\n self.code_line_edit.set_invalid('Please enter a code!')\n else:\n if len(text) > 24:\n self.code_line_edit.set_invalid('Code is too long (>24)')\n else:\n self.code_line_edit.set_valid()\n\n # just update the code field\n formatted_text = text.strip().replace(' ', '_').replace('-', '_')\n\n # remove multiple under scores\n formatted_text = re.sub('[_]+', '_', formatted_text)\n\n self.code_line_edit.setText(formatted_text)\n self.updating_code_lineEdit = False", "def _add_regexp_listener(dbapi_con, con_record):\n\n def regexp(expr, item):\n reg = re.compile(expr)\n return reg.search(six.text_type(item)) is not None\n dbapi_con.create_function('regexp', 2, regexp)", "def filterRegExpChanged(self):\n syntax = QRegExp.RegExp\n caseSensitivity = Qt.CaseInsensitive\n regExp = QRegExp(self.filterText.text(), caseSensitivity, syntax)\n\n self.cacheTable.proxyModel.setFilterKeyColumn( COL_NAME )\n self.cacheTable.proxyModel.setFilterRegExp(regExp)\n\n self.cacheTable.adjustColumns()\n self.cacheTable.adjustRows()", "def bound_regex(self, bound_regex):\n\n self._bound_regex = bound_regex", "def RegEx(self, regex):\n if len(regex) > 0:\n try:\n regexreplaced = regex.replace(\"%TARGET%\", self._target)\n self._regex = regexreplaced\n except AttributeError:\n regexreplaced = []\n for r in regex:\n regexreplaced.append(r.replace(\"%TARGET%\", self._target))\n self._regex = regexreplaced\n else:\n self._regex = \"\"", "def replace_rules(self):", "def on_validateButton_clicked(self):\n regex = self.regexpLineEdit.text()\n if regex:\n re = QRegExp(regex)\n if self.caseSensitiveCheckBox.isChecked():\n 
re.setCaseSensitivity(Qt.CaseSensitive)\n else:\n re.setCaseSensitivity(Qt.CaseInsensitive)\n re.setMinimal(self.minimalCheckBox.isChecked())\n re.setPatternSyntax(\n self.syntaxCombo.itemData(self.syntaxCombo.currentIndex()))\n if re.isValid():\n E5MessageBox.information(\n self,\n self.tr(\"Validation\"),\n self.tr(\"\"\"The regular expression is valid.\"\"\"))\n else:\n E5MessageBox.critical(\n self,\n self.tr(\"Error\"),\n self.tr(\"\"\"Invalid regular expression: {0}\"\"\")\n .format(re.errorString()))\n return\n else:\n E5MessageBox.critical(\n self,\n self.tr(\"Error\"),\n self.tr(\"\"\"A regular expression must be given.\"\"\"))", "def on_replacetextCombo_editTextChanged(self, text):\n self.__enableFindButton()", "def setFilterRegularExpression(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "async def edit_regex_slash(\n self,\n interaction: discord.Interaction,\n trigger: app_commands.Transform[Trigger, TriggerTransformer],\n regex: app_commands.Transform[str, RegexTransformer],\n ):\n if trigger is None:\n return\n if regex is None:\n return\n ctx = await interaction.client.get_context(interaction)\n await self.edit_regex(ctx, trigger, regex=regex)", "def setFilterRegExp(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the updated log_conf, taking into account new log files present on the instance as well as modifications made to the corresponding logentries host.
def update_instance_conf(log_paths, log_conf): log_client = LogClient.Client(account_key) instance_id, config = get_ssh_config(env.host) if log_conf is None and len(log_paths)>0: print 'log_conf is None' log_conf = create_host_logs(log_client,instance_id,log_paths) elif log_conf is not None: print 'log_conf is not None' conf_host = log_conf.get_host() if conf_host is None: print 'Error. This instance configuration is missing the corresponding model!! instance_id=%s'%instance_id logger.error('Error. This instance configuration is missing the corresponding model!! instance_id=%s',instance_id) log_conf = create_host_logs(log_client,instance_id,log_paths) return log_conf if conf_host.get_key() is None: print 'Host %s has an logentries-rsyslog config file but no account key!!'%host.get_name() logger.warning('Host %s has an logentries-rsyslog config file but no account key!!',host.get_name()) log_conf = create_host_logs(log_client,instance_id,log_paths) return log_conf account = log_client.get_account() matching_host = None for host in account.get_hosts(): if host.get_key() == conf_host.get_key(): matching_host = host break # If there is no matching host, then it is assumed that it was deleted from Logentries and that no configuration should be associated to this instance. if matching_host is None: log_conf = create_host_logs(log_client,instance_id,log_paths) return log_conf for new_log in get_new_logs(log_paths, log_conf): # Update matching host so that each new log becomes part of it. matching_host = log_client.create_log_token(host=matching_host,log_name=new_log) log_conf.set_host(matching_host) return log_conf
[ "def update_config(update):\n global _config\n new_config = copy.deepcopy(_config)\n _update_dict_recursive(new_config, update)\n logging.config.dictConfig(new_config)\n _configure_ulog_bridge()\n _config = new_config", "def update_log_config(self, monitor_name, log_config):\n pass", "def conf_update(self):\n pass", "def __logChanges(self, host):\n self.logger.info('New Interfaces Config:')\n self.logger.info(jsondumps(self.yamlconf[host].get('interface', {})))\n self.logger.info('New BGP Config:')\n self.logger.info(jsondumps(self.yamlconf[host].get('sense_bgp', {})))", "def log_config(self) -> 'outputs.FirewallLogConfigResponse':\n return pulumi.get(self, \"log_config\")", "def set_rsyslog_old_configuration():\n add_udp = False\n add_tcp = False\n # Do the configuration lines exist\n is_exist_udp_conf = False\n is_exist_tcp_conf = False\n with open(rsyslog_conf_path, \"rt\") as fin:\n for line in fin:\n if \"imudp\" in line or \"UDPServerRun\" in line:\n is_exist_udp_conf = True\n add_udp = True if \"#\" in line else False\n elif \"imtcp\" in line or \"InputTCPServerRun\" in line:\n is_exist_tcp_conf = True\n add_tcp = True if \"#\" in line else False\n fin.close()\n if add_udp or not is_exist_udp_conf:\n append_content_to_file(rsyslog_old_config_udp_content, rsyslog_conf_path)\n if add_tcp or not is_exist_tcp_conf:\n append_content_to_file(rsyslog_old_config_tcp_content, rsyslog_conf_path)\n print_ok(\"Rsyslog.conf configuration was changed to fit required protocol - \" + rsyslog_conf_path)\n return True", "def configuration_of_the_logs():\n log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(funcName)s(%(lineno)d) %(message)s')\n\n #todo:#Check to see if the folder exist\n my_path = path.dirname(path.realpath(__file__))\n logFile = my_path + '/logs/logs.txt'\n\n #The logs.txt file can't be more than 5MB\n my_handler = RotatingFileHandler(logFile, mode='a', maxBytes=5*1024*1024,\n backupCount=2, encoding=None, delay=0)\n my_handler.setFormatter(log_formatter)\n my_handler.setLevel(logging.INFO)\n\n app_log = logging.getLogger('root')\n #app_log.setLevel(logging.INFO)\n app_log.setLevel(logging.INFO)\n\n app_log.addHandler(my_handler)\n #app_log.info('configuraring the logs')\n\n return app_log", "def get_logger_config(self):\n return self._load_config_file(self._logger_config_path)", "def getLoggerDictConfig(self):\n if 'logging' in self._winfo:\n return self._winfo['logging']\n else:\n return super(WorkerConfig, self).getLoggerDictConfig()", "def update_log_files(self, control_log, helper_log, server_log):\n if control_log is not None:\n self.control_log_file.update(\n control_log, \"server_config.control_log_file\")\n if helper_log is not None:\n self.helper_log_file.update(\n helper_log, \"server_config.helper_log_file\")\n if server_log is not None:\n for index, server_params in enumerate(self.server_params):\n log_name = list(os.path.splitext(server_log))\n if len(self.server_params) > 1:\n log_name.insert(1, \"_{}\".format(index))\n server_params.log_file.update(\n \"\".join(log_name),\n \"server_config.server[{}].log_file\".format(index))", "def get_logger_config(log_dir,\r\n logging_env=\"no_env\",\r\n tracking_filename=\"tracking.log\",\r\n edx_filename=\"edx.log\",\r\n dev_env=False,\r\n syslog_addr=None,\r\n debug=False,\r\n local_loglevel='INFO',\r\n console_loglevel=None,\r\n service_variant=None):\r\n\r\n # Revert to INFO if an invalid string is passed in\r\n if local_loglevel not in LOG_LEVELS:\r\n local_loglevel = 'INFO'\r\n\r\n if console_loglevel is None 
or console_loglevel not in LOG_LEVELS:\r\n console_loglevel = 'DEBUG' if debug else 'INFO'\r\n\r\n if service_variant is None:\r\n # default to a blank string so that if SERVICE_VARIANT is not\r\n # set we will not log to a sub directory\r\n service_variant = ''\r\n\r\n hostname = platform.node().split(\".\")[0]\r\n syslog_format = (\"[service_variant={service_variant}]\"\r\n \"[%(name)s][env:{logging_env}] %(levelname)s \"\r\n \"[{hostname} %(process)d] [%(filename)s:%(lineno)d] \"\r\n \"- %(message)s\").format(service_variant=service_variant,\r\n logging_env=logging_env,\r\n hostname=hostname)\r\n\r\n handlers = ['console', 'local'] if debug else ['console',\r\n 'syslogger-remote', 'local']\r\n\r\n logger_config = {\r\n 'version': 1,\r\n 'disable_existing_loggers': False,\r\n 'formatters': {\r\n 'standard': {\r\n 'format': '%(asctime)s %(levelname)s %(process)d '\r\n '[%(name)s] %(filename)s:%(lineno)d - %(message)s',\r\n },\r\n 'syslog_format': {'format': syslog_format},\r\n 'raw': {'format': '%(message)s'},\r\n },\r\n 'handlers': {\r\n 'console': {\r\n 'level': console_loglevel,\r\n 'class': 'logging.StreamHandler',\r\n 'formatter': 'standard',\r\n 'stream': sys.stderr,\r\n },\r\n 'syslogger-remote': {\r\n 'level': 'INFO',\r\n 'class': 'logging.handlers.SysLogHandler',\r\n 'address': syslog_addr,\r\n 'formatter': 'syslog_format',\r\n },\r\n 'newrelic': {\r\n 'level': 'ERROR',\r\n 'class': 'lms.lib.newrelic_logging.NewRelicHandler',\r\n 'formatter': 'raw',\r\n }\r\n },\r\n 'loggers': {\r\n 'tracking': {\r\n 'handlers': ['tracking'],\r\n 'level': 'DEBUG',\r\n 'propagate': False,\r\n },\r\n '': {\r\n 'handlers': handlers,\r\n 'level': 'DEBUG',\r\n 'propagate': False\r\n },\r\n }\r\n }\r\n\r\n if dev_env:\r\n tracking_file_loc = os.path.join(log_dir, tracking_filename)\r\n edx_file_loc = os.path.join(log_dir, edx_filename)\r\n logger_config['handlers'].update({\r\n 'local': {\r\n 'class': 'logging.handlers.RotatingFileHandler',\r\n 'level': local_loglevel,\r\n 'formatter': 'standard',\r\n 'filename': edx_file_loc,\r\n 'maxBytes': 1024 * 1024 * 2,\r\n 'backupCount': 5,\r\n },\r\n 'tracking': {\r\n 'level': 'DEBUG',\r\n 'class': 'logging.handlers.RotatingFileHandler',\r\n 'filename': tracking_file_loc,\r\n 'formatter': 'raw',\r\n 'maxBytes': 1024 * 1024 * 2,\r\n 'backupCount': 5,\r\n },\r\n })\r\n else:\r\n # for production environments we will only\r\n # log INFO and up\r\n logger_config['loggers']['']['level'] = 'INFO'\r\n logger_config['handlers'].update({\r\n 'local': {\r\n 'level': local_loglevel,\r\n 'class': 'logging.handlers.SysLogHandler',\r\n 'address': '/dev/log',\r\n 'formatter': 'syslog_format',\r\n 'facility': SysLogHandler.LOG_LOCAL0,\r\n },\r\n 'tracking': {\r\n 'level': 'DEBUG',\r\n 'class': 'logging.handlers.SysLogHandler',\r\n 'address': '/dev/log',\r\n 'facility': SysLogHandler.LOG_LOCAL1,\r\n 'formatter': 'raw',\r\n },\r\n })\r\n\r\n return logger_config", "def sanitize_new_config(self):\n config_log = self._load_config_log()\n if 'new' in config_log:\n for cfg in config_log['new']:\n with open(cfg, 'r+') as f:\n data = yaml.load(f)\n f.seek(0)\n yaml.safe_dump(data, f, default_flow_style=False)\n f.truncate()\n del config_log['new']\n\n self._save_config_log(config_log)", "def update_remotelogging_config(self, context):\n\n personalities = [constants.CONTROLLER,\n constants.WORKER,\n constants.STORAGE]\n config_uuid = self._config_update_hosts(context, personalities)\n\n config_dict = {\n \"personalities\": [constants.CONTROLLER],\n \"classes\": 
['platform::sysctl::controller::runtime',\n 'platform::remotelogging::runtime']\n }\n self._config_apply_runtime_manifest(context, config_uuid, config_dict)\n\n config_dict = {\n \"personalities\": [constants.WORKER, constants.STORAGE],\n \"classes\": ['platform::remotelogging::runtime'],\n }\n self._config_apply_runtime_manifest(context, config_uuid, config_dict)", "def changelog(self):\n # Work around log time is not at the same day as build \"start_time\"\n # Like kernel-3.10.0-512.el7, its log time is 2016-09-30 12:00:00\n # but its build start time is 2016-10-01 03:27:46.300467\n dt_obj = datetime.datetime.strptime(\n self.build[\"start_time\"], \"%Y-%m-%d %H:%M:%S.%f\")\n dt_delta = dt_obj - datetime.timedelta(days=3)\n dt_delta_str = dt_delta.strftime(\"%Y-%m-%d %H:%M:%S\")\n self._logger.debug(\n \"Build was built at {0}, changelogs will be fetched after {1}.\"\n .format(dt_obj, dt_delta_str)\n )\n changelogs = self._get_proxy().getChangelogEntries(\n self.name, '', '', '', '', dt_delta_str\n )\n self._logger.debug(\"Raw changelogs: {0}.\".format(changelogs))\n for changelog in changelogs:\n if self.build[\"version\"] in changelog[\"author\"]:\n return changelog", "def _config_log(self):\n config_worker = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'handlers': {\n 'queue': {\n 'class': 'hqc_meas.utils.log.tools.QueueHandler',\n 'queue': self.log_queue,\n },\n },\n 'root': {\n 'level': 'DEBUG',\n 'handlers': ['queue']\n },\n }\n logging.config.dictConfig(config_worker)", "def log_updated(self) -> Union[float, None]:\n\n # The log will be here during the build.\n if self.tmp_log_path.exists():\n try:\n return self.tmp_log_path.stat().st_mtime\n except OSError:\n # This mostly for the race condition, but will also handle\n # any permission problems.\n pass\n\n # After the build, the log will be here.\n if self.log_path.exists():\n try:\n return self.log_path.stat().st_mtime\n except OSError:\n return None", "def update_log_file(self) -> None:\n\n logger = logging.getLogger(\"mfa\")\n if logger.handlers:\n for handler in logger.handlers:\n if isinstance(handler, logging.FileHandler):\n self.log_file = handler.baseFilename\n break\n self.refresh_message()", "def log_config_changed(event):\n InteractionLog(\n timestamp=event.timestamp,\n expire=event.timestamp + datetime.timedelta(seconds=TTL_CONFIG),\n object=event.managed_object.id,\n user=event.vars.get(\"user\"),\n op=InteractionLog.OP_CONFIG_CHANGED,\n text=\"Config changed\",\n ).save()", "def update_config_log(self):\n \n # Open the configuration and performance log file\n with open(self.cfg.model_path + '\\\\' + self.cfg.model_name + '_cfg_and_performance.txt', 'r') as f:\n cfg_file = f.read()\n \n output_string = cfg_file + \"\\n#\\n# Testing Performance\\n\\n\"\n \n output_string += \"TESTING_ERROR = \" + \\\n str(self.test_loss) + \"\\n\"\n output_string += \"PHYSICAL_ERROR_PRECIPITATION = \" + \\\n str(self.lstm_error_p.item()) + \"\\n\"\n output_string += \"PHYSICAL_ERROR_AIR TEMPERATURE = \" + \\\n str(self.lstm_error_t.item()) + \"\\n\"\n output_string += \"PHYSICAL_ERROR_SUNSHINE_DURATION = \" + \\\n str(self.lstm_error_sd.item()) + \"\\n\"\n output_string += \"PHYSICAL_ERROR_RELATIVE_HUMIDITY = \" + \\\n str(self.lstm_error_rh.item()) + \"\\n\"\n output_string += \"PHYSICAL_ERROR_AIR TEMPERATURE = \" + \\\n str(self.lstm_error_wv.item()) + \"\\n\"\n output_string += \"PHYSICAL_ERROR_WELL_5 = \" + \\\n str(self.lstm_error_w5.item()) + \"\\n\"\n output_string += \"PHYSICAL_ERROR_WELL_6 = \" + \\\n 
str(self.lstm_error_w6.item()) + \"\\n\"\n \n # Save the updated performance metrics into the file\n with open(self.cfg.model_path + '\\\\' + self.cfg.model_name + '_cfg_and_performance.txt', 'w') as _text_file:\n _text_file.write(output_string)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a distribution, given by the list p_list, returns the entropy of the distribution.
def entropy(p_list): assert len(p_list) > 0 E = 0.0 for p in p_list: if p == 0.0: continue E += p*math.log(p) return E
[ "def entropy(p):\n h = 0\n\n # TODO -- Calculate entropy value in nats for probability distribution `p`\n for x in p:\n h -= p[x] * math.log(p[x])\n\n return h", "def entropy_of_a_list(values):\n counts = Counter(values).values()\n total = float(sum(counts))\n probabilities = [val / total for val in counts]\n entropy = stats.entropy(probabilities)\n return entropy", "def entropy(list_of_probabilities):\n return(crossentropy(list_of_probabilities, list_of_probabilities))", "def entropy_py(p):\n return 2 ** np.sum(-p*np.log2(p+1e-10))", "def _entropy(cls, distribution):\n h = 0\n denominator = sum(distribution)\n if denominator:\n for freq in distribution:\n probability = float(freq) / denominator\n if probability:\n h += (probability * math.log(probability, cls.LOG_BASE))\n return -h", "def entropy(p):\n assert (p >= 0).all()\n assert abs(np.sum(p)-1) < 1e-6\n return -np.sum(p*np.log(p+1e-12))", "def entropy(a: list) -> float:\n\n a_dict = count_occur(a)\n total = len(a)\n return sum([-(val/total) * math.log(val/total, 2) for val in a_dict.values()])", "def entropy(probabilities):\n return -(sum([p * log(p, 2) if p > 0 else 0 for p in probabilities]))", "def entropy_of_list(ls):\n elements = {}\n for e in ls:\n elements[e] = elements.get(e, 0) + 1\n length = float(len(ls))\n return sum(map(lambda v: -v / length * math.log(v / length), elements.values()))", "def entropy(distribution):\n \n # Find the entropy as in\n # <http://en.wikipedia.org/wiki/Uncertainty_coefficient>. Comes out in bits\n # due to the use of log base 2.\n total = 0\n \n for value, probability in distribution.iteritems():\n # Sum the probability of each thing times the negative log of that.\n total -= probability * math.log(probability, 2)\n \n # Return the entropy of the distribution.\n return total", "def entropy(pd):\n entp = 0\n for proba in pd:\n if proba == 0:\n continue\n entp -= proba*math.log(proba, 2)\n return entp", "def entropy(dist):\n #dist = array([max(d,1e-100) for d in dist])\n dist = dist + 1e-20\n return dot(dist,(log(1.0/dist) * (1.0/log(2.0))).T)", "def entropyIntIntegrand(p):\n return p * np.log(p))", "def entropy(x):\n d = defaultdict(lambda: 0)\n s = 0.0\n entr = 0.0\n for i in x:\n d[i] += 1 # Calculating frequency of an event\n s += 1\n for i in d:\n p = d[i] / s # Calculating probability for an event\n entr -= p * log(p, 2) # H(x) = - ฮฃ p(x)log(p(x))\n return entr", "def entropy(ps):\n single_dist = len(ps.shape) == 1\n if single_dist: ps = np.array([ps]);\n # H = -1 * np.sum(ps * np.log2(ps), axis=1)\n H = -1 * np.sum(np.multiply(ps, np.ma.log2(ps).filled(0)), axis=1)\n return H[0] if single_dist else H", "def compute_entropy(PMFs):\n \n # getting rid of the zero probabilities so that logarithm\n # function can be applied\n PMFs[PMFs==0] += 10e-8\n \n # compuing Shannon entropy\n entropies = -np.sum(PMFs * np.log(PMFs), axis=0)\n \n return entropies", "def entropy(data, idxList):\n df = data.loc[idxList]\n counts = df.value_counts().to_numpy()\n counts = counts.reshape(1, -1).astype(np.float32)\n counts /= np.sum(counts)\n log_sum = counts @ np.log2(counts.T)\n return -log_sum[0, 0]", "def entropy(data):\n\n freqs = {}\n suma = len(data)\n\n for i in range(0, len(data)):\n freqs[data[i]] = 1.0 + freqs.get(data[i], 0)\n\n res = 0.0\n for i in freqs:\n res += (freqs[i] / suma) * log((freqs[i] / suma), 2)\n return -res", "def discrete_entropy(vec, num_bins=100):\n\n vec = np.absolute(vec)\n vec = vec - np.mean(vec)\n h = np.histogram(vec, density=True, bins=num_bins)[0];\n p = np.array(h) + 
EPSILON\n \n p = p / np.sqrt(2 * np.pi)\n p = p / np.sum(p)\n\n # p = p/(2*np.pi)\n entropy = -np.sum(p * np.log(p))\n entropy = entropy # /(2*np.pi)#/float(num_bins)\n return entropy" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For a list of dictionaries mapping values to counts, returns a cost used for DT splitting that is optimal at 0. Currently uses the negative of information gain.
def split_cost(label_count_list): return -split_information_gain(label_count_list) #this cost value is the misclassification error. return split_misclassification_error(label_count_list)
[ "def cost(foods, foods_used):\n cost = 0.00\n for i, count in foods_used.items():\n cost += (foods[i]['serving_cost'] * count)\n return cost", "def cost_fun(self, specs_dict: Dict[str, float]) -> float:\n cost = 0\n for spec in self.spec_range.keys():\n penalty = self.compute_penalty(specs_dict[spec], spec)[0]\n cost += penalty\n\n return cost", "def metric_dictionary(lists):\n \"\"\"\n lists.sort()\n\n minimum = round(min(lists),2)\n maximum = round(max(lists),2)\n mean = round(sum(lists)/len(lists),2)\n\n numerator_list = [(item - mean)**2 for item in lists]\n numerator = sum(numerator_list)\n variance = round(numerator/(len(lists) - 1),2)\n\n standard_deviation = round(variance**(1/2),2)\n\n if len(lists) % 2 == 0: \n p1 = int((len(lists)/2))\n p2 = int((len(lists)/2)-1)\n median = round((lists[p1] + lists[p2])/2,2)\n\n else:\n median = round(lists[len(lists)//2],2)\n\n return {'min': minimum, 'max': maximum, 'mean': mean, 'median': median, 'std': standard_deviation, 'var': variance}\n \"\"\"\n return ({'Mean':round(np.mean(items),2),'Median':np.median(items),'Maximum':np.max(items),'Minimum':np.min(items),'Variance':round(np.var(items,ddof=1),2),'Standard Deviation':round(np.std(items,ddof=1),2)})", "def compute_cost(equivalence_class):\n cost = 0\n for item in equivalence_class.values():\n if len(item) >= k:\n cost += (len(item)) ** 2\n else:\n cost += len(item) * len(tupleList)\n return cost", "def compute_distr(self, items, adjusted=False):\n n = len(items)\n sum_one_over_ranks = harmonic_number(n)\n count = 0\n distr = {}\n for indx, item in enumerate(items):\n rank = indx + 1\n story_freq = distr.get(item, 0.)\n distr[item] = story_freq + 1 * 1 / rank / sum_one_over_ranks if adjusted else story_freq + 1 * 1 / n\n count += 1\n\n return distr", "def greedy(items_list, max_cost, key_function):\n tmp_list = sorted(items_list, key=key_function, reverse=True)\n cur_cost = 0\n cur_value = 0\n result = []\n\n for item in tmp_list:\n if cur_cost + item.getCost() <= max_cost:\n result.append(item)\n cur_cost += item.getCost()\n cur_value += item.getValue()\n return result, cur_value", "def weight_balancing(self, classesList):\n \n print(len(classesList))\n \n c = {}\n for l in classesList:\n c[l] = c.get(l,0) + 1\n \n weight = {i: len(classesList)/float(c[i]) for i in c.keys()}\n \n return weight", "def cost_func(weights)->float:\n\n cost = 0\n for ith_element in training_set:\n cost += math.pow(hypothesis_value(weights, ith_element[:-1]) - ith_element[-1], 2)\n return cost / 2", "def cost(self) -> float:", "def compute_transition_weights(trans_counts, smoothing):\n weights = defaultdict(float)\n \n \n\n all_tags = list(trans_counts.keys()) + [END_TAG]\n V = len(all_tags) -1\n\n for tag1 in trans_counts:\n \n counter = trans_counts[tag1]\n total_count = sum(counter.values()) \n for tag in all_tags:\n count = counter[tag]\n weights[(tag,tag1)] = np.log((smoothing + count)/(smoothing*V + total_count))\n\n\n\n #dictionary[word] = np.log((alpha + lcount)/(alpha*V + totalTokens))\n\n return weights", "def trials():\r\n # 83 250 4 2 457\r\n # 103 207 4 2 523\r\n a = 23\r\n b = 56\r\n c = 8\r\n d = 5\r\n \r\n maxBestTotal = 0\r\n for a in range(23, 113, 10):\r\n for b2 in range(1, 8):\r\n b = a*b2 + 1\r\n for c in [4, 8, 16, 32, 64]:\r\n for d in range(2, 7):\r\n \r\n diffTotal = 0\r\n W = 10\r\n numReps = 1\r\n numTrials = 1\r\n while W <= 65536:\r\n \r\n items = []\r\n # Item (Value, Weight)\r\n for i in range(a,b,c):\r\n items.append(Item(i,i))\r\n items.append(Item(d*i+1,d*i+1))\r\n \r\n 
itemSet = 'items=[]\\n'\r\n for item in items:\r\n itemSet = itemSet + 'items.append(Item(' + str(item.value) + ',' + str(item.weight) + '))\\n'\r\n #itemSet = itemSet + 'items.append(Item(' + str(W//4) + \",\" + str(W//4) + '))\\n'\r\n setup= '''\r\nfrom adk.knapsack import knapsack_unbounded, knapsack_01, knapsack_approximate, Item, record_best\r\nimport random\\n''' + itemSet + '''\r\n'''\r\n executeUnbound = '''\r\nrecord_best (knapsack_unbounded(items,''' + str(W) + ''')[0])\r\n'''\r\n totalUnbound = min(timeit.Timer(executeUnbound, setup=setup).repeat(numReps,numTrials))\r\n \r\n executeApproximate = '''\r\nrecord_best (knapsack_approximate(items,''' + str(W) + ''')[0])\r\n'''\r\n totalApproximate = min(timeit.Timer(executeApproximate, setup=setup).repeat(numReps,numTrials))\r\n \r\n #print (W, totalUnbound, totalApproximate, record_best())\r\n best2 = record_best()\r\n if len(best2) > 0:\r\n diffTotal += best2[0] - best2[1]\r\n W = W * 2 + 1\r\n \r\n if diffTotal > maxBestTotal:\r\n print (a,b,c,d,diffTotal)\r\n maxBestTotal = diffTotal", "def tier_completion(required_portions, categ_prices, ideal_split, batch_qty=500):\n\n # meal_prices, nb_meals = [], []\n res = []\n\n # Sort by the number of portions required\n tmp_vals = sorted(list(set(required_portions.values())), reverse=True)\n # Get a list of each categ missing by tier\n buying_categs = []\n for idx in range(len(tmp_vals) - 1):\n buying_categs.append([])\n for categ, categ_need in required_portions.items():\n if categ_need > 0:\n buying_categs[tmp_vals.index(categ_need)].append(categ)\n # Loop on increasingly needed categ\n running_categs = []\n for idx, categs in enumerate(buying_categs):\n running_categs.extend(categs)\n # get the price per extra meal\n # meal_prices.append(batch_qty / 1000 * sum([ideal_split[cat] * categ_prices[cat] for cat in running_categs]))\n # nb_meals.append(tmp_vals[idx] - tmp_vals[idx + 1])\n res.append(dict(nb_meals=tmp_vals[idx] - tmp_vals[idx + 1],\n meal_price=batch_qty / 1000 * sum([ideal_split[cat] * categ_prices[cat] for cat in running_categs]),\n categ_bought={cat: batch_qty / 1000 * ideal_split[cat] for cat in running_categs}))\n\n return res", "def item_relevant_cost(self, T: float, i: int) -> float:\n return 0.5*(self.h[i]*T*self.d[i]*(1-self.d[i]/self.p[i]))+self.K[i]/T", "def segmentDict(dict, weights):\n # Normalize weights\n weights = normalize(weights)\n\n segments = {}\n actual_weights = []\n total_instances = 0\n percent_instances = 0\n i = 0\n cat = None\n\n for k,v in dict.items():\n total_instances += v\n if cat == None:\n cat = k[0].upper()\n\n sorted_d = sorted(dict.items(), key=operator.itemgetter(1), reverse=True)\n for k,v in sorted_d:\n percent_instances += v/total_instances\n segments[k] = cat + str(i)\n if percent_instances >= weights[i]:\n actual_weights += [percent_instances]\n percent_instances = 0\n i += 1\n actual_weights += [percent_instances]\n return [segments, actual_weights]", "def best_split(values,labels,nonelabels=None):\n assert len(values) >= 2\n assert len(values) == len(labels)\n N = len(values)\n ilist = sorted((v,l) for (v,l) in zip(values,labels))\n leftcount = defaultdict(int)\n rightcount = defaultdict(int)\n for v,l in ilist:\n rightcount[l] += 1\n bestindex = -1\n bestcost = split_cost([leftcount,rightcount])\n\n cost = bestcost\n #costs = [cost]\n #print \"Split costs:\"\n for i in xrange(len(ilist)):\n v,l = ilist[i]\n rightcount[l] -= 1\n leftcount[l] += 1\n if i+1 >= len(ilist) or v == ilist[i+1][0]:\n #no splits when v is equal to the next 
value\n continue\n cost = split_cost([leftcount,rightcount])\n #print \" \",v,leftcount.values(),rightcount.values(),cost\n #costs.append(cost)\n if cost < bestcost:\n bestcost = cost\n bestindex = i\n #raw_input()\n if bestindex < 0:\n #no split found... try splitting in half\n splitval = (ilist[0][0]+ilist[-1][0])*0.5\n else:\n splitval = (ilist[bestindex][0] + ilist[bestindex+1][0])*0.5\n if nonelabels is None:\n return (splitval,bestcost)\n #reevaluate counts\n leftcount = defaultdict(int)\n rightcount = defaultdict(int)\n for l in nonelabels:\n leftcount[l] += 1\n rightcount[l] += 1\n for v,l in ilist:\n if v <= splitval:\n leftcount[l] += 1\n else:\n rightcount[l] += 1\n return splitval,split_cost([leftcount,rightcount])", "def computeDStats(genoCounts, blockSize):\n pass", "def _greedy_packing(items: List[Item], cap: int,\n func: Callable) -> Tuple[Set[int], int]:\n items.sort(key=func)\n included = set()\n total_val, total_weight = 0, 0\n for item in items:\n if total_weight + item.weight > cap:\n continue\n included.add(item.idx)\n total_val += item.val\n total_weight += item.weight\n return included, total_val\n # Running time complexity: O(nlog n)", "def store_IBD_stats(stat_dict, pair_list, pair_dict, results, head_list, min_val=0):\n for choice in stat_dict:\n stat_func = stat_dict[choice]\n for pair in pair_list:\n data = [d for d in pair_dict[pair] if d >= min_val]\n if choice != 'num' and len(data) < 1:\n data = [0]\n\n results.append( stat_func(data) )\n if min_val == 0:\n head_list.append('IBD_{}_{}'.format(choice,pair))\n else:\n head_list.append('IBD{}_{}_{}'.format(min_val, choice, pair))", "def get_duty_cate_score(chosen_duty_list: list) -> pmag.MagicDict:\n res = pmag.MagicDict()\n for w, cate in chosen_duty_list:\n freq = MODEL[cate]['duty'][w]['freq']\n prob = MODEL[cate]['duty'][w]['prob']\n score = prob # freq * prob / DUTY_NF[cate]\n if cate in res:\n res[cate] += score\n else:\n res[cate] = score\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of values and associated labels, finds the best split threshold z, where dividing the values at z has the lowest split cost. Returns a pair (z,cost) where cost is the split_cost of the threshold z. If nonelabels is given, this indicates the labels of missing values that must be passed down to both subtrees. This does not affect the output z but it does affect the output cost value.
def best_split(values,labels,nonelabels=None): assert len(values) >= 2 assert len(values) == len(labels) N = len(values) ilist = sorted((v,l) for (v,l) in zip(values,labels)) leftcount = defaultdict(int) rightcount = defaultdict(int) for v,l in ilist: rightcount[l] += 1 bestindex = -1 bestcost = split_cost([leftcount,rightcount]) cost = bestcost #costs = [cost] #print "Split costs:" for i in xrange(len(ilist)): v,l = ilist[i] rightcount[l] -= 1 leftcount[l] += 1 if i+1 >= len(ilist) or v == ilist[i+1][0]: #no splits when v is equal to the next value continue cost = split_cost([leftcount,rightcount]) #print " ",v,leftcount.values(),rightcount.values(),cost #costs.append(cost) if cost < bestcost: bestcost = cost bestindex = i #raw_input() if bestindex < 0: #no split found... try splitting in half splitval = (ilist[0][0]+ilist[-1][0])*0.5 else: splitval = (ilist[bestindex][0] + ilist[bestindex+1][0])*0.5 if nonelabels is None: return (splitval,bestcost) #reevaluate counts leftcount = defaultdict(int) rightcount = defaultdict(int) for l in nonelabels: leftcount[l] += 1 rightcount[l] += 1 for v,l in ilist: if v <= splitval: leftcount[l] += 1 else: rightcount[l] += 1 return splitval,split_cost([leftcount,rightcount])
[ "def split_cost(label_count_list):\n return -split_information_gain(label_count_list)\n #this cost value is the misclassification error.\n return split_misclassification_error(label_count_list)", "def pick_best_split(self,db,labels,ids,features=None):\n idlabels = [labels[id] for id in ids]\n if misclassification_error(idlabels) == 0:\n #base case: no misclassifications\n self.type = 'v'\n self.value = idlabels[0]\n return 0\n best = None\n bestCost = 0\n splitval = None\n discrete = True\n if features == None:\n if len(ids) < db.numFeatures():\n #look at all present features in the training set\n features = db.getPresentFeatures(ids)\n #print len(features),\"of\",db.numFeatures(),\"features selected\"\n else:\n features = range(db.numFeatures())\n elif callable(features):\n features = features()\n for i in features:\n if len(db.entryLists[i]) == 0: continue\n idiscrete = db.discreteFeature[i]\n if idiscrete:\n #count number of labels of a certain value\n splitter = defaultdict(lambda:defaultdict(int))\n #count of labels for missing values\n nmissing = defaultdict(int)\n for id in ids:\n val = db[i,id]\n if val is None:\n #missing values go down to all splits\n nmissing[labels[id]] += 1\n continue\n splitter[val][labels[id]] += 1\n if len(splitter) > continuous_variable_threshold:\n #print \"Determined to be a continuous variable\"\n idiscrete = False\n break\n if idiscrete:\n if len(splitter) <= 1:\n #only a single value\n continue\n #count number of missing values in all splits\n cmax = 0\n for k in splitter:\n for l,v in nmissing.iteritems():\n splitter[k][l] += v\n cmax = max(cmax,sum(splitter[k].values()))\n #shrink by fraction of (# of ids - largest child)/(# of ids)\n scale = (1.0-float(cmax)/float(len(ids)))*len(splitter)\n #evaluate cost\n cost = split_cost(splitter.values())*scale\n #print \"Split on\",i,\"information gain\",-cost,splitter.values()\n else:\n #continuous, need to learn the best split\n vals = []\n presentlabels = []\n nonelabels = []\n for id in ids:\n val = db[i,id]\n if val is None:\n nonelabels.append(labels[id])\n continue\n vals.append(val)\n presentlabels.append(labels[id])\n if len(vals) <= 1:\n print \"No values for feature\",i,\"?\"\n print vals\n continue\n #print \"Considering continuous split on\",i\n s,cost = best_split(vals,presentlabels,nonelabels)\n scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2\n cost *= scale\n #print \"Result\",s,\"Information gain\",-cost\n \n if cost < bestCost:\n best = i\n bestCost = cost\n discrete = idiscrete\n if not idiscrete:\n splitval = s\n \n if best is None:\n self.type = 'v'\n if len(ids) > 0:\n self.value = vote(idlabels)\n return misclassification_error(idlabels)\n else:\n self.value = None\n return 0\n else:\n self.feature = best\n #discrete or inequality split\n if discrete:\n self.type = 's'\n else:\n self.type = 'i'\n self.value = splitval\n return bestCost", "def fraction_mislabeled_nodes(labels, labels_pred):\n G1 = partition_indicator(labels)\n G2 = partition_indicator(labels_pred)\n\n # cost is minimized, overlap maximized\n cost_matrix = -G1.T.dot(G2)\n row_ind, col_ind = linear_sum_assignment(cost_matrix.A)\n cost = -cost_matrix[row_ind, col_ind].sum()\n\n return 1 - (cost / len(labels))", "def compute_best_split(feature, y, weights, classes, metric):\n count = 0\n metrics_list = []\n split_values = []\n\n # Case : continuous\n for value in np.sort(feature)[:-1]:\n left_idx, right_idx = split(feature, value)\n metrics_list.append(split_metric(left_idx, right_idx,\n y, 
weights,\n classes, metric))\n split_values.append(value)\n count += 1\n\n # TODO : categorical\n\n # Compute min and return value\n idx_max = np.argmin(np.array(metrics_list))\n\n best_metric, best_value = metrics_list[idx_max], split_values[idx_max]\n\n return best_metric, best_value", "def split_cost(split,classes):\r\n cost=0\r\n total_samples=0\r\n \r\n # estimate the relative size of each branch\r\n for branch in split:\r\n total_samples+=branch.shape[0]\r\n \r\n # for each (left/right) split on the proposed tree\r\n for br_index,branch in enumerate(split):\r\n # initialise list of class counts for this branch\r\n class_counts_for_branch=[]\r\n # for each class value, count total of data examples (rows) that have for this class, in this branch \r\n for class_val in classes:\r\n \r\n if branch.shape[0] == 0: # don't continue if size of split is 0\r\n continue\r\n \r\n # slice data to return only rows with this specific class value \r\n branch_per_class=branch[np.where(branch[:,-1]==class_val)]\r\n # sum up the number of rows in for this class in this branchand append \r\n total_rows=branch_per_class.shape[0]\r\n class_counts_for_branch.append(total_rows)\r\n\r\n # estimate the gini coefficient for this split \r\n cost+=gini_coefficient(class_counts_for_branch)* (branch.shape[0]/total_samples)\r\n \r\n \r\n return cost", "def find_best_split(data, feature_names, min_samples_leaf=5, random_subset=False, column_class=-1):\n N, f_n = np.shape(data)\n n = f_n - 1\n root_n = int(np.ceil(np.sqrt(n)))\n if (column_class == -1):\n column_class = f_n - 1\n if (random_subset == True):\n list_features = np.random.choice(np.arange(0, f_n-1, 1), root_n).tolist()\n feature_names_search = np.array(feature_names)[list_features].tolist()\n else:\n feature_names_search = feature_names\n G = gini(data)\n best_question = None\n #initialize the value to optimize\n info_best = -np.inf\n #begin the optimization loop\n for it_f in range(len(feature_names)):\n if (column_class == it_f):\n continue\n #functionality for limiting features to split on\n if (feature_names[it_f] not in feature_names_search):\n continue\n #the list for unique vals\n val_list = list()\n for sample in range(N):\n #get the unique value to create the question with\n val = data[sample,it_f]\n if (val not in val_list):\n val_list.append(val)\n #create the question\n question = Question(column=it_f , value=val, feature_names=feature_names)\n left, right = partition(data, question)\n #make sure the partition counts exceed the necessary number\n if (right is not None and left is not None):\n m_l,_ = np.shape(left)\n m_r,_ = np.shape(right)\n if (m_l >= min_samples_leaf and m_r >= min_samples_leaf):\n #compute the info gain\n gain = info_gain(left, right, G)\n #now check if it is the best\n if (gain > info_best):\n info_best = gain\n best_question = question\n return info_best, best_question", "def determine_best_split(data, potential_splits, mltask):\n\n first_iteration = True\n for column_index in potential_splits:\n for value in potential_splits[column_index]:\n data_below,data_above = split_data(data, column_index, value)\n \n if mltask == 'regression':\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_mse)\n \n # classification\n else:\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_entropy)\n \n \n if first_iteration or current_overall_metric <= best_overall_metric:\n first_iteration = False\n \n best_overall_metric = 
current_overall_metric\n best_split_column = column_index\n best_split_value = value\n \n \n return best_split_column,best_split_value", "def greedy_split(data, n=2, costfun=max, workfun=lambda w,x: x if w is None else x+w):\n\t# Sort data based on standalone costs\n\tcosts = []\n\tnowork = workfun(None,None)\n\twork = [nowork for i in xrange(n)]\n\tfor d in data:\n\t\twork[0] = workfun(nowork,d)\n\t\tcosts.append(costfun(work))\n\torder = np.argsort(costs)[::-1]\n\t# Build groups using greedy algorithm\n\tgroups = [[] for i in xrange(n)]\n\twork = [nowork for i in xrange(n)]\n\tcost = costfun(work)\n\tfor di in order:\n\t\td = data[di]\n\t\t# Try adding to each group\n\t\tfor i in xrange(n):\n\t\t\tiwork = workfun(work[i],d)\n\t\t\ticost = costfun(work[:i]+[iwork]+work[i+1:])\n\t\t\tif i == 0 or icost < best[2]: best = (i,iwork,icost)\n\t\t# Add it to the best group\n\t\ti, iwork, icost = best\n\t\tgroups[i].append(di)\n\t\twork[i] = iwork\n\t\tcost = icost\n\treturn groups, cost, work", "def best_cutoff(self,\n split_label):\n split_args = self.sub_split_args[split_label]\n split_data = self.sub_split_data[split_label]\n # This criterion for the use_scipy flag is arbitrary and needs\n # further testing\n n_unique = len(np.unique(split_data[~np.isnan(split_data)]))\n use_scipy = True\n if n_unique > len(split_data)/1000:\n use_scipy = False\n idxcut_below, effects_below, rstats_below, ndata_below =\\\n self.u_data(split_label, use_scipy=use_scipy)\n idxcut_above, effects_above, rstats_above, ndata_above =\\\n self.u_data(split_label, above=True, use_scipy=use_scipy)\n\n # Default cutoff is min(split_data) - 1\n cutoff = split_data[split_args[0]] - 1\n value = 0\n # If no cutoff was possible\n if len(idxcut_below) == 0 or len(idxcut_above) == 0:\n return cutoff, value\n\n # All idx_cutoffs and values for cutoffs, for debugging\n for idx in range(len(idxcut_above)):\n idxcut = idxcut_above[idx]\n if idxcut != idxcut_below[idx]:\n raise NameError('Code error, invalid split')\n value_temp = (abs(effects_above[idx] -\n effects_below[idx]) *\n rstats_above[idx] *\n rstats_below[idx] *\n min(ndata_above[idx]) *\n min(ndata_below[idx]))\n if value_temp > value:\n cutoff = (split_data[split_args[int(idxcut)]] +\n split_data[split_args[int(idxcut)+1]])/2\n value = value_temp\n return cutoff, value", "def choose_best_split(self, X_subset, y_subset):\n\n feature_index = -1\n threshold = -1\n maximal_eval = None\n for current_feature in range(X_subset.shape[1]):\n all_values = set(X_subset[:, current_feature])\n all_values.add(max(all_values) + 0.01)\n \n for current_threshold in all_values:\n y_left, y_right = self.make_split_only_y(current_feature, current_threshold, X_subset, y_subset)\n if y_left.shape[0] == 0 or y_right.shape[0] == 0:\n continue\n assert np.sum(y_left) != 0\n assert np.sum(y_right) != 0\n current_eval = -y_left.shape[0]*self.criterion(y_left)\n current_eval -= y_right.shape[0]*self.criterion(y_right)\n if maximal_eval is None or current_eval > maximal_eval:\n maximal_eval = current_eval\n feature_index = current_feature\n threshold = current_threshold\n\n return feature_index, threshold", "def best_split(self):\r\n best_splits = [[0, None, None]]\r\n impurity, best_S, best_xj = 0, None, None\r\n \r\n for xj in self.x_names:\r\n for S in self.potential_splits(xj):\r\n ir = float(self.impurity_reduction(xj, S))\r\n if ir > impurity:\r\n impurity, best_S, best_xj = ir, S, xj\r\n best_splits.append([S, xj])\r\n else: \r\n pass\r\n \r\n return best_S, best_xj", "def 
_find_best_split(self, X, y):\n\n def calculate_entropy(p):\n # _, counts = np.unique(y, return_counts=True)\n # entropy = 0.0\n # for prob in counts / float(len(y)):\n # entropy -= prob * math.log(prob, 2)\n # return entropy\n p = np.bincount(p) / float(p.shape[0])\n return stats.entropy(p)\n\n def calculate_information_gain(y, left_y, right_y):\n # p = len(left_y) / len(y)\n # return calculate_entropy(y) - p * \\\n # calculate_entropy(left_y) - (1 - p) * \\\n # calculate_entropy(right_y)\n return calculate_entropy(y) \\\n - calculate_entropy(left_y) * (float(left_y.shape[0]) / y.shape[0]) \\\n - calculate_entropy(right_y) * (float(right_y.shape[0]) / y.shape[0])\n\n def find_splits(x):\n \"\"\"Find all possible split values.\"\"\"\n split_values = set()\n\n # Get unique values in a sorted order\n x_unique = list(np.unique(x))\n for i in range(1, len(x_unique)):\n # Find a point between two values\n average = (x_unique[i - 1] + x_unique[i]) / 2.0\n split_values.add(average)\n\n return list(split_values)\n\n def split_mask(x, value):\n if isinstance(value, int) or isinstance(value, float):\n left_mask = (x >= value)\n right_mask = (x < value)\n else:\n left_mask = (x == value)\n right_mask = (x != value)\n return left_mask, right_mask\n\n max_gain, max_i_feature, max_value = None, None, None\n\n _, n_features = np.shape(X)\n for i_feature in range(n_features):\n column = X[:, i_feature]\n split_values = find_splits(column)\n for value in split_values:\n left_mask, right_mask = split_mask(column, value)\n gain = calculate_information_gain(y, y[left_mask], y[right_mask])\n\n if (max_gain is None) or (gain > max_gain):\n max_i_feature, max_value, max_gain = i_feature, value, gain\n \n if max_gain is None:\n return None, None, None, None, None, None, None\n \n left_mask, right_mask = split_mask(X[:, max_i_feature], max_value)\n return max_gain, max_i_feature, max_value, \\\n X[left_mask], X[right_mask], y[left_mask], y[right_mask]", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on 
value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def _add_gap_labels(self, labels):\n # Add None gap labels between every beat label\n labels = [x for y in (labels[i:i+1] + [None] * (i < len(labels) - 1) for i in range(len(labels))) for x in y]\n\n # Add 'na' and 'break' labels\n for idx in np.where(np.array(labels) == None)[0]:\n\n if (labels[idx + 1]['start'] - labels[idx - 1]['end']) * 1 / self.fs > self.gap_tolerance:\n labels[idx] = 'break'\n\n elif (self._check_beat_order(previous_beat=labels[idx-1]['label'], next_beat=labels[idx+1]['label']) and\n labels[idx + 1]['start'] != labels[idx - 1]['end']):\n labels[idx] = {'peak': None, 'start': labels[idx-1]['end'],\n 'end': labels[idx+1]['start'], 'label': 'na'}\n else:\n pass\n\n # Remove None gap for contiguous labels\n labels = [label for label in labels if label is not None]\n\n return labels", "def get_best_split_all(x, y) -> Tuple[int, float, float]:\n m = x.shape[1]\n col_best_gin = np.ones(shape=m)\n col_best_val = np.ones(shape=m)\n for c in range(m):\n best = 1\n best_x = 0\n for i in np.unique(x[:, c]):\n gini = Tree.split(x[:, c], y, i)\n if gini < best:\n best = gini\n best_x = i\n col_best_gin[c] = best\n col_best_val[c] = best_x\n\n # Select best feature to split on\n col_idx = np.argmin(col_best_gin)\n # Convert to bool index\n col_idx = np.array(range(x.shape[1])) == col_idx\n\n return col_idx, col_best_val[col_idx], col_best_gin[col_idx]", "def pick_best_label(self,db,labels,ids):\n self.type = 'v'\n if len(labels) > 0:\n self.value = vote([labels[id] for id in ids])\n else:\n self.value = None\n return", "def _relabel(labels, minval=0, bgval=None):\n\n labels = np.unique(labels, return_inverse=True)[-1] + minval\n if bgval is not None:\n labels[labels == minval] = bgval\n return labels", "def estimate_labels(\n spectrum: Union[Spectrum1D, SpectrumList],\n weights: Tuple[np.ndarray],\n biases: Tuple[np.ndarray],\n x_min: Tuple[np.ndarray],\n x_max: Tuple[np.ndarray],\n model_wavelength: Tuple[np.ndarray],\n label_names: Tuple[str],\n mask: 
Optional[np.array] = None,\n initial_labels: Optional[np.array] = None,\n continuum: Optional[np.array] = None,\n v_rad_tolerance: Optional[Union[float, int]] = None,\n opt_tolerance: Optional[float] = 5e-4,\n data_product=None,\n **kwargs,\n):\n\n LARGE = kwargs.get(\"LARGE\", 1e9)\n\n # number of label names\n K = weights[0].shape[1]\n L = 0 + K\n fit_v_rad = v_rad_tolerance is not None and v_rad_tolerance > 0\n if fit_v_rad:\n L += 1\n\n if initial_labels is None:\n initial_labels = np.zeros(L)\n\n bounds = np.zeros((2, L))\n bounds[0, :] = -0.5\n bounds[1, :] = +0.5\n if fit_v_rad:\n bounds[:, -1] = [-abs(v_rad_tolerance), +abs(v_rad_tolerance)]\n\n N, P = np.atleast_2d(spectrum.flux).shape\n\n p_opt = np.empty((N, L))\n p_cov = np.empty((N, L, L))\n model_flux = np.empty((N, P))\n meta = []\n\n def objective_function(x, *labels):\n y_pred = predict_stellar_spectrum(labels[:K], weights, biases)\n if fit_v_rad:\n y_pred = redshift_spectrum(x, y_pred, labels[-1])\n return y_pred\n\n wavelength = spectrum.wavelength.value\n all_flux = np.atleast_2d(spectrum.flux.value)\n all_e_flux = np.atleast_2d(\n spectrum.uncertainty.represent_as(StdDevUncertainty).array\n )\n\n if continuum is not None:\n all_flux /= continuum\n all_e_flux /= continuum\n\n if mask is None:\n mask = np.zeros(model_wavelength.shape, dtype=bool)\n else:\n assert (\n mask.shape == model_wavelength.shape\n ), \"Mask and model wavelengths do not have the same shape\"\n\n source_id = spectrum.meta.get(\"CAT_ID\", None) \n if source_id is None and data_product is not None:\n try:\n source_id = data_product.sources[0].catalogid\n except:\n None\n parent_data_product_id = spectrum.meta.get(\"DATA_PRODUCT_ID\", None)\n if (parent_data_product_id is None or len(parent_data_product_id) == 0) and data_product is not None:\n parent_data_product_id = [data_product.id] * N\n results = []\n meta_results = []\n kwds = kwargs.copy()\n for i in range(N):\n\n # Interpolate data onto model wavelengths -- not The Right Thing to do!\n flux = np.interp(model_wavelength, wavelength, all_flux[i], left=1, right=1)\n e_flux = np.interp(\n model_wavelength, wavelength, all_e_flux[i], left=LARGE, right=LARGE\n )\n e_flux[mask] = LARGE\n\n # Fix non-finite pixels and error values.\n non_finite = ~np.isfinite(flux) + ~np.isfinite(e_flux) + (e_flux <= 0)\n flux[non_finite] = 1\n e_flux[non_finite] = LARGE\n\n # \"normalize\"\n scale = np.median(flux)\n flux /= scale\n e_flux /= scale\n\n kwds.update(\n xdata=model_wavelength,\n ydata=flux,\n sigma=e_flux,\n p0=initial_labels,\n bounds=bounds,\n absolute_sigma=True,\n method=\"trf\",\n xtol=opt_tolerance,\n ftol=opt_tolerance,\n )\n\n result = OrderedDict([\n (\"source_id\", source_id),\n (\"parent_data_product_id\", parent_data_product_id[i])\n ])\n\n try:\n p_opt, p_cov = curve_fit(objective_function, **kwds)\n\n except ValueError:\n log.exception(f\"Error occurred fitting spectrum {i}:\")\n result.update(dict(zip(label_names, [np.nan] * len(label_names))))\n result.update(dict(zip([f\"e_{ln}\" for ln in label_names], [np.nan] * len(label_names))))\n for j, k in zip(*np.triu_indices(L, 1)):\n result[f\"rho_{label_names[j]}_{label_names[k]}\"] = np.nan\n result.update(\n snr=spectrum.meta[\"SNR\"][i],\n chi_sq=np.nan,\n reduced_chi_sq=np.nan\n )\n \n meta = OrderedDict([(\"model_flux\", np.nan * np.ones_like(flux))])\n if continuum is not None:\n resampled_model_flux *= continuum[i]\n meta[\"continuum\"] = continuum[i]\n \n results.append(result)\n meta_results.append(meta)\n\n else:\n labels = (p_opt 
+ 0.5) * (x_max - x_min) + x_min\n e_labels = np.sqrt(np.diag(p_cov)) * (x_max - x_min)\n\n result.update(dict(zip(label_names, labels)))\n result.update(dict(zip([f\"e_{ln}\" for ln in label_names], e_labels)))\n\n rho = np.corrcoef(p_cov)\n for j, k in zip(*np.triu_indices(L, 1)):\n result[f\"rho_{label_names[j]}_{label_names[k]}\"] = rho[j, k]\n\n # Interpolate model_flux back onto the observed wavelengths.\n model_flux = objective_function(model_wavelength, *p_opt)\n resampled_model_flux = np.interp(\n wavelength, model_wavelength, model_flux, left=np.nan, right=np.nan\n )\n chi_sq = np.sum(((model_flux - flux) / e_flux) ** 2)\n reduced_chi_sq = chi_sq / (model_flux.size - L - 1)\n result.update(\n snr=spectrum.meta[\"SNR\"][i],\n chi_sq=chi_sq, \n reduced_chi_sq=reduced_chi_sq\n )\n meta = OrderedDict([(\"model_flux\", resampled_model_flux)])\n if continuum is not None:\n resampled_model_flux *= continuum[i]\n meta[\"continuum\"] = continuum[i]\n \n results.append(result)\n meta_results.append(meta)\n\n return (results, meta_results)", "def get_optimal_threshhold(true_label, prediction, iterations=100, size=17):\n best_threshhold = [0.2]*size\n for t in range(size):\n best_fbeta = 0\n temp_threshhold = [0.2]*size\n for i in range(iterations):\n temp_value = i / float(iterations)\n temp_threshhold[t] = temp_value\n temp_fbeta = fbeta(true_label, prediction > temp_threshhold)\n if temp_fbeta > best_fbeta:\n best_fbeta = temp_fbeta\n best_threshhold[t] = temp_value\n return best_threshhold" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Looks up the leaf node corresponding to the given entry. Does not handle missing values.
def lookup(self,entry):
    if self.type == 'v':
        return self
    v = entry[self.feature]
    assert v != None
    if self.type == 's':
        c = None
        try:
            c = self.children[v]
        except KeyError:
            #print "Unseen value for feature",self.feature,": ",v
            best = None
            bestDist = float('inf')
            for (val,c) in self.children.iteritems():
                if abs(val - v) < bestDist:
                    bestDist = abs(val - v)
                    best = c
            c = best
        return c.lookup(entry)
    elif self.type == 'i':
        if v <= self.value:
            return self.children[0].lookup(entry)
        else:
            return self.children[1].lookup(entry)
    raise RuntimeError("Invalid DecisionTreeNode type?")
[ "def build_node_from_entry(self, entry):\n if entry is None:\n mess = \"Object browser entry expected, %s found\" % entry\n raise ValueError(mess)\n node = Node(self.sstd, self.sbld, entry)\n sobj = node.get_sobj()\n if sobj.GetID() == sobj.GetFatherComponent().GetID():\n node = Node(self.sstd, self.sbld, entry, is_root=True)\n return node", "def lookupVal(self, val):\n pybtlib.lookupVal.restype = ctypes.c_int\n pybtlib.lookupVal.argtypes = [ctypes.POINTER(Tree), ctypes.c_int]\n return pybtlib.lookupVal(ctypes.byref(self), val)", "def FindLeafNode(self, node, index):\n if node.start > index or node.end() <= index:\n if self.debug:\n print node.ToPrettyString();\n print index;\n raise ValueError(\"Node don't contain index\");\n if node.start == index and node.level == 0: return node;\n if not node.children:\n raise ValueError(\"Didn't find the index\");\n for child in node.children:\n if child.start <= index and child.end() > index:\n return self.FindLeafNode(child, index);\n if self.debug:\n print node.ToPrettyString();\n print index;\n print \"node.start=%d\" % node.start;\n print \"node.end=%d\" % node.end();\n raise ValueError(\"Shouldn't reach the end\");", "def check_leaf(leaf_value, dic, entry_list, messages, current_elem):\n value = traverse_dict(dic, entry_list)\n default_value = leaf_value['default']\n required_type = type(default_value)\n required = leaf_value['required']\n # messages.append(\"Checking leaf \" + str(entry_list))\n if required and value is None:\n add_message(\n messages, current_elem, \"The required value in \" + str(entry_list) + \" cannot be found!\"\n )\n if value is not None and not isinstance(value, required_type):\n add_message(\n messages,\n current_elem,\n \"The required value in \"\n + str(entry_list)\n + \" doesn't match expected type \"\n + str(required_type),\n )", "def get_entry(self, entry: str) -> Optional[Union['Directory', NormalFile, VirusFile, Entry]]:\n for e in self.get_entries():\n if e.get_name() == entry:\n return e", "def _extract_leaf(leaf):\n try:\n return re.match(r'leaf-(\\d+)', leaf).group(1)\n except:\n return None", "def _get_leaf_node(vec: Dict, assignment: np.ndarray) -> np.ndarray:\n is_leaf = vec['is_leaf']\n leaf = np.arange(is_leaf.size)[is_leaf]\n return (assignment * leaf).sum(1).astype(np.int32)", "def hashtable_find_entry(bucket, key):\n for entry in bucket:\n if entry[0] == key:\n return entry\n return None", "def get_leaf(self, descr):\n matches = [x for x in self.leaves if x.descr == descr]\n if matches == []:\n raise RuntimeError(f\"Did not find any leaves matching '{descr}'\")\n if len(matches) > 1:\n raise RuntimeError(f\"Found multiple matching leaves: {matches}\")\n return matches[0]", "def root_find(value, tree):\n parent, relink = tree.get(value), list()\n while parent is not None:\n relink.append(value)\n value, parent = parent, tree.get(parent)\n tree.update(dict.fromkeys(relink, value))\n return value", "def node(self):\n config = get_config()\n return leaf_lookup(config.layer_tree, target_node_name=self.layer_id)", "def fn(val):\n stack = [(root, \"\")]\n while stack: \n node, path = stack.pop()\n if node.val == val: return path \n if node.left: stack.append((node.left, path + \"L\"))\n if node.right: stack.append((node.right, path + \"R\"))", "def get_leaf(self, leaf_index):\n return self.__leaves_db.get(encode_int(leaf_index))", "def __getitem__(self, (row, col)) :\n for node in self.root[col].children :\n if node.row == row :\n return node\n return None", "def leaf(self, value, depth, available):\n 
method_name = 'leaf_' + value.__class__.__name__\n method = getattr(self, method_name, self.generic_leaf)\n return method(value, depth, available)", "def _get_node(self, key):\r\n hash_code = self._hash(key)\r\n idx = self._index(hash_code)\r\n # extract the head of the corresponding bucket\r\n cur = self.buckets[idx]\r\n while cur:\r\n if cur.key == key:\r\n return cur\r\n cur = cur.next\r\n raise KeyError", "def get_entry(self, entry_name):\n if entry_name in self.entries: # Don't invoke constructor if not needed\n return self.entries[entry_name]\n return self.entries.setdefault(entry_name, PathElement(self.file_name, self.namespaces))", "def get_child_node(self, val):\n return self._val_to_child_node_dict.get(val, None)", "def get_node(self, key: str) -> Optional[Node]:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an indexed database db, a list of labels (one for each id), and a list of ids to test, sets this node to the best label.
def pick_best_label(self,db,labels,ids):
    self.type = 'v'
    if len(labels) > 0:
        self.value = vote([labels[id] for id in ids])
    else:
        self.value = None
    return
[ "def learn(self,db,labels):\n self.keys = db.keys[:]\n labelindex = -1\n if isinstance(labels,str):\n labelindex = db.keys.index(labels)\n assert labelindex >= 0,\"label does not exist in database keys\"\n labels = db.get_column(labelindex)\n elif isinstance(labels,int):\n labelindex = labels\n labels = db.get_column(labelindex)\n else:\n assert len(labels) == len(db.entries)\n self.root = DecisionTreeNode()\n if labelindex >= 0:\n raise NotImplementedError(\"Ooops, taking out indexed label broken\")\n entries = np.delete(entries,labelindex,1)\n db = IndexedDatabase(db)\n if self.maxnodes != None:\n return self.greedy_learn_search(db,labels)\n else:\n self.deepest = 0\n return self.greedy_learn(self.root,db,labels,range(len(labels)))", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? 
\"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n 
else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def pick_best_split(self,db,labels,ids,features=None):\n idlabels = [labels[id] for id in ids]\n if misclassification_error(idlabels) == 0:\n #base case: no misclassifications\n self.type = 'v'\n self.value = idlabels[0]\n return 0\n best = None\n bestCost = 0\n splitval = None\n discrete = True\n if features == None:\n if len(ids) < db.numFeatures():\n #look at all present features in the training set\n features = db.getPresentFeatures(ids)\n #print len(features),\"of\",db.numFeatures(),\"features selected\"\n else:\n features = range(db.numFeatures())\n elif callable(features):\n features = features()\n for i in features:\n if len(db.entryLists[i]) == 0: continue\n idiscrete = db.discreteFeature[i]\n if idiscrete:\n #count number of labels of a certain value\n splitter = defaultdict(lambda:defaultdict(int))\n #count of labels for missing values\n nmissing = defaultdict(int)\n for id in ids:\n val = db[i,id]\n if val is None:\n #missing values go down to all splits\n nmissing[labels[id]] += 1\n continue\n splitter[val][labels[id]] += 1\n if len(splitter) > continuous_variable_threshold:\n #print \"Determined to be a continuous variable\"\n idiscrete = False\n break\n if idiscrete:\n if len(splitter) <= 1:\n #only a single value\n continue\n #count number of missing values in all splits\n cmax = 0\n for k in splitter:\n for l,v in nmissing.iteritems():\n splitter[k][l] += v\n cmax = max(cmax,sum(splitter[k].values()))\n #shrink by fraction of (# of ids - largest child)/(# of ids)\n scale = (1.0-float(cmax)/float(len(ids)))*len(splitter)\n #evaluate cost\n cost = split_cost(splitter.values())*scale\n #print \"Split on\",i,\"information gain\",-cost,splitter.values()\n else:\n #continuous, need to learn the best split\n vals = []\n presentlabels = []\n nonelabels = []\n for id in ids:\n val = db[i,id]\n if val is None:\n nonelabels.append(labels[id])\n continue\n vals.append(val)\n presentlabels.append(labels[id])\n if len(vals) <= 1:\n print \"No values for feature\",i,\"?\"\n print vals\n continue\n #print \"Considering continuous split on\",i\n s,cost = best_split(vals,presentlabels,nonelabels)\n scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2\n cost *= scale\n #print 
\"Result\",s,\"Information gain\",-cost\n \n if cost < bestCost:\n best = i\n bestCost = cost\n discrete = idiscrete\n if not idiscrete:\n splitval = s\n \n if best is None:\n self.type = 'v'\n if len(ids) > 0:\n self.value = vote(idlabels)\n return misclassification_error(idlabels)\n else:\n self.value = None\n return 0\n else:\n self.feature = best\n #discrete or inequality split\n if discrete:\n self.type = 's'\n else:\n self.type = 'i'\n self.value = splitval\n return bestCost", "def setLabels(greedytrees, word2index, labels):\n predicted_label = []\n for sentid in word2index.keys():\n eval_tree = greedytrees[sentid]\n eval_labels = labels[sentid]\n if eval_tree:\n predicted_label.append(set_predict_label(eval_tree, eval_labels))\n else:\n predicted_label.append(eval_labels)\n return(np.array(predicted_label).ravel())", "def assign_label(self, dbcurs, existing_labs):\n if not self.is_usable() or self.not_identified():\n raise FindResultErr(\"Object not usable\")\n n = 0\n base = ord('A')\n while 1:\n nlab = chr(base + n % 26)\n if n >= 26:\n nlab += str(n // 26)\n if nlab not in existing_labs:\n break\n n += 1\n self.obj.label = self.label = nlab\n existing_labs.add(nlab)\n return dbcurs.execute(\"UPDATE objdata SET label=%s WHERE ind={:d}\".format(self.obj.objind), nlab)", "def put_labels():\n dao.delete_all_labels()\n for label in request.json:\n if 'id' not in label or not label['id']:\n label['id'] = str(uuid.uuid4())\n dao.set_label(id=label['id'],\n name=label['name'],\n fields=label['fields'])\n return if_found(dao.get_labels())", "def identify_examples(self,db,labels,node):\n path = []\n while node.parent != None:\n nkey = None\n for (k,c) in node.parent().children.iteritems():\n if c is node:\n nkey = k\n break\n assert nkey != None\n path.append((node.parent(),nkey))\n node = node.parent()\n path = path[::-1]\n nids = len(labels)\n ids = []\n for id in xrange(nids):\n valid = True\n for n,ckey in path:\n f = n.feature\n val = featureMatrix[f,id]\n if val is None:\n #it's a None value, just continue on\n continue\n else:\n key = None\n if n.type == 'i':\n key = (0 if val <= n.value else 1)\n else:\n key = val\n if key != ckey:\n valid = False\n break\n if valid:\n ids.append(id)\n return ids", "def matchidsorted(ids,targetid):\r\n i1 = N.searchsorted(ids,targetid)\r\n if targetid == ids[i1]:\r\n ibest = i1\r\n else:\r\n ibest = -1 \r\n return ibest", "def set_labels(repo: Repository, labels: list[Label]):\n\n log.info(f\"Fetching existing labels from {repo.full_name}\")\n existing_labels = {label.name.casefold(): label for label in repo.get_labels()}\n log.info(f\"Found {len(existing_labels)} existing labels\")\n\n for label in labels:\n qualified_name = label.qualified_name\n folded_name = qualified_name.casefold()\n if folded_name not in existing_labels:\n log.info(f\"Creating label {qualified_name}\")\n repo.create_label(**label.api_arguments)\n elif label != existing_labels[folded_name]:\n log.info(f\"Updating label {qualified_name}\")\n existing_label = existing_labels[folded_name]\n existing_label.edit(**label.api_arguments)\n else:\n log.info(f\"Label {qualified_name} already exists\")", "def save_data_to_db(labelled):\n add_query = sqlite3.connect(DB_PATH).cursor()\n add_query.execute(\n \"CREATE TABLE IF NOT EXISTS labels(text TEXT, label TEXT, score FLOAT)\")\n for entry in labelled:\n add_query.execute(\"\"\"INSERT INTO labels(text,label,score) VALUES(?,?,?)\"\"\",\n (entry))\n return", "def assign_trainIds(self, label):\n for k, v in self.id2trainId.iteritems():\n 
label[label == k] = v\n return label", "def _compute_relevance_map(self, labels):\n\n ds_labels = np.zeros(self.ds_size)\n ds_relevance_map = 0\n for i in np.unique(labels):\n if i != 0:\n # 2.1- Compute the coarse label image\n y, x, z = np.where(labels == i)\n ds_labels[np.int32(y * self.full_to_ds_ratio[0]),\n np.int32(x * self.full_to_ds_ratio[1]), z] = i\n # 2.2- Compute the energy map\n M = np.ones_like(ds_labels)\n M[ds_labels == i] = 0\n distance_map = distance_transform_edt(M)\n ds_relevance_map += distance_map\n\n # 2.3- Normalize the energy map and compute the ROI\n ds_relevance_map = ds_relevance_map / ds_relevance_map.max()\n return ds_labels, ds_relevance_map", "def relabelIndex(self,labels):\n ## init correct name matrix\n fixed_labels = []\n\n ## Iterate over the labels\n for label in labels:\n ## if it must be fixed, lookup in label_replace_dictionary and store the fixed name \n ## or store the name if not.\n if label in self.label_replace_dictionary:\n fixed_labels.append(self.label_replace_dictionary[label])\n else:\n fixed_labels.append(label)\n return(fixed_labels)", "def label_selection(labels):\r\n\r\n # Unique / Frequency of labels\r\n unique, frequency = np.unique(labels,\r\n return_counts=True)\r\n\r\n # If there is only one kind of label, assign it to that label\r\n if len(unique) == 1:\r\n majority_label = unique\r\n\r\n # Else\r\n else:\r\n # Count of Label 1 is greater than Count of Label 2\r\n if frequency[0] > frequency[1]:\r\n\r\n # Assign Label 1\r\n majority_label = unique[0]\r\n\r\n # Count of Label 2 is greater than Count of Label 1\r\n elif frequency[1] > frequency[0]:\r\n majority_label = unique[1]\r\n\r\n else:\r\n # Randomly generated integer from 0 to 1\r\n j = random.randint(0, 1)\r\n\r\n # Assign the label using that randomly generated integer\r\n majority_label = unique[j]\r\n\r\n return majority_label", "def load_relevance_labels(self, file_path):\n print(\"Loading relevance labels.\")\n retrieval_start_time = time.time()\n\n with open(file_path) as file:\n for line in file.readlines():\n if line[:2] not in self.query_ids:\n continue\n\n query_id, _, ext_doc_id, relevance = line.split()\n idx = '~'.join((query_id, ext_doc_id))\n\n if idx in self.df.index:\n self.df.loc['~'.join((query_id, ext_doc_id)), 'relevance_label'] = int(relevance)\n\n self.df['relevance_label'].fillna(value=0, inplace=True)\n print(\"Labels loaded in {} seconds.\".format(time.time() - retrieval_start_time))", "def assign_by_neighbor(self):\n labels = {}\n for node in self.connected_nodes:\n if node.is_assignable() == False:\n label = node.assignment\n if not label in labels:\n labels[label] = 0\n labels[label] += 1\n best_label = None\n best_score = None\n for label in labels:\n if best_score is None or labels[label] > best_score:\n best_score = labels[label]\n best_label = label\n self.assign(best_label)", "def set_predict_label(eval_tree, eval_labels):\n root_label = -1\n\n for path in eval_tree.treepositions():\n if isinstance(eval_tree[path], Tree):\n eval_tree[path]._label = eval_labels[eval_tree[path]._index]\n\n root_label = eval_tree[()]._label\n assert(root_label != -1)\n return(root_label)", "def _update_labels(self, new_labels):\n\n self.logger.debug(f'Updating labels...')\n try:\n self.labels_set = self.labels_set | set(new_labels) # Update labels set\n for label in new_labels:\n self.labels2index_map[label] = len(self.labels2index_map) # Update labels2index_map\n self._set_output_dim(len(self.labels2index_map))\n except:\n self.logger.error('---> !!! 
Error in updating labels !!!')\n raise ValueError\n self.logger.debug('Updating labels done.')\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an indexed database db, a list of labels (one for each id), and a list of ids to train on, computes the optimal split value. It modifies this node to have the optimal split type and value, and then returns the quality of the split as computed by the split_cost function. If features != None, it is a list of available feature indices to use in this split, or a function of 0 arguments that can be called to get a list of features.
def pick_best_split(self,db,labels,ids,features=None):
    idlabels = [labels[id] for id in ids]
    if misclassification_error(idlabels) == 0:
        #base case: no misclassifications
        self.type = 'v'
        self.value = idlabels[0]
        return 0
    best = None
    bestCost = 0
    splitval = None
    discrete = True
    if features == None:
        if len(ids) < db.numFeatures():
            #look at all present features in the training set
            features = db.getPresentFeatures(ids)
            #print len(features),"of",db.numFeatures(),"features selected"
        else:
            features = range(db.numFeatures())
    elif callable(features):
        features = features()
    for i in features:
        if len(db.entryLists[i]) == 0: continue
        idiscrete = db.discreteFeature[i]
        if idiscrete:
            #count number of labels of a certain value
            splitter = defaultdict(lambda:defaultdict(int))
            #count of labels for missing values
            nmissing = defaultdict(int)
            for id in ids:
                val = db[i,id]
                if val is None:
                    #missing values go down to all splits
                    nmissing[labels[id]] += 1
                    continue
                splitter[val][labels[id]] += 1
                if len(splitter) > continuous_variable_threshold:
                    #print "Determined to be a continuous variable"
                    idiscrete = False
                    break
        if idiscrete:
            if len(splitter) <= 1:
                #only a single value
                continue
            #count number of missing values in all splits
            cmax = 0
            for k in splitter:
                for l,v in nmissing.iteritems():
                    splitter[k][l] += v
                cmax = max(cmax,sum(splitter[k].values()))
            #shrink by fraction of (# of ids - largest child)/(# of ids)
            scale = (1.0-float(cmax)/float(len(ids)))*len(splitter)
            #evaluate cost
            cost = split_cost(splitter.values())*scale
            #print "Split on",i,"information gain",-cost,splitter.values()
        else:
            #continuous, need to learn the best split
            vals = []
            presentlabels = []
            nonelabels = []
            for id in ids:
                val = db[i,id]
                if val is None:
                    nonelabels.append(labels[id])
                    continue
                vals.append(val)
                presentlabels.append(labels[id])
            if len(vals) <= 1:
                print "No values for feature",i,"?"
                print vals
                continue
            #print "Considering continuous split on",i
            s,cost = best_split(vals,presentlabels,nonelabels)
            scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2
            cost *= scale
            #print "Result",s,"Information gain",-cost
        if cost < bestCost:
            best = i
            bestCost = cost
            discrete = idiscrete
            if not idiscrete:
                splitval = s
    if best is None:
        self.type = 'v'
        if len(ids) > 0:
            self.value = vote(idlabels)
            return misclassification_error(idlabels)
        else:
            self.value = None
            return 0
    else:
        self.feature = best
        #discrete or inequality split
        if discrete:
            self.type = 's'
        else:
            self.type = 'i'
            self.value = splitval
        return bestCost
[ "def __get_split_feature(self, data_set, target_feature, tree_features):\n\n if self.__criterion == 'entropy':\n feature_gains = {feature: self.__gain(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = max(feature_gains, key=feature_gains.get)\n return split_feature\n elif self.__criterion == 'gini':\n feature_ginis = {feature: self.__gini(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = min(feature_ginis, key=feature_ginis.get)\n return split_feature\n # TODO: I should check this (gini index).", "def find_best_split(data, feature_names, min_samples_leaf=5, random_subset=False, column_class=-1):\n N, f_n = np.shape(data)\n n = f_n - 1\n root_n = int(np.ceil(np.sqrt(n)))\n if (column_class == -1):\n column_class = f_n - 1\n if (random_subset == True):\n list_features = np.random.choice(np.arange(0, f_n-1, 1), root_n).tolist()\n feature_names_search = np.array(feature_names)[list_features].tolist()\n else:\n feature_names_search = feature_names\n G = gini(data)\n best_question = None\n #initialize the value to optimize\n info_best = -np.inf\n #begin the optimization loop\n for it_f in range(len(feature_names)):\n if (column_class == it_f):\n continue\n #functionality for limiting features to split on\n if (feature_names[it_f] not in feature_names_search):\n continue\n #the list for unique vals\n val_list = list()\n for sample in range(N):\n #get the unique value to create the question with\n val = data[sample,it_f]\n if (val not in val_list):\n val_list.append(val)\n #create the question\n question = Question(column=it_f , value=val, feature_names=feature_names)\n left, right = partition(data, question)\n #make sure the partition counts exceed the necessary number\n if (right is not None and left is not None):\n m_l,_ = np.shape(left)\n m_r,_ = np.shape(right)\n if (m_l >= min_samples_leaf and m_r >= min_samples_leaf):\n #compute the info gain\n gain = info_gain(left, right, G)\n #now check if it is the best\n if (gain > info_best):\n info_best = gain\n best_question = question\n return info_best, best_question", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" 
None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def best_split(values,labels,nonelabels=None):\n assert len(values) >= 2\n assert len(values) == len(labels)\n N = len(values)\n ilist = sorted((v,l) for (v,l) in zip(values,labels))\n leftcount = defaultdict(int)\n rightcount = defaultdict(int)\n for v,l in ilist:\n rightcount[l] += 1\n bestindex = -1\n bestcost = split_cost([leftcount,rightcount])\n\n cost = bestcost\n #costs = [cost]\n #print \"Split costs:\"\n for i in xrange(len(ilist)):\n v,l = ilist[i]\n rightcount[l] -= 1\n leftcount[l] += 1\n if i+1 >= len(ilist) or v == ilist[i+1][0]:\n #no splits when v is equal to the next value\n continue\n cost = split_cost([leftcount,rightcount])\n #print \" \",v,leftcount.values(),rightcount.values(),cost\n #costs.append(cost)\n if cost < bestcost:\n bestcost = cost\n bestindex = i\n #raw_input()\n if bestindex < 0:\n #no split found... 
try splitting in half\n splitval = (ilist[0][0]+ilist[-1][0])*0.5\n else:\n splitval = (ilist[bestindex][0] + ilist[bestindex+1][0])*0.5\n if nonelabels is None:\n return (splitval,bestcost)\n #reevaluate counts\n leftcount = defaultdict(int)\n rightcount = defaultdict(int)\n for l in nonelabels:\n leftcount[l] += 1\n rightcount[l] += 1\n for v,l in ilist:\n if v <= splitval:\n leftcount[l] += 1\n else:\n rightcount[l] += 1\n return splitval,split_cost([leftcount,rightcount])", "def choose_best_split(self, X_subset, y_subset):\n\n feature_index = -1\n threshold = -1\n maximal_eval = None\n for current_feature in range(X_subset.shape[1]):\n all_values = set(X_subset[:, current_feature])\n all_values.add(max(all_values) + 0.01)\n \n for current_threshold in all_values:\n y_left, y_right = self.make_split_only_y(current_feature, current_threshold, X_subset, y_subset)\n if y_left.shape[0] == 0 or y_right.shape[0] == 0:\n continue\n assert np.sum(y_left) != 0\n assert np.sum(y_right) != 0\n current_eval = -y_left.shape[0]*self.criterion(y_left)\n current_eval -= y_right.shape[0]*self.criterion(y_right)\n if maximal_eval is None or current_eval > maximal_eval:\n maximal_eval = current_eval\n feature_index = current_feature\n threshold = current_threshold\n\n return feature_index, threshold", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? 
\"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def __bestSplit__(self, pts, num_features):\n lowestEntropy = float(\"inf\")\n bestBeta = -1\n bestFeature = -1\n featsToTest = np.random.choice(self.column_names[:-1], num_features, False)\n\n for c in featsToTest:\n if c in self.quantitative_cols:\n ent, beta = self.__bestBetaForQuantFeature__(pts, c)\n if ent < lowestEntropy:\n lowestEntropy = ent\n bestBeta = beta\n bestFeature = c\n elif c in self.categorical_cols:\n ent, beta = self.__bestBetaForCatFeature__(pts, c)\n if ent < lowestEntropy:\n lowestEntropy = ent\n bestBeta = beta\n bestFeature = c\n return bestFeature, bestBeta", "def bestfeature_split(dataset):\n fea_nums=len(dataset[0])-1\n base_entro=dataentro_cal(dataset)\n best_gain=0.0\n for i in range (fea_nums): # iterate all the features\n con_entro=0.0\n fea_list=[sample[i] for sample in dataset ] # collect all the feature values\n fea_set=set(fea_list) # make the values unique to each other\n for value in fea_set: # iterate all the values of a feature to seperate the dataset\n sub_dataset=split_dataset(dataset,i,value)\n entropy=dataentro_cal(sub_dataset)\n prob_subdataset=len(sub_dataset)/float(len(dataset))\n con_entro+=prob_subdataset*entropy\n info_gain=base_entro-con_entro\n if (info_gain>best_gain):\n best_gain=info_gain\n best_feature=i\n return best_feature", "def _reevaluate_best_split(self, node, parent, branch_index):\n stop_flag = False\n if not node.observed_class_distribution_is_pure():\n if self._split_criterion == self._GINI_SPLIT:\n split_criterion = GiniSplitCriterion()\n elif self._split_criterion == self._INFO_GAIN_SPLIT:\n split_criterion = InfoGainSplitCriterion()\n elif self._split_criterion == self._HELLINGER:\n split_criterion = HellingerDistanceCriterion()\n else:\n split_criterion = InfoGainSplitCriterion()\n\n best_split_suggestions = 
node.best_split_suggestions(split_criterion, self)\n if len(best_split_suggestions) > 0:\n # Sort the attribute accordingly to their split merit for each attribute\n # (except the null one)\n best_split_suggestions.sort(key=attrgetter('merit'))\n\n # x_best is the attribute with the highest merit\n x_best = best_split_suggestions[-1]\n id_best = x_best.split_test.attrs_test_depends_on()[0]\n\n # x_current is the current attribute used in this SplitNode\n id_current = node.split_test.attrs_test_depends_on()[0]\n x_current = node.find_attribute(id_current, best_split_suggestions)\n\n # Get x_null\n x_null = node.null_split(split_criterion)\n\n # Compute Hoeffding bound\n hoeffding_bound = self._hoeffding_bound(\n split_criterion.range_of_merit(node.stats), self.split_confidence,\n node.total_weight)\n\n if x_null.merit - x_best.merit > hoeffding_bound:\n # Kill subtree & replace the EFDTSplitNode by an EFDTLearningNode\n best_split = self._kill_subtree(node)\n\n # update EFDT\n if parent is None:\n # Root case : replace the root node by a new split node\n self._tree_root = best_split\n else:\n parent.set_child(branch_index, best_split)\n\n deleted_node_cnt = node.count_nodes()\n\n self._n_active_leaves += 1\n self._n_active_leaves -= deleted_node_cnt['leaf_nodes']\n self._n_decision_nodes -= deleted_node_cnt['decision_nodes']\n stop_flag = True\n\n # Manage memory\n self._enforce_size_limit()\n\n elif (x_best.merit - x_current.merit > hoeffding_bound or hoeffding_bound\n < self.tie_threshold) and (id_current != id_best):\n # Create a new branch\n new_split = self._new_split_node(x_best.split_test, node.stats, node.depth,\n node.attribute_observers)\n # Update weights in new_split\n new_split.last_split_reevaluation_at = node.total_weight\n\n # Update EFDT\n for i in range(x_best.num_splits()):\n new_child = self._new_learning_node(x_best.resulting_stats_from_split(i))\n new_split.set_child(i, new_child)\n\n deleted_node_cnt = node.count_nodes()\n\n self._n_active_leaves -= deleted_node_cnt['leaf_nodes']\n self._n_decision_nodes -= deleted_node_cnt['decision_nodes']\n self._n_decision_nodes += 1\n self._n_active_leaves += x_best.num_splits()\n\n if parent is None:\n # Root case : replace the root node by a new split node\n self._tree_root = new_split\n else:\n parent.set_child(branch_index, new_split)\n\n stop_flag = True\n\n # Manage memory\n self._enforce_size_limit()\n\n elif (x_best.merit - x_current.merit > hoeffding_bound or hoeffding_bound\n < self.tie_threshold) and (id_current == id_best):\n node._split_test = x_best.split_test\n\n return stop_flag", "def find_best_split(data, attributes, splits):\n max_gain= -1000.0\n best_split_attr = None\n \n # Calculates best split\n for split in splits:\n gain = info_gain(data, split)\n if gain > max_gain:\n max_gain = gain\n best_split_attr = split\n \n return best_split_attr", "def compute_best_split(feature, y, weights, classes, metric):\n count = 0\n metrics_list = []\n split_values = []\n\n # Case : continuous\n for value in np.sort(feature)[:-1]:\n left_idx, right_idx = split(feature, value)\n metrics_list.append(split_metric(left_idx, right_idx,\n y, weights,\n classes, metric))\n split_values.append(value)\n count += 1\n\n # TODO : categorical\n\n # Compute min and return value\n idx_max = np.argmin(np.array(metrics_list))\n\n best_metric, best_value = metrics_list[idx_max], split_values[idx_max]\n\n return best_metric, best_value", "def split(self, node):\n assert node.left == None, 'Not a leaf node.'\n # Check whether able to skip 
consideration of action, value or normalised derivative entirely.\n if (node.action_impurity > 0) and (self.split_by == 'pick' or self.impurity_weights[0] > 0): do_action = True\n else: do_action = False\n if (node.value_impurity > 0) and (self.split_by == 'pick' or self.impurity_weights[1] > 0): do_value = True\n else: do_value = False\n if (node.derivative_impurity > 0) and (self.split_by == 'pick' or self.impurity_weights[2] > 0): do_derivatives = True\n else: do_derivatives = False\n if (not do_action) and (not do_value) and (not do_derivatives): return False\n\n # Iterate through features and find best split(s) for each.\n candidate_splits = []\n for f in range(self.num_features):\n candidate_splits += self.split_feature(node, f, do_action, do_value, do_derivatives)\n # If beneficial split found on at least one feature...\n if sum([s[3][0] != None for s in candidate_splits]) > 0: \n split_quality = [s[3][2] for s in candidate_splits] \n # Choose one feature to split on. \n if self.stochastic_splits:\n # Sample in proportion to relative impurity gain.\n chosen_split = np.random.choice(range(len(candidate_splits)), p=split_quality)\n else:\n # Deterministically choose the feature with greatest relative impurity gain.\n chosen_split = np.argmax(split_quality) # Ties broken by lowest index. \n # Unpack information for this split and create child leaves.\n node.feature_index, node.split_by, indices_sorted, (node.threshold, split_index, _, _, _, _) = candidate_splits[chosen_split] \n address = int_to_bits(node.nint)\n node.left = self.new_leaf(list(address)+[0], indices_sorted[:split_index])\n node.right = self.new_leaf(list(address)+[1], indices_sorted[split_index:]) \n self.num_leaves += 1\n # Store impurity gains, scaled by node.num_samples, to measure feature importance.\n node.feature_importance = np.zeros((4, self.num_features))\n if do_action:\n fi_action = np.array([s[3][3] for s in candidate_splits if s[1] in ('action','weighted')]) * node.num_samples \n node.feature_importance[2,:] = fi_action # Potential.\n node.feature_importance[0,node.feature_index] = max(fi_action) # Realised.\n if do_value:\n fi_value = np.array([s[3][4] for s in candidate_splits if s[1] in ('value','weighted')]) * node.num_samples \n node.feature_importance[3,:] = fi_value # Potential.\n node.feature_importance[1,node.feature_index] = max(fi_value) # Realised.\n # Back-propagate importances to all ancestors.\n while address != ():\n ancestor, address = self.parent(address)\n ancestor.feature_importance += node.feature_importance\n return True\n return False", "def choose_best_split(self, X_subset, y_subset):\n # YOUR CODE HERE\n feature_index = None\n threshold = None\n best_G = np.inf\n N = len(X_subset)\n \n for current_feature in range(X_subset.shape[1]):\n thresholds = np.unique(X_subset[:, current_feature])\n \n for t in thresholds:\n y_left, y_right = self.make_split_only_y(current_feature, t, X_subset, y_subset)\n H_L = self.H(y_left)\n H_R = self.H(y_right)\n \n G = (len(y_left) / N) * H_L + (len(y_right) / N) * H_R\n \n if G < best_G:\n best_G = G\n feature_index = current_feature\n threshold = t\n \n return feature_index, threshold", "def _find_best_split(self, node):\n\n # init \n max_impurity_decrease = 0\n best_split_feature_idx = 0\n best_split_threshold = np.inf # all goes to the left\n \n for feature_idx in self._search_scope(node.X):\n x_feature = node.X[:,feature_idx]\n possible_thresholds = set(x_feature)\n\n for threshold in possible_thresholds:\n left_idx = x_feature < threshold\n 
right_idx = np.array([not i for i in left_idx])\n impurity_decrease = self._compute_impurity_decrease(node, left_idx, right_idx)\n\n if impurity_decrease > max_impurity_decrease:\n best_split_feature_idx = feature_idx\n best_split_threshold = threshold\n max_impurity_decrease = impurity_decrease\n \n return best_split_feature_idx, best_split_threshold, max_impurity_decrease", "def chooseBestFeatureToSplit(dataSet):\n\tnumFeatures = len(dataSet[0]) -1 #่ฎก็ฎ—ๅ‡บ็‰นๅพๅ€ผ็š„ๆ•ฐ็›ฎ๏ผŒๅ‡ๅŽป1 ๆ˜ฏๅ› ไธบๆœ€ๅŽไธ€ๅˆ—ๆ˜ฏๅˆ†็ฑป \n\tbaseEntropy = calcShannonEnt(dataSet)#่ฎก็ฎ—ๆ•ดไธชๆ•ฐๆฎ้›†็š„ๅŽŸๅง‹shannon entropy ๆˆ‘ไปฌไฟๆŒๆœ€ๅˆ็š„ๆ— ๅบๅบฆ้‡ๅ€ผ๏ผŒ็”จไบŽไธŽๅˆ’ๅˆ†ไน‹ๅŽ็š„ๆ•ฐๆฎ้›†่ฎก็ฎ—็š„็†ตๅ€ผ่ฟ›่กŒๆฏ”่พƒ\n\tprint 'the base entropy is ',baseEntropy\n\tbestInfoGain = 0.0 #init the ๆœ€้ซ˜็š„ไฟกๆฏๅขž็›Š\n\tbestFeature = -1 #init the best feature,maybe it will return 0 or 1 \n\tfor i in range(numFeatures):#่ฟ™้‡Œ็š„iๅ…ถๅฎžไนŸๆ˜ฏ่ฟ™ไบ›็‰นๅพๅ€ผๅˆ—็š„็ดขๅผ•\n\t\tfeatList = [example[i] for example in dataSet]#ๆ€ปๆ˜ฏๆ„Ÿ่ง‰่ฟ™ๆ ท็‰นๅˆซไธๅฅฝ๏ผŒไธบไบ†ๅ–ๅ‡บ็‰นๅพๅ€ผ่ฟ™ไธ€ๅˆ—็‰นๅพๅ€ผๅˆ—็š„ๆ•ฐๆฎไนŸๆ˜ฏ่›ฎๆ‹ผ็š„\n\t\tprint featList\n\t\tuniqueVals = set(featList)#ๅ–ๅ‡บ็‰นๅพๅ€ผ่ฟ™ไธ€ๅˆ—ๆ•ฐๆฎไธญ็š„็‰นๅพ้กน\n\t\t# print uniqueVals\n\t\tnewEntropy = 0.0#\n\t\tfor value in uniqueVals:\n\t\t\t##่ฟ™้‡ŒsplitDataSetๅ‡ฝๆ•ฐ็š„็ฌฌไบŒไธชๅ‚ๆ•ฐไธบ็‰นๅพๅ€ผ็š„ๅˆ—ไฝ,ๆˆ‘ไปฌๅฐ†ๅฏนๆฏไธช็‰นๅพๅˆ’ๅˆ†ๆ•ฐๆฎ้›†็š„็ป“ๆžœ่ฎก็ฎ—ไธ€ๆฌกไฟกๆฏ็†ต๏ผŒ็„ถๅŽๅˆคๆ–ญๆŒ‰็…งๅ“ชไธช็‰นๅพๅˆ’ๅˆ†ๆ•ฐๆฎ้›†ๆ˜ฏๆœ€ๅฅฝ็š„ๅˆ’ๅˆ†ๆ–นๅผ\n\t\t\tsubDataSet = splitDataSet(dataSet,i,value)\n\t\t\tprob = len(subDataSet)/float(len(dataSet))\n\t\t\tnewEntropy += prob * calcShannonEnt(subDataSet)\n\t\tinfoGain = baseEntropy - newEntropy\n\t\tif infoGain > bestInfoGain:\n\t\t\tbestInfoGain = infoGain\n\t\t\tbestFeature = i \n\treturn bestFeature", "def determine_best_split(data, potential_splits, mltask):\n\n first_iteration = True\n for column_index in potential_splits:\n for value in potential_splits[column_index]:\n data_below,data_above = split_data(data, column_index, value)\n \n if mltask == 'regression':\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_mse)\n \n # classification\n else:\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_entropy)\n \n \n if first_iteration or current_overall_metric <= best_overall_metric:\n first_iteration = False\n \n best_overall_metric = current_overall_metric\n best_split_column = column_index\n best_split_value = value\n \n \n return best_split_column,best_split_value", "def split(\n fs: \"FeatureSet\", ids_for_split1: List[int], ids_for_split2: Optional[List[int]] = None\n ) -> Tuple[\"FeatureSet\", \"FeatureSet\"]:\n # Note: an alternative way to implement this is to make copies\n # of the given FeatureSet instance and then use the `filter()`\n # method but that wastes too much memory since it requires making\n # two copies of the original FeatureSet which may be huge. 
With\n # the current implementation, we are creating new objects but\n # they should be much smaller than the original FeatureSet.\n ids1 = fs.ids[ids_for_split1]\n labels1 = fs.labels[ids_for_split1] if fs.labels is not None else None\n features1 = fs.features[ids_for_split1] if fs.features is not None else None\n\n # if ids_for_split2 is not given, it will be the complement of ids_split1\n if ids_for_split2 is None:\n ids_for_split2 = [ind for ind in range(len(fs.ids)) if ind not in ids_for_split1]\n\n ids2 = fs.ids[ids_for_split2]\n labels2 = fs.labels[ids_for_split2] if fs.labels is not None else None\n features2 = fs.features[ids_for_split2] if fs.features is not None else None\n\n fs1 = FeatureSet(\n f\"{fs.name}_1\", ids1, labels=labels1, features=features1, vectorizer=fs.vectorizer\n )\n fs2 = FeatureSet(\n f\"{fs.name}_2\", ids2, labels=labels2, features=features2, vectorizer=fs.vectorizer\n )\n return fs1, fs2", "def best_cutoff(self,\n split_label):\n split_args = self.sub_split_args[split_label]\n split_data = self.sub_split_data[split_label]\n # This criterion for the use_scipy flag is arbitrary and needs\n # further testing\n n_unique = len(np.unique(split_data[~np.isnan(split_data)]))\n use_scipy = True\n if n_unique > len(split_data)/1000:\n use_scipy = False\n idxcut_below, effects_below, rstats_below, ndata_below =\\\n self.u_data(split_label, use_scipy=use_scipy)\n idxcut_above, effects_above, rstats_above, ndata_above =\\\n self.u_data(split_label, above=True, use_scipy=use_scipy)\n\n # Default cutoff is min(split_data) - 1\n cutoff = split_data[split_args[0]] - 1\n value = 0\n # If no cutoff was possible\n if len(idxcut_below) == 0 or len(idxcut_above) == 0:\n return cutoff, value\n\n # All idx_cutoffs and values for cutoffs, for debugging\n for idx in range(len(idxcut_above)):\n idxcut = idxcut_above[idx]\n if idxcut != idxcut_below[idx]:\n raise NameError('Code error, invalid split')\n value_temp = (abs(effects_above[idx] -\n effects_below[idx]) *\n rstats_above[idx] *\n rstats_below[idx] *\n min(ndata_above[idx]) *\n min(ndata_below[idx]))\n if value_temp > value:\n cutoff = (split_data[split_args[int(idxcut)]] +\n split_data[split_args[int(idxcut)+1]])/2\n value = value_temp\n return cutoff, value", "def _find_best_split(self, X, y):\n\n def calculate_entropy(p):\n # _, counts = np.unique(y, return_counts=True)\n # entropy = 0.0\n # for prob in counts / float(len(y)):\n # entropy -= prob * math.log(prob, 2)\n # return entropy\n p = np.bincount(p) / float(p.shape[0])\n return stats.entropy(p)\n\n def calculate_information_gain(y, left_y, right_y):\n # p = len(left_y) / len(y)\n # return calculate_entropy(y) - p * \\\n # calculate_entropy(left_y) - (1 - p) * \\\n # calculate_entropy(right_y)\n return calculate_entropy(y) \\\n - calculate_entropy(left_y) * (float(left_y.shape[0]) / y.shape[0]) \\\n - calculate_entropy(right_y) * (float(right_y.shape[0]) / y.shape[0])\n\n def find_splits(x):\n \"\"\"Find all possible split values.\"\"\"\n split_values = set()\n\n # Get unique values in a sorted order\n x_unique = list(np.unique(x))\n for i in range(1, len(x_unique)):\n # Find a point between two values\n average = (x_unique[i - 1] + x_unique[i]) / 2.0\n split_values.add(average)\n\n return list(split_values)\n\n def split_mask(x, value):\n if isinstance(value, int) or isinstance(value, float):\n left_mask = (x >= value)\n right_mask = (x < value)\n else:\n left_mask = (x == value)\n right_mask = (x != value)\n return left_mask, right_mask\n\n max_gain, max_i_feature, 
max_value = None, None, None\n\n _, n_features = np.shape(X)\n for i_feature in range(n_features):\n column = X[:, i_feature]\n split_values = find_splits(column)\n for value in split_values:\n left_mask, right_mask = split_mask(column, value)\n gain = calculate_information_gain(y, y[left_mask], y[right_mask])\n\n if (max_gain is None) or (gain > max_gain):\n max_i_feature, max_value, max_gain = i_feature, value, gain\n \n if max_gain is None:\n return None, None, None, None, None, None, None\n \n left_mask, right_mask = split_mask(X[:, max_i_feature], max_value)\n return max_gain, max_i_feature, max_value, \\\n X[left_mask], X[right_mask], y[left_mask], y[right_mask]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Learns from a Database instance. Each entry is given a label.
def learn(self,db,labels):
        self.keys = db.keys[:]
        labelindex = -1
        if isinstance(labels,str):
            labelindex = db.keys.index(labels)
            assert labelindex >= 0,"label does not exist in database keys"
            labels = db.get_column(labelindex)
        elif isinstance(labels,int):
            labelindex = labels
            labels = db.get_column(labelindex)
        else:
            assert len(labels) == len(db.entries)
        self.root = DecisionTreeNode()
        if labelindex >= 0:
            raise NotImplementedError("Ooops, taking out indexed label broken")
            entries = np.delete(entries,labelindex,1)
        db = IndexedDatabase(db)
        if self.maxnodes != None:
            return self.greedy_learn_search(db,labels)
        else:
            self.deepest = 0
            return self.greedy_learn(self.root,db,labels,range(len(labels)))
[ "def save_data_to_db(labelled):\n add_query = sqlite3.connect(DB_PATH).cursor()\n add_query.execute(\n \"CREATE TABLE IF NOT EXISTS labels(text TEXT, label TEXT, score FLOAT)\")\n for entry in labelled:\n add_query.execute(\"\"\"INSERT INTO labels(text,label,score) VALUES(?,?,?)\"\"\",\n (entry))\n return", "def get_by_label(self, label, table, verbose=True):\n assert (self.connected)\n \n theId = -1\n GET_BY_LABEL_COMMAND = \"SELECT id,label FROM {0} WHERE samples.label = \\\"{1}\\\"\".format(table, label)\n \n \n self.cursor.execute(GET_BY_LABEL_COMMAND)\n \n for row in self.cursor:\n theId = row[0]\n break\n \n if verbose and theId != -1: \n print(\"Item with id {0} and label '{1}' retrieved.\".format(theId, label))\n elif verbose: \n print(\"No item in the table '{0}' with the label '{1}' was found.\".format(table, label))\n \n return int(theId)", "def getLabels(self):\n query = (\"SELECT label from %s \" % (self.__tablename__))\n result = self.sql_fetchall(query)\n return [x[0] for x in results]", "def ls(label, flat):\n labels = get_labels()\n subset = [k for k in labels if not label or k.startswith(label)]\n if subset:\n click.echo('Password Database')\n if flat:\n click.echo('\\n'.join(sorted(subset)))\n else:\n print_tree(paths_to_tree(subset))\n elif label:\n click.echo('No entries starting with \"{}\".'.format(label))\n else:\n click.echo('No entries to show.')", "def classify(self, example: SimpleProgramExampleWrapper) -> List[Label]:\n db_to_query = self.db.extend() # type: ClauseDB\n for ex_statement in example:\n db_to_query += ex_statement\n\n query_results = self._query(db_to_query) # type: Dict[Term, float]\n labels = self.query_result_label_extractor.extract_labels(query_results) # type: List[Label]\n\n if self.debug_printing:\n print('\\nQueried database:')\n for model_db_statement in self.db:\n print('\\t' + str(model_db_statement))\n for ex_statement in db_to_query:\n print('\\t' + str(ex_statement))\n print('Query results:')\n print('\\t' + str(query_results))\n print('Chosen class labels:')\n print('\\t' + str(labels))\n\n return labels", "def create_data_for_intent_pipeline_from_database():\n labels = []\n docs = []\n try:\n db = ExpressionsDatabaseEngine()\n data = db.get_intents_and_expressions()\n db.release_database_connection()\n for datum in data:\n labels.append(datum[0])\n docs.append(datum[1])\n return [docs, labels]\n except Exception as e:\n logger.error(\"Exception occurred importing database data.\")\n logger.exception(e)\n logger.debug(\"returning empty arrays\")\n return [docs, labels]", "def __get_labels(self):\n\n uncertain_pairs_index = self.__query_pairs()\n\n to_label_raw = self.all_raw_data.loc[uncertain_pairs_index]\n to_label_features = self.all_features.loc[uncertain_pairs_index]\n\n # Remove uncertain pairs from the candidate pool\n self.all_features.drop(uncertain_pairs_index, axis=0, inplace=True)\n\n labels_list = []\n for index, row in to_label_raw.iterrows():\n\n print(\"\\n{0:30}\\t{1}\\n{2:30}\\t{3}\\n{4:30}\\t{5}\\n{6:30}\\t{7}\\n\".format(row.name_a, row.name_b,\n row.address_a, row.address_b,\n row.zip_a, row.zip_b,\n row.city_a, row.city_b))\n\n\n label = self.__user_input(\"Is this a match? 
(0/1)\")\n labels_list.append((index, label))\n\n labels_index = [index for index, label in labels_list]\n labels_values = [label for index, label in labels_list]\n\n # Create dataframe with index and labels\n add_labels = pd.Series(labels_values, index=labels_index, name='label')\n\n # Union the new training set to the full training set\n self.labeled_features = pd.concat([self.labeled_features, to_label_features], axis = 0, ignore_index=False)\n self.labeled_labels = pd.concat([self.labeled_labels, add_labels], axis = 0, ignore_index=False)\n\n return self", "def load_labels(self, labels):\n self.labels = pd.DataFrame(labels, index=[\"label\"]).T", "def retrieve_labels(user_id: int) -> dict:\n user_label_table = dict()\n cur.execute('''SELECT USER_ID, NAME, CONTENT FROM \"labels\"''')\n rows = cur.fetchall()\n for row in rows:\n if user_id == row[0]:\n user_label_table[row[1]] = row[2]\n return user_label_table", "def get_labels():\n return if_found(dao.get_labels())", "def load_pdbbind_labels(labels_file):\n # Some complexes have labels but no PDB files. Filter these manually\n missing_pdbs = [\"1d2v\", \"1jou\", \"1s8j\", \"1cam\", \"4mlt\", \"4o7d\"]\n contents = []\n with open(labels_file) as f:\n for line in f:\n if line.startswith(\"#\"):\n continue\n else:\n # Some of the ligand-names are of form (FMN ox). Use regex\n # to merge into form (FMN-ox)\n p = re.compile('\\(([^\\)\\s]*) ([^\\)\\s]*)\\)')\n line = p.sub('(\\\\1-\\\\2)', line)\n elts = line.split()\n # Filter if missing PDB files\n if elts[0] in missing_pdbs:\n continue\n contents.append(elts)\n contents_df = pd.DataFrame(\n contents,\n columns=(\"PDB code\", \"resolution\", \"release year\", \"-logKd/Ki\", \"Kd/Ki\",\n \"ignore-this-field\", \"reference\", \"ligand name\"))\n return contents_df", "def label_for_name(label_name: str, db_session):\n label = db_session.query(DomainLabel).filter(DomainLabel.name == label_name).first()\n\n if not label:\n label = DomainLabel(label_name)\n db_session.add(label)\n try:\n db_session.commit()\n except sqlalchemy.exc.IntegrityError:\n db_session.rollback()\n label = db_session.query(DomainLabel).filter(DomainLabel.name == label_name).first()\n if not label:\n raise\n return label", "def load_keywords():\n\n results = select(p for p in Keyword if p.active)[:] # Retrieving all keywords from db\n\n # Creating a dataframe\n df = pd.DataFrame(columns=['label',\n 'value'])\n\n # Looping through the results and making up a dataframe\n for result in results:\n df = df.append({'label': result.keyword,\n 'value': result.keyword},\n ignore_index=True)\n\n return df.to_dict('records') # Returning the keywords as a table", "def lookup(conn, language_code, graphic, phonetic, restrictions):\n c = conn.cursor()\n entry_ids = tuple(c.execute('SELECT entry_id FROM lemmas WHERE language = ? AND graphic = ? 
and phonetic = ?', (language_code, graphic, hiragana_to_katakana(phonetic))))\n return tuple(Lexeme(conn, language_code, entry_id, restrictions) for (entry_id,) in entry_ids)", "def list(self) -> None:\n words = self.db.all()\n if not words:\n print(\"[-] No words found in the database\")\n return\n\n entries = [self.dict2entry(word) for word in words]\n for entry in entries:\n print(entry.get_str())", "def assign_label(self, dbcurs, existing_labs):\n if not self.is_usable() or self.not_identified():\n raise FindResultErr(\"Object not usable\")\n n = 0\n base = ord('A')\n while 1:\n nlab = chr(base + n % 26)\n if n >= 26:\n nlab += str(n // 26)\n if nlab not in existing_labs:\n break\n n += 1\n self.obj.label = self.label = nlab\n existing_labs.add(nlab)\n return dbcurs.execute(\"UPDATE objdata SET label=%s WHERE ind={:d}\".format(self.obj.objind), nlab)", "def extract_labels(pdbbind_label_file):\n assert os.path.isfile(pdbbind_label_file)\n labels = {}\n with open(pdbbind_label_file) as f:\n content = f.readlines()\n for line in content:\n if line[0] == \"#\":\n continue\n line = line.split()\n # lines in the label file have format\n # PDB-code Resolution Release-Year -logKd Kd reference ligand-name\n #print line[0], line[3]\n labels[line[0]] = line[3]\n return labels", "def _db_store(self, labels: Sequence[Tuple[int, np.ndarray]], table: str) -> None:\r\n # Labels are expected to be\r\n # [\r\n # (class, points),\r\n # (class, points)\r\n # .\r\n # .\r\n # .\r\n # ]\r\n # Where points are np.arrays\r\n # There should also always be one fish in the scene => len(labels) >= 1\r\n\r\n n_points = np.prod(labels[0][1].shape)\r\n\r\n gen = ((self.n, class_, *points.ravel().round(3)) for class_, points in labels)\r\n\r\n # First two \"?\" are for image id and class respectively, rest are for points\r\n sql_command = (\r\n f'INSERT INTO {table} VALUES {(\"?\",\"?\",*[\"?\" for i in range(n_points)])}'\r\n ).replace(\"'\", \"\")\r\n\r\n self.cursor.executemany(sql_command, gen)", "def test_get_species_from_label(self):\n test_labels = ['1-Butene', 'Acetic acid', 'Ethanol']\n retrieved_species = self.database.get_species_from_label(test_labels)\n self.assertEqual(len(retrieved_species), 3)\n self.assertEqual(retrieved_species[0].label, '1-Butene')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an indexed database, greedily and recursively learns the split value for the subtree of the indicated node. The return value is the number of mistakes made by the decision tree. Missing values are handled properly: they are treated as 'don't care' values that get passed down to both sides of the tree.
def greedy_learn(self,node,db,labels,ids):
        if node.depth >= self.maxdepth or len(ids) <= self.minexamples:
            #terminate recursion
            node.pick_best_label(db,labels,ids)
            err = misclassification_error([labels[id] for id in ids])
            if err > 0:
                print "Reached a leaf and had to make some sacrifices, cost",err
                print " depth",node.depth
                print " labels",[labels[id] for id in ids]
            return err

        features = self.feature_subset(node,db,labels,ids)
        cost = node.pick_best_split(db,labels,ids,features)

        #do a split
        if node.type == 'v':
            #base case: no misclassifications
            """
            if cost>0:
                print "greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero"
                print "cost=",cost,"misclassification=",misclassification_error([labels[id] for id in ids])
                print "# of ids:",len(ids)
                for i in ids:
                    print "id",i,",",
                    for k in range(db.numFeatures()):
                        if db[k,i] != None:
                            print k,"=",db[k,i],",",
                    print "label",labels[i]
                raw_input()
            """
            return 0
        elif node.type == 's':
            #print "Picked feature",node.feature,"split"
            #do a discrete split
            node.children = dict()
            #select sub-indices
            Eids = defaultdict(list)
            noneids = []
            for id in ids:
                v = db[node.feature,id]
                if v is None:
                    #item doesn't exist, it's a missing value
                    noneids.append(id)
                else:
                    Eids[v].append(id)
            #print " split sizes:",[len(x) for x in Eids.values()]
            #print " None ids:",len(noneids)
            ids = None
            errors = 0
            for v,vids in Eids.iteritems():
                #recurse
                c = DecisionTreeNode(node)
                #print "Recursing on value",v
                #print " ids:",vids
                errors += self.greedy_learn(c,db,labels,vids+noneids)
                node.children[v] = c
                if c.depth > self.deepest:
                    self.deepest = c.depth
                    print "Decision tree learner: Reached node with depth",self.deepest
            return errors
        else:
            #do an inequality split
            assert node.type == 'i'
            #print "Picked feature",node.feature,"inequality value",node.value,"cost",cost
            leftids = []
            rightids = []
            for id in ids:
                if db[node.feature,id] is not None:
                    if db[node.feature,id] <= node.value: leftids.append(id)
                    else: rightids.append(id)
                else:
                    leftids.append(id)
                    rightids.append(id)
            if len(rightids) == len(ids) or len(leftids) == len(ids):
                #due to missing values, this split is useless
                errors = misclassification_error([labels[id] for id in ids])
                print "useless split on feature",node.feature,"value",node.value,"misclassification error",errors
                print "Left size",len(leftids),"right size",len(rightids)
                raw_input()
                node.pick_best_label(db,labels,ids)
                return errors
            #clear memory associated with ids list
            del ids[:]
            ids = None
            #print "Left size",len(leftids),"right size",len(rightids)
            c1 = DecisionTreeNode(node)
            c2 = DecisionTreeNode(node)
            #left side
            errors = self.greedy_learn(c1,db,labels,leftids)
            #right side
            errors += self.greedy_learn(c2,db,labels,rightids)
            #restore index
            node.children = {0:c1,1:c2}
            if c1.depth > self.deepest:
                self.deepest = c1.depth
                print "Decision tree learner: Reached node with depth",self.deepest
            return errors
[ "def count_splits_on_tree(self, tree):\n if self.taxon_set is None:\n self.taxon_set = tree.taxon_set\n else:\n assert tree.taxon_set is self.taxon_set\n self.total_trees_counted += 1\n if not self.ignore_node_ages:\n tree.calc_node_ages()\n for split, edge in tree.split_edges.iteritems():\n if self.is_rooted:\n split = edge.split_bitmask\n try:\n self.split_counts[split] += 1\n except KeyError:\n self.splits.append(split)\n self.split_counts[split] = 1\n if tree.weight is None:\n weight_to_use = 1.0\n else:\n weight_to_use = float(tree.weight)\n try:\n self.weighted_split_counts[split] += weight_to_use\n except KeyError:\n self.weighted_split_counts[split] = weight_to_use\n self.sum_of_weights += weight_to_use\n if not self.ignore_edge_lengths:\n sel = self.split_edge_lengths.setdefault(split,[])\n if edge.length is not None:\n sel.append(tree.split_edges[split].length)\n # for correct behavior when some or all trees have no edge lengths\n# else:\n# self.split_edge_lengths[split].append(0.0)\n if not self.ignore_node_ages:\n sna = self.split_node_ages.setdefault(split, [])\n if edge.head_node is not None:\n sna.append(edge.head_node.age)", "def evaluate_split( df, attribute, split ):\n mask = df[attribute] <= split\n \n # split the dataset on the split attribute\n dfl = df[mask]\n dfr = df[~mask]\n \n \n # calculate weighting factors for child\n weighting_factor_left = float(dfl.shape[0])/df.shape[0]\n weighting_factor_right = float(dfr.shape[0])/df.shape[0]\n\n # calculate gini for left and right\n gini_parent = gini_impurity(df)\n gini_left = gini_impurity(dfl)\n gini_right = gini_impurity(dfr)\n \n # calculate weighted gini for this split \n weighted_gini = gini_parent - (weighting_factor_left*gini_left + weighting_factor_right*gini_right)\n return weighted_gini", "def _reevaluate_best_split(self, node, parent, branch_index):\n stop_flag = False\n if not node.observed_class_distribution_is_pure():\n if self._split_criterion == self._GINI_SPLIT:\n split_criterion = GiniSplitCriterion()\n elif self._split_criterion == self._INFO_GAIN_SPLIT:\n split_criterion = InfoGainSplitCriterion()\n elif self._split_criterion == self._HELLINGER:\n split_criterion = HellingerDistanceCriterion()\n else:\n split_criterion = InfoGainSplitCriterion()\n\n best_split_suggestions = node.best_split_suggestions(split_criterion, self)\n if len(best_split_suggestions) > 0:\n # Sort the attribute accordingly to their split merit for each attribute\n # (except the null one)\n best_split_suggestions.sort(key=attrgetter('merit'))\n\n # x_best is the attribute with the highest merit\n x_best = best_split_suggestions[-1]\n id_best = x_best.split_test.attrs_test_depends_on()[0]\n\n # x_current is the current attribute used in this SplitNode\n id_current = node.split_test.attrs_test_depends_on()[0]\n x_current = node.find_attribute(id_current, best_split_suggestions)\n\n # Get x_null\n x_null = node.null_split(split_criterion)\n\n # Compute Hoeffding bound\n hoeffding_bound = self._hoeffding_bound(\n split_criterion.range_of_merit(node.stats), self.split_confidence,\n node.total_weight)\n\n if x_null.merit - x_best.merit > hoeffding_bound:\n # Kill subtree & replace the EFDTSplitNode by an EFDTLearningNode\n best_split = self._kill_subtree(node)\n\n # update EFDT\n if parent is None:\n # Root case : replace the root node by a new split node\n self._tree_root = best_split\n else:\n parent.set_child(branch_index, best_split)\n\n deleted_node_cnt = node.count_nodes()\n\n self._n_active_leaves += 1\n self._n_active_leaves 
-= deleted_node_cnt['leaf_nodes']\n self._n_decision_nodes -= deleted_node_cnt['decision_nodes']\n stop_flag = True\n\n # Manage memory\n self._enforce_size_limit()\n\n elif (x_best.merit - x_current.merit > hoeffding_bound or hoeffding_bound\n < self.tie_threshold) and (id_current != id_best):\n # Create a new branch\n new_split = self._new_split_node(x_best.split_test, node.stats, node.depth,\n node.attribute_observers)\n # Update weights in new_split\n new_split.last_split_reevaluation_at = node.total_weight\n\n # Update EFDT\n for i in range(x_best.num_splits()):\n new_child = self._new_learning_node(x_best.resulting_stats_from_split(i))\n new_split.set_child(i, new_child)\n\n deleted_node_cnt = node.count_nodes()\n\n self._n_active_leaves -= deleted_node_cnt['leaf_nodes']\n self._n_decision_nodes -= deleted_node_cnt['decision_nodes']\n self._n_decision_nodes += 1\n self._n_active_leaves += x_best.num_splits()\n\n if parent is None:\n # Root case : replace the root node by a new split node\n self._tree_root = new_split\n else:\n parent.set_child(branch_index, new_split)\n\n stop_flag = True\n\n # Manage memory\n self._enforce_size_limit()\n\n elif (x_best.merit - x_current.merit > hoeffding_bound or hoeffding_bound\n < self.tie_threshold) and (id_current == id_best):\n node._split_test = x_best.split_test\n\n return stop_flag", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? 
\"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def n_splittable_leaf_nodes(tree: Tree) -> int:\n return len(tree.splittable_leaf_nodes)", "def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. 
Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))", "def log_probability_split_within_tree(tree: Tree, mutation: GrowMutation) -> float:\n prob_node_chosen_to_split_on = safe_negative_log(n_splittable_leaf_nodes(tree))\n prob_split_chosen = log_probability_split_within_node(mutation)\n return prob_node_chosen_to_split_on + prob_split_chosen", "def _compute_best_split_and_push(self, node):\n\n node.split_info = self.splitter.find_node_split(\n node.sample_indices, node.histograms, node.sum_gradients,\n node.sum_hessians)\n\n if node.split_info.gain <= 0: # no valid split\n self._finalize_leaf(node)\n else:\n heappush(self.splittable_nodes, node)", "def _split_threshold(self, node):\n\n # define the score to improve upon\n if self.n_clusters >= self.min_leaves and node.size <= self.max_leaf_size:\n # split only if min(children scores) > node.score\n force_split = False\n best_score = node.score\n else:\n # force split: just take the best (even if children are worse)\n force_split = True\n best_score = None\n\n left, right = None, None\n\n # iterate over embedding dimensions (first ones are more reliable)\n # up to max_n_vec (included), until we found an improving split\n for _vec in range(self.n_vec):\n\n # get the candidate thresholds along this dimension\n threshs = self._get_candidate_thresholds(node, _vec)\n\n # look for an improving best split along this eigenvector\n for _t in threshs:\n # compute the split\n below_thresh = self.E[node.ids, _vec] < _t\n _lids = node.ids[below_thresh]\n _rids = 
node.ids[np.logical_not(below_thresh)]\n # check if the tubes are not too small\n _nl, _nr = len(_lids), len(_rids)\n is_valid = _nl >= self.min_leaf_size and _nr >= self.min_leaf_size\n if is_valid:\n # compute the score of the new tubes only\n _sl = self.get_tube_score(_lids)\n _sr = self.get_tube_score(_rids)\n # get the score of this split\n split_score = min(_sl, _sr)\n if best_score is None or split_score > best_score:\n # better split\n best_score = split_score\n node.has_children = True\n node.thresh = _t\n left = SpectralNode(\n _lids, _vec, score=_sl, name=node.name + \"0\")\n right = SpectralNode(\n _rids, _vec, score=_sr, name=node.name + \"1\")\n\n # check stopping criterion\n if node.has_children:\n # we found an improving split\n if _vec > 0 or not force_split:\n # found an improving non-forced split: stop here\n break\n\n return left, right", "def data_split(df, best_feature, info_gain_dict, dt_dict,\r\n curr_node, depth, continous = False):\r\n \r\n depth -= 1\r\n # decrease the depth count\r\n no_data = False\r\n # default flag for data check\r\n match_threshold_df = df[df[best_feature] == info_gain_dict[best_feature][0]]\r\n # subset the data if threshold is matched\r\n if not len(match_threshold_df):\r\n # no more data points\r\n no_data = True\r\n match_threshold_df = df\r\n # go back to prev dataframe\r\n else:\r\n pass\r\n \r\n mismatch_threshold_df = df[df[best_feature] != info_gain_dict[best_feature][0]]\r\n # subset the data if there is a mismatch\r\n if not len(mismatch_threshold_df):\r\n # if no more data points\r\n no_data = True\r\n mismatch_threshold_df = df\r\n # go back to prev dataframe\r\n else:\r\n pass\r\n decision_tree(match_threshold_df, dt_dict, curr_node, best_feature,\r\n align_dir = \"equal\", depth=depth, no_data = no_data)\r\n # function call to grow tree on the left side\r\n decision_tree(mismatch_threshold_df, dt_dict, curr_node, best_feature,\r\n align_dir = \"not_equal\", depth=depth, no_data = no_data)\r\n # function call to grow the tree on the right side\r", "def DecisionTreeAlgorithm(df, mltask, counter = 0, min_samples = 2, max_depth = 5, random_subspace = None):\n\n if counter == 0:\n global COLUMN_HEADERS, FEATURE_TYPE\n COLUMN_HEADERS = df.columns\n FEATURE_TYPE = hf.determine_type_of_feature(df)\n data = df.values\n else:\n data = df\n \n if (check_purity(data)) or (len(data) < min_samples) or (counter == max_depth):\n leaf = create_leaf(data, mltask)\n return leaf\n \n else:\n counter += 1\n \n potential_splits = get_potential_split(data, random_subspace)\n split_column,split_value = determine_best_split(data, potential_splits, mltask)\n data_below,data_above = split_data(data,split_column,split_value)\n \n if (len(data_below) == 0) or (len(data_above) == 0):\n leaf = create_leaf(data, mltask)\n return leaf\n \n feature_name = COLUMN_HEADERS[split_column]\n type_of_feature = FEATURE_TYPE[split_column]\n if type_of_feature == 'continuous':\n question = '{} <= {}'.format(feature_name,split_value)\n else:\n question = '{} = {}'.format(feature_name,split_value)\n sub_tree = {question:[]}\n \n yes_answer = DecisionTreeAlgorithm(data_below, mltask, counter, min_samples, max_depth, random_subspace)\n no_answer = DecisionTreeAlgorithm(data_above, mltask, counter, min_samples, max_depth, random_subspace)\n \n if yes_answer == no_answer :\n sub_tree = yes_answer\n else :\n sub_tree[question].append(yes_answer)\n sub_tree[question].append(no_answer)\n \n return sub_tree", "def analyze_tree(dataset,my_tree,column_class=-1):\n #get the relevant 
starting variables\n labels = dataset[:,column_class]\n N, class_num = np.shape(dataset)\n datapred = np.zeros(N)\n #now loop and get the predictions\n for i in range(N):\n prediction = predict_tree(dataset[i,:], my_tree)\n datapred[i] = prediction\n #now get the accuracy\n check_array = datapred == labels\n return np.sum(check_array)/(np.shape(check_array)[0])", "def prune(tree, testSet, res, technique):\n assert technique in [\"reduced_error\"]\n if technique == \"reduced_error\":\n tbSet = testSet[testSet[tree.col] >= tree.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[tree.col] < tree.value] #find which test observations belong to this tree's false branch\n \n if tree.tb.results is None: #Check if the true branch of this sub-tree is a leaf\n ptb = prune(tree.tb, tbSet, res, technique) #If not, recursively travel down the true branch and prune it.\n else:\n ptb = tree.tb #If the true branch is a leaf, then the true branch has--in essence--already been pruned.\n if tree.fb.results is None: #Check if the false branch of this sub-tree is a leaf\n pfb = prune(tree.fb, fbSet, res, technique) #If not, recursively travel down the false branch and prune it.\n else:\n pfb = tree.fb #If the false branch is a leaf, then the false branch has--in essence--already been pruned.\n \n #Sum the number of misclassifications of the test data at each of the leaves of this node\n wrong_in_leaves = __deep_count_errors(ptb, tbSet, res) + __deep_count_errors(pfb, fbSet, res)\n \n #Count the number of misclassificationsof the test data that would occur if this node were treated as a leaf\n wrong_at_node = __count_errors(tree, testSet, res)\n \n #Assess whether or not treating the node as a leaf improves the accuracy on the test set\n if wrong_at_node <= wrong_in_leaves: \n #NOTE:The following line of code seems slightly redundant since count_errors(tree, testSet, res) had to call \n #__get_results(tree). I should set up some way to save the output of that function call instead of calling it twice.\n return decisionNode(results = __get_results(tree)) #If so, return a decisionNode where the node is a leaf\n else:\n #If not, return a decisionNode where the node splits on the same column and value as before, but the \n #true and false branches are the pruned-versions of the original true and false branches. 
See above for\n #definition of ptb and pfb\n return decisionNode(col = tree.col, value = tree.value, tb = ptb, fb = pfb)", "def split(self, node):\n assert node.left == None, 'Not a leaf node.'\n # Check whether able to skip consideration of action, value or normalised derivative entirely.\n if (node.action_impurity > 0) and (self.split_by == 'pick' or self.impurity_weights[0] > 0): do_action = True\n else: do_action = False\n if (node.value_impurity > 0) and (self.split_by == 'pick' or self.impurity_weights[1] > 0): do_value = True\n else: do_value = False\n if (node.derivative_impurity > 0) and (self.split_by == 'pick' or self.impurity_weights[2] > 0): do_derivatives = True\n else: do_derivatives = False\n if (not do_action) and (not do_value) and (not do_derivatives): return False\n\n # Iterate through features and find best split(s) for each.\n candidate_splits = []\n for f in range(self.num_features):\n candidate_splits += self.split_feature(node, f, do_action, do_value, do_derivatives)\n # If beneficial split found on at least one feature...\n if sum([s[3][0] != None for s in candidate_splits]) > 0: \n split_quality = [s[3][2] for s in candidate_splits] \n # Choose one feature to split on. \n if self.stochastic_splits:\n # Sample in proportion to relative impurity gain.\n chosen_split = np.random.choice(range(len(candidate_splits)), p=split_quality)\n else:\n # Deterministically choose the feature with greatest relative impurity gain.\n chosen_split = np.argmax(split_quality) # Ties broken by lowest index. \n # Unpack information for this split and create child leaves.\n node.feature_index, node.split_by, indices_sorted, (node.threshold, split_index, _, _, _, _) = candidate_splits[chosen_split] \n address = int_to_bits(node.nint)\n node.left = self.new_leaf(list(address)+[0], indices_sorted[:split_index])\n node.right = self.new_leaf(list(address)+[1], indices_sorted[split_index:]) \n self.num_leaves += 1\n # Store impurity gains, scaled by node.num_samples, to measure feature importance.\n node.feature_importance = np.zeros((4, self.num_features))\n if do_action:\n fi_action = np.array([s[3][3] for s in candidate_splits if s[1] in ('action','weighted')]) * node.num_samples \n node.feature_importance[2,:] = fi_action # Potential.\n node.feature_importance[0,node.feature_index] = max(fi_action) # Realised.\n if do_value:\n fi_value = np.array([s[3][4] for s in candidate_splits if s[1] in ('value','weighted')]) * node.num_samples \n node.feature_importance[3,:] = fi_value # Potential.\n node.feature_importance[1,node.feature_index] = max(fi_value) # Realised.\n # Back-propagate importances to all ancestors.\n while address != ():\n ancestor, address = self.parent(address)\n ancestor.feature_importance += node.feature_importance\n return True\n return False", "def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.66)", "def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n 
min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.5)", "def split_next_best(self, pbar=None):\n assert hasattr(self, 'tree'), 'Must have started growth process already.'\n if self.leaf_impurity_sums == []: return False\n imp_norm = np.array(self.leaf_impurity_sums) / self.root_impurity_sums\n if self.split_by == 'action': best = np.argmax(imp_norm[:,0])\n elif self.split_by == 'value': best = np.argmax(imp_norm[:,1])\n elif self.split_by == 'derivative': best = np.argmax(imp_norm[:,2])\n # NOTE: For split_by='pick', sum normalised impurities and find argmax.\n elif self.split_by == 'pick': best = np.argmax(imp_norm.sum(axis=1))\n # NOTE: For split_by='weighted', take weighted sum instead. \n elif self.split_by == 'weighted': best = np.argmax(np.inner(imp_norm, self.impurity_weights))\n nint = self.untried_leaf_nints.pop(best)\n imp = self.leaf_impurity_sums.pop(best)\n node = self.node(nint)\n if self.split(node): # The split is tried here.\n if pbar: pbar.update(1)\n self.untried_leaf_nints.append(node.left.nint)\n self.leaf_impurity_sums.append(self.get_node_impurity_sums(node.left))\n self.untried_leaf_nints.append(node.right.nint)\n self.leaf_impurity_sums.append(self.get_node_impurity_sums(node.right))\n return True\n # If can't make a split, recurse to try the next best.\n else: return self.split_next_best()", "def __count_errors(node, testSet, res):\n training_results = __get_results(node) #Get a dictionary of labels and counts for the *training* data which made it to this node\n leaf_label = None #Initialize a label for this leaf\n majority_count = 0 #Initialize a variable to track the number of observations for the label with the most observations\n #Note that the steps below do not handle ties of the majority count in a nice way.\n for label, count in training_results.items(): #iterate through each pair of labels and counts from the training set\n if count > majority_count: #find the label with the highest count\n leaf_label = label #the label for the leaf is the label with the highest count\n majority_count = count #keep track of the count for the leaf_label\n \n wrong_labels = testSet[res].unique().tolist() #initialize wrong_labels to be all labels in the testSet\n if leaf_label in wrong_labels: #If the leaf label is in the list of labels for the part of the test set that got to this node\n wrong_labels.remove(leaf_label) #remove the leaf_label so that all which remains are incorrect labels\n \n wrong_count = 0 #Initialize a count of how many testSet observations will be classified incorrectly\n testCounts = testSet.groupby(res).size() #Get a series of the testSet labels and how many observations pertain to each label\n for label in wrong_labels: #Iterate over all the labels not equal to the leaf_label\n wrong_count += testCounts[label] #Sum up all of the observations with a label not equal to the leaf_label\n return wrong_count", "def path_length_tree(x, t,e):\r\n e = e\r\n if t.exnodes == 1:\r\n e = e+ c(t.size) # normlization\r\n return e\r\n else:\r\n a = t.split_by\r\n if x[a] < t.split_value :\r\n return path_length_tree(x, t.left, e+1)\r\n\r\n if x[a] >= t.split_value 
:\r\n return path_length_tree(x, t.right, e+1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Identifies the list of example indices that would follow the decision tree down to the given node.
def identify_examples(self,db,labels,node):
        path = []
        while node.parent != None:
            nkey = None
            for (k,c) in node.parent().children.iteritems():
                if c is node:
                    nkey = k
                    break
            assert nkey != None
            path.append((node.parent(),nkey))
            node = node.parent()
        path = path[::-1]
        nids = len(labels)
        ids = []
        for id in xrange(nids):
            valid = True
            for n,ckey in path:
                f = n.feature
                val = featureMatrix[f,id]
                if val is None:
                    #it's a None value, just continue on
                    continue
                else:
                    key = None
                    if n.type == 'i':
                        key = (0 if val <= n.value else 1)
                    else:
                        key = val
                    if key != ckey:
                        valid = False
                        break
            if valid:
                ids.append(id)
        return ids
[ "def indices(self):", "def list_indices(self):", "def target_nodes_indexes(self) -> _TargetNodes:\n return self.__target_nodes_indexes", "def reference_nodes_idx(self) -> Dict[str, torch.Tensor]:\n return self.node_idx_references", "def getResultIdx(self):\n return [ element.idx for element in self.treated[self.target] ]", "def exploration_ids(self) -> List[str]:\n return [node.exploration_id for node in self.nodes]", "def __get_node_index(self, node):\r\n pass", "def ligand_idxs(self):\n return self._ligand_idxs", "def get_indications(self):\n indications = np.zeros_like(self.predictions)\n for i in range(self.predictions.shape[0]):\n ind = np.where(self.predictions[i, :] - self.labels != 0.0)[0]\n indications[i, ind] = 1.0\n\n return indications", "def getArrayIndices(self):\n \n pass", "def __get_node_index(self, node):\n pass", "def get_indices(exs: List[SentimentExample], embed: WordEmbeddings):\n indexer = embed.word_indexer\n max_len = float(\"-inf\")\n for ex in exs:\n max_len = max(max_len, len(ex.words))\n idxs = create2D(len(exs), max_len)\n labels = []\n for i in range(len(exs)):\n ex_len = len(exs[i].words)\n for j in range(ex_len):\n idx = indexer.index_of(exs[i].words[j])\n idx = idx if idx != -1 else 1\n idxs[i][j] = idx\n # idxs[i][j] = idx if idx != -1 else indexer.index_of(\"UNK\")\n for j in range(ex_len, max_len):\n idxs[i][j] = 0\n # idxs[i][j] = indexer.index_of(\"PAD\")\n labels.append(exs[i].label)\n return torch.LongTensor(idxs),torch.LongTensor(labels)", "def agent_locs_idx(self):\n return tuple(self.agent_locs.T)", "def _draw_indices(dataset: Dataset, labeled_sample_num: int, verbose: bool = True, seed: int = 1) \\\n -> Tuple[List[int], List[int]]:\n # todo add more rubost split method to enable fairness split of dataset, and validation set.\n total_num = len(dataset)\n targets = dataset.targets\n assert total_num >= labeled_sample_num, f\"`labeled_sample_num={labeled_sample_num} should be smaller than totoal_num={total_num}.`\"\n with FixRandomSeed(seed):\n # only fix numpy and random pkgs\n labeled_indices = sorted(choice(list(range(total_num)), labeled_sample_num, replace=False))\n unlabeled_indices = sorted(list(set(range(total_num)) - set(labeled_indices)))\n if verbose:\n print(f\">>>Generating {len(labeled_indices)} labeled data and {len(unlabeled_indices)} unlabeled data.\")\n assert labeled_indices.__len__() + unlabeled_indices.__len__() == total_num, f\"{1} split wrong.\"\n return labeled_indices, unlabeled_indices", "def _dofidxs(self):\n return [const['dofidxs'] for i, const in self._constraints_df.iterrows()]", "def output_node_ids(self):\n return [\n i\n for i in range(\n self.n_inputs + self.n_hidden,\n self.n_inputs + self.n_hidden + self.n_outputs,\n )\n ]", "def indicesIter(self):\n \n pass", "def input_node_ids(self):\n return [i for i in range(self.n_inputs)]", "def _get_label_indices(self):\n return np.where((np.array(self.annotations['symbol']) == 'p') |\n (np.array(self.annotations['symbol']) == 'N') |\n (np.array(self.annotations['symbol']) == 'A') |\n (np.array(self.annotations['symbol']) == 't'))[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Same as greedy_learn, but with a maximum number of nodes. Rather than a DFS, this uses a priority queue that at each step splits the node with the maximum improvement in misclassification error. At most maxnodes are in the resulting tree, and the depth is limited to maxdepth. Returns the total number of misclassifications of the training set. There is a low-memory mode when self.lowmem == True, or when self.lowmem == 'auto' and the number of saved ids at a node grows beyond a certain number (self.lowmem_threshold, 10m by default). In low-memory mode, the subset of examples at a given node is determined dynamically, which incurs an O(|D|d) cost per node, where d is the depth of the node. Overall this raises running time by a factor of approximately O(|D| log_2 |D|).
def greedy_learn_search(self,db,labels):
        queue = PriorityQueue()
        dolowmem = (self.lowmem == True)
        numidsets = 0
        root_ids = range(len(labels))
        queue.push((self.root,root_ids),len(labels))
        numnodes = 1
        deepest = 0
        err = 0
        while len(queue) > 0 and numnodes+2 <= self.maxnodes:
            #print "%d nodes, priority %d"%(numnodes,queue.nextkey())
            nerr = queue.nextkey()
            (node,trainingset) = queue.pop()
            #print "Greedy learn",len(trainingset)
            if trainingset is None:
                trainingset = self.identify_examples(db,labels,node)
            if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:
                #print " Hit depth or training set limit"
                node.pick_best_label(db,labels,trainingset)
                err += misclassification_error([labels[id] for id in trainingset])
                continue
            features = self.feature_subset(node,db,labels,trainingset)
            cost = node.pick_best_split(db,labels,trainingset,features)
            numidsets -= len(trainingset)
            #do a split
            if node.type == 'v':
                continue
            elif node.type == 's':
                #discrete split
                node.children = dict()
                #select sub-indices
                Eids = defaultdict(list)
                noneids = []
                for id in trainingset:
                    v = db[node.feature,id]
                    if v is None:
                        #item doesn't exist, it's a missing value
                        noneids.append(id)
                    else:
                        Eids[v].append(id)
                #determine whether to switch to low-memory mode
                if not dolowmem and self.lowmem=='auto':
                    for v,vids in Eids.iteritems():
                        numidsets += len(vids)+len(noneids)
                    if numidsets > self.lowmem_threshold:
                        print "Decision tree learner switching to low-memory mode"
                        dolowmem = True
                        trainingset = None

                numnodes += len(Eids)
                #print "Split sizes",[len(v) for v in Eids.itervalues()]
                #print "None size",len(noneids)
                for v,vids in Eids.iteritems():
                    #print "->",len(vids),"+",len(noneids)
                    #recurse
                    c = DecisionTreeNode(node)
                    node.children[v] = c
                    err = misclassification_error([labels[id] for id in vids+noneids])
                    cids = (None if dolowmem else vids+noneids)
                    queue.push((c,cids),err)
                    if c.depth > deepest:
                        deepest = c.depth
                        print "Decision tree learner: Reached node with depth",deepest
            else:
                #do an inequality split
                assert node.type == 'i',"Got a weird type? "+str(node.type)
                leftids = []
                rightids = []
                for id in trainingset:
                    val = db[node.feature,id]
                    if val is not None:
                        if val <= node.value: leftids.append(id)
                        else: rightids.append(id)
                    else:
                        leftids.append(id)
                        rightids.append(id)
                if len(leftids)==0 or len(rightids)==0:
                    print "node feature "+str(node.feature)+" doesn't have a valid split value "+str(node.value)
                    vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]
                    print "min,max of training set:",min(vals),max(vals)
                    print "cost is",cost
                    raw_input()
                assert len(leftids) > 0 and len(rightids) > 0
                if not dolowmem and self.lowmem=='auto':
                    numidsets += len(leftids) + len(rightids)
                    if numidsets > self.lowmem_threshold:
                        print "Decision tree learner switching to low-memory mode"
                        dolowmem = True
                        trainingset = None
                numnodes += 2
                c1 = DecisionTreeNode(node)
                c2 = DecisionTreeNode(node)
                node.children = {0:c1,1:c2}
                #print "->",len(leftids)
                #print "->",len(rightids)
                err1 = misclassification_error([labels[id] for id in leftids])
                err2 = misclassification_error([labels[id] for id in rightids])
                if dolowmem:
                    leftids = None
                    rightids = None
                queue.push((c1,leftids),err1)
                queue.push((c2,rightids),err2)
                if c1.depth > deepest:
                    deepest = c1.depth
                    print "Decision tree learner: Reached node with depth",deepest
        #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes
        if len(queue) > 0:
            print "%d nodes remaining in queue, setting to leaves"%(len(queue),)
            for (node,trainingset) in queue:
                node.pick_best_label(db,labels,trainingset)
                err += misclassification_error([labels[id] for id in trainingset])
        return err
[ "def max_node_count(self) -> int:\n return pulumi.get(self, \"max_node_count\")", "def node_count_max(self) -> int:\n return int(self.graph_tuple_stats.node_count_max or 0)", "def max_nodes(self):\n return self._max_nodes", "def max_nodes(self) -> int:\n return pulumi.get(self, \"max_nodes\")", "def n_prunable_decision_nodes(tree: Tree) -> int:\n return len(tree.prunable_decision_nodes)", "def test_node_limited_search(self):\n nodes = random.randrange(10000, 99999)\n info = self.goldfish.analyse(\n chess.Board(), chess.engine.Limit(nodes=nodes), info=chess.engine.Info.ALL\n )\n self.assertAlmostEqual(1.00, info[\"nodes\"] / nodes, 3)", "def max_nodes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max_nodes\")", "def unusable_node_count(self) -> int:\n return pulumi.get(self, \"unusable_node_count\")", "def max_percent_unhealthy_nodes(self) -> Optional[int]:\n return pulumi.get(self, \"max_percent_unhealthy_nodes\")", "def number_of_max_degree_nodes(G):\n return number_of_nodes_of_degree_k(G, max_degree(G))", "def get_max_nodes(self):\r\n return self._max_nodes_per_rack", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on 
feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def preempted_node_count(self) -> int:\n return pulumi.get(self, \"preempted_node_count\")", "def __count_errors(node, testSet, res):\n training_results = __get_results(node) #Get a dictionary of labels and counts for the *training* data which made it to this node\n leaf_label = None #Initialize a label for this leaf\n majority_count = 0 #Initialize a variable to track the number of observations for the label with the most observations\n #Note that the steps below do not handle ties of the majority count in a nice way.\n for label, count in training_results.items(): #iterate through each pair of labels and counts from the training set\n if count > majority_count: #find the label with the highest count\n leaf_label = label #the label for the leaf is the label with the highest count\n majority_count = count #keep track of the count for the leaf_label\n \n wrong_labels = testSet[res].unique().tolist() #initialize wrong_labels to be all labels in the testSet\n if leaf_label in wrong_labels: #If the leaf label is in the list of labels for the part of the test set that got to this node\n wrong_labels.remove(leaf_label) #remove the leaf_label so that all which remains are incorrect labels\n \n wrong_count = 0 #Initialize a count of how many testSet observations will be classified incorrectly\n testCounts = testSet.groupby(res).size() #Get a series of the testSet labels and how many observations pertain to each label\n for label in wrong_labels: #Iterate over all the labels not equal to the leaf_label\n wrong_count += testCounts[label] #Sum up all of the observations with a label not equal to the leaf_label\n return wrong_count", "def _walk_recall(node, c):\n class_counts = Counter(node.true_classes).most_common()\n majority_classes = [c for c, occ in class_counts if occ == class_counts[0][1]] # there can be several majority classes in a node\n #majority_class = Counter(node.true_classes).most_common()[0][0]\n occ = node.true_classes.count(c)\n #print()\n #print(node)\n #print(\"\\t{}\".format(node.true_classes))\n #print(\"\\tmajority_classe: {}, occ({}): {}\".format(majority_classes, c, occ))\n if c in majority_classes and occ > self.temp_max_occ_class_in_cluster:\n self.temp_max_occ_class_in_cluster = occ\n # print(\"\\t updatetemp_max_occ_class_in_cluster: {}\".format(self.temp_max_occ_class_in_cluster))\n #if (occ > self.temp_max_occ_class_in_cluster\n # and (c in majority_classes or node.children == [])\n # ): # if we found a cluster with higher occ of documents for class c and the class c is the majority class in the cluster or leaf node\n # self.temp_max_occ_class_in_cluster = occ\n # print(\"\\tupdatetemp_max_occ_class_in_cluster: {}\".format(self.temp_max_occ_class_in_cluster))\n\n #if (occ > self.temp_max_occ_class_in_cluster and c in majority_classes):\n # 
self.temp_max_occ_class_in_cluster = occ\n # print(\"\\MAJ: tupdatetemp_max_occ_class_in_cluster: {}\".format(self.temp_max_occ_class_in_cluster))\n #elif (occ > self.temp_max_occ_class_in_cluster and node.children == []):\n # self.temp_max_occ_class_in_cluster = occ\n # print(\"\\tLEAF: updatetemp_max_occ_class_in_cluster: {}\".format(self.temp_max_occ_class_in_cluster))\n\n for child in node.children:\n _walk_recall(child, c)", "def n_trees(self):\n return len(self.data_kd)", "def getMinNumHiddenNodes(numTrainingSamples, inputDimension, desiredApproxError):\n return numTrainingSamples * desiredApproxError / inputDimension", "def data_flow_positive_node_count_max(self) -> Optional[int]:\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_positive_node_count_max or 0)", "def tooManyNodes(data):\n\t\t\t\tif maxNodes == 0:\n\t\t\t\t\treturn False\n\t\t\t\tif estimNumOfNodes(data) > maxNodes:\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\treturn False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes the list. If entries is given, this initializes the entries of the list. If memoized = True, any lazily evaluated entries are saved after their first evaluation.
def __init__(self,entries=None,memoized=False):
    if entries is not None:
        self.entries = entries[:]
    else:
        self.entries = []
    self.memoized = memoized
[ "def __init__(self, contents=()):\n self._data = [self._Item(k, v) for k, v in contents]\n if self._data:\n self._heapify()", "def __init__(self, entries=None):\n ## Mark the start timestamp:\n self._started_at = datetime.datetime.now()\n self._finished_at = None\n\n ## Save data slots:\n self._entries = entries or []\n\n ## Declare and initialize private fields to be used during computing:\n self._balance = 0\n self.inventory = deque()\n self.trace = []\n\n ## Start computing:\n self._compute()", "def __init__(self, *entries):\n if not entries:\n raise ValueError('At least one entry is required')\n self._entries = entries", "def __init__(self,contents=()):\n self._data = [self._Item(k,v) for k,v in contents] # empty by default\n if len(self._data) > 1:\n self._heapify()", "def __init__(self, contents=()):\n self. data = [ self._Item(k,v) for k,v in contents ] # empty by default\n if len(self._data) > 1:\n self._heapify()", "def _dev_set_entries(self, entries):\n\n self.entries = entries", "def __init__(self, entries=[]):\n\n for item in entries:\n self.append(_WebObject(item))", "def _fillcache(self, n):\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(next(self._iterable))\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)", "def _create_temp_cache(self, num_traced_tensors, num_signatures, graph):\n init_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE,\n dtype=dtypes.float32,\n shape=[num_signatures])\n self._temp_cache_var[graph] = [\n init_value for _ in range(num_traced_tensors)]", "def init_cache(self):\n save_logs(param.INITIAL_EPSILON, \"epsilon\")\n t = 0\n save_logs(t, \"time\")\n D = deque()\n save_logs(D, \"deque\")", "def __init__(self, ErpLedgerEntries=None, *args, **kw_args):\n self._ErpLedgerEntries = []\n self.ErpLedgerEntries = [] if ErpLedgerEntries is None else ErpLedgerEntries\n\n super(ErpLedger, self).__init__(*args, **kw_args)", "def _fillcache(self, n: int | None) -> None:\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(next(self._iterable)) # type: ignore\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)", "def _fillcache(self, n):\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(self.modifier(next(self._iterable)))\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)", "def __init__(self, *args):\n dict.__init__(self, args)\n for key in EntryListColumn.list():\n if key not in self.iterkeys():\n if key == EntryListColumn.ID:\n self[key] = 0\n else:\n self[key] = ''", "def init_cache(self):\n self.left_lane_cache = list()\n self.right_lane_cache = list()", "def __init__(self, items=None):\n # type: (Optional[List[object]]) -> _WeakList\n list.__init__(self, self._refs(items or []))", "def __init__(self, initial_length=2, resizing_factor=2):\n\n # Initialise underlying list, with no elements\n self.main_list = [None] * initial_length\n\n # Initialise variable to store number of elements inserted into\n # main_list, which will always be less than or equal to list length\n self.num_elements = 0\n\n self.resizing_factor = resizing_factor", "def __init__(self, entries=None, root=True):\n\n # The children of this node. 
Because ordered traversals are not\n # important, these are stored in a dictionary.\n # Each path is keyed with its first letter, and stores a tuple\n # of (path, child).\n self.children = {}\n\n # Whether or not this is the root node.\n self.root = root\n\n # Data entry ids associated with the prefix stored in the path to\n # this node.\n if not self.root and entries:\n self.entries = entries.copy()\n else:\n self.entries = set()", "def _fillcache(self, n: int | None) -> None:\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(self.modifier(next(self._iterable))) # type: ignore\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the template tag js_settings
def test_js_settings(mocker, rf):
    mocker.patch(
        "mitxpro.templatetags.js_interop.get_js_settings",
        return_value={"data": "value"},
    )

    request = rf.get("/")
    context = Context({"request": request})
    template = Template(("{% load js_interop %}" "{% js_settings %}"))
    rendered_template = template.render(context)
    assert (
        rendered_template
        == """<script type="text/javascript">
var SETTINGS = {"data": "value"};
</script>"""
    )
[ "def jssettings(self):\n self.update()\n return \"var %s = %s\" % (self.js_var_settings_name,\n json.dumps(self.settings))", "def test_jssettings(self):\n settings_fullpath = os.path.join(dirs.get_main_js_dir(), \"mediabrute-settings.js\")\n \n if os.path.isfile(settings_fullpath):\n os.unlink(settings_fullpath) \n self.assertFalse(os.path.isfile(settings_fullpath))\n \n call_command(\"mediabrute_jssettings\")\n self.assertTrue(os.path.isfile(settings_fullpath))\n \n os.unlink(settings_fullpath) \n self.assertFalse(os.path.isfile(settings_fullpath))\n \n custom_filename = \"heyo.js\"\n custom_fullpath = os.path.join(dirs.get_main_js_dir(), \"heyo.js\")\n \n if os.path.isfile(custom_fullpath):\n os.unlink(custom_fullpath) \n self.assertFalse(os.path.isfile(custom_fullpath))\n \n call_command(\"mediabrute_jssettings\", custom_filename)\n self.assertTrue(os.path.isfile(custom_fullpath))\n \n os.unlink(custom_fullpath) \n self.assertFalse(os.path.isfile(custom_fullpath))\n \n custom_filename = \"heyo\"\n custom_fullpath = os.path.join(dirs.get_main_js_dir(), \"heyo.js\")\n \n if os.path.isfile(custom_fullpath):\n os.unlink(custom_fullpath) \n self.assertFalse(os.path.isfile(custom_fullpath))\n \n call_command(\"mediabrute_jssettings\", custom_filename)\n self.assertTrue(os.path.isfile(custom_fullpath))\n \n os.unlink(custom_fullpath) \n self.assertFalse(os.path.isfile(custom_fullpath))", "def test_js_url(self):\n self.assertEquals(dirs.get_js_url(), \"%s%s\" % (settings.STATIC_URL, \"js\"))\n \n with self.settings(MEDIABRUTE_USE_STATIC=False):\n self.assertEquals(dirs.get_js_url(), \"%s%s\" % (settings.MEDIA_URL, \"js\"))\n \n with self.settings(MEDIABRUTE_JS_URL_PATH=\"heyo/yoyo\"):\n self.assertEquals(dirs.get_js_url(), \"%s%s\" % (settings.STATIC_URL, \"heyo/yoyo\"))\n \n with self.settings(MEDIABRUTE_USE_STATIC=False, MEDIABRUTE_JS_URL_PATH=\"heyo/yoyo\"):\n self.assertEquals(dirs.get_js_url(), \"%s%s\" % (settings.MEDIA_URL, \"heyo/yoyo\"))", "def module_use_template_javascript(self):\n return False", "def this_is_js():\n return False", "def angular_js_tests(request):\n return locals()", "def util_js():\n return render_template('js/util.js')", "def get_js_file(self):\n return 'placeholder'", "def htmllibmanager_debug_init_js(self) -> ConfigNodePropertyString:\n return self._htmllibmanager_debug_init_js", "def test_js_source(self):\n actual = is_js_source(self.view)\n\n self.assertTrue(actual)", "def test_compressed_js_tag(self):\n self._touch_files(['test.js', 'test.d41d8cd98f00.js'])\n\n pipeline_settings.JAVASCRIPT = {\n 'test': {\n 'source_filenames': [],\n 'output_filename': 'test.js',\n }\n }\n\n t = Template('{% load compressed %}'\n '{% compressed_js \"test\" %}')\n\n self.assertHTMLEqual(\n t.render(Context({'test': 'test'})),\n '<script type=\"text/javascript\" src=\"/test.d41d8cd98f00.js\"'\n ' charset=\"utf-8\"></script>')", "def is_javascript(view):\n # Check the file extension\n name = view.file_name()\n extensions = set(settings.get('extensions'))\n if name and os.path.splitext(name)[1][1:] in extensions:\n return True\n # If it has no name (?) 
or it's not a JS, check the syntax\n syntax = view.settings().get(\"syntax\")\n if syntax and \"javascript\" in syntax.split(\"/\")[-1].lower():\n return True\n return False", "def match_js_vars(\n self,\n technology: Technology,\n js_vars,\n ):\n return self.match_pairs(technology, \"js\", js_vars)", "def test_never_load_jquery_setting(self):\n with patch_settings(LIVETRANSLATION_JQUERY=None):\n result = find_jquery_link(NO_JQUERY)\n self.assertEqual(result, True)", "def get_gizmo_js():\n return (\"test_extension/gizmos/custom_select_input/custom_select_input.js\",)", "def test_components_flagged():\n\n js = \"\"\"\n var x = Components.services.foo.bar;\n \"\"\"\n assert not _js_test(js).failed()\n assert _js_test(js, jetpack=True).failed()", "def test_default_url(self):\n with patch_settings(LIVETRANSLATION_JQUERY=None):\n pattern, url = process_jquery_setting()\n self.assertEqual(\n url,\n 'http://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js')", "def test_get_settings(self):\n assert get_settings() == TEST_SETTINGS[\"settings\"]", "def test_media_includes_jsi18n(self):\n form = self.form_class(choices={'replacements': self.model.objects.all()})\n self.assertIn(reverse('admin:jsi18n'), form.media._js)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function computes the fundamental matrix by computing the SVD of Ax = 0; 8-point algorithm
def computeFundamentalMatrix(pts1, pts2):
    A = np.empty((8, 9))
    for i in range(len(pts1)-1):
        x1 = pts1[i][0]
        x2 = pts2[i][0]
        y1 = pts1[i][1]
        y2 = pts2[i][1]
        A[i] = np.array([x1 * x2, x2 * y1, x2,
                         y2 * x1, y2 * y1, y2,
                         x1, y1, 1])
    # Compute F matrix by evaluating SVD
    U, S, V = np.linalg.svd(A)
    F = V[-1].reshape(3, 3)

    # Constrain the F matrix to rank 2
    U1, S1, V1 = np.linalg.svd(F)
    # print('Old S', S)
    # S[2] = 0
    S2 = np.array([[S1[0], 0, 0], [0, S1[1], 0], [0, 0, 0]])
    # print('New S', S)
    F = np.dot(np.dot(U1, S2), V1)

    return F
[ "def svd0(A):\n M,N = A.shape\n if M>N: return sla.svd(A, full_matrices=True)\n else: return sla.svd(A, full_matrices=False)", "def _nullSpaceBasis(A):\n if A.any():\n U, s, Vh = la.svd(A)\n vecs = np.array([])\n toAppend = A.shape[1] - s.size\n s = np.append(s, zeros((1, toAppend)))\n for i in range(0, s.size):\n if s[i] == 0:\n vecs = Vh[-toAppend:, :]\n if vecs.size == 0:\n vecs = zeros((1, A.shape[1]))\n return np.mat(vecs)\n else:\n return zeros((0, 0))", "def invert_L1_svd():", "def svd(A, chi = 0, full_matrices = False, compute_uv = True):\n # Try using the normal svd\n try:\n #if A.dtype != np.complex128:\n # return svd_dgesvd.svd_dgesvd(A, full_matrices = full_matrices, compute_uv = compute_uv)\n #else:\n # return svd_zgesvd.svd_zgesvd(A, full_matrices = full_matrices, compute_uv = compute_uv)\n return sp.linalg.svd(A, compute_uv = compute_uv, full_matrices = full_matrices, overwrite_a = True)\n\n # Do manual if it failed\n except Exception as e:\n print(\"Canonical SVD failed: \", e )\n\n # Try making it square\n try:\n print(\"Trying to SVD the square version\")\n shape = A.shape\n dim = np.max( A.shape )\n squareA = np.zeros( (dim, dim), dtype=np.complex128 )\n squareA[:A.shape[0], :A.shape[1]] = A\n return sp.linalg.svd(squareA[:shape[0], :shape[1]])\n\n except:\n # Try sparse\n try:\n print(\"\\t \\t Resorting to MANUAL SVD\", \"red\")\n\n # Compute AA\\dagger\n AAt = np.dot(A, np.conjugate(np.transpose(A)))\n # Make sure symmetric/hermitian\n AAt = 0.5*(AAt + np.conjugate(np.transpose(AAt)))\n\n # Diagonalize and sort\n S1,U = np.linalg.eigh(AAt)\n idx = S1.argsort()[::-1] # Sort descending\n S1 = np.sqrt(np.abs(S1[idx])); U = U[:,idx]\n\n # Compute A\\daggerA\n AtA = np.dot(np.conjugate(np.transpose(A)), A)\n # Make sure symmetric/hermitian\n AtA = 0.5*(AtA + np.conjugate(np.transpose(AtA)))\n\n # Diagonalize and sort\n S2,V = np.linalg.eigh(AtA)\n idx = S2.argsort()[::-1] # Sort descending\n S2 = np.sqrt(np.abs(S2[idx])); V = V[:,idx]\n\n # Return SVD\n return U, 0.5*(S1+S2), np.conjugate(np.transpose(V))\n\n except:\n print(\"\\t Trying sparse\", \"yellow\")\n if chi == 0:\n chi = int(A.shape[0]/2)\n U, S, V = sp.sparse.linalg.svds(squareA, k=chi)#A.shape[0])\n S = np.real(S); idx = S.argsort()[::-1] # Sort descending\n return U[:,idx], S, V[idx,:]", "def PseudoInverse(A:np.matrix):\r\n U,S,V = ComputeSVD(A)\r\n #invert S\r\n S_inv = S.H\r\n for i in range(S_inv.shape[0]):\r\n S_inv[i,i] = 1/S_inv[i,i]\r\n return np.asmatrix(V*S_inv*U.H)", "def ComputeSVD(A:np.matrix):\r\n k = np.linalg.matrix_rank(A)\r\n B = A.transpose()@A\r\n w, V = eig(B)\r\n\r\n #Eigenwerte und Vektoren neu sortieren\r\n idx = np.argsort(w)\r\n w = w[idx]\r\n V = V[:,idx]\r\n\r\n #S berechnen\r\n S = np.zeros(A.shape)\r\n for i in range(S.shape[0]):\r\n for j in range(S.shape[1]):\r\n if i==j:\r\n S[i][j] = sqrt(w[i])\r\n\r\n #U berechnen\r\n U = np.zeros((k+1,k+1))\r\n for i in range(k):\r\n U[:,i] = (1/S[i][i] * A * V[:,i]).flat\r\n U = np.linalg.qr(U)[0]\r\n return np.asmatrix(U),np.asmatrix(S),np.asmatrix(V)", "def visualize_svd():", "def fundamental_matrix(image1, image2):\n [x1, y1, x2, y2] = automatic_point_correspondences(image1, image2, returntype='vector')\n\n # get the number of data points\n num_points = len(x1)\n o = np.ones((num_points, 1))\n\n x1y1, t1 = normalize_points(x1, y1)\n x2y2, t2 = normalize_points(x2, y2)\n\n x1 = x2y2[:, 0]\n y1 = x2y2[:, 1]\n x2 = x1y1[:, 0]\n y2 = x1y1[:, 1]\n\n # compute A - the equation matrix\n mul = lambda a, b : np.multiply(a, b)\n a = 
np.concatenate((mul(x1, x2), mul(x1, y2), x1, mul(y1, x2), mul(y1, y2), y1, x2, y2,\n o[:, 0])).reshape((num_points, -1), order='F')\n\n # the singular value decomposition SVD\n U, D, V = np.linalg.svd(a)\n\n # extract column of the smallest singular value - the last column\n smallest = V[8, :].T\n F = smallest.reshape(3, 3)\n\n # enforce singularity constraint (must be singular and of rank 2)\n U, D, V = np.linalg.svd(F)\n r = D[0]\n s = D[1]\n\n F = np.dot(U, np.diag([r, s, 0])).dot(V)\n F = t2.T.dot(F).dot(t1)\n\n return F", "def denoise_matrix(X):\n n_vols, n_voxels = X.shape\n min_mn = np.min(X.shape)\n X_m = np.mean(X, axis=1, keepdims=True) # MDD added Jan 2018; mean added back to signal below\n X = X - X_m\n # [U,S,V] = svdecon(X); MDD replaced with MATLAB svd vvv 3Nov2017\n # U, S, V = svd(X, 'econ')\n # NOTE: full matrices=False should be same as economy-size SVD\n U, S, V = np.linalg.svd(X, full_matrices=True)\n S = np.diag(S) # make S array into diagonal matrix\n\n lambda_ = (np.diag(S) ** 2) / n_voxels\n\n scaling = (n_vols - np.arange(min_mn)) / n_voxels\n scaling[scaling < 1] = 1\n for n_comps in range(min_mn):\n sigma2 = (lambda_[n_comps] - lambda_[min_mn - 1]) / (4 * np.sqrt((n_vols - n_comps) / n_voxels))\n p_test = np.sum(lambda_[n_comps:min_mn]) / scaling[n_comps] >= (min_mn - n_comps) * sigma2\n if p_test:\n continue\n\n sigma2 = np.sum(lambda_[n_comps:min_mn]) / (min_mn - n_comps) / scaling[n_comps]\n new_X = np.dot(np.dot(U[:, :n_comps], S[:n_comps, :n_comps]), V[:, :n_comps].T) + X_m\n return new_X, sigma2, n_comps", "def closest_fundamental_matrix(F):\r\n # HZ Ch. 11.1.1 (p.280) \r\n U, S, VT = np.linalg.svd(F)\r\n S = np.diag(S)\r\n S[-1, -1] = 0\r\n \r\n F = U@S@VT\r\n return F", "def singular_value_soft_threshold(X, lam):\n U, s, V = np.linalg.svd(X, full_matrices=False)\n s = soft_threshold(s, lam)\n idx = np.nonzero(s)[0]\n #print len(idx)\n U = U[:, idx]\n V = np.dot(np.diag(s[idx]), V[idx, :]).T\n return U, V", "def singular_decomp(A):\n # Initialization\n n, m = A.shape\n U = np.zeros((n, m), dtype='float64')\n\n # Diagonalization of A^T * A\n rot, e, V = eigen.diag(np.dot(np.transpose(A), A))\n\n # Calculate U\n U = np.dot(A, V)\n for i in range(m):\n e[i] = np.sqrt(e[i])\n U[:, i] /= e[i]\n\n return U, e, V", "def svdvals(x):\n return np.linalg.svd(x, full_matrices=False, compute_uv=False)", "def calculaSVD(self,datos):\n U, D2, Vt = np.linalg.svd(datos, full_matrices=False) \n return U, D2, Vt", "def update_svd_naive(self):\n print(\"Updating truncated SVD using naive method.\")\n start = time.perf_counter()\n self.Uk, self.sigmak, self.VHk = naive_update(\n self.A, self.Uk, self.sigmak, self.VHk, self.update_matrix\n )\n self.runtime += time.perf_counter() - start\n return self.Uk, self.sigmak, self.VHk", "def invert_L2_svd():\n print('Starting SVD inversion')\n\n pix2avevel = np.nans(ts.size)\n pix2cumdef = np.nans(ts.size)\n\n for i in np.range(ts.WIDTH):\n print('column {0}'.format(i))\n pix2date = np.zeros(ts.LENGTH, ts.DATES)\n pix2model = np.zeros(ts.LENGTH, ts.DT)\n colPix = np.zeros(ts.LENGTH, ts.IGRAMS)\n\n # concatenate same column from each interferogram into an array\n for j, ig in enumerate(ts):\n column = np.fromfile(ig.NAME, dtype=float16, size=ts.LENGTH)\n colPix[:,j] = column\n\n pix2igram = np.isfinite(colPix)\n coverage = np.fromfile(coverage) #laod DQmap\n iterPixels = np.where(coverage >= ts.igthresh)\n\n #preform pixel-by-pixel inversion\n for k, pixel in enumerate(iterPixels):\n indIG = find(pix2igram[pixel,:])==1\n indDate = 
unique(ts.timeIndex[indIG,:])\n dtVector = np.diff(ts.Serial(indDate)) / 365.242 #convert years to days\n\n # Set up B matrix\n B = np.zeros(len(indIG), len(dtVector))\n\n print('Done')", "def truncated_svd(A,k=None):", "def smith_nf(matrix):\n\n A=np.copy(matrix)\n if (np.around(A) != A).any():\n raise Exception('This function requires integer input.')\n\n # This looks much like an SVD algorithm that first bidiagonalizes\n # A by Givens rotations and then chases zeros, except for\n # the construction of the 2 by 2 elementary transformation.\n\n m, n = A.shape\n\n S = A\n U = np.eye(m)\n V = np.eye(n)\n\n # Bidiagonalize S with elementary Hermite transforms.\n for j in range(min(m, n)):\n # Zero column j below the diagonal.\n for i in range(j+1, m):\n if S[i, j]:\n # Construct an elementary Hermite transformation E\n # to zero S(i,j) by combining rows i and j.\n E = ehermite(S[j, j], S[i, j])\n # Apply the transform to S and U.\n S[[j, i], :] = np.dot(E, S[[j, i], :])\n # U[:, [j, i]] = U[:, [j, i]] / E\n U[:, [j, i]] = left_matrix_division(U[:, [j, i]], E) # solving the left matrix division\n\n # % Zero row j after the superdiagonal.\n for i in range(j+2, n):\n if S[j, i]:\n # Construct an elementary Hermite transformation E\n # to zero S(j,i) by combining columns j+1 and i.\n E = ehermite(S[j, j+1], S[j, i])\n # Apply the transform to S and V.\n S[:, [j+1, i]] = np.dot(S[:, [j+1, i]], E.T)\n # V[:, [j+1, i]] = V[:, [j+1, i]] / E\n V[:, [j+1, i]] = left_matrix_division(V[:, [j+1, i]], E) # solving the left matrix division\n\n # Now S is upper bidiagonal.\n # Chase the superdiagonal nonzeros away.\n\n D = np.diag(S, 1)\n while any(D):\n b = min(np.where(D))[0]\n # Start chasing bulge at first nonzero superdiagonal element.\n # To guarantee reduction in S(b,b), first make S(b,b) positive\n # and make S(b,b+1) nonnegative and less than S(b,b).\n if S[b, b] < 0:\n S[b, :] = -S[b, :]\n U[:, b] = -U[:, b]\n\n q = np.floor(S[b, b+1] / S[b, b])\n E = np.array([[1, 0], [-q, 1]])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E) # solving the left matrix division\n\n if S[b, b+1]:\n # Zero the first nonzero superdiagonal element\n # using columns b and b+1, to start the bulge at S(b+1,b).\n E = ehermite(S[b, b], S[b, b+1])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E)\n\n for j in range(min(m, n)):\n if j+1 < m:\n # Zero S(j+1,j) using rows j and j+1.\n E = ehermite(S[j, j], S[j+1, j])\n S[[j, j+1], :] = np.dot(E, S[[j, j+1], :])\n # U[:, [j, j+1]] = U[:, [j, j+1]] / E\n U[:, [j, j+1]] = left_matrix_division(U[:, [j, j+1]], E)\n if j+2 < n:\n # Zero S(j,j+2) using columns j+1 and j+2.\n E = ehermite(S[j, j+1], S[j, j+2])\n S[:, [j+1, j+2]] = np.dot(S[:, [j+1, j+2]], E.T)\n # V[:, [j+1, j+2]] = V[:, [j+1, j+2]] / E\n V[:, [j+1, j+2]] = left_matrix_division(V[:, [j+1, j+2]], E)\n D = np.diag(S, 1)\n\n # Now S is diagonal. 
Make it nonnegative.\n\n for j in range(min(m, n)):\n if S[j, j] < 0:\n S[j, :] = -S[j, :]\n U[:, j] = -U[:, j]\n\n # Squeeze factors to lower right to enforce divisibility condition.\n\n for i in range(min(m, n)):\n for j in range(i+1, min(m, n)):\n # Replace S(i,i), S(j,j) by their gcd and lcm respectively.\n a = S[i, i]\n b = S[j, j]\n [c, d, g] = extgcd(a, b)\n E = np.array([[1, d], [-b/g, a*c/g]])\n F = np.array([[c, 1], [-b*d/g, a/g]])\n S[np.ix_([i, j], [i, j])] = np.dot(np.dot(E, S[:, [i, j]][[i, j], :]), F.T)\n # S[i, i] = tmp_arr[0, 0]\n # S[i, j] = tmp_arr[0, 1]\n # S[j, i] = tmp_arr[1, 0]\n # S[j, j] = tmp_arr[1, 1]\n U[:, [i, j]] = left_matrix_division(U[:, [i, j]], E)\n V[:, [i, j]] = left_matrix_division(V[:, [i, j]], F)\n\n U = np.around(U)\n V = np.around(V)\n return U, S, V", "def calc_sifov2v_matrix():\n return SIFOV2V_DEFAULT" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Leverages the 8-point algorithm and implements the RANSAC algorithm to find the inliers and the best fundamental matrix
def getInlierRANSAC(pts1, pts2):
    # global finalFundamentalMatrix
    iterations = 50
    threshold = 0.01
    max_count = 0
    n = len(pts1)
    finalFundamentalMatrix = np.zeros((3, 3))
    for i in range(iterations):
        count = 0
        idx = random.sample(range(n - 1), 8)
        left_pts = pts1[idx]
        right_pts = pts2[idx]
        F = computeFundamentalMatrix(left_pts, right_pts)
        left_feature_inlier = []
        right_feature_inlier = []
        # print("Sample index: ", len(idx))
        for j in range(0, n):
            homogeneous_right = np.array([pts2[j, 0], pts2[j, 1], 1])
            homogeneous_left = np.array([pts1[j, 0], pts1[j, 1], 1])
            fit = np.dot(homogeneous_right.T, np.dot(F, homogeneous_left))
            # print("Fit for iteration ", i, " ", np.abs(fit))
            if np.abs(fit) < threshold:
                left_feature_inlier.append(pts1[j])
                right_feature_inlier.append(pts2[j])
                count = count + 1
        # print('Inlier count', count)
        inlier_Left = np.array(left_feature_inlier)
        inlier_Right = np.array(right_feature_inlier)
        if count > max_count:
            max_count = count
            finalFundamentalMatrix = F
            final_inlier_Left = inlier_Left
            final_inlier_Right = inlier_Right
    return finalFundamentalMatrix, final_inlier_Left, final_inlier_Right
[ "def evaluation(result, reference, verbose=0):\n # get column signals\n Ar = reference[0].copy()\n Sr = reference[1].T.copy()\n noise = reference[2].T.copy()\n Ae = result[0]\n Se = result[1].T\n r = Sr.shape[1]\n # set nan values to 0\n Ar[np.isnan(Ar)] = 0\n Sr[np.isnan(Sr)] = 0\n Ae[np.isnan(Ae)] = 0\n Se[np.isnan(Se)] = 0\n # precomputation\n SDR_A = compute_sdr_matrix(Ar, Ae)\n # order computation\n costMatrix = -SDR_A\n hungarian = munkres.Munkres()\n ind_list = hungarian.compute(costMatrix.tolist())\n indices = np.zeros(r, dtype=int)\n for k in range(0, r):\n indices[k] = ind_list[k][1]\n # reorder the factorization\n Ae = Ae[:, indices]\n Se = Se[:, indices]\n # get reordered results\n# Ae = result[0].copy()\n# Ae[np.isnan(Ae)] = 0\n# Se = result[1].T.copy()\n# Se[np.isnan(Se)] = 0\n# result_ordered = (Ae, Se)\n # compute criteria\n delta=abs(abs(linalg.inv(Ae.T.dot(Ae)).dot(Ae.T).dot(Ar)) - np.eye(r)).sum() / (r*r)\n criteria = {}\n # on S\n output = decomposition_criteria(Se, Sr, noise)\n decomposition = output[1]\n criteria['SDR_S'] = output[0]['SDR']\n criteria['SIR_S'] = output[0]['SIR']\n criteria['SNR_S'] = output[0]['SNR']\n criteria['SAR_S'] = output[0]['SAR']\n # on A\n# output = decomposition_criteria(Ae, Ar, reference['noise'])\n# criteria['SDR_A'] = output[0]['SDR']\n# criteria['SIR_A'] = output[0]['SIR']\n# criteria['SNR_A'] = output[0]['SNR']\n# criteria['SAR_A'] = output[0]['SAR']\n# if verbose != 0:\n# print(\"Results of the reconstruction:\")\n# print(\"Decomposition criteria on S:\")\n# print(\" - Mean SDR: \" + str(criteria['SDR_S']) + \".\")\n# print(\" - Mean SIR: \" + str(criteria['SIR_S']) + \".\")\n# print(\" - Mean SNR: \" + str(criteria['SNR_S']) + \".\")\n# print(\" - Mean SAR: \" + str(criteria['SAR_S']) + \".\")\n# print(\"Decomposition criteria on A:\")\n# print(\" - Mean SDR: \" + str(criteria['SDR_A']) + \".\")\n# print(\" - Mean SIR: \" + str(criteria['SIR_A']) + \".\")\n# print(\" - Mean SNR: \" + str(criteria['SNR_A']) + \".\")\n# print(\" - Mean SAR: \" + str(criteria['SAR_A']) + \".\")\n return (criteria, decomposition, delta,Ae,Se.T)", "def CalcFiniteHorizonOptimalInput(A,B,Q,R,P,N,x0):\n # print(\"CalcFiniteHorizonOptimalInput start\")\n\n # data check\n if A.shape[1] is not x0.shape[0]:\n print(\"Data Error: A's col == x0's row\")\n print(\"A shape:\")\n print(A.shape)\n print(\"x0 shape:\")\n print(x0.shape)\n return None\n elif B.shape[1] is not R.shape[1]:\n print(\"Data Error: B's col == R's row\")\n print(\"B shape:\")\n print(B.shape)\n print(\"R's shape:\")\n print(R.shape)\n return None\n\n sx=np.eye(A.ndim)\n su=np.zeros((A.ndim,B.shape[1]*N))\n\n #calc sx,su\n for i in range(N):\n #generate sx\n An=np.linalg.matrix_power(A, i+1)\n sx=np.r_[sx,An]\n\n #generate su\n tmp=None\n for ii in range(i+1):\n tm=np.linalg.matrix_power(A, ii)*B\n if tmp is None: \n tmp=tm\n else:\n tmp =np.c_[tm,tmp]\n\n for ii in np.arange(i,N-1):\n tm=np.zeros(B.shape)\n if tmp is None: \n tmp=tm\n else:\n tmp =np.c_[tmp,tm]\n\n su=np.r_[su,tmp]\n\n tm1=np.eye(N+1)\n tm1[N,N]=0\n tm2=np.zeros((N+1,N+1))\n tm2[N,N]=1\n Qbar=np.kron(tm1,Q)+np.kron(tm2,P)\n Rbar=np.kron(np.eye(N),R)\n\n uopt=-(su.T*Qbar*su+Rbar).I*su.T*Qbar*sx*x0\n # print(uBa)\n costBa=x0.T*(sx.T*Qbar*sx-sx.T*Qbar*su*(su.T*Qbar*su+Rbar).I*su.T*Qbar*sx)*x0\n # print(costBa)\n\n return uopt", "def test_riemannian_mst_computation():\n n_neighbors = 2\n eps = 1e-1\n tolerance = 1e-6\n\n cloud = np.array([\n [0,0,0],\n [0,1,0],\n [0,1,1],\n [0,1,2]\n ])\n normals = np.array([\n [1,0,0],\n 
[0,1,0],\n [1,0,0],\n [0,1,0]\n ])\n\n true_symmetrized_emst = np.array([\n [0,1,0,0],\n [1,0,1,0],\n [0,1,0,1],\n [0,0,1,0]\n ])\n true_kneighbors_graph = np.array([\n [0,1,1,0],\n [1,0,1,0],\n [0,1,0,1],\n [0,1,1,0]\n ])\n true_symmetrized_kneighbors_graph = np.array([\n [0,1,1,0],\n [1,0,1,1],\n [1,1,0,1],\n [0,1,1,0]\n ])\n true_riemannian_graph = np.array([\n [0,1+eps,eps,0],\n [1+eps,0,1+eps,eps],\n [eps,1+eps,0,1+eps],\n [0,eps,1+eps,0]\n ])\n true_possible_asymetric_rmsts = [\n np.array([\n [0,0,eps,0],\n [0,0,0,eps],\n [0,0,0,1+eps],\n [0,0,0,0],\n ]),\n np.array([\n [0,0,eps,0],\n [0,0,1+eps,eps],\n [0,0,0,0],\n [0,0,0,0],\n ]),\n np.array([\n [0,1+eps,eps,0],\n [0,0,0,eps],\n [0,0,0,0],\n [0,0,0,0],\n ]),\n ]\n true_possible_rmsts = [rmst + rmst.T for rmst in true_possible_asymetric_rmsts]\n\n actual_rmst = compute_riemannian_mst(cloud=cloud,normals=normals,n_neighbors=n_neighbors,eps=eps)\n is_possible = False\n for possible_rmst in true_possible_rmsts:\n is_possible = is_possible or (np.abs(possible_rmst - actual_rmst) < tolerance).all()\n if is_possible:\n return True\n else:\n print('---- Possible Riemannian MSTs:')\n for rmst in true_possible_rmsts:\n print(rmst)\n print('---- Actual Riemannian MST:')\n print(actual_rmst.toarray())\n return False", "def create_cands(data):\n\n best = np.zeros(data.dim+1)\n best[0:data.dim] = data.xbest\n best[data.dim] = 1-np.sum(data.xbest)\n\n # Ncand times the best value\n cp_e = np.kron(np.ones((data.Ncand, 1)), np.asmatrix(best))\n # This generates random perturbations\n # need dim+1 to account for the \"missing\" value\n r = np.random.rand(data.Ncand, data.dim+1)\n a = r < data.pertP\n idx = np.where(np.sum(a, axis=1) == 0)\n for ii in range(len(idx[0])):\n f = np.random.permutation(data.dim+1)\n a[idx[0][ii], f[0]] = True\n randnums = np.random.randn(data.Ncand, data.dim+1)\n randnums[a == False] = 0\n pv = randnums*data.sigma_stdev\n # Create new points by adding random fluctucations to best point\n new_pts = cp_e+pv\n\n # Iterative, column wise procedure to force the randomly\n # sampled point to be in [0,1]\n for ii in range(data.dim+1):\n vec_ii = new_pts[:, ii]\n adj_l = np.where(vec_ii < data.xlow)\n vec_ii[adj_l[0]] = data.xlow + (data.xlow - vec_ii[adj_l[0]])\n adj_u = np.where(vec_ii > data.xup)\n vec_ii[adj_u[0]] = data.xup - (vec_ii[adj_u[0]]-data.xup)\n stillout_u = np.where(vec_ii > data.xup)\n vec_ii[stillout_u[0]] = data.xlow\n stillout_l = np.where(vec_ii < data.xlow)\n vec_ii[stillout_l[0]] = data.xup\n new_pts[:, ii] = copy.copy(vec_ii)\n\n new_pts = new_pts / np.sum(new_pts, axis=1)\n\n cp_e = copy.copy(new_pts)\n rand_pts = np.asmatrix(np.random.uniform(0, 1, [data.Ncand, data.dim + 1]))\n cp_r = rand_pts/np.sum(rand_pts, axis=1)\n\n CandPoint = np.concatenate((cp_e, cp_r), axis=0)\n # return only data.dim candidate points\n CandPoint_out = CandPoint[:, 0:data.dim]\n\n return CandPoint_out", "def ransac(keypoints1, keypoints2, matches, sampling_ratio=0.5, n_iters=500, threshold=20):\n N = matches.shape[0]\n n_samples = int(N * sampling_ratio)\n \n # Please note that coordinates are in the format (y, x)\n matched1 = pad(keypoints1[matches[:,0]])\n matched2 = pad(keypoints2[matches[:,1]])\n matched1_unpad = keypoints1[matches[:,0]]\n matched2_unpad = keypoints2[matches[:,1]]\n\n max_inliers = np.zeros(N)\n n_inliers = 0\n\n # RANSAC iteration start\n ### YOUR CODE HERE\n \n matched1[:, [0,1]] = matched1[:, [1,0]]\n matched2[:, [0,1]] = matched2[:, [1,0]]\n matched1_unpad[:, [0,1]] = matched1_unpad[:, [1,0]]\n 
matched2_unpad[:, [0,1]] = matched2_unpad[:, [1,0]]\n \n for i in range(n_iters):\n indices = np.random.choice(range(N), size=n_samples)\n src = matched1_unpad[indices]\n dst = matched2_unpad[indices]\n \n H = compute_homography(src, dst)\n matched1_trans = transform_homography(matched1_unpad, H)\n \n dist = np.sum((matched1_trans - matched2_unpad) ** 2, axis=1)\n if np.sum(dist < threshold) > n_inliers:\n max_inliers = dist < threshold\n n_inliers = np.sum(dist < threshold)\n \n src = matched1_unpad[max_inliers]\n dst = matched2_unpad[max_inliers]\n H = compute_homography(src, dst)\n \n matched1_trans = transform_homography(matched1_unpad, H)\n dist = np.sum((matched1_trans - matched2_unpad) ** 2, axis=1)\n max_inliers = dist < threshold\n \n ### END YOUR CODE\n return H, matches[max_inliers]", "def ransac(cloud_s, cloud_t, \n depth_s, depth_t,\n A_prev, b_prev,\n n_iter, n_inlier_cutoff, d_cutoff):\n import random\n n_s = len(cloud_s)\n n_t = len(cloud_t)\n n_inliers = [0] * n_iter\n# Initialization\n A_init = A_prev\n b_init = b_prev\n pred_t = A_init.dot(cloud_s.T).T + b_init\n# TODO: should really be looking at the distance in the projected space!!\n inliers = [np.linalg.norm(pred_t[i,] - cloud_t[i,]) < d_cutoff for i in range(n_s)]\n max_inliers = sum(inliers)\n print(\"Have \" + str(n_s) + \" features that could be inliers\")\n print(\"Starting with \" + str(max_inliers) + \" inliers\")\n for iter in range(n_iter):\n assert n_s == n_t, \"clouds not of equal size in ransac()\"\n # TODO: replace this random choice with 3 corresponding feature descriptors\n points_inds = random.sample(range(n_s), 3)\n x_vals = np.array([cloud_s[i] for i in points_inds])\n y_vals = np.array([cloud_t[i] for i in points_inds])\n\n # Using Horn 1987, Closed-form solution of absolute orientation\n # using unit quaternions.\n A_init_tmp, b_init_tmp = horn_adjust(x_vals, y_vals)\n\n # TODO: find inliers to the transformation T\n pred_t = A_init_tmp.dot(cloud_s.T).T + b_init_tmp\n# TODO: should really be looking at the distance in the projected space!!\n inliers = [np.linalg.norm(pred_t[i,] - cloud_t[i,]) < d_cutoff for i in range(n_s)]\n n_inliers = sum(inliers)\n\n # TODO: do we want to refit on the inliers?\n if n_inliers > max_inliers:\n A_init = A_init_tmp\n b_init = b_init_tmp\n max_inliers = n_inliers\n print(\"Adjusting A and b again!\")\n print(A_init)\n print(b_init)\n\n # TODO: are we using n_inlier_cutoff in this way? Check the paper!\n if max_inliers < n_inlier_cutoff:\n raise Exception('insufficient inliers! Want ' + str(n_inlier_cutoff) +\n ' but got ' + str(max_inliers))\n #max_index = n_inliers.index(max(n_inliers)) \n # Compute the best transformation T_star\n# TODO: actually optimize over the depth field!! 
using spatial.KDTree and spatial.KDTree.query\n# Need to shift depth1XYZ by our initial transformation first\n depth1XYZ = A_init.dot(depth_s.T).T + b_init\n depth2XYZ = depth_t\n tree = spatial.KDTree(depth2XYZ)\n tree_q = tree.query(depth1XYZ)\n# Keep only matches within the cutoff.\n# depth_pair_inds has indeces for depth1XYZ and depth2XYZ\n cutoff = 0.01\n depth_pair_inds = [(i,tree_q[1][i]) for i in range(len(tree_q[0]))\n if tree_q[0][i] < cutoff]\n #depth_cloud_s = np.array([depth1XYZ[k[0]] for k in depth_pair_inds])\n depth_cloud_s = np.array([depth_s[k[0]] for k in depth_pair_inds])\n depth_cloud_t = np.array([depth2XYZ[k[1]] for k in depth_pair_inds])\n\n# A_d = list(range(n_s))\n# A, b = find_argmin_T(cloud_s, cloud_t, A_d,\n# A_init, b_init)\n A_d = list(range(depth_cloud_s.shape[0]))\n A, b = find_argmin_T(depth_cloud_s, depth_cloud_t, A_d,\n A_init, b_init)\n print(\"A_init value:\")\n print(A_init)\n print(\"b_init value:\")\n print(b_init)\n \n print(\"Returning A, b\")\n print(\"A value:\")\n print(A)\n print(\"b value:\")\n print(b)\n print(\"inliers:\")\n print(max_inliers)\n return(A, b)", "def prepare_exact_solution(self):", "def qr(m, n, a, lda, pivot):\n\n # region : Initialize parameters\n # ----------------------------------------\n global p05, eps_machine, ipvt, rdiag, acnorm, wa\n\n if ipvt is None or ipvt.size is not n:\n ipvt = np.zeros(n, np.int32)\n if rdiag is None or rdiag.size is not n:\n rdiag = np.zeros(n, data_type)\n if acnorm is None or acnorm.size is not n:\n acnorm = np.zeros(n, data_type)\n if wa is None or wa.size is not n:\n wa = np.zeros(n, data_type)\n\n # ----------------------------------------\n # endregion : Initialize parameters\n\n # > compute the initial column norms and initialize several arrays\n for j in range(n):\n acnorm[j] = enorm(a[lda * j:lda * (j + 1)])\n rdiag[j] = acnorm[j]\n wa[j] = rdiag[j]\n if pivot:\n ipvt[j] = j + 1\n\n # > reduce a to r with householder transformations\n min_mn = min(m, n)\n for j in range(min_mn):\n # > if pivot\n # --------------------------------------------------------\n if pivot:\n # >> bring the column of largest norm\n # into the pivot position\n k_max = j\n for k in range(j, n):\n if rdiag[k] > rdiag[k_max]:\n k_max = k\n # >> switch\n if k_max is not j:\n for i in range(m): # traverse rows\n # >>> switch\n temp = a[i + j * lda]\n a[i + j * lda] = a[i + k_max * lda]\n a[i + k_max * lda] = temp\n # >>> overwrite, acnorm[k_max] still hold\n rdiag[k_max] = rdiag[j]\n wa[k_max] = wa[j]\n # >>> switch\n k = ipvt[j]\n ipvt[j] = ipvt[k_max]\n ipvt[k_max] = k\n\n # > compute the householder transformation to reduce the\n # j-th column of a to a multiple of the j-th unit vector\n # ------------------------\n # >> normalize\n # :: v = x - ||x||_2 * e_1\n # :: ajnorm = ||x||_2\n ajnorm = enorm(a[lda * j + j:lda * (j + 1)])\n if ajnorm != 0.0:\n if a[j + j * lda] < 0.0:\n # :: prepare to keep a[i + j * lda] positive\n ajnorm = -ajnorm\n # :: x = sgn(x_1) * x / ||x||_2\n for i in range(j, m):\n a[i + j * lda] /= ajnorm\n # :: a[j + j * lda] temporarily stores v[0]\n # :: one number being subtracted from another close number\n # has been avoided\n a[j + j * lda] += 1.0\n\n # > apply the transformation to the remaining columns and\n # update the norms\n # t\n # :: A[i][k] -= beta * v[i] * w[k], w = A * v\n # t\n # :: beta = 1 / v[0], can be proved easily\n # :: w[k] = A[k-th column] * v\n jp1 = j + 1 # j plus 1\n if n > jp1:\n for k in range(jp1, n): # traverse columns\n sum = data_type(0.0) # this is w[j]\n 
for i in range(j, m): # traverse rows\n # v[i] A[i][k-th column]\n sum += a[i + j * lda] * a[i + k * lda]\n # :: beta * w[k]\n temp = sum / a[j + j * lda]\n for i in range(j, m):\n # :: a[i][k] -= beta * w[k] * v[i]\n a[i + k * lda] -= temp * a[i + j * lda]\n\n # :: rdiag stores information used to pivot\n # >> update rdiag to ensure that it can present\n # alpha = +- ||x||_2\n if pivot and rdiag[k] != 0:\n temp = a[j + k * lda] / rdiag[k]\n # >>> compute max\n d1 = 1.0 - temp * temp\n rdiag[k] *= np.sqrt(max(0.0, d1))\n # >>> compute 2nd power\n d1 = rdiag[k] / wa[k]\n # :: if rdiag is to small\n if p05 * (d1 * d1) <= eps_machine:\n rdiag[k] = enorm(\n a[jp1 + k * lda:(k + 1) * lda])\n wa[k] = rdiag[k]\n # :: sgn(ajnorm) = -sgn(x_0)\n # :: H * x = alpha * e_1\n rdiag[j] = -ajnorm\n\n # > return\n if pivot:\n return [ipvt, rdiag, acnorm]\n else:\n return [rdiag, acnorm]", "def compute_essential(data1, data2, K):\n\n \"\"\"YOUR CODE STARTS HERE\"\"\"\n data1 = data1[:2, :].T\n data2 = data2[:2, :].T\n\n # Normalize points\n def normalize_pts(data, T_matrix):\n norm_data = np.ones_like(data)\n for i, pt in enumerate(data):\n token = np.transpose(\n np.matmul(T_matrix, np.transpose(np.append(pt, [1]))))\n norm_data[i] = token[:-1]/token[-1]\n return norm_data\n\n norm_data1 = normalize_pts(data1, np.linalg.inv(K))\n norm_data2 = normalize_pts(data2, np.linalg.inv(K))\n\n # Find initial F matrix\n constraints = []\n for i in range(15):\n x, y = norm_data1[i]\n x_prime, y_prime = norm_data2[i]\n constraints.append([x_prime*x, x_prime*y, x_prime,\n y_prime*x, y_prime*y, y_prime, x, y, 1])\n constraints = np.array(constraints)\n\n _, _, vh = np.linalg.svd(constraints)\n e11, e12, e13, e21, e22, e23, e31, e32, e33 = vh[-1, :]\n E = [[e11, e12, e13],\n [e21, e22, e23],\n [e31, e32, e33]]\n\n # enforce singularity constraint\n e_u, e_s, e_vh = np.linalg.svd(E)\n e_s[:2] = (e_s[0] + e_s[1]) / 2\n e_s[-1] = 0.0\n e_s = np.diag(e_s)\n E = np.matmul(np.matmul(e_u, e_s), e_vh)\n\n \"\"\"YOUR CODE ENDS HERE\"\"\"\n\n return E", "def get_FQHE_Interlayer_MatEle(bas2sq, sq2bas, bas2sqT, sq2basT, bas2sqB, sq2basB, VjjtIntL) :\n import scipy.sparse as sps \n import numpy as np\n import itertools\n\n row = []\n col = []\n dat = []\n nT = sum(sq2basT[0])\n nB = sum(sq2basB[0])\n \n for ii in sq2bas.keys(): # for ii-th basis\n bas = sq2bas[ii]\n basT = list(sq2basT[bas[0]]) # basis function of top layer\n basB = list(sq2basB[bas[1]]) \n occpT = tuple(list(np.nonzero(basT))[0]) # occupied positions of electrons\n occpB = tuple(list(np.nonzero(basB))[0]) \n inits = list(itertools.product(occpT,occpB)) # find initial pair of electrons \n\n for init_i in inits: # find the possible scattering states\n for fins in VjjtIntL[init_i].keys(): # find the possible final states\n basT1 = []\n basT1 = basT1 + basT # initialize final state list\n basB1 = []\n basB1 = basB1 + basB # initialize final state list\n basT1[init_i[0]] = 0 # annihilate two electrons of initial state\n basB1[init_i[1]] = 0 \n basT1[fins[0]] = 1 # creat two electrons of final states \n basB1[fins[1]] = 1\n if sum(basT1) == nT and sum(basB1) == nB: # if there are two electrons on the same site\n jjT = bas2sqT[tuple(basT1)]\n jjB = bas2sqB[tuple(basB1)]\n jj = bas2sq[tuple([jjT, jjB])]\n ss0 = sorted([init_i[0], fins[0]])\n ss1 = sorted([init_i[1], fins[1]])\n exchangetime = sum(basT[0:init_i[0]]) + sum(basT1[0:fins[0]]) \n exchangetime += sum(basB[0:init_i[1]]) + sum(basB1[0:fins[1]])\n\n row += [int(ii)]\n col += [int(jj)]\n dat += 
[(-1)**exchangetime*VjjtIntL[init_i][fins]]\n \n return row, col, dat", "def compute_ricci(Gt, chi):\n global metric, inv_metric, C1, C2, d2\n\n Lchi = laplacian_conformal(chi)\n\n #print(type(Lchi))\n\n #print('Done with Lphi') #simplify(Lchi))\n\n\n#ewh4 DKchiDkchi = Matrix([4*metric[i, j]*sum([sum([inv_metric[k, l]*d(l, chi) for l in e_i])*d(k, chi) for k in e_i]) for i, j in e_ij])\n DKchiDkchi = Matrix([0.25/chi/chi*metric[i, j]*sum([sum([inv_metric[k, l]*d(l, chi) for l in e_i])*d(k, chi) for k in e_i]) for i, j in e_ij])\n\n #print('done with DKchi') # simplify(DKchiDkchi))\n\n CalGt = [sum(inv_metric[k,l]*C2[i,k,l] for k, l in e_ij) for i in e_i]\n\n Rt = Matrix([-0.5*sum([inv_metric[l, m]*d2(l, m, metric[i, j]) for l, m in e_ij]) +\n 0.5*sum([metric[k,i]*d(j, Gt[k]) + metric[k,j]*d(i, Gt[k]) for k in e_i]) +\n 0.5*sum([CalGt[k]*(C1[i,j,k] + C1[j,i,k]) for k in e_i]) +\n sum([inv_metric[l,m]*(C2[k,l,i]*C1[j,k,m] + C2[k,l,j]*C1[i,k,m] + C2[k,i,m]*C1[k,l,j])\n for k in e_i for l,m in e_ij]) for i,j in e_ij])\n\n #print('done with Rt') #simplify(Rt))\n\n#ewh5 Rphi_tmp = Matrix([2*metric[i, j]*Lchi - 4*d(i, chi)*d(j, chi) for i, j in e_ij])\n#dwn Rphi_tmp = Matrix([ 0.5*metric[i, j]*Lchi/chi - 0.25*d(i, chi)*d(j, chi)/chi/chi for i, j in e_ij])\n\n #print(simplify(Rphi_tmp))\n\n#ewh6 Rphi = -2*_Di_Dj(chi) - Rphi_tmp.reshape(3, 3) - DKchiDkchi.reshape(3, 3)\n#dwn Rphi = -0.5*_Di_Dj(chi)/chi - Rphi_tmp.reshape(3, 3) - DKchiDkchi.reshape(3, 3)\n xRphi = Matrix( [ 1/(2*chi)*(d2(i,j,chi) - \n sum(C2[k,j,i]*d(k,chi) for k in e_i)) -\n 1/(4*chi*chi)*d(i,chi)*d(j,chi) for i, j in e_ij]).reshape(3,3)\n\n Rphi = xRphi + Matrix( [ \n 1/(2*chi)*metric[i,j] * ( sum(inv_metric[k,l]*(d2(k,l,chi) - \n 3/(2*chi)*d(k,chi)*d(l,chi)) for k, l in e_ij) - \n sum(CalGt[m]*d(m,chi) for m in e_i))\n for i, j in e_ij ] ).reshape(3,3)\n\n return [Rt.reshape(3, 3) + Rphi, Rt.reshape(3,3), Rphi, CalGt]", "def compute_matrix_result_for_one_filter(matrice_dca, mat_f):\n v = len(mat_f) #the filter dimension\n v = v // 2\n (len_domain1, len_domain2) = matrice_dca.shape\n matrix_result = matrice_dca.copy() #copies the entire dca matrix\n matrix_best_f = np.zeros((len_domain1, len_domain2)) #makes a zero matrix of the same dimension of matrice_dca\n for indice_1 in range(0, len_domain1):\n for indice_2 in range(0, len_domain2): #this loop works as such: for protein 0 to all proteins form 0-104\n correlation = 0.0\n i_centre, j_centre = min(indice_1, v), min(indice_2, v) #v is defined at top\n\n a = max(0, indice_1 - v)\n b = indice_1 + v + 1\n c = max(0, indice_2 - v)\n d = indice_2 + v + 1\n\n sous_matrix = matrice_dca[max(0, indice_1 - v):indice_1 + v + 1, max(0, indice_2 - v):indice_2 + v + 1]\n #the above line: taking the row/column of matrice_dca starting from maximum between 0 and indice_1-v\n #ending at indice_1+v+1\n mat_f[len(mat_f) // 2, len(mat_f) // 2] = np.nan #the component of len(mat_f)//2 is made nan\n\n e = max(v - indice_1, 0)\n f = min(v * 2 + 1, len_domain1 - indice_1 + v)\n g = max(v - indice_2, 0)\n h = min(v * 2 + 1, len_domain2 + v - indice_2)\n\n m_f = mat_f[max(v - indice_1, 0):min(v * 2 + 1, len_domain1 - indice_1 + v),\n max(v - indice_2, 0):min(v * 2 + 1, len_domain2 + v - indice_2)] #takes a certain part of filter\n indic_flatt = m_f.shape[1] * i_centre + j_centre\n if matrice_dca[indice_1, indice_2] != sous_matrix.flatten()[indic_flatt]: #flatten() converts a matrix in row vector\n print('Problem')\n ## remove the central value\n m_f = np.delete(m_f.flatten(), indic_flatt) #we make the m_f 
a row vector deleting a component(indic_flatt)\n sous_matrix = np.delete(sous_matrix.flatten(), indic_flatt)\n # print scipy.stats.pearsonr(sous_matrix,m_f)[0]\n if np.sum(sous_matrix[0] == sous_matrix) != len(sous_matrix):\n correlation = scipy.stats.pearsonr(sous_matrix, m_f)[0]\n matrix_result[indice_1, indice_2] = correlation\n return matrix_result", "def computeFundamentalMatrix(pts1, pts2):\n A = np.empty((8, 9))\n for i in range(len(pts1)-1):\n x1 = pts1[i][0]\n x2 = pts2[i][0]\n y1 = pts1[i][1]\n y2 = pts2[i][1]\n A[i] = np.array([x1 * x2, x2 * y1, x2,\n y2 * x1, y2 * y1, y2,\n x1, y1, 1])\n # Compute F matrix by evaluating SVD\n U, S, V = np.linalg.svd(A)\n F = V[-1].reshape(3, 3)\n\n # Constrain the F matrix to rank 2\n U1, S1, V1 = np.linalg.svd(F)\n # print('Old S', S)\n # S[2] = 0\n S2 = np.array([[S1[0], 0, 0], [0, S1[1], 0], [0, 0, 0]])\n # print('New S', S)\n F = np.dot(np.dot(U1, S2), V1)\n\n return F", "def _inexact_alm_l1(imgflt_stack,options):\n # Get basic image information and reshape input\n img_width = imgflt_stack.shape[0]\n img_height = imgflt_stack.shape[1]\n img_size = img_width* img_height\n img_3d = imgflt_stack.shape[2]\n imgflt_stack = np.reshape(imgflt_stack,(img_size, img_3d))\n options['weight'] = np.reshape(options['weight'],imgflt_stack.shape)\n\n # Matrix normalization factor\n temp = np.linalg.svd(imgflt_stack,full_matrices=False,compute_uv=False)\n norm_two = np.float64(temp[0])\n del temp\n\n # A is a low rank matrix that is being solved for\n A = np.zeros(imgflt_stack.shape,dtype=np.float64)\n A_coeff = np.ones((1, img_3d),dtype=np.float64) # per image scaling coefficient, accounts for things like photobleaching\n A_offset = np.zeros((img_size,1),dtype=np.float64) # offset per pixel across all images\n\n # E1 is the additive error. 
Since the goal is determining the background signal, this is the real signal at each pixel\n E1 = np.zeros(imgflt_stack.shape,dtype=np.float64)\n\n # Normalization factors\n ent1 = np.float64(1) # flatfield normalization\n ent2 = np.float64(10) # darkfield normalization\n\n # Weights\n weight_upd = _dct2(np.mean(np.reshape(A,(img_width, img_height, img_3d)),2))\n\n # Initialize gradient and weight normalization factors\n Y1 = np.float64(0)\n mu = np.float64(12.5)/norm_two\n mu_bar = mu * 10**7\n rho = np.float64(1.5)\n\n # Frobenius norm\n d_norm = np.linalg.norm(imgflt_stack,'fro')\n\n # Darkfield upper limit and offset\n B1_uplimit = np.min(imgflt_stack)\n B1_offset = np.float64(0)\n\n # Perform optimization\n iternum = 0\n converged = False\n while not converged:\n iternum += 1\n\n # Calculate the flatfield using existing weights, coefficients, and offsets\n W_idct_hat = _idct2(weight_upd)\n A = np.matmul(np.reshape(W_idct_hat,(img_size,1)),A_coeff) + A_offset\n temp_W = np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n\n # Update the weights\n temp_W = np.reshape(temp_W,(img_width, img_height, img_3d))\n temp_W = np.mean(temp_W,2)\n weight_upd = weight_upd + _dct2(temp_W)\n weight_upd = np.max(np.reshape(weight_upd - options['lambda']/(ent1*mu),(img_width, img_height,1)),-1,initial=0) + np.min(np.reshape(weight_upd + options['lambda']/(ent1*mu),(img_width, img_height,1)),-1,initial=0)\n W_idct_hat = _idct2(weight_upd)\n\n # Calculate the flatfield using updated weights\n A = np.matmul(np.reshape(W_idct_hat,(img_size,1)),A_coeff) + A_offset\n\n # Determine the error\n E1 = E1 + np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n E1 = np.max(np.reshape(E1 - options['weight']/(ent1*mu),(img_size, img_3d,1)),-1,initial=0) + np.min(np.reshape(E1 + options['weight']/(ent1*mu),(img_size, img_3d,1)),-1,initial=0)\n\n # Calculate the flatfield coefficients by subtracting the errors from the original data\n R1 = imgflt_stack-E1\n A_coeff = np.reshape(np.mean(R1,0)/np.mean(R1),(1, img_3d))\n A_coeff[A_coeff<0] = 0 # pixel values should never be negative\n\n # Calculate the darkfield component if specified by the user\n if options['darkfield']:\n # Get images with predominantly background pixels\n validA1coeff_idx = np.argwhere(A_coeff<1)[:,1]\n R1_upper = R1[np.argwhere(np.reshape(W_idct_hat,(-1,1)).astype(np.float64)>(np.float64(np.mean(W_idct_hat))-np.float64(10**-5)))[:,0],:]\n R1_upper = np.mean(R1_upper[:,validA1coeff_idx],0)\n R1_lower = R1[np.argwhere(np.reshape(W_idct_hat,(-1,1))<np.mean(W_idct_hat)+np.float64(10**-5))[:,0],:]\n R1_lower = np.mean(R1_lower[:,validA1coeff_idx],0)\n B1_coeff = (R1_upper-R1_lower)/np.mean(R1)\n k = validA1coeff_idx.size\n\n # Calculate the darkfield offset\n temp1 = np.sum(np.square(A_coeff[0,validA1coeff_idx]))\n temp2 = np.sum(A_coeff[0,validA1coeff_idx])\n temp3 = np.sum(B1_coeff)\n temp4 = np.sum(A_coeff[0,validA1coeff_idx]*B1_coeff)\n temp5 = temp2 * temp3 - k*temp4\n if temp5 == 0:\n B1_offset = np.float64(0)\n else:\n B1_offset = (temp1*temp3-temp2*temp4)/temp5\n B1_offset = np.max(B1_offset,initial=0)\n B1_offset = np.min(B1_offset,initial=B1_uplimit/(np.mean(W_idct_hat)+10**-7))\n B_offset = B1_offset * np.mean(W_idct_hat) - B1_offset*np.reshape(W_idct_hat,(-1,1))\n\n # Calculate darkfield\n A1_offset = np.reshape(np.mean(R1[:,validA1coeff_idx],1),(-1,1)) - np.mean(A_coeff[0,validA1coeff_idx]) * np.reshape(W_idct_hat,(-1,1))\n A1_offset = A1_offset - np.mean(A1_offset)\n A_offset = A1_offset - np.mean(A1_offset) - 
B_offset\n\n # Update darkfield weights\n W_offset = _dct2(np.reshape(A_offset,(img_width, img_height)))\n W_offset = np.max(np.reshape(W_offset - options['lambda_darkfield']/(ent2*mu),(img_width, img_height,1)),-1,initial=0) \\\n + np.min(np.reshape(W_offset + options['lambda_darkfield']/(ent2*mu),(img_width, img_height,1)),-1,initial=0)\n\n # Calculate darkfield based on updated weights\n A_offset = _idct2(W_offset)\n A_offset = np.reshape(A_offset,(-1,1))\n A_offset = np.max(np.reshape(A_offset - options['lambda_darkfield']/(ent2*mu),(A_offset.shape[0],A_offset.shape[1],1)),-1,initial=0) \\\n + np.min(np.reshape(A_offset + options['lambda_darkfield']/(ent2*mu),(A_offset.shape[0],A_offset.shape[1],1)),-1,initial=0)\n A_offset = A_offset + B_offset\n\n # Loss\n Z1 = imgflt_stack - A - E1\n\n # Update weight regularization term\n Y1 = Y1 + mu*Z1\n\n # Update learning rate\n mu = np.min(mu*rho,initial=mu_bar)\n\n # Stop if loss is below threshold\n stopCriterion = np.linalg.norm(Z1,ord='fro')/d_norm\n if stopCriterion < options['optimization_tol'] or iternum > options['max_iterations']:\n converged = True\n\n # Calculate final darkfield image\n A_offset = A_offset + B1_offset * np.reshape(W_idct_hat,(-1,1))\n\n return A,E1,A_offset", "def Optimize(score):\n matrix = score.copy()\n # matrix[10][10]= 100000\n murk = Munkres()\n indexes = murk.compute(matrix)\n # print (matrix, 'Lowest cost through this matrix:')\n total = 0\n\n for row, column in indexes:\n value = matrix[row][column]\n total += value\n print '(%d, %d) -> %d' % (row, column, value)\n print 'total cost: %d' % total\n\n return indexes", "def search(self):\n #Create Shorthands\n al = self._al\n sf = self._sf\n\n #print len(al), [ar.shape for ar in al]\n ns_trs = [ar.shape[1] for ar in al] # Number of trials for each group\n crit_f = self._threshold\n l=al[0].shape[0]\n #Step 1: Calculate Anova (or other stat_fun) for original data\n #-------------------------------------------------------------\n fs = np.zeros((l),\"d\")\n for i in range(l):\n anova_ars = [ar[i,:] for ar in al]\n fs[i] = sf(*anova_ars)\n clusters = self.find_clusters(fs,crit_f,\"greater\") \n #Step 2: If we have some clusters, repeat process for num_surrogates surrogates\n #------------------------------------------------------------------------------\n if len(clusters)>0:\n cluster_stats = [np.sum(fs[c[0]:c[1]]) for c in clusters]\n cluster_ps = np.ones((len(clusters)),\"d\")\n cluster_stats_hist = np.zeros((self._ns)) #For making histogram (not visually) and finding percentile\n for i_s in range(self._ns):\n ar_shuffle = np.concatenate(al,axis=1)\n #Mache Liste mit Indices fuer alle Trials, permutiere, zerlege in Einzellisten der Laengen ns_trs\n indices_lists = np.split(np.random.permutation(sum(ns_trs)),np.cumsum(ns_trs)[:-1])\n #print ar_shuffle.shape, ar_shuffle\n ar_shuffle_list = [ar_shuffle[:,indices] for indices in indices_lists]\n #print \"ar_shuffle_list shapes\", [ar.shape for ar in ar_shuffle_list]\n fs_surr = np.zeros((l))\n for i in range(l):\n anova_ars_perm = [ar[i,:] for ar in ar_shuffle_list]\n fs_surr[i] = sf(*anova_ars_perm)\n clusters_perm = self.find_clusters(fs_surr,crit_f,\"greater\")\n #print \"clusters_perm\", clusters_perm\n if len(clusters_perm)>0:\n cluster_stats_perm = [np.sum(fs_surr[c[0]:c[1]]) for c in clusters_perm]\n cluster_stats_hist[i_s] = max(cluster_stats_perm)\n else:\n cluster_stats_hist[i_s] = 0\n #for each cluster in original data, calculate p-value as percentile of its \n #cluster statistics within all cluster 
statistics in surrogate data\n cluster_ps[:] = [percentileofscore(cluster_stats_hist,cluster_stats[i_cl]) for i_cl in range(len(clusters))]\n cluster_ps[:] = (100.0 - cluster_ps[:]) / 100.0 # From percent to fraction\n return fs, np.array(clusters)[cluster_ps<0.05], cluster_ps[cluster_ps<0.05], np.array(clusters), cluster_ps\n else:\n return fs,np.array([]),np.array([]),np.array([]),np.array([])", "def method1(self):\n cres=0. # Variable for storing Chern number.\n # The U matrices from Fukui's method; storage...\n Ux=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n # ... and calculation of U matrices\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.alleigvecs[:,:,ix ,iy ]\n if ix<self.kS.Nx:\n mat2=self.alleigvecs[:,:,ix+1,iy ]\n else:\n mat2=self.alleigvecs[:,:,1 ,iy ]\n if iy<self.kS.Ny:\n mat3=self.alleigvecs[:,:,ix ,iy+1]\n else:\n mat3=self.alleigvecs[:,:,ix ,1 ]\n Ux[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[:self.NL,:self.NL])\n Uy[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[:self.NL,:self.NL])\n \n # Local estimates of Berry curvature; storage ...\n ftempall=np.zeros((self.kS.Nx,self.kS.Ny),complex)\n # ... and calculation\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux[ix,iy]*Uy[ix+1,iy]/Ux[ix,iy+1]/Uy[ix,iy])\n ftempall[ix,iy]=ftemp # ... of local Berry curvature ...\n cres+=ftemp/2./pi/1j # ... and of Berry phase (Chern number).\n\n return cres.real, ftempall", "def compute_similarity_RANSAC(kp1, kp2, matches, tolerance):\n\n # TODO: Your code here\n transform= np.zeros([2,3])\n t=[]\n \n good_matches=[]\n good={}\n L=len(matches)\n max=0\n n=np.zeros(L)\n \n for i in range (0,L-1):\n \n img1_idx = matches[i].queryIdx\n img2_idx = matches[i].trainIdx\n (x1,y1) = kp1[img1_idx].pt\n (x2,y2) = kp2[img2_idx].pt\n img1_idx = matches[i+1].queryIdx\n img2_idx = matches[i+1].trainIdx\n (x3,y3) = kp1[img1_idx].pt\n (x4,y4) = kp2[img2_idx].pt\n A=np.array([[x1, -y1, 1, 0],[y1, x1, 0, 1], [x3, -y3, 1, 0], [y3, x3, 0, 1]])\n b=([[x2],[y2],[x4],[y4]])\n vector=np.dot(np.linalg.inv(A),b)\n transV=np.array([[vector[0][0], -vector[1][0], vector[2][0]], [vector[1][0], vector[0][0], vector[3][0]]])\n t.append(transV)\n \n good[i]=[matches[i]]\n for j in range (0,L):\n \n img1_idxj = matches[j].queryIdx\n img2_idxj = matches[j].trainIdx\n (x1j,y1j) = kp1[img1_idxj].pt\n (x2j,y2j) = kp2[img2_idxj].pt\n simAxis=np.array([[x1j],[y1j],[1]])\n axis_com=np.dot(transV, simAxis)\n \n d=np.sqrt((x2j-axis_com[0][0])**2+(y2j-axis_com[1][0])**2)\n \n if d<=tolerance:\n good[i].append(matches[j])\n \n n[i]=n[i]+1\n \n if n[i]>max:\n max=n[i]\n good_matches=good[i]\n transform=t[i]\n \n \n print('length of good match for simA and simB is ', len(good_matches)-1)\n \n return transform, good_matches", "def _cce(func, s, sf, bl, bu, mask, icall, maxn, alpha, beta, maxit, printit):\n\n \"\"\"\n List of local variables\n sb(.) = the best point of the simplex\n sw(.) = the worst point of the simplex\n w2(.) = the second worst point of the simplex\n fw = function value of the worst point\n ce(.) = the centroid of the simplex excluding wo\n snew(.) 
= new point generated from the simplex\n iviol = flag indicating if constraints are violated\n = 1 , yes\n = 0 , no\n \"\"\"\n # Assign the best and worst points:\n sb = s[0,:]\n fb = sf[0]\n sw = s[-1,:]\n fw = sf[-1]\n\n # Compute the centroid of the simplex excluding the worst point:\n ce = np.mean(s[:-1,:],axis=0)\n\n # Attempt a reflection point\n snew = ce + alpha*(ce-sw)\n snew = np.where(mask, snew, sb) # sb should have initial params at mask==False\n\n # Check if is outside the bounds:\n ibound = 0\n # s1 = snew-bl\n # idx = (s1<0).nonzero()\n # if idx[0].size != 0: ibound = 1\n if np.ma.any(np.ma.array(snew-bl, mask=~mask) < 0.): ibound = 1\n\n # s1 = bu-snew\n # idx = (s1<0).nonzero()\n # if idx[0].size != 0: ibound = 2\n if np.ma.any(np.ma.array(bu-snew, mask=~mask) < 0.): ibound = 2\n\n if ibound >= 1:\n snew = _SampleInputMatrix(1,bl,bu,distname='randomUniform')[0]\n snew = np.where(mask, snew, sb)\n\n fuc = func(snew)\n fnew = -fuc if maxit else fuc\n icall += 1\n if printit==1: print(' i, f, X: ', icall, fnew, snew)\n\n # Reflection failed; now attempt a contraction point:\n if fnew > fw:\n snew = sw + beta*(ce-sw)\n snew = np.where(mask, snew, sb)\n fuc = func(snew)\n fnew = -fuc if maxit else fuc\n icall += 1\n if printit==1: print(' i, f, X: ', icall, fnew, snew)\n\n # Both reflection and contraction have failed, attempt a random point;\n if fnew > fw:\n snew = _SampleInputMatrix(1,bl,bu,distname='randomUniform')[0]\n snew = np.where(mask, snew, sb)\n fuc = func(snew)\n fnew = -fuc if maxit else fuc\n icall += 1\n if printit==1: print(' i, f, X: ', icall, fnew, snew)\n\n # end of _cce\n return snew, fnew, icall" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function computes the essential matrix from the fundamental matrix. The E matrix is defined in normalized image coordinates
def getEssentialMatrix(K, F):
    E = np.dot(K.T, np.dot(F, K))
    u, s, v = np.linalg.svd(E)
    # We correct the singular values of the E matrix
    s_new = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 0]]).reshape(3, 3)
    final_E = np.dot(u, np.dot(s_new, v))
    return final_E
[ "def calc_eigendecomp(self):\n self.evals, self.evecs = np.linalg.eigh(self.sub_matrix)", "def energyMatrix(image):\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n sobelX = cv.Sobel(gray, cv.CV_64F,1,0)\n sobelY = cv.Sobel(gray, cv.CV_64F,0,1)\n engMatrix = np.abs(sobelX) + np.abs(sobelY)\n return engMatrix", "def _calc_e_matrices(self):\n el_len = self.coord_electrode.size\n # expanding electrode grid\n h = float(np.diff(self.coord_electrode).min())\n\n # Define transformation matrices\n c_mat3 = np.eye(el_len + 1) / h\n\n # Get K-matrix\n k_matrix = self._calc_k_matrix()\n\n # Define matrixes for C to A transformation:\n tja = np.eye(el_len + 2)[:-1, ]\n tjp1a = np.eye(el_len + 2, k=1)[:-1, ]\n\n # Define spline coefficients\n e_mat0 = tja\n e_mat1 = np.dot(tja, k_matrix)\n e_mat2 = 3 * np.dot(c_mat3**2, (tjp1a - tja)) - \\\n np.dot(np.dot(c_mat3, (tjp1a + 2 * tja)), k_matrix)\n e_mat3 = 2 * np.dot(c_mat3**3, (tja - tjp1a)) + \\\n np.dot(np.dot(c_mat3**2, (tjp1a + tja)), k_matrix)\n\n return e_mat0, e_mat1, e_mat2, e_mat3", "def reduced_echelon_form(matrix):\n M = echelon_form(Matrix(matrix))\n i = M.shape[0] - 1\n while i >= 0:\n row_bool = [bool(e) for e in M[i]]\n if any(row_bool): # If not an empty row\n j = row_bool.index(True) # First nonzero element\n M[i] = M[i] / M[i, j]\n for row in range(i):\n factor = - M[row, j]\n M[row] = M[row] + factor * M[i]\n i = i - 1\n return M", "def get_electric_field(E0=np.matrix([0.,0.,0.])):\n\n\tres = []\n\tfor k1, atom1 in GetRegister('Atom'):\n\t\tif not atom1._haspol: # skip centers with no polarisability\n\t\t\tcontinue\n\t\te = E0.copy()\n\t\tfor k2, atom2 in GetRegister('Atom'):\n\t\t\tif atom2._parent != atom1._parent:\n\t\t\t\t#print 'atom2.field_at(atom1._pos)', atom2.field_at(atom1._pos)\n\t\t\t\t#print 'type(atom2.field_at(atom1._pos))',type(atom2.field_at(atom1._pos))\n\t\t\t\te += atom2.field_at(atom1._pos) # add the field at atom2 from atom1\n\t\tfor i in range(3):\t\n\t\t\tres.append(e[0,i]) \n#\tprint res\n\treturn np.matrix(res) # return the result as a numpy matrix object ", "def fundamental_matrix(image1, image2):\n [x1, y1, x2, y2] = automatic_point_correspondences(image1, image2, returntype='vector')\n\n # get the number of data points\n num_points = len(x1)\n o = np.ones((num_points, 1))\n\n x1y1, t1 = normalize_points(x1, y1)\n x2y2, t2 = normalize_points(x2, y2)\n\n x1 = x2y2[:, 0]\n y1 = x2y2[:, 1]\n x2 = x1y1[:, 0]\n y2 = x1y1[:, 1]\n\n # compute A - the equation matrix\n mul = lambda a, b : np.multiply(a, b)\n a = np.concatenate((mul(x1, x2), mul(x1, y2), x1, mul(y1, x2), mul(y1, y2), y1, x2, y2,\n o[:, 0])).reshape((num_points, -1), order='F')\n\n # the singular value decomposition SVD\n U, D, V = np.linalg.svd(a)\n\n # extract column of the smallest singular value - the last column\n smallest = V[8, :].T\n F = smallest.reshape(3, 3)\n\n # enforce singularity constraint (must be singular and of rank 2)\n U, D, V = np.linalg.svd(F)\n r = D[0]\n s = D[1]\n\n F = np.dot(U, np.diag([r, s, 0])).dot(V)\n F = t2.T.dot(F).dot(t1)\n\n return F", "def get_temperature_matrix(self,E):\n rhs_vector = self.construct_rhs_vector(E)\n temperature_vector = solve(self.behaviour_matrix,rhs_vector)\n self.temperature_matrix = temperature_vector.reshape(self.cols+2,self.cols+2)", "def echelon_form(matrix):\n M = Matrix(matrix)\n i = 0\n j = 0\n while i < M.shape[0] and j < M.shape[1]:\n if M[i, j] == 0:\n column = [bool(M[row, j]) for row in range(len(M)) if row > i]\n try:\n swap_row = column.index(True) + i + 1\n M[i], M[swap_row] = 
M[swap_row], M[i]\n except ValueError:\n j = j + 1\n else:\n for row in range(i + 1, len(M)):\n factor = - M[row, j] / M[i, j]\n M[row] = M[row] + factor * M[i]\n i = i + 1\n j = j + 1\n return M", "def eig_matrix(self):\n (self.eig_vals, w) = sparsedl.sorted_eigh(self.rate_matrix)\n return w", "def decompose_essential_matrix(E, x1, x2):\n\n # Fix left camera-matrix\n Rl = np.eye(3)\n tl = np.array([[0, 0, 0]]).T\n Pl = np.concatenate((Rl, tl), axis=1)\n\n # TODO: Compute possible rotations and translations\n \n # s must be [1, 1, 0]\n u, s, vh = np.linalg.svd(E)\n E = u @ np.diag([1, 1, 0]) @ vh\n u, s, vh = np.linalg.svd(E)\n\n w = np.array([[ 0, 1, 0], \n [-1, 0, 0], \n [ 0, 0, 1]]) \n \n z = np.array([[ 0, -1, 0], \n [ 1, 0, 0],\n [ 0, 0, 0]])\n \n R1 = u @ w.T @ vh\n s1 = -u @ z @ u.T\n R2 = u @ w @ vh\n s2 = u @ z @ u.T\n\n t1 = np.array([[s1[2, 1]], \n [s1[0, 2]],\n [s1[1, 0]]])\n \n t2 = np.array([[s2[2, 1]], \n [s2[0, 2]], \n [s2[1, 0]]]) \n\n # Four possibilities\n Pr = [np.concatenate((R1, t1), axis=1),\n np.concatenate((R1, t2), axis=1),\n np.concatenate((R2, t1), axis=1),\n np.concatenate((R2, t2), axis=1)]\n\n # Compute reconstructions for all possible right camera-matrices\n X3Ds = [infer_3d(x1[:, 0:1], x2[:, 0:1], Pl, x) for x in Pr]\n\n # Compute projections on image-planes and find when both cameras see point\n test = [np.prod(np.hstack((Pl @ np.vstack((X3Ds[i], [[1]])), Pr[i] @ np.vstack((X3Ds[i], [[1]])))) > 0, 1) for i in\n range(4)]\n test = np.array(test)\n idx = np.where(np.hstack((test[0, 2], test[1, 2], test[2, 2], test[3, 2])) > 0.)[0][0]\n\n # Choose correct matrix\n Pr = Pr[idx]\n\n return Pl, Pr", "def calHessianEigen2D(image2D, scale, mask2D):\n image = scipy.ndimage.filters.gaussian_filter(image2D, scale, truncate=10)\n h = calHessian(image)\n h = np.transpose(h, [2, 3, 0, 1])\n\n selected_points = mask2D.flatten() > 0\n\n e, V = LA.eig(h.reshape([-1, 2, 2])[selected_points])\n\n idx = np.argsort(np.abs(e), axis=1)\n\n e1 = e[np.arange(len(idx)), idx[:, 0]].reshape([-1, 1])\n e2 = e[np.arange(len(idx)), idx[:, 1]].reshape([-1, 1])\n e = np.concatenate([e1, e2], axis=1)\n\n vec1 = V[np.arange(len(idx)), :, idx[:, 0]].reshape([len(idx), 1, 2])\n vec2 = V[np.arange(len(idx)), :, idx[:, 1]].reshape([len(idx), 1, 2])\n V = np.concatenate([vec1, vec2], axis=1)\n\n full_e = np.zeros(selected_points.shape + (2,))\n full_V = np.zeros(selected_points.shape + (2, 2))\n\n full_e[selected_points] = e\n full_V[selected_points] = V\n\n full_e = full_e.reshape(image2D.shape + (2,))\n full_V = full_V.reshape(image2D.shape + (2, 2))\n\n return h.astype(float), full_e.astype(float), full_V.astype(float)", "def calculate_E0(self) -> float:\n noisy = self.kernel_eigenvectors_[-1].copy()\n np.random.shuffle(noisy)\n\n kernel_eigenvectors = self.kernel_eigenvectors_[:-1]\n kernel_eigenvectors.append(noisy)\n\n eigenvectors_matrix = scipy.sparse.csr_matrix(\n np.column_stack([eigenvector for eigenvector in kernel_eigenvectors])\n )\n\n if len(kernel_eigenvectors) == 2:\n ev0 = kernel_eigenvectors[0]\n ev1 = kernel_eigenvectors[1]\n _, Gamma, _ = scipy.sparse.linalg.svds(\n ev0.T @ ev1, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n else:\n _, Gamma, _ = scipy.sparse.linalg.svds(\n eigenvectors_matrix, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n\n Gamma.sort()\n gamma2 = Gamma[-2]\n E0 = (1 + gamma2) / 2\n return E0", "def _compute_hecke_matrix(self, n):\n return self.cuspidal_submodule().hecke_matrix(n).block_sum(self.eisenstein_submodule().hecke_matrix(n))", "def 
m_echelon(A):\n\n m, n = A.shape\n M = np.eye(m)\n U = A.copy()\n\n row = 0\n for col in range(n):\n piv_row = row + np.argmax(np.abs(U[row:, col]))\n\n if abs(U[piv_row, col]) == 0:\n # column is exactly zero\n continue\n\n swap_rows(U, row, piv_row)\n swap_rows(M, row, piv_row)\n\n for el_row in range(row + 1, m):\n fac = -U[el_row, col]/U[row, col]\n U[el_row] += fac*U[row]\n M[el_row] += fac*M[row]\n\n row += 1\n\n if row + 1 >= m:\n break\n\n return M, U", "def prep_tmat(self,E): \n # find the corresponding on-shell momentum in the 2+1 system \n # qon should be positive (threshold is self.ed)\n qon=np.sqrt(4*self.mass/3*(E-self.ed))\n self.qgrid[self.nqpoints]=qon \n \n print(\"Start calculating tmatrix for on-shell momentum q0 = {0:15.6e} fm-1\".format(qon))\n \n # prepare off-shell energies for t-matrix \n etmat=E-0.75*self.qgrid**2/self.mass # note that this is a negative energy < E_b(two-body) \n \n # prepare numpy array that keeps all tmatrix elements \n # put to zero to treat pon for negative energies properly\n tmat=np.zeros((self.lmax//2+1,self.nqpoints+1,self.npoints+1,self.npoints+1),dtype=np.cdouble)\n \n # now I need to solve the Lippmann-Schwinger equation for each etmat and each l =0,..,lmax\n # take only even partial waves because of Pauli principle \n for l in range(0,self.lmax+1,2):\n print(\"Calculating for l = {0:d}\".format(l)) \n for ie in range(self.nqpoints+1): \n # on-shell momentum for this off-shell energy\n if etmat[ie] > 0:\n pon=np.sqrt(2*self.mred*etmat[ie])\n self.pgrid[self.npoints]=pon\n else:\n pon=0.0\n self.pgrid[self.npoints]=pon\n \n # define matrix for set of equations \n # predefine the Kronecker deltas \n amat=np.identity(self.npoints+1,dtype=np.cdouble)\n # now add the other pieces of the definition of the matrix \n for i in range(self.npoints+1):\n # first for j != N \n if self.pgrid[i]>0:\n for j in range(self.npoints): \n amat[i,j]+=-(2*self.mred)*self.pot.v(self.pgrid[i],self.pgrid[j],self.l)*self.pgrid[j]**2 \\\n /(2*self.mred*etmat[ie]-self.pgrid[j]**2)*self.pweight[j] \n # then for j==N\n if self.pgrid[i]>0 and pon>0:\n amat[i,self.npoints] \\\n +=(2*self.mred)*self.pot.v(self.pgrid[i],pon,self.l)*pon**2* \\\n np.sum(self.pweight[0:self.npoints-1]/(pon**2-self.pgrid[0:self.npoints-1]**2)) \\\n +1j*m.pi*self.mred*pon*self.pot.v(self.pgrid[i],pon,self.l) \\\n -self.mred*pon*self.pot.v(self.pgrid[i],pon,self.l)*np.log(abs((pon+self.pc)/(self.pc-pon)))\n \n # now define the rhs \n bmat=np.empty((self.npoints+1,self.npoints+1),dtype=np.cdouble)\n for i in range(self.npoints+1):\n for j in range(self.npoints+1):\n if self.pgrid[i]>0 and self.pgrid[j]>0:\n bmat[i,j]=self.pot.v(self.pgrid[i],self.pgrid[j],self.l)\n else:\n bmat[i,j]=0.0\n \n # finally solve set of equations and store in complete array\n # also multiply by (q0**2-q)\n # special treatment for l=0 and q=q0 (in this case, the equation is not solvable \n # but the t matrix is analytically known, etmat is negative = Ed in this case)\n if l==0 and ie==self.nqpoints:\n # use tilde t = (Ed-H0)|phi> <phi|(E-H0) \n for i in range(self.npoints):\n for j in range(self.npoints):\n tmat[l//2,ie,i,j]=(self.ed-self.pgrid[i]**2/self.mass) \\\n *self.wfd[i]*self.wfd[j] \\\n *(self.ed-self.pgrid[j]**2/self.mass)\n else: \n tmat[l//2,ie,:,:]=np.linalg.solve(amat,bmat)*0.75*(qon**2-self.qgrid[ie]**2)/self.mass\n \n print(\"Finished calculating tmatrix for on-shell momentum q0 = {0:15.6e} fm-1\".format(self.qgrid[self.nqpoints]))\n\n # return on- and off-shell t-matrix \n return tmat", "def 
_init_eigenmatrix(self, P):\n self._.d = nrows(P) - 1\n assert all(len(r) == self._.d + 1 for r in P), \\\n \"parameter length mismatch\"\n P = Matrix(SR, P)\n for i, x in enumerate(P[0]):\n P[0, i] = integralize(x)\n self._.n = sum(P[0])\n return P", "def compute_e(f_mat, m_mat):\r\n return m_mat.T @ f_mat @ m_mat", "def hecke_matrix(self, q, basis):\n #d = len(basis)\n T = []\n for Phi in basis:\n h = Phi.hecke(q)\n row = self.linear_relation(basis + [h])\n ## Probably should put some check here that it really worked.\n row.pop()\n T.append(row)\n return Matrix(T).transpose()", "def free_body_eigen_problem(self,p):\n M = sym.eye(p.qs*2)\n M[-p.qs:,-p.qs:]=self.M\n\n K = sym.zeros(p.qs*2)\n K[:p.qs,-p.qs:] = sym.eye(p.qs)\n K[-p.qs:,:p.qs] = -self.f.jacobian(p.q)\n K[-p.qs:,-p.qs:] = -self.f.jacobian(p.qd)\n return K,M" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the essential matrix, we derive the camera position and orientation
def ExtractCameraPose(E):
    u, s, v = np.linalg.svd(E, full_matrices=True)
    w = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]]).reshape(3, 3)
    c1 = u[:, 2].reshape(3, 1)
    r1 = np.dot(np.dot(u, w), v).reshape(3, 3)
    c2 = -u[:, 2].reshape(3, 1)
    r2 = np.dot(np.dot(u, w), v).reshape(3, 3)
    c3 = u[:, 2].reshape(3, 1)
    r3 = np.dot(np.dot(u, w.T), v).reshape(3, 3)
    c4 = -u[:, 2].reshape(3, 1)
    r4 = np.dot(np.dot(u, w.T), v).reshape(3, 3)
    if np.linalg.det(r1) < 0:
        c1 = -c1
        r1 = -r1
    if np.linalg.det(r2) < 0:
        c2 = -c2
        r2 = -r2
    if np.linalg.det(r3) < 0:
        c3 = -c3
        r3 = -r3
    if np.linalg.det(r4) < 0:
        c4 = -c4
        r4 = -r4
    cam_center = np.array([c1, c2, c3, c4])
    cam_rotation = np.array([r1, r2, r3, r4])
    return cam_center, cam_rotation
[ "def get_camera_orientation(self):\n\n # Create the vector from the camera to the robot\n vector_x = self.robot_x - self.camera_x\n vector_y = self.robot_y - self.camera_y\n vector_z = self.robot_z - self.camera_z\n\n # Calculate yaw and pitch from this vector\n yaw = math.atan2(vector_y, vector_x)\n pitch = -math.asin(vector_z)\n\n # Create the quaternion from the euler angles\n self.quaternion = geometry_msgs.msg.Quaternion(\n *tf_conversions.transformations.quaternion_from_euler(0, pitch, yaw))", "def get_extrinsic_matrix(pose):\n batch_size, _ = pose.shape\n rot = pose[:,:3]\n trans = pose[:,3:]\n\n rot = transforms.euler_angles_to_matrix(rot,convention=\"XYZ\")\n pose = torch.cat((rot,trans.view(batch_size, 3, 1)), -1)\n\n return pose", "def camera_position(matrix):\n\t\tt = (matrix[0][3], matrix[1][3], matrix[2][3])\n\t\tr = (\n\t\t (matrix[0][0], matrix[0][1], matrix[0][2]),\n\t\t (matrix[1][0], matrix[1][1], matrix[1][2]),\n\t\t (matrix[2][0], matrix[2][1], matrix[2][2])\n\t\t)\n\t\trp = (\n\t\t (-r[0][0], -r[1][0], -r[2][0]),\n\t\t (-r[0][1], -r[1][1], -r[2][1]),\n\t\t (-r[0][2], -r[1][2], -r[2][2])\n\t\t)\n\t\toutput = mathutils.Vector((\n\t\t rp[0][0] * t[0] + rp[0][1] * t[1] + rp[0][2] * t[2],\n\t\t rp[1][0] * t[0] + rp[1][1] * t[1] + rp[1][2] * t[2],\n\t\t rp[2][0] * t[0] + rp[2][1] * t[1] + rp[2][2] * t[2],\n\t\t))\n\t\treturn output", "def getCameraMatrix(self): # real signature unknown; restored from __doc__\n pass", "def modelview_matrix(self):\n camera = self.figure.scene.camera\n return camera.view_transform_matrix.to_array().astype(np.float32)", "def get_4x4_cam_to_world_mat(self):\n # M = [R^T c]\n # [0 1]\n homogeneous_mat = np.identity(4, dtype=float)\n homogeneous_mat[\n 0:3, 0:3\n ] = self.get_rotation_as_rotation_mat().transpose()\n homogeneous_mat[0:3, 3] = self.get_camera_center()\n return homogeneous_mat", "def __calculateTransformationMatrix(self):\n\t\tpts1 = np.float32([\n\t\t\t[self.__pts[0][0], self.__pts[0][1]],\n\t\t\t[self.__pts[1][0], self.__pts[1][1]],\n\t\t\t[self.__pts[2][0], self.__pts[2][1]],\n\t\t\t[self.__pts[3][0], self.__pts[3][1]]])\n\n\t\tpts2 = np.float32([\n\t\t\t[0, 0],\n\t\t\t[self.__outputSize[0], 0],\n\t\t\t[0, self.__outputSize[1]],\n\t\t\t[self.__outputSize[0], self.__outputSize[1]]])\n\n\t\tself.__M = cv2.getPerspectiveTransform(pts1, pts2)", "def get_pose(self):\n\n (success, rvec, tvec) = cv2.solvePnP(self.model_points, self.image_points,\n self.camera_matrix, self.dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)\n '''\n The OpenCV Solve PnP method computes the rotation and translation vectors with respect to the camera coordinate \n system of the image_points referred to the 3d head model_points. 
It takes into account the camera matrix and\n the distortion coefficients.\n The method used is iterative (cv2.SOLVEPNP_ITERATIVE)\n An alternative method can be the cv2.SOLVEPNP_SQPNP\n '''\n\n if success: # if the solvePnP succeed, compute the head pose, otherwise return None\n\n rvec, tvec = cv2.solvePnPRefineVVS(\n self.model_points, self.image_points, self.camera_matrix, self.dist_coeffs, rvec, tvec)\n # this method is used to refine the rvec and tvec prediction\n\n # Head nose point in the image plane\n nose = (int(self.image_points[0][0]), int(self.image_points[0][1]))\n\n (nose_end_point2D, _) = cv2.projectPoints(\n self.axis, rvec, tvec, self.camera_matrix, self.dist_coeffs)\n # this function computes the 3 projection axis from the nose point of the head, so we can use them to\n # show the head pose later\n\n Rmat = cv2.Rodrigues(rvec)[0]\n # using the Rodrigues formula, this functions computes the Rotation Matrix from the rotation vector\n\n roll, pitch, yaw = 180 * \\\n (rotationMatrixToEulerAngles(Rmat) / np.pi)\n \"\"\"\n We use the rotationMatrixToEulerAngles function to compute the euler angles (roll, pitch, yaw) from the\n Rotation Matrix. This function also checks if we have a gymbal lock.\n The angles are converted from radians to degrees \n \"\"\"\n\n \"\"\"\n An alternative method to compute the euler angles is the following:\n \n P = np.hstack((Rmat,tvec)) -> computing the projection matrix\n euler_angles = -cv2.decomposeProjectionMatrix(P)[6] -> extracting euler angles for yaw pitch and roll from the projection matrix\n \"\"\"\n\n if self.verbose:\n # print(\"Camera Matrix :\\n {0}\".format(self.camera_matrix))\n # print (\"Rotation Vector:\\n {0}\".format(rvec))\n # print (\"Translation Vector:\\n {0}\".format(tvec))\n # print(\"Roll:\"+ str(roll) + \" Pitch: \" + str(pitch) + \" Yaw: \" + str(yaw))\n self.frame = draw_pose_info(\n self.frame, nose, nose_end_point2D, roll, pitch, yaw)\n # draws 3d axis from the nose and to the computed projection points\n for point in self.image_points:\n cv2.circle(self.frame, tuple(\n point.ravel().astype(int)), 2, (0, 255, 255), -1)\n # draws the 6 keypoints used for the pose estimation\n\n return self.frame, roll, pitch, yaw\n\n else:\n return None, None, None, None", "def perspective_matrix(image_w, image_h, theta, phi, fov=np.pi/4):\n\n # Convert the image to the 3D coordinate space by setting the Z coordinate of image pixel to 0\n to_3d = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]])\n\n # Set the origin to the image center\n T = np.eye(4)\n T[0, 3] = -image_w/2\n T[1, 3] = -image_h/2\n T[2, 3] = 0\n\n # Convert to the camera frame\n R = np.eye(4)\n R[:3,:3] = np.array([[-1, 0, 0], [0, -1, 0], [0, 0 ,1]])\n\n # Rotate image by theta radians around the z-axis the camera frame\n R_theta = np.eye(4)\n R_theta[0, 0] = np.cos(theta)\n R_theta[0, 1] = -np.sin(theta)\n R_theta[1, 0] = np.sin(theta)\n R_theta[1, 1] = np.cos(theta)\n \n # Rotate image by phi radians around the x-axis the camera frame\n R_phi = np.eye(4)\n R_phi[1, 1] = np.cos(phi)\n R_phi[1, 2] = -np.sin(phi)\n R_phi[2, 1] = np.sin(phi)\n R_phi[2, 2] = np.cos(phi)\n\n # Size of the side of the square containg any possible rotation\n d = np.sqrt(image_w**2 + image_h**2)\n \n # Translate image away from camera along the z-axis in the camera frame by h\n h = d / (2*np.sin(fov/2))\n T2 = np.eye(4)\n T2[2, 3] = -h\n\n # Pinhole model intrinsics\n n = h - d/2 # focal length in pixels. 
Image pane will be placed as close to the image as possible, while still allowing any rotation without cropping.\n new_image_shape = (int(n*np.tan(fov/2)*2), int(n*np.tan(fov/2)*2)) #assumes save horizontal and vertical fov\n K = np.zeros((3,4))\n K[:3,:3] = np.eye(3)\n K[0,0] = n\n K[1,1] = n\n K[0,2] = new_image_shape[0]/2\n K[1,2] = new_image_shape[1]/2\n K[2,3] = 1\n \n # Perspetive matrix\n M = np.matmul(K, np.matmul(T2, np.matmul(R_theta, np.matmul(R_phi, np.matmul(R, T)))))\n M = np.matmul(M, to_3d)\n\n return M, new_image_shape", "def projection_matrix(camera_parameters, homography):\n # Compute rotation along the x and y axis as well as the translation\n homography = homography * (-1)\n rot_and_transl = np.dot(np.linalg.inv(camera_parameters), homography)\n col_1 = rot_and_transl[:, 0]\n col_2 = rot_and_transl[:, 1]\n col_3 = rot_and_transl[:, 2]\n # normalise vectors\n l = math.sqrt(np.linalg.norm(col_1, 2) * np.linalg.norm(col_2, 2))\n rot_1 = col_1 / l\n rot_2 = col_2 / l\n translation = col_3 / l\n # compute the orthonormal basis\n c = rot_1 + rot_2\n p = np.cross(rot_1, rot_2)\n d = np.cross(c, p)\n rot_1 = np.dot(c / np.linalg.norm(c, 2) + d / np.linalg.norm(d, 2), 1 / math.sqrt(2))\n rot_2 = np.dot(c / np.linalg.norm(c, 2) - d / np.linalg.norm(d, 2), 1 / math.sqrt(2))\n rot_3 = np.cross(rot_1, rot_2)\n # finally, compute the 3D projection matrix from the model to the current frame\n projection = np.stack((rot_1, rot_2, rot_3, translation)).T\n return np.dot(camera_parameters, projection)", "def ComputeInnerOrientation(self, imagePoints):\r\n if self.camera.fiducialMarks == 'no fiducials': # case of digital camera\r\n pixel_size = 0.0024 # [mm]\r\n a1 = 1 / pixel_size\r\n b2 = -1 / pixel_size\r\n a2 = 0\r\n b1 = 0\r\n a0 = self.camera.principalPoint[0] / pixel_size\r\n b0 = self.camera.principalPoint[1] / pixel_size\r\n self.__innerOrientationParameters = {'a0': a0, 'a1': a1, 'a2': a2, 'b0': b0, 'b1': b1, 'b2': b2,\r\n 'V': 0, 'sigma0': 0, 'sigmaX': 0}\r\n return {'a0': a0, 'a1': a1, 'a2': a2, 'b0': b0, 'b1': b1, 'b2': b2,\r\n 'V': 0, 'sigma0': 0, 'sigmaX': 0}\r\n else:\r\n\r\n # observation vector\r\n l = np.matrix(imagePoints).flatten('F').T\r\n\r\n # fiducial marks - camera system\r\n fc = self.camera.fiducialMarks\r\n\r\n # A matrix (16X6)\r\n j = len(imagePoints[:, 0])\r\n A = np.zeros((len(l), 6))\r\n for i in range(j):\r\n A[i, 0:3] = np.array([1, fc[i, 0], fc[i, 1]])\r\n A[i + j, 3:] = np.array([1, fc[i, 0], fc[i, 1]])\r\n\r\n # N matrix\r\n N = (A.T).dot(A)\r\n # U vector\r\n U = (A.T).dot(l)\r\n # adjusted variables\r\n X = (np.linalg.inv(N)).dot(U)\r\n # v remainders vector\r\n v = A.dot(X) - l\r\n\r\n # sigma posteriory\r\n u = 6\r\n r = len(l) - u\r\n sigma0 = ((v.T).dot(v)) / r\r\n sigmaX = sigma0[0, 0] * (np.linalg.inv(N))\r\n # update field\r\n self.__innerOrientationParameters = {'a0': X[0, 0], 'a1': X[1, 0], 'a2': X[2, 0], 'b0': X[3, 0],\r\n 'b1': X[4, 0],\r\n 'b2': X[5, 0],\r\n 'V': v, 'sigma0': sigma0[0, 0], 'sigmaX': sigmaX}\r\n\r\n return {'a0': X[0, 0], 'a1': X[1, 0], 'a2': X[2, 0], 'b0': X[3, 0], 'b1': X[4, 0], 'b2': X[5, 0],\r\n 'V': v, 'sigma0': sigma0[0, 0], 'sigmaX': sigmaX}", "def decompose_view_matrix(pybullet_view_matrix):\n # It would be MUCH better to use something from bullet, however pybullet does\n # not expose all of the linear algebra library.\n mat = pybullet_mat_to_numpy_4x4(pybullet_view_matrix)\n\n # View matrix is now:\n # | R_11 R_12 R_13 t_1 |\n # | R_21 R_22 R_23 t_2 |\n # | R_31 R_32 R_33 t_3 |\n # | 0 0 0 1 |\n\n # R is the 
inverse eye to target at orientation, and t is R * eye.\n mat_view_to_world = np.linalg.inv(mat)\n\n # mat_view_to_world is the view to world transform, therefore the translation\n # component of this matrix is simply the world space position (since mat *\n # (0, 0, 0, 1)) is just copying the right column.\n world_xyz_view = np.copy(mat_view_to_world[0:3, 3])\n\n mat_view_to_world[0:3, 3] = 0 # Zero out the position change.\n world_quat_view = matrix_to_rotation(mat_view_to_world).as_quat()\n\n return world_xyz_view, world_quat_view", "def camera_matrix_from_pbobject(intrinsics):\n K = np.eye(3)\n K[0, 0] = intrinsics.fx\n K[1, 1] = intrinsics.fy\n K[0, 2] = intrinsics.cx\n K[1, 2] = intrinsics.cy\n K[0, 1] = intrinsics.skew\n return K", "def camera_position(self):\n return (self.left.dist_from_center_m() + self.right.dist_from_center_m()) / 2.0", "def compute_orientation_3d(obj, P):\n # compute rotational matrix around yaw axis\n R = roty(obj.ry)\n # orientation in object coordinate system\n orientation_3d = np.array([[0.0, obj.l], [0, 0], [0, 0]])\n # rotate and translate in camera coordinate system, project in image\n orientation_3d = np.dot(R, orientation_3d)\n orientation_3d[0, :] = orientation_3d[0, :] + obj.t[0]\n orientation_3d[1, :] = orientation_3d[1, :] + obj.t[1]\n orientation_3d[2, :] = orientation_3d[2, :] + obj.t[2]\n # vector behind image plane?\n if np.any(orientation_3d[2, :] < 0.1):\n orientation_2d = None\n return orientation_2d, np.transpose(orientation_3d)\n # project orientation into the image plane\n orientation_2d = project_to_image(np.transpose(orientation_3d), P)\n return orientation_2d, np.transpose(orientation_3d)", "def get_cam_vectors( self ):\n if _DEBUG: print( \"Pos:\" , self.get_camera_position() , \"Cen:\" , self.center , \"Up:\" , self.up )\n return self.get_camera_position() , self.center , self.up", "def pose2mat(pose):\n extrinsic = torch.eye(4)\n extrinsic[:3, :] = pose[:, :4]\n inv_extrinsic = torch.inverse(extrinsic)\n extrinsic = torch.inverse(inv_extrinsic)\n h, w, focal_length = pose[:, 4]\n intrinsic = torch.Tensor([[focal_length, 0, w/2],\n [0, focal_length, h/2],\n [0, 0, 1]])\n\n return extrinsic, intrinsic", "def velo_to_cam_rotation(self):\n velo_to_cam = self._get_velo2camera()\n return velo_to_cam[:, :3]", "def camera_matrix(self) -> TransformationMatrixType:\n return numpy.matmul(\n self.rotation_matrix(*self.rotation),\n displacement_matrix(*-numpy.array(self.location)),\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function returns the extrinsic parameter matrix
def getExtrinsicParameter(K, R, C):
    t = np.dot(-R, C)
    homogeneous_matrix = np.hstack((R.reshape(3, 3), t))
    extrinsic_parameter = np.dot(K, homogeneous_matrix)
    return extrinsic_parameter
[ "def get_extM(self):\n ext_mat = np.array([\n [0, 1, 0],\n [0, 0, 1],\n [1, 0, 0]]) @ np.hstack((self.R.T, -self.R.T @ self.T)) # extrinsic matrix (rows re-aligned)\n return ext_mat", "def get_model_parameters(self):\n return np.atleast_2d(self.cost_model.get_model_parameters())", "def internal_parameters(k_matrix):\n return k_matrix[0, 0], k_matrix[1, 1], \\\n (k_matrix[0, 2], k_matrix[1, 2]), k_matrix[0, 1]", "def get_extrinsic_matrix(pose):\n batch_size, _ = pose.shape\n rot = pose[:,:3]\n trans = pose[:,3:]\n\n rot = transforms.euler_angles_to_matrix(rot,convention=\"XYZ\")\n pose = torch.cat((rot,trans.view(batch_size, 3, 1)), -1)\n\n return pose", "def matrix_param(self):\n return self.__matrix_param", "def _get_design_matrix(self, n_samples):\n if isinstance(self.model, MixedEffect):\n X, V = _get_mixed_design(self)\n else:\n X = _get_fixed_design(self, n_samples)\n V = None\n return X, V", "def get_camera_parameters_extrinsic(scene):\n # bcam stands for blender camera\n bcam = scene.camera\n R_bcam2cv = np.array([[1, 0, 0],\n [0, -1, 0],\n [0, 0, -1]])\n\n # Transpose since the rotation is object rotation, \n # and we want coordinate rotation\n # R_world2bcam = cam.rotation_euler.to_matrix().transposed()\n # T_world2bcam = -1*R_world2bcam * location\n #\n # Use matrix_world instead to account for all constraints\n location = np.array([bcam.matrix_world.decompose()[0]]).T\n R_world2bcam = np.array(bcam.matrix_world.decompose()[1].to_matrix().transposed())\n\n # Convert camera location to translation vector used in coordinate changes\n # T_world2bcam = -1*R_world2bcam*bcam.location\n # Use location from matrix_world to account for constraints:\n T_world2bcam = np.matmul(R_world2bcam.dot(-1), location)\n\n # Build the coordinate transform matrix from world to computer vision camera\n R_world2cv = np.matmul(R_bcam2cv, R_world2bcam)\n T_world2cv = np.matmul(R_bcam2cv, T_world2bcam)\n\n extr = np.concatenate((R_world2cv, T_world2cv), axis=1)\n return extr", "def get_parameters(self):\n return self.sess.run(self.A_symm)", "def get_params(self):\n return torch.cat([p.data.view(-1) for p in self.parameters()], dim=0)", "def get_params_array(self):\n return np.array(self.W), np.array(self.b)", "def getParams(self):\n return np.hstack([np.array(m.getParams())[self.indices] for m in self.mogs])", "def _get_design(self, mean_part, inno_part, poly_orders):\r\n if len(poly_orders) == 3:\r\n mat_X = self._build_dmatrix(mean_part, poly_orders[0] + 1)\r\n mat_Z = self._build_dmatrix(inno_part, poly_orders[1] + 1)\r\n mat_W = self._build_mat_W(poly_orders[2] + 1)\r\n else:\r\n mat_X = np.array(patsy.dmatrix(mean_part + '-1', self.df))\r\n mat_Z = np.array(patsy.dmatrix(inno_part + '-1', self.df))\r\n mat_W = np.empty((0, 0))\r\n return mat_X,mat_Z,mat_W", "def camera_matrix_from_pbobject(intrinsics):\n K = np.eye(3)\n K[0, 0] = intrinsics.fx\n K[1, 1] = intrinsics.fy\n K[0, 2] = intrinsics.cx\n K[1, 2] = intrinsics.cy\n K[0, 1] = intrinsics.skew\n return K", "def M(self):\n return _hypre.HypreParMatrix_M(self)", "def get_camera_intrinsic_matrix(yfov,width,height):\n\n aspect_ratio = width / height\n intrinsic_matrix = np.zeros((3,3))\n intrinsic_matrix[0][0] = width / (2.0 * aspect_ratio * np.tan(yfov/2.0))\n intrinsic_matrix[1][1] = height / (2.0 * np.tan(yfov/2.0))\n intrinsic_matrix[2][2] = 1\n intrinsic_matrix[0][2] = width/2 # Or (width-1)/2\n intrinsic_matrix[1][2] = height/2 # Or (height-1)/2\n return intrinsic_matrix", "def get_neural_parameters(self):", "def get_params(self) -> 
torch.Tensor:\n params = []\n for pp in list(self.parameters()):\n params.append(pp.view(-1))\n return torch.cat(params)", "def parameter_values(self) -> Tuple[np.ndarray]:", "def get_matrix(self):\n return self.A" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes all hydrogen atoms from the instance.
def remove_hydrogens(self) -> None:
    for cid, c in self:
        for rid, r in c:
            for aid, a in r:
                if a.element == 'H':
                    print('removing H at %s' % aid)
                    r.remove_atom(a)
[ "def delete(self):\n del self.shx.atoms[self.index]", "def strip(self):\n types = [type(self.strip),\n type(self.values),\n type(self.__ne__),\n type(self.__class__)]\n\n for attr in dir(self):\n if not type(getattr(self, attr)) in types:\n if any(i in attr for i in self.keep) or attr[0:2] == '__':\n continue\n else:\n x = getattr(self, attr)\n del x\n for molecule in self.values():\n molecule.strip_molecule(self.keep)\n exit()", "def remove_H_from_r(res, verbose=False):\n h_list = [atm.id for atm in res.get_atoms() if atm.element == 'H']\n for at_id in h_list:\n if verbose:\n print(\" Deleting atom \" + at_id)\n res.detach_child(at_id)", "def removeHetero(self):\n self.stripChildren('H', 'eq', 'h_flag', forgiving=False)", "def removeDoubleUnbondedAtoms (self):\r\n atomsToRemove = [] # Stores index of atoms we will need to remove\r\n \r\n # Go through each mol\r\n for i in range(len(self.mol)):\r\n # Atom is disconnected if number of unbonded spikes is equal to the number of spikes in the atom\r\n numUnbondedSpikes = 0\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.mol[i].spikeArray[j].bonded == False:\r\n # Spike not bonded so increment counter\r\n numUnbondedSpikes += 1\r\n # If atom disconnected then need to check to see if dangling nodes or tails are bonded\r\n if numUnbondedSpikes == len(self.mol[i].spikeArray):\r\n print (\"Atom: \" + str(self.mol[i].rbnNumber) + \" is being removed \\n\")\r\n anyBondedDanglingNodes = False\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.isUnbondedAtomConnected(self.mol[i].spikeArray[j]) == True:\r\n anyBondedDanglingNodes = True\r\n # If atom has connected dangling nodes then need to convert atom to metaAtom, add metaAtom to metaMolecule and\r\n # remove atom from ring\r\n if anyBondedDanglingNodes == True:\r\n print (\"A new metaAtom is being created \\n\")\r\n newMetaAtom = self.convertUnbondedAtomToMetaAtom(self.mol[i])\r\n self.metaMolecule.addMetaAtom(newMetaAtom)\r\n atomsToRemove.append(i)\r\n \r\n # Now need to remove atoms\r\n print (\"Length of ring before removal: \" + str(len(self.mol)) + \"\\n\")\r\n for i in range(len(atomsToRemove)):\r\n self.mol.pop(atomsToRemove[i])\r\n print (\"Length of ring after removal: \" + str(len(self.mol)) + \"\\n\")\r\n # Finally need to update metaMolecule with new mol \r\n self.metaMolecule.updateListMols(self)", "def delete_atom(self, atom):\n self.atoms = [a for a in self.atoms if a is not atom]", "def remove_dummy(self) -> None:\n\n for i, atom in enumerate(self):\n if isinstance(atom, DummyAtom):\n del self[i]\n return", "def trim_instance(inst_node):\n for child in list(inst_node):\n _, tag = util.split_tag(child.tag)\n if tag.startswith('__'):\n inst_node.remove(child)\n else:\n trim_instance(child)", "def remove_all(self):\r\n\t\twhile len(self.components_array) > 0:\r\n\t\t\tself.remove(self.components_array[0].__class__)", "def RemoveAll(self):", "def removeWeapArcs(self):\n for sim in self.weaparcs:\n self.world.removeFromWorld(sim)\n self.weaparcs = []", "def reset():\n for hist in (\"Epair_Etagm\", \"Etagm_Epair\", \"ttagm_pair\", \n \"Epair_Etagm_fit\", \"dEpair_Etagm_fit\"):\n h = ROOT.gROOT.FindObject(hist)\n if h:\n h.Delete()", "def __delitem__(self, instancename):\n element = self.elements.pop(instancename)\n\n ## Remove floating terminal nodes and internal nodes\n othernodes = set(self.terminal_nodes())\n for instance_name, e in self.elements.items():\n terminal_nodes = set([self.term_node_map[instance_name][term]\n for term in e.terminals])\n 
othernodes.update(terminal_nodes)\n internal_nodes = set(element.non_terminal_nodes(instancename))\n terminal_nodes = set([self.term_node_map[instancename][term]\n for term in element.terminals])\n floating_terminal_nodes = terminal_nodes - othernodes\n removed_nodes = internal_nodes | floating_terminal_nodes\n\n for node in removed_nodes:\n self.nodes.remove(node)\n del self.nodenames[node.name] \n\n for branch in self._instance_branches(element, instancename):\n self.branches.remove(branch)\n\n del self.term_node_map[instancename]\n\n self.update_node_map()", "def delX(self):\n del self.components[0]", "def strip_degenerate(self):\n return self.__class__(self.moltype.strip_degenerate(str(self)), info=self.info)", "def remove(self, atom):\n new_atoms = []\n for a in self.atoms:\n if a != atom:\n new_atoms.append(a)\n return Clause(new_atoms)", "def destroyGlobalNuclides():\n global instances\n global byName\n global byDBName\n global byLabel\n global byMcc2Id\n global byMcc3Id\n global byMcnpId\n global byAAAZZZSId\n\n instances = []\n byName.clear()\n byDBName.clear()\n byLabel.clear()\n byMcc2Id.clear()\n byMcc3Id.clear()\n byMcnpId.clear()\n byAAAZZZSId.clear()", "def __del__(self):\n for node in self.nodes:\n node.clear()", "def __call__(self, mol):\n return self.remove(mol)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
collect a set of residues with memb_z within [15, 15]
def memb_residues(pdb: MyPDB) -> list():
    result = []
    for ch in pdb.chains.values():
        for res in ch.values():
            if res.memb_z is not None:
                result.append(res)
    return result
[ "def residues(atoms):\n residues = dict([ [j,[]] for j in set([i['resid'] for i in atoms]) ])\n for i in range(len(atoms)):\n residues[atoms[i]['resid']].append(i)\n return residues", "def getLigandNbrs(resids: List[Residue], struct:Structure)->List[ResidueDict]:\n\n ns = NeighborSearch(list( struct.get_atoms() ))\n nbrs = []\n\n for r in resids:\n # a ligand consists of residues\n resatoms = r.child_list[0]\n # each residue has an atom plucked at random\n for nbrresidues in ns.search(resatoms.get_coord(), 5,level='R'):\n # we grab all residues in radius around that atom and extend the list of neighbors with those\n nbrs.extend([nbrresidues])\n\n # Filter out the residues that constitute the ligand itself\n filtered = [] \n for neighbor in nbrs:\n present = 0\n for constit in resids:\n if ResidueDict(constit)==ResidueDict( neighbor ):\n present = 1\n if present == 0:\n filtered.append(ResidueDict(neighbor))\n\n return [ * map(lambda x: addBanClass(x) , set(filtered) ) ]", "def ExtractCandidates(im_norm,h,radius,nbit):\r\n \r\n # Normalized top_hat filtering\r\n se=disk(radius)\r\n im=white_tophat(im_norm,se)\r\n \r\n #filtering local maxima\r\n h_maxima=extrema.h_maxima(im, h,selem=diamond(1))\r\n label_h_max=label(h_maxima,neighbors=4)\r\n labels=pd.DataFrame(data={'labels':np.sort(label_h_max[np.where(label_h_max!=0)])})\r\n dup=labels.index[labels.duplicated() == True].tolist() #find duplicates labels (=connected components) \r\n \r\n #splitting connected regions to get only one local maxima \r\n max_mask=np.zeros(im.shape)\r\n max_mask[label_h_max!=0]=np.iinfo(nbit).max\r\n \r\n for i in range (len(dup)):\r\n r,c=np.where(label_h_max==labels.loc[dup[i],'labels']) #find coord of points having the same label\r\n meanpoint_x=np.mean(c)\r\n meanpoint_y=np.mean(r)\r\n dist=[distance.euclidean([meanpoint_y,meanpoint_x],[r[j],c[j]]) for j in range(len(r))]\r\n ind=dist.index(min(dist))\r\n r,c=np.delete(r,ind),np.delete(c,ind) #delete values at ind position.\r\n max_mask[r,c]=0 #set to 0 points != medoid coordinates\r\n \r\n return max_mask", "def xic(self, mz, ppm):\n res = []\n for mza, inta in zip(self._mza, self._inta):\n left = mza.searchsorted(mz * (1 - ppm * 0.5 * 1e-6))\n right = mza.searchsorted(mz * (1 + ppm * 0.5 * 1e-6))\n res.append(inta[left:right].sum())\n\n res = np.array(res)\n assert len(res) == len(self._times), \"Wrong length of chromatogram\"\n return Chromatogram(self._times, res)", "def search_cmorlist(mip_filt, tab_filt):\n from dreqPy import dreq\n from dreqPy.extensions import collect\n\n\n dq = dreq.loadDreq()\n collect.add( dq )\n mip = dq.coll['mip'].items[0]._labelDict[mip_filt]\n\n mvars = mip._get__CMORvar()\n expts = mip._get__expt()\n\n MIP_Expts = set([ expt for expt in dq.coll['experiment'].items\n if expt.uid in expts ])\n MIP_Expts = list(MIP_Expts)\n\n # Find the 'CMORvar'-items, associated with these requestVars:\n\n cmorlist = {}\n mvarlist = {}\n for cmvar in dq.coll['CMORvar'].items:\n if cmvar.label == 'lwsnl':\n print (cmvar.mipTable, tab_filt)\n if cmvar.uid in mvars and tab_filt in cmvar.mipTable:\n cmorlist[cmvar.label] = cmvar\n mvarlist[cmvar.label] = dq.inx.uid[cmvar.vid] \n \n\n return cmorlist, mvarlist", "def create_subsets(x, y):\n # initiate empty list for return variables.\n sets_x = []\n sets_y = []\n indices = []\n\n # iterate through value of PRI_JET_NUM (ranged inclusively from 0 until 3)\n for pri_jet_num_val in np.unique(x[:,22]):\n \n # Find subset which DER_MASS_MMC is not equal to -999\n mask = (x[:,22] == pri_jet_num_val) & 
(x[:,0] != -999)\n x_tmp = x[mask,:]\n y_tmp = y[mask]\n\n # store the subset into list\n sets_x.append(x_tmp)\n sets_y.append(y_tmp)\n indices.append(mask)\n\n # Find subset which DER_MASS_MMC is equal to -999\n mask = (x[:,22] == pri_jet_num_val) & (x[:,0] == -999)\n x_tmp = x[mask,:]\n y_tmp = y[mask]\n\n # store the subset into list\n sets_x.append(x_tmp)\n sets_y.append(y_tmp)\n indices.append(mask) \n \n # return subsets of x, y, and corresponding indices\n return sets_x, sets_y, indices", "def get_regions_mask(self, input):", "def _find_members(self, given_members):\n if len(list(self.points)) > 3:\n out_mem = [m for m in given_members if\n self.intersects_poly(m.polygon)]\n else:\n out_mem = []\n return out_mem", "def get_candidates_from_unimod(mass_shift, tolerance, unimod_db, unimod_df):\n ind = list(unimod_df[abs(unimod_df['mono_mass']-mass_shift) < tolerance].index)\n sites_set = set()\n for i in unimod_db[ind]:\n sites_set.update(set(pd.DataFrame(i['specificity']).site))\n return list(sites_set)", "def make_zernike_indexes(self):\n zernike_n_m = []\n for n in range(10):\n for m in range(n+1):\n if (m+n) & 1 == 0:\n zernike_n_m.append((n,m))\n return np.array(zernike_n_m)", "def list_microregions(*search, **searches):\n return list_regions_helper(\"microrregioes\", search, searches)", "def ranges_for_z_calculations(n, all_factorizations, it_set):\n\n possible_primes = []\n for x in all_factorizations:\n for y in x:\n if len(y) == 1:\n possible_primes += y\n\n # Don't analyze for positions that are multiples of potential nonprimes.\n new_it_set = set()\n for y in it_set:\n valid = True\n for x in y:\n assert len(all_factorizations.reverse_idx[(x,)])==1\n if len(all_factorizations[all_factorizations.reverse_idx[(x,)][0]]) > 1:\n valid = False\n if valid:\n new_it_set.add(tuple(y))\n\n mask = {}\n\n for y in new_it_set:\n # Mask[y] will be set to True for each index we care about\n mask[y] = [False] * len(all_factorizations)\n for p in possible_primes:\n for x_idx in range(0, len(all_factorizations)):\n for z in all_factorizations[x_idx]:\n t, o = simplify(z, y)\n if not t:\n # y is a multiple of z; we're interested in this\n # FixMe: Why?\n mask[y][x_idx] = True\n tup = (sorted([p,p] + z), sorted([p]+z))\n for v in tup:\n\n val = all_factorizations.ord_absolute(v,y)\n\n if tuple(v) == y:\n # FixMe: Why?\n mask[y][x_idx] = True\n if tuple(v) not in all_factorizations.outstanding and \\\n tuple(v) not in all_factorizations.finished:\n # We haven't generated v yet at all, can't do anything here\n continue\n\n if val == 99:\n t, o = simplify(v, y)\n\n # We don't know p*p*z <> y; cancle out common factors and\n # for the remainder iterate through all possibilities\n # FixMe: Explain why we care about these\n if tuple(o) != y:\n for idx in all_factorizations.reverse_idx[tuple(t)]:\n mask[y][idx] = True\n for idx in all_factorizations.reverse_idx[tuple(o)]:\n mask[y][idx] = True\n # End for v in tup\n # End for z in all_factorizations[x_idx]:\n # End for x_idx in range(0, len(all_factorizations)):\n # End for p in possible_primes:\n\n\n for x in range(0, len(mask[y])):\n if mask[y][x]:\n for z in all_factorizations[x]:\n for zp in z:\n if len(all_factorizations.reverse_idx[(zp,)]) > 1:\n mask[y][all_factorizations.reverse_idx[(zp,)][0]] = True\n\n # For every factorization where mask[y][x]==True, set everywhere it could be to true\n # FixMe: Use shared here\n for x in range(0,2):\n present = set()\n for x_idx in range(0, len(all_factorizations)):\n if mask[y][x_idx] == True:\n for z in 
all_factorizations[x_idx]:\n present.add(tuple(z))\n\n for x_idx in range(0, len(all_factorizations)):\n if mask[y][x_idx] == False:\n for z in all_factorizations[x_idx]:\n if tuple(z) in present:\n mask[y][x_idx] = True\n\n # mask[y][y_idx] and everything shared should always be false.\n # FixMe: Explain why\n # FixMe: Use shared\n present = set()\n for y_idx in all_factorizations.reverse_idx[y]:\n mask[y][y_idx] = False\n for z in all_factorizations[y_idx]:\n present.add(tuple(z))\n\n for x_idx in range(0,len(all_factorizations)):\n for z in all_factorizations[x_idx]:\n if tuple(z) in present:\n mask[y][x_idx] = False;\n\n # For clarity, if there is only one option don't have the mask set to true\n for x_idx in range(0,len(all_factorizations)):\n if len(all_factorizations[x_idx]) == 1:\n mask[y][x_idx] = False;\n return mask", "def subset_zonotope_convexhullOFzonotopes(model,x,G,list_of_zonotopes):\n (n,n_G)=G.shape\n alpha={}\n alpha_abs={}\n beta={}\n beta_abs={}\n Lambda={}\n F={}\n d={}\n y={}\n for zono in list_of_zonotopes:\n (n,n_Z)=zono.shape\n F[zono]=np.empty((n,n_G),dtype='object')\n alpha[zono]=np.empty((n_Z,n_G),dtype='object')\n beta[zono]=np.empty((n_Z,1),dtype='object')\n y[zono]=np.empty((n,1),dtype='object')\n alpha_abs[zono]=np.empty(alpha.shape,dtype='object')\n beta_abs[zono]=np.empty(beta.shape,dtype='object')\n # Add Matrices\n F[zono]=add_Var_matrix(model,F[zono])\n alpha[zono]=add_Var_matrix(model,alpha[zono])\n beta[zono]=add_Var_matrix(model,beta[zono])\n alpha_abs[zono]=add_Var_matrix(model,alpha_abs[zono])\n beta_abs[zono]=add_Var_matrix(model,beta_abs[zono])\n y[zono]=add_Var_matrix(model,d[zono])\n Lambda[zono]=np.empty((1,1),dtype='object')\n Lambda[zono][0,0]=model.addVar(lb=0,ub=1)\n # Model Update\n model.update()\n # Constraints on \n constraints_AB_eq_CD(model,np.eye(n),F[zono],zono.G,alpha)\n constraints_AB_eq_CD(model,np.eye(n),y[zono],zono.G,beta)\n for row in range(n_Z):\n for column in range(n_G):\n model.addConstr(alpha_abs[zono][row,column]>=alpha[zono][row,column])\n model.addConstr(alpha_abs[zono][row,column]>=-alpha[zono][row,column])\n for row in range(n_Z):\n model.addConstr(beta_abs[zono][row,0]>=beta[zono][row,0])\n model.addConstr(beta_abs[zono][row,0]>=-beta[zono][row,0])\n for row in range(n_Z):\n sum_terms=LinExpr()\n sum_terms.add(beta_abs[zono][row,0])\n for column in range(n_G):\n sum_terms.add(alpha_abs[zono][row,column])\n model.addConstr(sum_terms<=Lambda[zono])\n # Summation for F:\n constraints_sum(model,G,[F[zono] for zono in list_of_zonotopes])\n constraints_sum(model,x,[d[zono] for zono in list_of_zonotopes])\n constraints_sum(model,np.array([1]).reshape(1,1),[Lambda[zono] for zono in list_of_zonotopes])\n for row in range(n):\n model.addConstr(d[row,0]==x[row,0]-y[row,0]) \n pass", "def fetchCooler(c, regions, coolerFetch = lambda coo, ext:coo.matrix(balance=True, sparse=True).fetch(ext),\n mask=True, force=False, ):\n regions = [list(i) for i in regions]\n resolution = c.binsize\n\n for i in regions:\n if i[1] == None:\n i[1] = 0 \n if i[2] == None:\n i[2] = c.chromsizes[i[0]]\n\n \n for a in regions: \n if str(a[0]) not in c.chromnames:\n raise ValueError(\"Chromosome {0} from regions not found in cooler\".format(a))\n if (a[1] % resolution) != 0:\n raise ValueError(\"Start of an region should be a multiple fo resolution\")\n \n# bins = c.bins()[:]\n \n# # managing masks \n# if mask is False: \n# bins[\"mask\"] = 1 \n# elif mask is None:\n# assert \"mask\" in bins.columns\n# elif mask is True: \n# pass \n# elif 
callable(mask):\n# pass \n# else:\n# bins[\"mask\"] = mask \n \n \n for region in regions:\n matrix = coolerFetch(c, region)\n try: # setting matrix nans to zeros.\n matrix.data = np.nan_to_num(matrix.data, copy=False)\n except TypeError: #workaround for old numpy versions\n matrix.data = np.nan_to_num(matrix.data)\n# st,end = c.extent(region)\n# subbins = bins[st:end].copy()\n if mask is True: \n newmask = np.array((matrix.sum(axis=0) > 0 ))[0]\n# if callable(mask):\n# new_mask = mask(matrix)\n# subbins[\"mask\"] = newmask \n\n assert len(newmask) == matrix.shape[0]\n\n yield matrix, newmask", "def inMSet(c, n):\r\n z = 0\r\n numbers = range(n)\r\n for x in numbers:\r\n z = z**2 + c\r\n if abs(z) > 2:\r\n return False\r\n return True", "def get_relevant_zones(array,threshold=3):\n\n\treturn [item for item in array if len(item)>3]", "def roi_vecs(layer_coords, vec_coords, region):\n \n if region == 'crown':\n #find threshold for vectors inside roi\n start_x_lst = []\n stop_x_lst = []\n for i in range(1,5):\n start_x_lst.append(layer_coords[i][0][0])\n stop_x_lst.append(layer_coords[i][-1][0])\n\n start_x = max(start_x_lst)\n stop_x = min(stop_x_lst)\n \n roi_vec_coords = [i for i in vec_coords if i[0][0] in list(range(start_x, stop_x+5))]\n \n return roi_vec_coords\n \n elif region == 'fundus':\n #find threshold for vectors inside roi\n start_x_lst = []\n stop_x_lst = []\n for i in range(1,5):\n start_x_lst.append(layer_coords[i][0][0])\n stop_x_lst.append(layer_coords[i][-1][0])\n\n start_x = max(start_x_lst)\n stop_x = min(stop_x_lst)\n\n # roi_vec_coords = [i for i in vec_coords if i[1][0] in list(range(start_x-10, stop_x+3))]\n roi_vec_coords = [i for i in vec_coords if i[0][0] in list(range(stop_x, start_x))]\n \n # print(roi_vec_coords)\n return roi_vec_coords", "def icovsubsample(covin,zlist):\n\n lbig = len(covin[:,0])\n ll = len(zlist)\n covnew = np.zeros([ll,ll])\n icovfinal = np.zeros([lbig, lbig])\n for i in range(ll):\n for j in range(ll):\n covnew[i,j] = covin[zlist[i], zlist[j]]\n\n icovnew = np.array(((np.matrix(covnew)).I))\n for i in range(ll):\n for j in range(ll):\n icovfinal[zlist[i], zlist[j]] = icovnew[i,j]\n\n return icovfinal", "def findCitiesByRectangle(self,bounds,citylist=None):\n xmin = bounds[0]\n xmax = bounds[1]\n ymin = bounds[2]\n ymax = bounds[3]\n subcities = []\n if citylist == None:\n citylist = self.cities\n for city in citylist:\n if city['lat'] >= ymin and city['lat'] <= ymax and city['lon'] >= xmin and city['lon'] <= xmax:\n subcities.append(city)\n\n return subcities" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes with the given root Python module. The application will look for all `Resource` classes defined in that root module.
def __init__(self, root):
    self._root = root
    if not self.get_resources():
        raise Exception('Your application has no Resource.')
[ "def initAll(cls):\n for dir in os.environ['MODULEPATH'].split(':'):\n if not os.path.exists(dir):\n continue\n for subdir in os.listdir(dir):\n subdirPath = \"%s/%s\" % (dir, subdir)\n if not os.path.isdir(subdirPath):\n continue\n for file in os.listdir(subdirPath):\n filePath = \"%s/%s\" % (subdirPath, file)\n if os.path.isdir(filePath) or file.startswith('.'):\n continue\n m = Modulefile(\"%s/%s\" % (subdir, file))", "def __init__(self):\n super(RootSuite, self).__init__(None, None, 'root')", "def init(self):\n\n self.loaded = False\n self.exports = NotImplemented\n self.exception = None\n self.namespace = self.create_namespace()\n self.namespace.__file__ = str(self.filename)\n self.namespace.module = self\n self.namespace.require = self.require", "def load_resources(self):\n\n # make services available to the endpoints, this way each endpoint can\n # make use of 'em.\n services = {\n \"socketio\": self.socketio,\n \"mail\": self.mail,\n \"api\": self.api,\n \"permissions\": self.permissions,\n \"config\": self.ctx.config\n }\n\n for res in RESOURCES:\n module = importlib.import_module('vantage6.server.resource.' + res)\n module.setup(self.api, self.ctx.config['api_path'], services)", "def __init__(self, rootPath=None):\n self.rootPath = rootPath or '.'", "def setup(self):\n if self.__isSetup:\n return\n for name, mod in zope.component.getUtilitiesFor(IAPIDocRootModule):\n module = safe_import(mod)\n if module is not None:\n self._children[name] = Module(self, name, module)\n self.__isSetup = True", "def init_rest(app_):\n\n rest_api = Api(app_)\n rest_api.add_resource(views.rest_resources.AppListResource,\n ActiveConfig.REST_URL_APPS_LIST,\n ActiveConfig.REST_URL_APPS_LIST + '/')\n rest_api.add_resource(views.rest_resources.AppResource,\n ActiveConfig.REST_URL_APPS_ITEM,\n ActiveConfig.REST_URL_APPS,\n ActiveConfig.REST_URL_APPS + '/')", "def register_root(cls):\n if RegisteredType._reg['root_class'] is None:\n\n del RegisteredType._reg\n RegisteredType._reg = {\n 'classes' : { 'classid_key' : 'type'},\n 'autoid' : 0,\n 'classids' : { 'type' : 'classid_key' },\n }\n RegisteredType._reg['root_class'] = cls \n cls.register_class()", "def test_init(app: Teal):\n DeviceDef, ComponentDef, ComputerDef = \\\n app.config['RESOURCE_DEFINITIONS'] # type: Tuple[ResourceDef]\n assert isinstance(app.resources['Device'], DeviceDef)\n assert isinstance(app.resources['Computer'], ComputerDef)\n assert isinstance(app.resources['Component'], ComponentDef)\n assert app.tree['Device'].parent is None\n assert app.tree['Computer'] in app.tree['Device'].descendants\n assert app.tree['Component'] in app.tree['Device'].descendants\n assert len(app.tree['Device'].descendants) == 2\n assert app.tree['Computer'].parent == app.tree['Component'].parent == app.tree['Device']\n\n views = {\n 'Component.main',\n 'Computer.main',\n 'Device.main',\n 'apidocs_endpoint',\n 'static'\n }\n assert views == set(app.view_functions.keys())", "def __init__(self, root, wadl_resource):\n if root is None:\n # This _is_ the root.\n root = self\n # These values need to be put directly into __dict__ to avoid\n # calling __setattr__, which would cause an infinite recursion.\n self.__dict__['_root'] = root\n self.__dict__['_wadl_resource'] = wadl_resource", "def __init__(self, root_path: str):\n self._root_path = abspath(root_path)", "def __init__(self, root, api, symlink_resource):\n assert root and isinstance(root, config_types.Path)\n self._root = root\n self._api = api\n self._resource = symlink_resource\n # dict[Path]list(Path): 
Maps target to a list of linknames.\n self._link_map = {}", "def __init__(self, resources_dir=\"resources\"):\n self._resources_dir = os.path.abspath(resources_dir)\n self._ensembl_rest_client = EnsemblRestClient()\n self._init_resource_attributes()", "def init_resource_manager():\n global resource_manager\n global REST_BASE\n global TRAYS\n global SPEC\n resource_manager = ResourceManager(REST_BASE, SPEC,MODE,TRAYS)\n\n # If POPULATE is specified in emulator-config.json, INFRAGEN is called to populate emulator (i.e. with Chassi, CS, Resource Blocks, etc) according to specified file\n try:\n POPULATE\n except:\n pass\n else:\n if os.path.exists(POPULATE):\n with open(POPULATE, 'r') as f:\n infragen_config = json.load(f)\n populate(infragen_config.get('POPULATE',10))\n\n resource_dictionary = ResourceDictionary()", "def init_library(root: 'str'):\n\n logger.debug('init(%r)', root)\n root = os.path.abspath(root)\n\n root_dir = Library.rootdir(root)\n logger.debug('mkdir %r', root_dir)\n try:\n os.mkdir(root_dir)\n except FileExistsError:\n logger.debug('skipping %r; exists', root_dir)\n\n root_file = Library.rootfile(root)\n if not os.path.exists(root_file):\n logger.debug('writing %r', root_file)\n with open(root_file, 'w') as f:\n f.write(root)\n\n dirs_dir = Library.dirsdir(root)\n logger.debug('mkdir %r', dirs_dir)\n try:\n os.mkdir(dirs_dir)\n except FileExistsError:\n logger.debug('skipping %r; exists', dirs_dir)\n\n return Library(root)", "def __init__(self, root, resource, wadl_method):\n self.root = root\n self.resource = resource\n self.wadl_method = wadl_method", "def init_package():\n global PlatformConfiguration\n if not PlatformConfiguration.LoadedAny:\n # May switch over to \"silent\" loading, but not knowing which config files were loaded can\n # cause a lot of errors...\n PlatformConfiguration = econ_platform_core.configuration.load_platform_configuration(display_steps=True)\n # By default, go into the \"logs\" directory below this file.\n if len(LogInfo.LogDirectory) == 0:\n # If it has not been set manually, use the config information.\n LogInfo.LogDirectory = utils.parse_config_path(PlatformConfiguration['Logging']['LogDirectory'])\n Databases.Initialise()\n Providers.Initialise()\n UpdateProtocolList.Initialise()\n global ExtensionList\n ExtensionList.LoadedExtensions, ExtensionList.FailedExtensions, ExtensionList.DecoratedFailedExtensions = \\\n econ_platform_core.extensions.load_extensions()", "def root_resource(app_request):\n heptet_app.reset_root(app_request)\n root = get_root(app_request)\n yield root\n heptet_app.reset_root(app_request)\n assert get_root(app_request) is not root", "def __init__(self, root=None):\n self.root = root or os.getcwd()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unnormalize a given image.
def unnormalize(self, image, transpose=False):
    return unnormalize(image, self.mean, self.std, transpose)
[ "def normalise(image):", "def reverse_normalize(image):\n\n reverse = transforms.Normalize(mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.255],\n std=[1 / 0.229, 1 / 0.224, 1 / 0.255])\n return reverse(image)", "def inverse_normalize(img):\n if opt.caffe_pretrain:\n img = img + (np.array([122.7717, 115.9465, 102.9801]).reshape((3, 1, 1)))\n return img[::-1, :, :] # convert to RGB\n\n return (img * 0.255 + 0.45).clip(min=0, max=1) * 255.", "def denormalize(image):\n return np.clip(image * 127.5 + 127.5, 0, 255).astype(np.uint8)", "def _normalize_image(self, img: np.ndarray) -> np.ndarray:\n i2 = img.astype(float) - self.bg\n i2 /= i2.max()\n return i2", "def normalise(self):\n self.img = self.img.astype(np.float32) / 255.0\n self.img -= self.img.mean()", "def normalize(img):\n img_min = img.min()\n img_max = img.max()\n return (img - img_min) / (img_max - img_min)", "def normalize_img(img: np.ndarray, bit_depth: int) -> np.ndarray:\n return img / ((1 << bit_depth) - 1)", "def normalize(self):\n self.image = rescale_intensity(self.image, out_range=(0, 255))", "def normalize(img):\n norm = cvCreateImage(cvSize(img.width, img.height), IPL_DEPTH_32F, 1)\n cvCopy(img, norm)\n cvNormalize(norm, norm, 1, 0, CV_MINMAX)\n norm_u = cvCreateImage(cvSize(img.width, img.height), IPL_DEPTH_8U, 1)\n cvConvertScale(norm, norm_u, 255)\n return norm_u", "def pytorch_normalize(img):\n img = torch.from_numpy(img) / 255.\n normalize = tfs.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n img = normalize(img)\n return img.numpy()", "def unnormalize(images, mean, std):\n \n unnorm_images = images * std + mean\n \n \n return unnorm_images", "def turn_intensity_normalization_off(self):\n self.intensity_normalize_image = False", "def _reshape_and_normalize_img(self, image):\n\t\timage = skimage.transform.resize(image, (self.height, self.width, self.channels), mode='reflect', preserve_range=True)\n\t\timage = np.reshape(image, (1, self.height, self.width, self.channels))\n\t\timage = image - VGG_MEANS\n\t\treturn image", "def normalized_images(image, config):\n image = tf.image.per_image_standardization(image)\n return image", "def normalize_to_image(img):\n if img.min() >= 0 and img.max() <= 1:\n img *= 255.\n else:\n img = cv2.normalize(img, None, alpha = 0, beta = 255, norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_32F)\n #img = np.clip(((img + 1) / 2.0) * 256, 0, 255)\n return img", "def normalize_image(img):\n\n # Load image and convert to grayscale\n img = rgb2gray(img)\n\n # Normalize values, range 0 to 255\n img = (img - img.min()) / (img.max() - img.min())\n img *= 255\n\n # Make int values\n img = img.astype(int)\n\n # Return new image\n return img", "def reshape_and_normalize_image(image):\n # Reshape image to mach expected input of VGG16\n image = np.reshape(image, ((1,) + image.shape))\n # Substract the mean to match the expected input of VGG16\n image = image - CONFIG.MEANS\n \n return image", "def normalize(img):\n img = img.astype(np.float32)\n img -= img.min()\n img /= img.max()\n img *= 255\n img = img.astype(np.uint8)\n\n return img" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle imbalanced dataset through sampler.
def create_class_imbalance_sampler(self):
    count = [0] * len(self.classes)
    for item in self.train_data.imgs:
        count[item[1]] += 1
    weight_per_class = [0.] * len(self.classes)
    for i in range(len(self.classes)):
        weight_per_class[i] = float(sum(count)) / float(count[i])
    weights = [0] * len(self.train_data.imgs)
    for idx, val in enumerate(self.train_data.imgs):
        weights[idx] = weight_per_class[val[1]]
    weights = torch.DoubleTensor(weights)
    self.sampler = torch.utils.data.sampler.WeightedRandomSampler(
        weights, len(weights)
    )
[ "def balanced_sampling(dat: pd.DataFrame, logger=None):\n if logger == None:\n logging.basicConfig(\n level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n logger = logging.getLogger(__name__)\n \n \n # upsampling\n logger.info('Start balanced sampling')\n subsample = []\n num_of_each_class = dat.iloc[:, -1].value_counts().to_numpy()\n if num_of_each_class.std()*1.0 / num_of_each_class.mean() < 0.1:\n logger.info('The given data is balance.')\n # the dataset is balanced\n return dat\n logger.info('Given dataset is unbalance')\n logger.info('Sampling data from each class to generate a new dataset')\n n_smp = num_of_each_class.max()\n for label in dat.iloc[:, -1].value_counts().index:\n samples = dat[dat.iloc[:, -1] == label]\n num_samples = len(samples)\n index_range = range(num_samples)\n # take all from the set\n indexes = list(np.random.choice(index_range, size=num_samples, replace=False))\n indexes2 = list(np.random.choice(\n index_range, size=n_smp-num_samples, replace=True)) # add random items\n indexes.extend(indexes2)\n subsample.append(samples.iloc[indexes, :])\n logger.info('End with sampling')\n out = pd.concat(subsample)\n out = out.sample(frac=1).reset_index(drop=True) # shuffle and re index\n return out", "def resampling():\n oversampling = over_sampling.SMOTE(random_state=42)\n undersampling = under_sampling.RandomUnderSampler(random_state=42)\n\n\n sampling_pipeline = imbPipeline([\n ('oversample', oversampling),\n ('undersample', undersampling)\n ])\n return sampling_pipeline", "def downsample(imbalanced_data):\n y = imbalanced_data[\"Translation\"].astype(int)\n y = np.where((y == 0), 0, 1)\n\n # Indices of each class' observations\n i_class0 = np.where(y == 0)[0]\n i_class1 = np.where(y == 1)[0]\n\n # Number of observations in each class\n n_class0 = len(i_class0)\n n_class1 = len(i_class1)\n print(\"Class 0 size: {}\".format(n_class0))\n print(\"Class 1 size: {}\".format(n_class1))\n\n # For every observation of class 1, randomly sample from class 0 without replacement\n i_class0_downsampled = np.random.choice(i_class0, size=n_class1, replace=False)\n print(\"After Downsampling:\")\n print(\"Class 0 size: {}\".format(len(i_class0_downsampled)))\n print(\"Class 1 size: {}\".format(n_class1))\n index_balanced = i_class0_downsampled.tolist() + i_class1.tolist()\n index_balanced = shuffle(index_balanced, random_state=42)\n return imbalanced_data.iloc[index_balanced, :]", "def setup_sampler(self):\n pass", "def handle_imbalance(dataset, minority_class):\n for i, l in enumerate(dataset):\n if l == minority_class:\n dataset[i] = 2\n return dataset", "def balance_data(df, y, do_undersample):\n if do_undersample:\n print('Under sampling the \\'0\\' class of our outcome data...')\n # Under sample -50K so we can better learn.\n ones = df[df['binary_income']==1]\n zeros = df[df['binary_income']==0]\n \n subsampled_df = pd.concat([ones, zeros.sample(ones.shape[0])])\n subsampled_y = subsampled_df['binary_income']\n subsampled_df = subsampled_df.drop('binary_income',axis=1)\n \n return subsampled_df, subsampled_y\n \n else:\n return df, y", "def balanceData(self, method: str = \"mixsampling\") -> None:\n\n if method == \"mixsampling\":\n from imblearn.combine import SMOTETomek\n self.balanceObj = SMOTETomek(sampling_strategy='auto')\n\n elif method == \"undersampling\":\n from imblearn.under_sampling import NearMiss\n self.balanceObj = NearMiss(sampling_strategy= \"auto\", n_neighbors=3, version=2)\n\n elif method == \"oversampling\":\n from 
imblearn.over_sampling import RandomOverSampler\n self.balanceObj = RandomOverSampler(sampling_strategy = \"auto\")\n\n else:\n raise NameError(f\"{method} method not defined\")", "def over_sampling(self):\n assert hasattr(self, 'X_train') == True, \"Object does not contain X_train attribute. Run method split_data() to create train/test splits\"\n \n # sm= SMOTE(random_state= 24)\n\n # X_train_sm, y_train_sm= sm.fit_resample(self.X_train, self.y_train)\n \n # print('Shape of X_train prior to SMOTE: %s' % (self.X_train.shape))\n # print('Shape of X_train after SMOTE: %s' % (self.X_train_sm.shape))\n # print('Class balance after SMOTE: \\n%s' % (y_train_sm.shape.value_counts(normalize= True) * 100))\n # self.X_train_SMOTE= X_train_sm\n # self.y_train_SMOTE= y_train_sm", "def class_balancing_sampler(y):\n weights = WeightedSampler.class_balancing_sample_weights(y)\n return WeightedSampler(weights)", "def get_dataset_sampler(self):\n return None", "def should_sample(self, span_context):\n raise NotImplementedError", "def prepare_dataset():\n with open('gold-posts.txt', encoding='utf-8') as f:\n posts = f.readlines()\n with open('gold-labels.txt', encoding='utf-8') as f:\n labels = f.readlines()\n\n def to_cat(x: str) -> int:\n if x == 'p':\n return 1\n elif x == 'n':\n return 2\n else:\n return 0\n X = np.array([x.strip() for x in posts])\n y = np.array([to_cat(x.strip()) for x in labels])\n\n # DOES NOT WORK - too imbalanced\n #skf = StratifiedKFold(n_splits=5, random_state=None, shuffle=False)\n #for train_index, test_index in skf.split(X, y):\n # X_train, X_test = X[train_index], X[test_index]\n # y_train, y_test = y[train_index], y[test_index]\n # break\n\n # WORKS better\n trI, teI = balanced_split(y)\n\n train_texts = X[trI].tolist()\n train_labels = y[trI].tolist()\n valid_texts = X[teI].tolist()\n valid_labels = y[teI].tolist()\n return train_texts, train_labels, valid_texts, valid_labels", "def sampled(X_train, y_train):\n \n from imblearn.over_sampling import SMOTE\n #from imblearn.over_sampling import RandomOverSampler\n \n smote = SMOTE(sampling_strategy='auto', k_neighbors=3, random_state=33)\n #X_train, y_train = smote.fit_sample(X_train, y_train) \n #X_test, y_test = smote.fit_sample(X_test, y_test) \n \n #return X_train, X_test, y_train, y_test\n\n # Create instance of SMOTE\n #smote = SMOTE()\n\n # Apply smote\n X_train_resampled, y_train_resampled = smote.fit_sample(X_train, y_train)\n return X_train_resampled, y_train_resampled", "def process_sample(self, sample):\n pass", "def undersample_dataset(X_train_orig, y_train):\n df_train = pd.concat([X_train_orig, y_train], axis = 1)\n count_class_0, count_class_1 = df_train.target.value_counts()\n df_class_0 = df_train[df_train['target'] == 0]\n df_class_1 = df_train[df_train['target'] == 1]\n df_class_0_under = df_class_0.sample(count_class_1)\n df_test_under = pd.concat([df_class_0_under, df_class_1], axis=0)\n\n logger.info(df_test_under.target.value_counts())\n X_train_under = df_test_under.drop(columns=[\"target\"])\n y_train_under = df_test_under[\"target\"].astype(int)\n\n return X_train_under, y_train_under", "def _balanced_sample(self, meters):\n nr_pos, nr_neg = self.pos_data.size, self.neg_data.size\n assert nr_pos + nr_neg > 0\n if nr_neg == 0:\n use_pos_data = True\n elif nr_pos == 0:\n use_pos_data = False\n else:\n use_pos_data = random.rand() < self.prob_pos_data\n meters.update(pos_data_ratio=int(use_pos_data))\n pool = self.pos_data if use_pos_data else self.neg_data\n return pool.get()", "def 
scenario_4(data):\n classifier = SupervisedModel(data.input, data.classData)\n classifier.splitData(0.3)\n\n classifier.benchAlgorithms()", "def load_binary_imbalanced(classes=(1,7), ratio=0.1):\r\n train_set, train_set_target = load_data()\r\n \r\n # binarize\r\n mask_train_set_imb = np.logical_or(train_set_target == classes[0],train_set_target == classes[1])\r\n (data_set_imb,data_set_imb_target)= (train_set[mask_train_set_imb], train_set_target[mask_train_set_imb])\r\n\r\n # imbalance\r\n data_minority = data_set_imb[data_set_imb_target == classes[1]]\r\n data_minority_target = data_set_imb_target[data_set_imb_target == classes[1]]\r\n data_majority = data_set_imb[data_set_imb_target == classes[0]]\r\n data_majority_target = data_set_imb_target[data_set_imb_target == classes[0]]\r\n original_size = data_minority_target.shape[0]\r\n majority_size = data_majority_target.shape[0]\r\n target_size = int(np.floor(majority_size * ratio))\r\n indices = np.random.choice(original_size, size=target_size)\r\n data_minority = data_minority[indices]\r\n data_minority_target = data_minority_target[indices]\r\n\r\n # merge\r\n train_set = np.concatenate([data_minority, data_majority])\r\n train_set_target = np.concatenate([data_minority_target, data_majority_target])\r\n\r\n #shuffle\r\n train_set, train_set_target = np.hsplit(\r\n np.random.permutation(\r\n np.hstack((train_set, train_set_target.reshape((train_set_target.shape[0], 1))))\r\n ), [-1]\r\n )\r\n train_set_target = np.asarray(train_set_target, dtype='int').reshape((train_set_target.shape[0],))\r\n return (train_set[:],train_set_target[:])", "def class_balancing_sampler(y, indices):\n weights = WeightedSampler.class_balancing_sample_weights(y[indices])\n return WeightedSubsetSampler(weights, indices=indices)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the selected locale from user settings.
def get_locale():
    setting = Setting.query.filter(Setting.name == 'default_language').first()
    if setting is not None:
        return setting.value
    # Return default language when none found
    return 'en'
[ "def _get_locale(self):\n context = self.context\n locales = {\n 'en': 'en_GB',\n 'de': 'de_DE'\n }\n language = context.language if context.language else 'en'\n return locales.get(language)", "def _get_user_locale():\n if 'Windows' in platform.system():\n import ctypes\n windll = ctypes.windll.kernel32\n default_locale = windows_locale[windll.GetUserDefaultUILanguage()]\n else:\n default_locale = getdefaultlocale()\n if default_locale:\n if isinstance(default_locale, tuple):\n user_locale = [0][:2]\n else:\n user_locale = default_locale[:2]\n else:\n user_locale = 'en'\n return user_locale", "def get_user_locale(self):\r\n return None", "def get_current_locale(self, req):\n if req.view_args and 'locale' in req.view_args:\n for locale in self.locales:\n if locale.code == req.view_args['locale']:\n return locale\n\n # Return the default locale\n return self.default_locale", "def get_locale_for_user(self):\n return 'en_US' # TODO(psimakov): choose proper locale from profile", "def getLocale(self):\r\n return locale.getlocale()", "def get_locale():\n return request.accept_languages.best_match(current_app.config['LANGUAGES'])\n # return 'es'", "def get_current_locale(self) -> str:\n return self.locale", "def get_service_locale():\n try:\n locale = get_locale()\n except TypeError as e:\n print('Error with getting locale - %s' % str(e))\n locale = request.cookies.get(app.config.get('LANGUAGE_COOKIE_KEY', None))\n if not locale or locale not in app.config.get('LANGUAGES', ['en', 'ru']):\n locale = request.accept_languages.best_match(app.config.get('LANGUAGES', ['en', 'ru']))\n except Exception as e:\n print('Exception with getting locale - %s' % str(e))\n locale = 'en'\n\n return locale.language if isinstance(locale, Locale) else str(locale)", "def get_language():\n val = getattr(_active, 'value', None)\n return val.language if val else config.LANGUAGE_CODE", "def locale(self):\r\n if not hasattr(self, \"_locale\"):\r\n self._locale = self.get_user_locale()\r\n if not self._locale:\r\n self._locale = self.get_browser_locale()\r\n assert self._locale\r\n return self._locale", "def get_locale():\n return request.accept_languages.best_match(LANGUAGES.keys())", "def locale(self):\n return None", "def default_locale(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_locale\")", "def init_language(self):\n\n if 'HTTP_COOKIE' in os.environ:\n cookies = os.environ['HTTP_COOKIE'].split(';')\n for cookie in cookies:\n (key, value) = cookie.split('=')\n if key == Intuition.COOKIE_USERLANG:\n return value\n \n return self.default_language", "def get_language():\n lang = os.getenv('LANGUAGE') or ''\n if ':' in lang:\n lang = lang.split(':')[1]\n return lang", "def getUILanguage(prefClass=Prefs):\n lang = prefClass.settings.value(\"UI/Language\",\n prefClass.uiDefaults[\"Language\"])\n if lang == \"None\" or lang == \"\" or lang is None:\n return None\n else:\n return lang", "def _contextual_locale(context):\n locale = context['request'].locale\n if not localedata.exists(locale):\n locale = settings.LANGUAGE_CODE\n return locale", "def get_language(self):\n return self.language if self.language is not None else get_language()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decodes a Base58Check encoded key.
def from_b58check(key):
    return HDKey.from_bytes(base58.b58decode_check(key))[0]
[ "def base58_decode(v: bytes) -> bytes:\n try:\n prefix_len = next(\n len(encoding[2])\n for encoding in base58_encodings\n if len(v) == encoding[1] and v.startswith(encoding[0])\n )\n except StopIteration:\n raise ValueError('Invalid encoding, prefix or length mismatch.')\n\n return base58.b58decode_check(v)[prefix_len:]", "def multibase_b58decode(data):\n if data.startswith('z'):\n return base58.b58decode((data[1:]).encode())\n raise ValueError('{} cannot be decoded by multibase'\n ' base58.'.format(str(data)))", "def decode(cls, received: bytes) -> 'CK':\n\n return CoseKey.from_dict(cbor2.loads(received))", "def decode_key(self, key: Any, msg: SecureMessage) -> Optional[bytes]:\n raise NotImplemented", "def decode(s):\n try:\n if not s:\n return b''\n\n # Convert the string to an integer\n n = 0\n for c in s:\n n *= 58\n if c not in b58_digits:\n raise InvalidBase58Error('Character %r is not a valid base58 character' % c)\n digit = b58_digits.index(c)\n n += digit\n\n # Convert the integer to bytes\n h = '%x' % n\n if len(h) % 2:\n h = '0' + h\n res = unhexlify(h.encode('utf8'))\n\n # Add padding back.\n pad = 0\n for c in s[:-1]:\n if c == b58_digits[0]: pad += 1\n else: break\n return hexlify(b'\\x00' * pad + res).decode('utf8')", "def decode_base58(s):\n if not s:\n return b''\n # Convert the string to an integer\n n = 0\n for c in s:\n n *= 58\n if c not in b58_digits:\n raise Exception('Character %r is not a valid base58 character' % c)\n digit = b58_digits.index(c)\n n += digit\n # Convert the integer to bytes\n h = '%x' % n\n if len(h) % 2:\n h = '0' + h\n res = binascii.unhexlify(h.encode('utf8'))\n # Add padding back.\n pad = 0\n for c in s[:-1]:\n if c == b58_digits[0]:\n pad += 1\n else:\n break\n return b'\\x00' * pad + res", "def base58_decode(s):\n if not s:\n return b''\n alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n # Convert the string to an integer\n n = 0\n for c in s:\n n *= 58\n if c not in alphabet:\n raise Exception('Character %r is not a valid base58 character' % c)\n digit = alphabet.index(c)\n n += digit\n\n # Convert the integer to bytes\n h = '%x' % n\n if len(h) % 2:\n h = '0' + h\n # res = \"\"\n res = bytearray.fromhex(h)\n\n # Add padding back.\n pad = 0\n for c in s[:-1]:\n if c == alphabet[0]: pad += 1\n else: break\n return b'\\x00' * pad + res", "def b58_paycode_decode(pcode):\n assert is_bip47_code(pcode)\n return b58check_to_hex(pcode)", "def decode_public_key(bytes):\r\n return RSA.importKey(bytes)", "def rc4_decode(data, key, decode=base64.b64decode, salt_length=16):\n if decode:\n data = decode(data)\n salt = data[:salt_length]\n return crypt(data[salt_length:], sha1(key + salt).digest())", "def parse_key(raw_key):\n raw_key_bytes = raw_key.encode('ascii')\n try:\n validate_cmek(raw_key)\n key_type = KeyType.CMEK\n sha256 = None\n except errors.Error:\n if len(raw_key) != 44:\n raise\n key_type = KeyType.CSEK\n sha256 = hash_util.get_base64_hash_digest_string(\n hashlib.sha256(base64.b64decode(raw_key_bytes)))\n return EncryptionKey(key=raw_key, sha256=sha256, type=key_type)", "def __decode_key(self, key_type, key):\n return key_type(key, encoder=KEY_ENCODING)", "def decode_public_key(as_bytes: typing.List[int]) -> PublicKey:\n raise NotImplementedError()", "def _from_bytes(self, byte_data, key=''):\n\n if not can_encrypt and key:\n result = decode_safely(byte_data)\n elif can_encrypt and key:\n cipher = AESCipher(key)\n result = decode_safely(cipher.decrypt(byte_data))\n else:\n result = decode_safely(byte_data)\n 
return result", "def parse_signature(data: bytes):\n return base58_encode(data, b'sig').decode()", "def decode_base58(smartAddress, length):\n n = 0\n for char in smartAddress:\n try:\n n = n * 58 + digits58.index(char)\n except:\n msg = u\"Character not part of SmartCashs's base58: '%s'\"\n raise ValueError(msg % (char,))\n\n return n.to_bytes(length, 'big')", "def b58decode(v, length):\n long_value = 0L\n for (i, c) in enumerate(v[::-1]):\n long_value += __b58chars.find(c) * (__b58base**i)\n result = ''\n while long_value >= 256:\n div, mod = divmod(long_value, 256)\n result = chr(mod) + result\n long_value = div\n result = chr(long_value) + result\n nPad = 0\n for c in v:\n if c == __b58chars[0]: nPad += 1\n else: break\n result = chr(0)*nPad + result\n if length is not None and len(result) != length:\n return None\n return result", "def from_b58_string(multihash: str) -> bytes:\n if not isinstance(multihash, str):\n raise TypeError(\"input must be string\")\n return base58.b58decode(multihash)", "def b58decode(v, length):\n\tlong_value = 0L\n\tfor (i, c) in enumerate(v[::-1]):\n\t\tlong_value += __b58chars.find(c) * (__b58base**i)\n\tresult = ''\n\twhile long_value >= 256:\n\t\tdiv, mod = divmod(long_value, 256)\n\t\tresult = chr(mod) + result\n\t\tlong_value = div\n\tresult = chr(long_value) + result\n\tnPad = 0\n\tfor c in v:\n\t\tif c == __b58chars[0]: nPad += 1\n\t\telse: break\n\tresult = chr(0)*nPad + result\n\tif length is not None and len(result) != length:\n\t\treturn None\n\treturn result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates either a HDPrivateKey or HDPublicKey from the underlying bytes.
def from_bytes(b):
    if len(b) < 78:
        raise ValueError("b must be at least 78 bytes long.")
    version = int.from_bytes(b[:4], 'big')
    depth = b[4]
    parent_fingerprint = b[5:9]
    index = int.from_bytes(b[9:13], 'big')
    chain_code = b[13:45]
    key_bytes = b[45:78]
    rv = None
    if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION:
        if key_bytes[0] != 0:
            raise ValueError("First byte of private key must be 0x00!")
        private_key = int.from_bytes(key_bytes[1:], 'big')
        rv = HDPrivateKey(key=private_key,
                          chain_code=chain_code,
                          index=index,
                          depth=depth,
                          parent_fingerprint=parent_fingerprint)
    elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION:
        if key_bytes[0] != 0x02 and key_bytes[0] != 0x03:
            raise ValueError("First byte of public key must be 0x02 or 0x03!")
        public_key = PublicKey.from_bytes(key_bytes)
        rv = HDPublicKey(x=public_key.point.x,
                         y=public_key.point.y,
                         chain_code=chain_code,
                         index=index,
                         depth=depth,
                         parent_fingerprint=parent_fingerprint)
    else:
        raise ValueError("incorrect encoding.")
    return (rv, b[78:])
[ "def generate_ecdh_key_pair() -> tuple[X25519PrivateKey, bytes]:\n private_key = X25519PrivateKey.generate()\n public_key_raw = private_key.public_key().public_bytes(\n serialization.Encoding.Raw, serialization.PublicFormat.Raw\n )\n return private_key, public_key_raw", "def decode_public_key(bytes):\r\n return RSA.importKey(bytes)", "def mk_keyobj_from_private_key_der(self, derdat):\n self.private_key_obj = serialization.load_der_private_key(derdat, password=None, backend=default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def he_key_gen(private_keyring=None, n_length=phe.paillier.DEFAULT_KEYSIZE):\n\n public_key, private_key = phe.generate_paillier_keypair(private_keyring, n_length)\n return public_key, private_key", "def gen_private_key():\n return DH.b2i(Random.new().read(DH_SIZE))", "def generate_keys():\r\n key = rsa.generate_private_key(\r\n backend=crypto_default_backend(),\r\n public_exponent=65537,\r\n key_size=4096\r\n )\r\n private_key = key.private_bytes(\r\n crypto_serialization.Encoding.PEM,\r\n crypto_serialization.PrivateFormat.PKCS8,\r\n crypto_serialization.NoEncryption())\r\n public_key = key.public_key().public_bytes(\r\n crypto_serialization.Encoding.OpenSSH,\r\n crypto_serialization.PublicFormat.OpenSSH\r\n )\r\n return private_key, public_key", "def decode_public_key(as_bytes: typing.List[int]) -> PublicKey:\n raise NotImplementedError()", "def load(data):\n e, n = RSAUtils.parse_key(data)\n return RSAPublicKey(e, n)", "def gen_private_key():\n import base64\n import os\n from pykern import pkcompat\n\n return pkcompat.from_bytes(base64.urlsafe_b64encode(os.urandom(32)))", "def generate_key(seed):\n private_key = sha256(seed)\n public_key = privtopub(private_key)\n return {\"private\": private_key, \"public\": public_key}", "def get_key_pair() -> typing.Tuple[bytes, bytes]:\n sk = ed25519.Ed25519PrivateKey.generate()\n\n return _get_key_pair_from_sk(sk)", "def mk_keyobj_from_private_key_pem(self, pemdat_string):\n if isinstance(pemdat_string, str):\n pemdat_string = pemdat_string.encode()\n self.private_key_obj = serialization.load_pem_private_key(pemdat_string, password=None, backend=default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def dh_get_key():\n G = EcGroup()\n priv_dec = G.order().random()\n pub_enc = priv_dec * G.generator()\n return (G, priv_dec, pub_enc)", "def generate_keys() -> tuple:\n private_key = ecdsa.SigningKey.generate(curve=curve)\n public_key = private_key.get_verifying_key()\n\n private_key = encode_private_key(private_key)\n public_key = encode_public_key(public_key)\n\n return public_key, private_key", "def ECDH(sk, pk, peer_pk):\n curve = pk.curve\n enc_peer_pk = bytes(peer_pk)\n peer_pk_good = True\n if isinstance(peer_pk, Jq255Curve.Point):\n if not(pk.curve is peer_pk.curve):\n raise Exception('Curve mismatch in ECDH')\n if pk.is_neutral():\n raise Exception('Peek public key is invalid (neutral element)')\n else:\n # We are going to decode the public key bytes. In that mode,\n # failures should trigger the alternate key derivation feature,\n # instead of being reported as exceptions. 
This implementation\n # is not constant-time, and the exception-catching process below\n # may leak to outsider through timing-based side channels that\n # the received bytes were not a valid public key; in a\n # production-level secure implementation, this side channel\n # should be avoided as well.\n try:\n peer_pk = pk.curve.Decode(enc_peer_pk)\n if peer_pk.is_neutral():\n raise Exception('key is neutral')\n except Exception:\n peer_pk_good = False\n peer_pk = curve.G\n\n # The ECDH core: multiply the peer point by our private key.\n # The shared secret is the _square_ of the w coordinate of the result\n # (a square is used to make ECDH implementable with a ladder\n # algorithm that avoids full decoding of the input point).\n P = peer_pk * sk\n\n # For key generation, we want to use the digest over the concatenation of:\n # - the two public keys;\n # - a byte of value 0x53 (on success) or 0x46 (on failure, because the\n # provided peer key bytes are not the valid encoding of a valid\n # public key);\n # - the shared secret (our own private key on failure).\n # We order the public keys by interpreting them as integers\n # (big-endian convention) so that both parties use the same order\n # (equivalently, the two keys are ordered lexicographically).\n pk1 = bytes(pk)\n ipk1 = int.from_bytes(pk1, byteorder='big')\n pk2 = enc_peer_pk\n ipk2 = int.from_bytes(pk2, byteorder='big')\n if ipk1 > ipk2:\n (pk1, pk2) = (pk2, pk1)\n\n sh = hashlib.blake2s()\n sh.update(pk1)\n sh.update(pk2)\n if peer_pk_good:\n sh.update(b'\\x53')\n sh.update(bytes(P))\n else:\n sh.update(b'\\x46')\n sh.update(bytes(sk))\n return (sh.digest(), peer_pk_good)", "def _generate_rsa_keypair() -> Any:\n private_key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=cryptography.hazmat.backends.default_backend())\n\n privkey_pem = private_key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption())\n\n public_key = private_key.public_key()\n pubkey_pem = public_key.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo)\n\n return privkey_pem.decode('ascii'), pubkey_pem.decode('ascii')", "def get_key_pair() -> typing.Tuple[bytes, bytes]: \n return _get_key_pair_from_sk(ecdsa.SigningKey.generate(curve=CURVE))", "def create_RO_keypair(tenant_id):\n\n bits = 2048\n key = RSA.generate(bits)\n try:\n public_key = key.publickey().exportKey('OpenSSH')\n if isinstance(public_key, ValueError):\n raise NfvoException(\"Unable to create public key: {}\".format(public_key), httperrors.Internal_Server_Error)\n private_key = key.exportKey(passphrase=tenant_id, pkcs=8)\n except (ValueError, NameError) as e:\n raise NfvoException(\"Unable to create private key: {}\".format(e), httperrors.Internal_Server_Error)\n if isinstance(public_key, bytes):\n public_key = public_key.decode(encoding='UTF-8')\n if isinstance(private_key, bytes):\n private_key = private_key.decode(encoding='UTF-8')\n return public_key, private_key", "def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': self.public_key.unwrap(),\n 'private_key': self.asn1['private_key'].parsed,\n })\n\n if self.algorithm == 'ec':\n output = 
self.asn1['private_key'].parsed\n output['parameters'] = self.asn1['private_key_algorithm']['parameters']\n output['public_key'] = self.public_key.unwrap()\n return output" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Whether or not this is a hardened node. Hardened nodes are those with indices >= 0x80000000.
def hardened(self):
    # A hardened key is a key with index >= 2 ** 31, so
    # we check that the MSB of a uint32 is set.
    return self.index & 0x80000000
[ "def isItFreeNode(self):\n for c in self.children:\n if c:\n return False\n return True", "def is_dedicated_node(self):\n return self.is_node() and not self.is_master()", "def has_extra_nodes(self):\n return (self.n_edge_nod + self.n_face_nod + self.n_bubble_nod) > 0", "def has_node(self, i):\r\n return i in self.nodes", "def has_node(self, n):\n return n in self.graph", "def is_infected(self, node):\n return self.times[node] > -1", "def is_flooded(self):\n return len(self.flooded_tiles) == self._num_tiles", "def _is_leaf(self, index):\r\n return 2*index+1 > self._size - 1", "def IsEulerGraph(self):\n\n for node in self.nodes:\n if ((len(node.neighbours) % 2) == 1) or (len(node.neighbours) == 0):\n return False\n return True", "def has_node(self, n):\n return n in self.node_dict", "def _is_red(node: Node) -> bool:\n if node is None:\n return False\n return node.is_red", "def isWellFormedNode(self, *args):\n return _libsbml.ASTBasePlugin_isWellFormedNode(self, *args)", "def nid_is_entity(node_id):\n return node_id % 2 == 1 and node_id >= 1", "def is_never_infected(self, node):\n return self.times[node] == -1", "def is_linear(node):\n return len(node.next_states) == 1 and \\\n len(node.prev_states) == 1\n # return len(node.next_states) == 1", "def _is_left_edge(self, ndx):\n if len(self._dims)== 1:\n return ndx == 0\n return ndx < self._dims[1]", "def is_ghost(self):\n\t\treturn False", "def __nonzero__(self):\n return self.root or bool(self.entries)", "def aboveFreezing(self):\r\n if self.degree>32:\r\n return True\r\n else:\r\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the key's fingerprint, which is the first 4 bytes of its identifier.
def fingerprint(self):
    return self.identifier[:4]
[ "def key_fingerprint(self):\n return self._key_fingerprint", "def fingerprint(public_key):\r\n\r\n return hashlib.new('ripemd160', hashlib.sha256(public_key).digest()).digest()[:4]", "def fingerprint(self, key):\n base64_pub = self.base64_pub_encode(key)\n return SHA256.new(base64_pub.encode('utf-8')).digest()", "def fingerprint(self):\n return self.gpg.list_keys()[0]['fingerprint']", "def _get_fingerprint(self, key: bytes) -> bytes:\n return sha3.keccak_256(key).hexdigest().encode()", "def fingerprint_key(key):\n try: key = key.public_key()\n except: pass\n\n serialized = key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n blob = b64decode(serialized.split(None,2)[1])\n return fingerprint_public_key_blob(blob)", "def fingerprint(self):\n return bip32.fingerprint(self.xpub)", "def fingerprint(self):\n return self._fingerprint", "def _fingerprint(self):\n\n return self._fingerprints.data[self._reference_structure_id]", "def fingerprint(datum):\n return b64enc(sha256(toBytes(datum)).digest())", "def get_master_fingerprint(self) -> bytes:\n return self.get_pubkey_at_path(\"m/0h\").parent_fingerprint", "def ssh_key_fingerprint(self):\n if hasattr(self, \"_ssh_key_fingerprint\"):\n return getattr(self, \"_ssh_key_fingerprint\")\n return prompt_for_key()", "def key_get_fingerprint():\n\n token = token_by_header_data(request.headers.get(\"X-Keydom-Session\"))\n\n if not token:\n resp = routing.base.generate_error_response(code=401)\n resp[\"message\"] = \"Invalid authentication token.\"\n return json.dumps(resp) + \"\\n\"\n\n if token.has_expired:\n resp = routing.base.generate_error_response(code=403)\n resp[\"message\"] = \"Authentication token has expired. Request another.\"\n return json.dumps(resp) + \"\\n\"\n\n key_data = {\n \"short_name\": request.query.short_name or None,\n \"user\": request.query.user or token.for_user.username,\n }\n\n res = (Key\n .select()\n .where(Key.short_name == key_data[\"short_name\"]))\n\n resp = routing.base.generate_bare_response()\n resp[\"fingerprints\"] = []\n\n if res.count() == 0:\n return json.dumps(resp) + \"\\n\"\n\n if key_data[\"user\"].lower() == \"self\":\n keys = filter(\n lambda key: key.belongs_to.username == token.for_user.username,\n res)\n elif key_data[\"user\"].lower() in [\"all\", \"any\"]:\n keys = res\n else:\n keys = filter(\n lambda key: key.belongs_to.username == key_data[\"user\"],\n res)\n\n for key in keys:\n resp[\"fingerprints\"].append({\n \"short_name\": key.short_name,\n \"owner\": key.belongs_to.username,\n \"fingerprint\": key.fingerprint()\n })\n\n return json.dumps(resp) + \"\\n\"", "def identity(self):\n if self.fingerprint:\n return fromHex(self.fingerprint)", "def getFingerprint(self):\r\n return b2a_hex(SHA1(self.bytes))", "def api_key_file_fingerprint(self):\n return self.attributes.get(\n \"{}.API Key File Fingerprint\".format(self._cs_model_name)\n )", "def get_short_fingerprint(length=6):\n assert 6 <= length <= 32\n #\n return get_fingerprint(md5=True)[-length:]", "def label_fingerprint(self) -> str:\n return pulumi.get(self, \"label_fingerprint\")", "def fingerprint(keyed_data, digest_size=16):\n h = blake2b(digest_size=16)\n for key in sorted(keyed_data.keys()):\n val = keyed_data[key]\n s = json.dumps(val, sort_keys=True, cls=NpEncoder).encode()\n h.update(s)\n return h.hexdigest()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get inventory list from config files; builds a NetworkRunner inventory object and a mac_map dictionary according to the ansible inventory file yaml definition.
def __init__(self):
    self.inventory = {}
    self.mac_map = {}
    for conffile in CONF.config_file:
        # parse each config file
        sections = {}
        parser = cfg.ConfigParser(conffile, sections)
        try:
            parser.parse()
        except IOError as e:
            LOG.error(str(e))
        # filter out sections that begin with the driver's tag
        hosts = {k: v for k, v in sections.items() if k.startswith(c.DRIVER_TAG)}
        # munge the oslo_config data removing the device tag and
        # turning lists with single item strings into strings
        for host in hosts:
            dev_id = host.partition(c.DRIVER_TAG)[2]
            dev_cfg = {k: v[0] for k, v in hosts[host].items()}
            for b in c.BOOLEANS:
                if b in dev_cfg:
                    dev_cfg[b] = types.Boolean()(dev_cfg[b])
            self.inventory[dev_id] = dev_cfg
            # If mac is defined add it to the mac_map
            if 'mac' in dev_cfg:
                self.mac_map[dev_cfg['mac'].upper()] = dev_id
    LOG.info('Ansible Host List: %s', ', '.join(self.inventory))
[ "def main():\n with open('group_vars/all.yaml', 'r') as file:\n config = yaml.safe_load(file)\n\n if not os.path.exists('host_vars'):\n os.makedirs('host_vars')\n\n nso_ip = config['nso']['ip']\n username = config['nso']['username']\n password = config['nso']['password']\n nso = NSO(nso_ip, username, password)\n #\n print(\"Syncing Configuration from Devices\")\n nso.sync_from()\n\n devices = nso.get_device_list()\n\n # track devices to be added to inventory\n inv_devices = list()\n\n for dev in devices:\n print(\"Generating host_vars for {}\".format(dev))\n config = dict()\n try:\n config['config'] = nso.get_device_config(dev)['tailf-ncs:config']\n with open('host_vars/{}.yaml'.format(dev), 'w') as file:\n yaml.dump(config, file, default_flow_style=False,\n explicit_start=False)\n inv_devices.append(dev)\n except ValueError:\n print(\"Failed to parse JSON for {}\".format(dev))\n\n # create inventory yaml\n inv_dict = {\"all\": {\"hosts\": {}}}\n inv_dict[\"all\"][\"hosts\"] = {k: None for k in inv_devices}\n\n with open('inventory.yaml', 'w') as inv:\n yaml.safe_dump(inv_dict, inv, default_flow_style=False,\n explicit_start=False,\n encoding='utf-8')", "def read_inventory(inputfile):\n try:\n with open(INVENTORY_FOLDER + inputfile + '.yml', 'r') as f:\n invyaml = f.read()\n except IOError as err:\n print(err)\n onlyfiles = [f for f in os.listdir(INVENTORY_FOLDER) if os.path.isfile(os.path.join(INVENTORY_FOLDER, f))]\n inv_list = []\n for file in onlyfiles:\n inv_list.append(file.split('_')[-1][:-4])\n print('These are the valid inventories: {}'.format(inv_list))\n sys.exit(1)\n devices = yaml.load(invyaml)\n return devices", "def scan_inv_directory():\n invhosts = {}\n script_dir = os.path.dirname(os.path.realpath(__file__))\n invfiles = os.listdir(script_dir)\n\n # Scan every yml file found inside inventory directory\n for invfile in [x for x in invfiles if x.endswith('.yml')]:\n with open('{}/{}'.format(script_dir, invfile), 'r') as inventory:\n try:\n hosts = yaml.safe_load(inventory)\n except yaml.scanner.ScannerError as e:\n raise Exception('Inventory syntax error {}'.format(str(e.context_mark).lstrip()))\n\n # If there are hosts defined with range indicator [1:3], expand them\n hosts_expanded = {}\n\n if hosts:\n for key, val in hosts.items():\n if detect_range(key):\n key = expand_hostname_range(key)\n hosts_expanded.update({x:val for x in key})\n else:\n hosts_expanded[key] = val\n\n invhosts.update(hosts_expanded)\n\n if not invhosts:\n raise Exception('No hosts found inside inventory directory.')\n return invhosts", "def readConfig(file=\"config.ini\"):\n ips = []\n dic={}\n config = ConfigParser.ConfigParser()\n config.read(file)\n print config.get('MACHINES','CENTOS')\n print \"-----\"\n machines = config.items(\"MACHINES\")\n print machines\n commands = config.items(\"COMMANDS\")\n print commands\n for ip in machines:\n dic[ip[0]]=ip[1]\n ips.append(ip[1])\n\n ip=config.items('test')\n print ip\n return dic", "def get_inventory(configs):\n nova = get_client(configs)\n if not nova:\n raise Error(\"Cannot get OpenStack inventory. \"\n \"Make sur that your OpenStack credential is well loaded. \"\n \"(e.g. 
source openstack.rc\")\n\n default_section = configs.get(\"Default\", {})\n namespace = default_section.get(\"metadata_namespace\",\n DEFAULT_METADATA_NAMESPACE)\n key_folder = default_section.get(\"key_folder\", DEFAULT_KEY_FOLDER)\n key_folder = os.path.abspath(os.path.expanduser(key_folder))\n\n use_creation_key = default_section.get(\"use_creation_key\",\n \"false\").lower() == \"true\"\n group_key = namespace + 'groups'\n\n server_list = [s for s in nova.servers.list() if group_key in s.metadata]\n\n # Ansible requires that if there is no host, the script must return {}\n if not server_list:\n return {}\n \n inventory = get_template(configs)\n\n for s in server_list:\n inventory_hostname = s.name\n metadata = s.metadata\n for group in metadata[group_key].split(','):\n if group not in inventory:\n inventory[group] = {\"hosts\": [inventory_hostname]}\n elif \"hosts\" not in inventory[group]:\n inventory[group][\"hosts\"] = [inventory_hostname]\n else:\n inventory[group][\"hosts\"].append(inventory_hostname)\n variables = {}\n # Take the first address as ansible_host by default.\n # If host has more than one addresses (e.g. multiple NICs,\n # Floating IP), then user should specify host address by\n # '<metadata_namespace>:ansible_host' key in metadata\n address = s.networks[s.networks.keys()[0]][0]\n variables['ansible_host'] = address\n for key, value in metadata.items():\n if key == (namespace + \"ansible_private_key_file\"):\n variables[\"ansible_private_key_file\"] = os.path.join(\n key_folder, value)\n elif (key.startswith(namespace) and (key != group_key)):\n keyname = key[len(namespace):]\n variables[keyname] = value\n if ((\"ansible_private_key_file\" not in variables) and use_creation_key\n and not s.key_name):\n variables[\"ansible_private_key_file\"] = os.path.join(\n key_folder, s.key_name)\n inventory[\"_meta\"][\"hostvars\"][inventory_hostname] = variables\n return inventory", "def readConfig(conf_file='config.ini'):\n config = ConfigParser()\n config.read(conf_file)\n machines = config.items('MACHINES')\n commands = config.items('COMMANDS')\n ips = [ip[1] for ip in machines]\n cmds = [cmd[1] for cmd in commands]\n return ips, cmds", "def main(nso, username, password):\n\n # creates basic folder structure\n create_dirs()\n\n url = \"http://{}:{}\".format(nso, 8080)\n nso = NSO(url, username, password)\n\n # url is now only used for ansible\n url = url + '/jsonrpc'\n try:\n nso.sync_from()\n click.echo(\"Syncing Configuration from Devices\")\n except Exception as e:\n click.secho(\"Error Connecting to NSO: {}\".format(e), fg=\"red\")\n sys.exit(1)\n\n devices = nso.get_device_list()\n # track devices to be added to inventory\n inv_devices = list()\n\n for d in devices:\n print(\"Generating host_vars for {}\".format(d))\n config = dict()\n try:\n config['config'] = nso.get_device_config(d)['tailf-ncs:config']\n with open('host_vars/{}.yaml'.format(d), 'w') as fh:\n yaml.safe_dump(config, fh, default_flow_style=False,\n explicit_start=False)\n inv_devices.append(d)\n except ValueError:\n print(\"Failed to parse JSON for {}\".format(d))\n\n inv_dict = {k: None for k in inv_devices}\n # inventory representing source\n create_inventory(\"prod\",\n url,\n username,\n password,\n inv_dict)\n\n # also creates a netsim dev environment\n create_inventory(\"dev\",\n \"http://localhost:8080/jsonrpc\",\n username,\n password,\n inv_dict)\n\n click.echo(\"Generating Ansible Playbook...\")\n\n with open('site.yaml', 'w') as pb:\n pb.write(DEFAULT_PLAYBOOK)\n\n click.echo(\"Exporting Netsim 
configuration\")\n nso.generate_netsim_configs(devices)", "def read_config(self,cli_args):\n config_file = configparser.get_config()\n config_cmd = cli.get_args_config(cli.get_parser(),cli_args) \n\n config = dict()\n config = config_file\n\n for key in config_cmd:\n config[key] = config_cmd[key]\n\n # patch_path from command line will override orchestra_file_path\n if config.get(\"patch_path\"):\n config[\"orchestra_file_path\"] = config[\"patch_path\"]\n\n # hard code\n\n config[\"task_script_name\"] = \"ansible.sh\"\n\n logging.debug(config)\n return config", "def build_inventory(self):\n self.inventory = {\n 'all': {\n 'hosts': [],\n 'vars': self.group_variables\n },\n '_meta': {'hostvars': {}}\n }\n\n # add all droplets by id and name\n for droplet in self.data['droplets']:\n for net in droplet['networks']['v4']:\n if net['type'] == 'public':\n dest = net['ip_address']\n else:\n continue\n\n self.inventory['all']['hosts'].append(dest)\n\n self.inventory[droplet['id']] = [dest]\n self.inventory[droplet['name']] = [dest]\n\n # groups that are always present\n for group in ('region_' + droplet['region']['slug'],\n 'image_' + str(droplet['image']['id']),\n 'size_' + droplet['size']['slug'],\n 'distro_' + DigitalOceanInventory.to_safe(droplet['image']['distribution']),\n 'status_' + droplet['status']):\n if group not in self.inventory:\n self.inventory[group] = {'hosts': [], 'vars': {}}\n self.inventory[group]['hosts'].append(dest)\n\n # groups that are not always present\n for group in (droplet['image']['slug'],\n droplet['image']['name']):\n if group:\n image = 'image_' + DigitalOceanInventory.to_safe(group)\n if image not in self.inventory:\n self.inventory[image] = {'hosts': [], 'vars': {}}\n self.inventory[image]['hosts'].append(dest)\n\n if droplet['tags']:\n for tag in droplet['tags']:\n if tag not in self.inventory:\n self.inventory[tag] = {'hosts': [], 'vars': {}}\n self.inventory[tag]['hosts'].append(dest)\n\n # hostvars\n info = self.do_namespace(droplet)\n self.inventory['_meta']['hostvars'][dest] = info", "def read_inventory_file_to_dict(inventory_filename):\n config = configparser.ConfigParser(allow_no_value=True)\n config.read(inventory_filename)\n # extract the IPs into lower-case groups per section\n _groups = {\n key.lower(): [ ip.lower().split(\" \")[0] for (ip, group) in config.items(key)]\n for key in config.sections()\n }\n # resolve one level groups hierarchy\n groups = {}\n for group, ips in _groups.items():\n ips_set = set()\n for ip in ips:\n subgroup = _groups.get(ip)\n if subgroup is not None:\n for sub in subgroup:\n ips_set.add(sub)\n else:\n ips_set.add(ip)\n groups[group] = list(ips_set)\n return groups", "def build_inventory(self):\n self.inventory = {\n 'all': {\n 'hosts': [],\n 'vars': self.group_variables\n },\n '_meta': {'hostvars': {}}\n }\n\n # add all droplets by id and name\n for droplet in self.data['droplets']:\n for net in droplet['networks']['v4']:\n if net['type'] == 'private':\n dest = net['ip_address']\n else:\n continue\n\n self.inventory['all']['hosts'].append(dest)\n\n self.add_host(droplet['id'], dest)\n\n self.add_host(droplet['name'], dest)\n\n # groups that are always present\n for group in ('digital_ocean',\n 'region_' + droplet['region']['slug'],\n 'image_' + str(droplet['image']['id']),\n 'size_' + droplet['size']['slug'],\n 'distro_' + DigitalOceanInventory.to_safe(droplet['image']['distribution']),\n 'status_' + droplet['status']):\n self.add_host(group, dest)\n\n # groups that are not always present\n for group in 
(droplet['image']['slug'],\n droplet['image']['name']):\n if group:\n image = 'image_' + DigitalOceanInventory.to_safe(group)\n self.add_host(image, dest)\n\n if droplet['tags']:\n for tag in droplet['tags']:\n self.add_host(tag, dest)\n\n # hostvars\n info = self.do_namespace(droplet)\n self.inventory['_meta']['hostvars'][dest] = info", "def build_inventory(self):\n self.inventory = {\n 'all': {\n 'hosts': [],\n 'vars': self.group_variables\n },\n '_meta': {'hostvars': {}}\n }\n\n # add all droplets by id and name\n for droplet in self.data['droplets']:\n for net in droplet['networks']['v4']:\n if net['type'] == 'public':\n dest = net['ip_address']\n else:\n continue\n\n self.inventory['all']['hosts'].append(dest)\n\n self.add_host(droplet['id'], dest)\n\n self.add_host(droplet['name'], dest)\n\n # groups that are always present\n for group in ('digital_ocean',\n 'region_' + droplet['region']['slug'],\n 'image_' + str(droplet['image']['id']),\n 'size_' + droplet['size']['slug'],\n 'distro_' + DigitalOceanInventory.to_safe(droplet['image']['distribution']),\n 'status_' + droplet['status']):\n self.add_host(group, dest)\n\n # groups that are not always present\n for group in (droplet['image']['slug'],\n droplet['image']['name']):\n if group:\n image = 'image_' + DigitalOceanInventory.to_safe(group)\n self.add_host(image, dest)\n\n if droplet['tags']:\n for tag in droplet['tags']:\n self.add_host(tag, dest)\n\n # hostvars\n info = self.do_namespace(droplet)\n self.inventory['_meta']['hostvars'][dest] = info", "def create_inventory():\n\n # terraform output -json\n # {\n # \"Test01\": {\n # \"sensitive\": False,\n # \"type\": \"string\",\n # \"value\": \"172.24.76.139\"\n # }\n # }\n\n # Capture the terraform output and convert it into python objects that can be manipulated.\n output = subprocess.check_output([\"terraform\", \"output\", \"-json\"], universal_newlines=True)\n terraform_output = json.loads(output)\n\n # Transform the json from terraform into what ansible needs.\n list_hosts = list(terraform_output.keys())\n json_for_ansible = {'all': {'hosts': list_hosts}}\n host_vars = {}\n for name_host in list_hosts:\n host_vars[name_host] = {'ansible_host': terraform_output[name_host]['value']}\n # A git hosting provider needs port 22 to for git to work properly.\n # The OS level SSH connection can't use 22 then.\n if \"gitlab\" in name_host.lower():\n host_vars[name_host]['ansible_port'] = '2222'\n json_for_ansible['_meta'] = {'hostvars': host_vars}\n\n # Return the ansible json.\n # {\n # \"all\": {\n # \"hosts\": [\"Test01\"]\n # },\n # \"_meta\": {\n # \"hostvars\": {\n # \"Test01\": {\n # \"ansible_host\": \"172.24.76.139\"\n # }\n # }\n # }\n # }\n\n return json_for_ansible", "def loadfileconfig():\n zones = []\n for infile in INPUT_FILES:\n # read file into array\n rawinput = []\n with open(infile) as inputfile:\n rawinput = inputfile.readlines()\n inputfile.close()\n\n selectedinput = []\n for line in rawinput:\n # remove leading and trailing whitespace\n line = line.strip()\n\n # remove double quotes, \"\n line = line.replace('\"', '')\n\n # select the lines we need and extract\n # the second field\n if re.search(INPUT_SELECTOR, line):\n line = line.split(' ')\n line = line[1]\n selectedinput.append(line)\n\n\n # Merge two and two items into a list of tuples\n # put into zones\n iterator = iter(selectedinput)\n zones.extend(zip(iterator, iterator))\n\n return zones", "def readConfig(file=\"config.ini\"):\n ip_pool = []\n cmd_pool = []\n Config=ConfigParser.ConfigParser()\n 
Config.read(file)\n machines = Config.items(\"MACHINES\")\n commands = Config.items(\"COMMANDS\")\n for ip in machines:\n ip_pool.append(ip[1])\n for cmd in commands:\n cmd_pool.append(cmd[1])\n print cmd[1]\n return ip_pool,cmd_pool", "def load_config(self, config_src, report_metadata):\n for card_type in config_src.keys(): #card_type is project|assignment|epic\n for board_t in config_src[card_type].keys():\n board_id = config_src[card_type][board_t][':board_id']\n if not board_id in report_metadata: # initialize if the board wasn't present during the iterations over other card_type's\n if not board_id in report_metadata[':boards']:\n report_metadata[':boards'][board_id] = {};\n report_metadata[':boards'][board_id][':board_id'] = config_src[card_type][board_t][':board_id'] #copy board id\n report_metadata[':boards'][board_id][':board_name'] = board_t\n if not ':lists' in report_metadata[':boards'][board_id]:\n report_metadata[':boards'][board_id][':lists'] = []\n\n #iterate through all the lists and populate them\n for list_t in config_src[card_type][board_t][':lists'].keys():\n self.logger.debug(\"Adding board %s, list %s to the report\" % (config_src[card_type][board_t][':board_id'], config_src[card_type][board_t][':lists'][list_t]))\n list_id = config_src[card_type][board_t][':lists'][list_t]\n report_metadata[':lists'][list_id] = {};\n report_metadata[':lists'][list_id][':list_id'] = list_id\n report_metadata[':lists'][list_id][':completed'] = False;\n report_metadata[':lists'][list_id][':card_type'] = card_type;\n report_metadata[':lists'][list_id][':board_id'] = board_id\n report_metadata[':boards'][board_id][':lists'].append(list_id)\n if ':done_lists' in config_src[card_type][board_t]:\n for list_t in config_src[card_type][board_t][':done_lists'].keys():\n self.logger.debug(\"Adding board %s, Done list %s to the report\" % (config_src[card_type][board_t][':board_id'], config_src[card_type][board_t][':done_lists'][list_t]))\n list_id = config_src[card_type][board_t][':done_lists'][list_t]\n report_metadata[':lists'][list_id] = {};\n report_metadata[':lists'][list_id][':list_id'] = list_id\n report_metadata[':lists'][list_id][':completed'] = True;\n report_metadata[':lists'][list_id][':card_type'] = card_type;\n report_metadata[':lists'][list_id][':board_id'] = board_id\n report_metadata[':boards'][board_id][':lists'].append(list_id)", "def generate_config(self):\n self.log.debug(\"generate-config\")\n self.qemu.args = [\n \"-nodefaults\",\n \"-only-migratable\",\n \"-cpu {cpu_model},enforce\",\n # Watch out: kvm.name is used for sanity checking critical actions.\n \"-name {name},process=kvm.{name}\",\n \"-chroot {{chroot}}\",\n \"-runas nobody\",\n \"-serial file:/var/log/vm/{name}.log\",\n \"-display vnc={{vnc}}\",\n \"-pidfile {{pidfile}}\",\n \"-vga std\",\n # We use this '-m' flag to find what a running VM is actually\n # using at the moment. If this flag is changed then that code must\n # be adapted as well. 
This is used in incoming.py and qemu.py.\n \"-m {memory}\",\n \"-readconfig {{configfile}}\",\n ]\n self.qemu.args = [a.format(**self.cfg) for a in self.qemu.args]\n\n vhost = ' vhost = \"on\"' if self.vhost else \"\"\n\n netconfig = []\n for net, net_config in sorted(self.cfg[\"interfaces\"].items()):\n ifname = \"t{}{}\".format(net, self.cfg[\"id\"])\n netconfig.append(\n \"\"\"\n[device]\n driver = \"virtio-net-pci\"\n netdev = \"{ifname}\"\n mac = \"{mac}\"\n\n[netdev \"{ifname}\"]\n type = \"tap\"\n ifname = \"{ifname}\"\n script = \"/etc/kvm/kvm-ifup\"\n downscript = \"/etc/kvm/kvm-ifdown\"\n{vhost}\n\"\"\".format(\n ifname=ifname, mac=net_config[\"mac\"], vhost=vhost\n )\n )\n\n with open(self.vm_config_template) as f:\n tpl = f.read()\n accelerator = (\n ' accel = \"{}\"'.format(self.accelerator)\n if self.accelerator\n else \"\"\n )\n machine_type = detect_current_machine_type(self.machine_type)\n self.qemu.config = tpl.format(\n accelerator=accelerator,\n machine_type=machine_type,\n disk_cache_mode=self.qemu.disk_cache_mode,\n network=\"\".join(netconfig),\n **self.cfg,\n )", "def load(identifier, network):\n file = f\"{network}.{DEPLOYMENTS_FILENAME}\"\n\n if not os.path.exists(file):\n return\n\n with open(file) as fp:\n for line in fp:\n [address, abi, *alias] = line.split(\":\")\n identifiers = [x.strip() for x in [address] + alias]\n if identifier in identifiers:\n yield address, abi", "def parse_config():\n # find the jobfunnel root dir\n jobfunnel_path = os.path.normpath(\n os.path.join(os.path.dirname(__file__), '..'))\n\n # load the default settings\n default_yaml_path = os.path.join(jobfunnel_path, 'config/settings.yaml')\n default_yaml = yaml.safe_load(open(default_yaml_path, 'r'))\n\n # parse the command line arguments\n cli = parse_cli()\n\n # parse the settings file for the line arguments\n given_yaml = None\n given_yaml_path = None\n if cli.settings is not None:\n given_yaml_path = os.path.dirname(cli.settings)\n given_yaml = yaml.safe_load(open(cli.settings, 'r'))\n\n # prepare the configuration dictionary\n config = {}\n\n # parse the data path\n config['data_path'] = os.path.join(default_yaml['output_path'], 'data')\n config['master_list_path'] = os.path.join(\n default_yaml['output_path'], 'master_list.csv')\n config['duplicate_list_path'] = os.path.join(\n default_yaml['output_path'], 'duplicate_list.csv')\n\n if given_yaml_path is not None:\n config['data_path'] = os.path.join(\n given_yaml_path, given_yaml['output_path'], 'data')\n config['master_list_path'] = os.path.join(\n given_yaml_path, given_yaml['output_path'], 'master_list.csv')\n config['duplicate_list_path'] = os.path.join(\n given_yaml_path, given_yaml['output_path'], 'duplicate_list.csv')\n\n if cli.output_path is not None:\n config['data_path'] = os.path.join(cli.output_path, 'data')\n config['master_list_path'] = os.path.join(\n cli.output_path, 'master_list.csv')\n config['duplicate_list_path'] = os.path.join(\n cli.output_path, 'duplicate_list.csv')\n\n # parse the provider list\n config['providers'] = default_yaml['providers']\n if given_yaml_path is not None:\n config['providers'] = given_yaml['providers']\n for i, p in enumerate(config['providers']):\n config['providers'][i] = p.lower()\n\n # parse the search terms\n config['search_terms'] = default_yaml['search_terms']\n if given_yaml_path is not None:\n config['search_terms'] = given_yaml['search_terms']\n if cli.keywords is not None:\n config['search_terms']['keywords'] = cli.keywords\n\n # search term state is inserted as province if 
province does not already \n # exist\n if 'state' in config['search_terms']['region']:\n if (config['search_terms']['region']['state'] is not None) and \\\n (config['search_terms']['region']['province'] is None):\n config['search_terms']['region']['province'] = \\\n config['search_terms']['region']['state']\n\n # parse the blacklist\n config['black_list'] = default_yaml['black_list']\n if given_yaml_path is not None:\n config['black_list'] = given_yaml['black_list']\n\n # parse the similar option\n config['similar'] = cli.similar\n\n # parse the no_scrape option\n config['no_scrape'] = cli.no_scrape\n\n # parse the recovery option\n config['recover'] = cli.recover\n\n # parse the log level\n config['log_level'] = log_levels[default_yaml['log_level']]\n if given_yaml_path is not None:\n config['log_level'] = log_levels[given_yaml['log_level']]\n if cli.log_level is not None:\n config['log_level'] = log_levels[cli.log_level]\n\n # parse save_duplicates option\n config['save_duplicates'] = default_yaml['save_duplicates']\n if given_yaml_path is not None:\n config['save_duplicates'] = given_yaml['save_duplicates']\n if cli.save_duplicates is not None:\n config['save_duplicates'] = cli.save_duplicates\n\n # define the log path\n config['log_path'] = os.path.join(config['data_path'], 'jobfunnel.log')\n\n # define the filter list path\n config['filter_list_path'] = os.path.join(\n config['data_path'], 'filter_list.json')\n\n # set delaying\n config['set_delay'] = default_yaml['set_delay']\n if given_yaml_path is not None:\n config['set_delay'] = given_yaml['set_delay']\n if cli.set_delay is not None:\n config['set_delay'] = cli.set_delay\n\n # parse options for delaying if turned on\n if config['set_delay']:\n config['delay_config'] = default_yaml['delay_config']\n if given_yaml_path is not None:\n config['delay_config'] = given_yaml['delay_config']\n\n # cli options for delaying configuration\n if cli.function is not None:\n config['delay_config']['function'] = cli.function\n if cli.delay is not None:\n config['delay_config']['delay'] = cli.delay\n if cli.min_delay is not None:\n config['delay_config']['min_delay'] = cli.min_delay\n if cli.random is not None:\n config['delay_config']['random'] = cli.random\n if cli.converge is not None:\n config['delay_config']['converge'] = cli.converge\n\n # converts function name to lower case in config\n config['delay_config']['function'] = \\\n config['delay_config']['function'].lower()\n else:\n config['delay_config'] = None\n\n # set proxy (need get because value should be None when missing)\n config['proxy'] = default_yaml.get('proxy', None)\n if given_yaml_path is not None:\n config['proxy'] = given_yaml.get('proxy', None)\n if cli.proxy is not None:\n config['proxy'] = split_url(cli.proxy)\n\n # normalize paths\n for p in ['data_path', 'master_list_path', 'duplicate_list_path',\n 'log_path', 'filter_list_path']:\n config[p] = os.path.normpath(config[p])\n\n return config" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for get_liveness: Get job service liveness.
def test_get_liveness(self):
    response = self.client.open('/api/v1//liveness',
                                method='GET',
                                content_type='application/json')
    self.assert200(response,
                   "Response body is : " + response.data.decode('utf-8'))
[ "def test_liveness(client):\n response = client.get(api_route_for(\"liveness\"))\n assert response.status_code == 200\n json_data = get_json_from_response(response)\n assert json_data == {}", "def test_get_refresh_job_status(self):\n pass", "def liveness():\n return jsonify({}), 200", "def test_list(client, job_status):\n ret = client.get(url_for('api.get_list'))\n assert ret.json == {}", "def liveness_probe():\n return \"I am still alive!\"", "def test_jobs_get(self):\n pass", "def test_health_check_get(self):\n pass", "def test_estimate_liveness_batch(self):\n detection = self.detector.detectOne(VLImage.load(filename=SPOOF), detect68Landmarks=True)\n estimations = self.livenessEstimator.estimateBatch([self.detection, detection])\n assert isinstance(estimations, list)\n assert len(estimations) == 2\n for estimation in estimations:\n self.assertLivenessEstimation(estimation)", "def lantern_check():\n if not app.config.get(\"ENABLE_LANTERN\", False):\n print \"[{x}] Not checking Lantern jobs - interface disabled\".format(x=dates.now())\n return\n print \"[{x}] Checking Lantern jobs\".format(x=dates.now())\n LanternApi.check_jobs()", "def test_health_get(self):\n pass", "def check(n, service):\n s = job(n, service)\n return s[u'Status']", "def test_job_list(self):\n response = app.get('/jobs/kubernetes-jenkins/logs')\n self.assertIn('somejob/\">somejob</a>', response)", "def test_lpar_start_stop(self):\n\n # CPC1 is in classic mode\n lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1',\n True)\n assert lpar1['status'] == 'not-activated'\n lpar1_name = lpar1['name']\n\n # the function to be tested:\n self.urihandler.post(self.hmc,\n '/api/logical-partitions/1/operations/activate',\n {'activation-profile-name': lpar1_name},\n True, True)\n\n lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1',\n True)\n assert lpar1['status'] == 'not-operating'\n\n # the function to be tested:\n self.urihandler.post(self.hmc,\n '/api/logical-partitions/1/operations/load',\n {'load-address': '5176'}, True, True)\n\n lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1',\n True)\n assert lpar1['status'] == 'operating'\n\n # the function to be tested:\n self.urihandler.post(self.hmc,\n '/api/logical-partitions/1/operations/deactivate',\n {'force': True}, True, True)\n\n lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1',\n True)\n assert lpar1['status'] == 'not-activated'", "def test_lpar_start_stop(self):\n\n # CPC1 is in classic mode\n lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1',\n True)\n assert lpar1['status'] == 'not-activated'\n lpar1_name = lpar1['name']\n\n # the function to be tested:\n self.urihandler.post(self.hmc,\n '/api/logical-partitions/1/operations/activate',\n {'activation-profile-name': lpar1_name},\n True, True)\n\n lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1',\n True)\n assert lpar1['status'] == 'not-operating'\n\n # the function to be tested:\n self.urihandler.post(self.hmc,\n '/api/logical-partitions/1/operations/nvme-load',\n {'load-address': '5176'},\n True, True)\n\n lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1',\n True)\n assert lpar1['status'] == 'operating'\n\n # the function to be tested:\n self.urihandler.post(self.hmc,\n '/api/logical-partitions/1/operations/deactivate',\n {'force': True}, True, True)\n\n lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1',\n True)\n assert lpar1['status'] == 'not-activated'", "def 
test_async_estimate_liveness(self):\n detection = self.detector.detectOne(VLImage.load(filename=SPOOF))\n task = self.livenessEstimator.estimate(detection, asyncEstimate=True)\n self.assertAsyncEstimation(task, LivenessV1)\n task = self.livenessEstimator.estimateBatch([detection] * 2, asyncEstimate=True)\n self.assertAsyncBatchEstimation(task, LivenessV1)", "def test_lpar_start_stop(self):\n\n # CPC1 is in classic mode\n lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1',\n True)\n assert lpar1['status'] == 'not-activated'\n lpar1_name = lpar1['name']\n\n # the function to be tested:\n self.urihandler.post(self.hmc,\n '/api/logical-partitions/1/operations/activate',\n {'activation-profile-name': lpar1_name},\n True, True)\n\n lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1',\n True)\n assert lpar1['status'] == 'not-operating'\n\n # the function to be tested:\n self.urihandler.post(self.hmc,\n '/api/logical-partitions/1/operations/scsi-load',\n {'load-address': '5176',\n 'world-wide-port-name': '1234',\n 'logical-unit-number': '5678'},\n True, True)\n\n lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1',\n True)\n assert lpar1['status'] == 'operating'\n\n # the function to be tested:\n self.urihandler.post(self.hmc,\n '/api/logical-partitions/1/operations/deactivate',\n {'force': True}, True, True)\n\n lpar1 = self.urihandler.get(self.hmc, '/api/logical-partitions/1',\n True)\n assert lpar1['status'] == 'not-activated'", "def test_status(self):\n\n url = '/%s/jobs/?status=RUNNING' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['id'], self.job1.job_type.id)", "def test_api_v1_deployment_serverless_progress_get(self):\n pass", "def liveness_route(self) -> Optional[pulumi.Input['RouteArgs']]:\n return pulumi.get(self, \"liveness_route\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for get_readiness: Get job service readiness
def test_get_readiness(self): response = self.client.open('/api/v1//readiness', method='GET', content_type='application/json') self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
[ "def test_get_refresh_job_status(self):\n pass", "def readiness():\n return run_health_check()", "def test_readiness(client):\n response = client.get(api_route_for(\"readiness\"))\n assert response.status_code == 200\n json_data = get_json_from_response(response)\n assert json_data == {}", "def get_readiness():\n return {}, 200", "def test_readiness_endpoint(self):\n url = f'{BASE_URL}/ready'\n response = requests.get(url)\n response_json = response.json()\n assert response.status_code == 503\n assert response_json['status'] == 503", "def test_jobs_get(self):\n pass", "def test_status(self):\n\n url = '/%s/jobs/?status=RUNNING' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['id'], self.job1.job_type.id)", "def check(n, service):\n s = job(n, service)\n return s[u'Status']", "def test_health_check_get(self):\n pass", "def test_get_status(self):\n pass", "def test_get_job_queue(self):\n response = self.client.open(\n '/tx-queue/2/scheduler/job',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def readiness():\n return jsonify({}), 200", "def test_active(self):\n\n url = '/%s/job-types/status/?is_active=true' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 3)\n self.assertEqual(result['results'][0]['job_type']['name'], self.job1.job_type.name)\n self.assertEqual(result['results'][0]['job_counts'][0]['count'], 1)", "def test_cachedjob_get_status(cached_job):\n \n # Setup\n c_job = cached_job\n \n # Execute\n expected_status = StatusEnum(JOB_DETAILS_HTML['status'])\n cached_status = c_job.status\n\n # Verify\n assert expected_status == cached_status", "def test_running_job(self):\n running_job = json.loads(BASE_JSON % ('null', 'null', 0, 'null'))[0]\n self.assertEquals(self.query_api.get_job_status(running_job), RUNNING)", "def test_health_get(self):\n pass", "def test_successful_on_get(self):\n\n url = '/%s/jobs/' % self.api\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)", "def test_is_active(self):\n\n url = '/%s/job-types/?is_active=false' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 2)", "def test_get_scheduled_job(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
=============================================================== save_obj(obj, saved_name ) =============================================================== this function is used to save any python object to your hard disk
def save_obj(obj, saved_name ): with open( saved_name + '.pkl', 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
[ "def saveObject(obj):\r\n\r\n name = QtWidgets.QFileDialog.getSaveFileName(mw,'Save File','New SDE.sde',\r\n 'SDE-file *.sde')\r\n name = name[0]\r\n name = name.split('/')[-1]\r\n print(\"Save, name: \",name)\r\n with open(name,'wb') as output:\r\n pickle.dump(obj, output, -1)", "def save_object(obj, filename):\n with open(filename,'wb+') as f:\n f = open(filename, \"wb\")\n dill.dump(obj, f)\n return", "def save_object(obj, destination):\n print(\"Saving pickle object\")\n with open(destination, 'wb') as output:\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)", "def pickle_object(obj, name):\n with open(name+\".pkl\", 'wb') as f:\n pickle.dump(obj, f)", "def save_object(path,object):\r\n with open(path,\"wb\") as f:\r\n pickle.dump(object,f,pickle.HIGHEST_PROTOCOL)", "def save_object_to_file(filename, obj):\n with open(file=filename, mode='wb') as f:\n pickle.dump(obj, f)\n return", "def save_to_obj(self):\n\n filename = '{0}{1}.obj'.format(self.path, self.name)\n with open(filename, 'wb') as f:\n pickle.dump(self, f)\n print('***** Structure saved to: {0} *****\\n'.format(filename))", "def save(self, obj):\n raise NotImplementedError", "def save(self, obj):\n raise NotImplementedError()", "def dumpObject(self, obj, name = None):\n try:\n with open(name, \"wb\") as output_file:\n pickle.dump(obj, output_file, -1)\n print \"Successfully dumped \" + obj + \" into \" + name + \".\"\n except IOError:\n with open(repr(obj), \"wb\") as output_file:\n pickle.dump(obj, output_file, -1)\n print \"Successfully dumped \" + obj + \\\n \" into \" + repr(obj) + \".\"", "def save_pickle(obj, path):\n pickle_out = open(path, \"wb\")\n dill.dump(obj, pickle_out)\n pickle_out.close()", "def write_to_file(name, obj):\n\n print 'writing structures to pickle'\n print '----------------------------'\n\n path = os.getcwd() + '/pickles/' + name + '.pkl'\n file = open(path, 'wb')\n pickle.dump(obj, file)\n file.close()", "def write(obj, filename):\n with open(filename, 'wb') as handle:\n dump(obj, handle)", "def pickle_object(obj, ofname: \"Path|str\"):\n ofname = Path(ofname)\n maybe_make_output_dir(ofname)\n with ofname.open(\"wb\") as f:\n pickle.dump(obj, f)", "def write_pickle(obj, file_name):\n with open(file_name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save(obj, filename, format = \"JSON\"):\n if format == \"Python\":\n s = str(obj)\n else:\n s = json.dumps(obj, indent=2)\n open(filename,'w').write(s)\n return", "def save(object, filename, protocol = 0):\n file = gzip.GzipFile(filename, 'wb')\n file.write(pickle.dumps(object, protocol))\n file.close()", "def write_pickle(obj, relnm):\n with open(relnm, 'wb') as f:\n pickle.dump(obj, f, protocol = -1)\n return 'Serialized object to disk at {}'.format(relnm)", "def save(object, filename, protocol = -1):\n file = gzip.GzipFile(filename, 'wb')\n cPickle.dump(object, file, protocol)\n file.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
=============================================================== load_obj(saved_name) =============================================================== this function is used to load any python object from your hard disk
def load_obj(saved_name): with open( saved_name + '.pkl', 'rb') as f: return pickle.load(f)
[ "def save_obj(obj, saved_name ):\n with open( saved_name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def pickle_object(obj, name):\n with open(name+\".pkl\", 'wb') as f:\n pickle.dump(obj, f)", "def load_obj(name):\n with open('../../data/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def saveObject(obj):\r\n\r\n name = QtWidgets.QFileDialog.getSaveFileName(mw,'Save File','New SDE.sde',\r\n 'SDE-file *.sde')\r\n name = name[0]\r\n name = name.split('/')[-1]\r\n print(\"Save, name: \",name)\r\n with open(name,'wb') as output:\r\n pickle.dump(obj, output, -1)", "def save_to_obj(self):\n\n filename = '{0}{1}.obj'.format(self.path, self.name)\n with open(filename, 'wb') as f:\n pickle.dump(self, f)\n print('***** Structure saved to: {0} *****\\n'.format(filename))", "def save_object(path,object):\r\n with open(path,\"wb\") as f:\r\n pickle.dump(object,f,pickle.HIGHEST_PROTOCOL)", "def save_object(obj, filename):\n with open(filename,'wb+') as f:\n f = open(filename, \"wb\")\n dill.dump(obj, f)\n return", "def pickle_object(obj, ofname: \"Path|str\"):\n ofname = Path(ofname)\n maybe_make_output_dir(ofname)\n with ofname.open(\"wb\") as f:\n pickle.dump(obj, f)", "def dumpObject(self, obj, name = None):\n try:\n with open(name, \"wb\") as output_file:\n pickle.dump(obj, output_file, -1)\n print \"Successfully dumped \" + obj + \" into \" + name + \".\"\n except IOError:\n with open(repr(obj), \"wb\") as output_file:\n pickle.dump(obj, output_file, -1)\n print \"Successfully dumped \" + obj + \\\n \" into \" + repr(obj) + \".\"", "def pycloud_pickle(file_name, obj):\r\n # type: (Text, Any) -> None\r\n with io.open(file_name, 'wb') as f:\r\n cloudpickle.dump(obj, f)", "def save_pickle(obj, path):\n pickle_out = open(path, \"wb\")\n dill.dump(obj, pickle_out)\n pickle_out.close()", "def load_as_object(filename, obj):\n with open(filename, \"rb\") as f:\n dict = pickle.load(file = f)\n Utility.dict_to_object(obj, dict)", "def save_object(obj, destination):\n print(\"Saving pickle object\")\n with open(destination, 'wb') as output:\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)", "def write_to_file(name, obj):\n\n print 'writing structures to pickle'\n print '----------------------------'\n\n path = os.getcwd() + '/pickles/' + name + '.pkl'\n file = open(path, 'wb')\n pickle.dump(obj, file)\n file.close()", "def load_object(path):\r\n with open(path,\"rb\") as f:\r\n object = pickle.load(f) \r\n return object", "def load_obj():\n path = os.path.join(DIR_PATH, \"insta.pkl\")\n logger.debug(\"loading object from {0}...\".format(path))\n if not os.path.isfile(path):\n logger.debug(\"{0} pickle file not found.\".format(path))\n return\n with open(path, \"rb\") as pkl:\n insta = pickle.load(pkl)\n return insta", "def load_obj(path: str):\n with open(path, 'rb') as h:\n return pickle.load(h)", "def save_object_to_file(filename, obj):\n with open(file=filename, mode='wb') as f:\n pickle.dump(obj, f)\n return", "def load_object(fpath):\r\n with open(fpath, 'rb') as i:\r\n return pickle.load(i)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
=========================================================== DateFormatedSQL(x) =========================================================== this function converts the date read from a list to a datetime format
def DateFormatedSQL(x): x=[i[0] for i in x] x1=[] for i in x: if len(i)==19: x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) )) # elif len(i)==13: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) )) # else: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) )) # del i,x return x1
[ "def mapillary_to_sql_date(date):\r\n date_sql = date[:10] + \" \" + date[11:13] + \":\" + date[14:16] + \":\" + date[17:]\r\n return date_sql", "def change_format_from_input_to_datetime(list_d_t_t):\n data_output = []\n\n for row in list_d_t_t:\n data_output.append([datetime.datetime.strptime(row[0] + \" \" + row[1], \"%Y-%m-%d %H:%M:%S\"),\n datetime.datetime.strptime(row[0] + \" \" + row[2], \"%Y-%m-%d %H:%M:%S\")])\n\n return data_output", "def datetime_cast_sql(self):\n return \"%s\"", "def sql_date(date):\n return \"to_date('{}', 'dd.mm.yyyy')\".format(date)", "def DateFormated(x):\n \n x1=[]\n for i in x:\n if len(i)==19:\n x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) ))\n# elif len(i)==13:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) ))\n# else:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) ))\n# del i,x\n return x1", "def list_to_SQL_list(list, outfilename, SQL_table_name):\n string = \"INSERT {} VALUES\\n\".format(SQL_table_name)\n for row in list:\n string2 = \"(\"\n\n for item in row:\n if isinstance(item, str):\n item = stringify(item)\n elif isinstance(item, datetime.date):\n item = item.strftime(\"\\'%Y-%m-%d\\'\")\n string2 += str(item) + \",\"\n\n string += string2[:-1] + \"),\\n\"\n\n string = string[:-2] + \";\"\n\n outfile = open(outfilename, \"w\")\n print(string, file=outfile)\n outfile.close()\n\n return None", "def to_qldate(x):\n return ql.Date(x.day, x.month, x.year)", "def _date(self, column_value: list, column_name: str) -> str:\n pass", "def date_to_db_format(date):\n # Validate date format\n if len(date) != 10:\n current_app.logger.info(\"String {} is not of ะ”ะ”.ะœะœ.ะ“ะ“ะ“ะ“ format\".format(date))\n raise (BadDateFormatError(\"String {} is not of ะ”ะ”.ะœะœ.ะ“ะ“ะ“ะ“ format\".format(date)))\n d, m, y = date.split(\".\")\n if len(d) != 2 or len(m) != 2 or len(y) != 4:\n current_app.logger.info(\"String {} is not of ะ”ะ”.ะœะœ.ะ“ะ“ะ“ะ“ format\".format(date))\n raise (BadDateFormatError(\"String {} is not of ะ”ะ”.ะœะœ.ะ“ะ“ะ“ะ“ format\".format(date)))\n db_date = \"-\".join((y, m, d))\n try:\n parse(db_date)\n except ValueError:\n current_app.logger.info(\"String {} is not valid date\".format(date))\n raise (BadDateFormatError(\"String {} is not valid date\".format(date)))\n return datetime.datetime(int(y), int(m), int(d))", "def to_sqlite_str(d: date) -> str:\n return d.strftime('%Y-%m-%d 00:00:00.000000')", "def _to_date(self, x):\n if isinstance(x, datetime.datetime):\n return x.date()\n return x", "def handleDateList(dates, strformat):\n dl = []\n\n for d in dates:\n #convertedDate = datetime.datetime(*(time.strptime(d[:-7].replace('T', ' '), '%Y-%m-%d %H:%M:%S')[0:6]))\n convertedDate = datetime.datetime(*(time.strptime(d.replace('T', ' '), '%Y-%m-%d %H:%M:%S')[0:6]))\n dl.append(getDateTimeAsString(\n dt = convertedDate,\n inTimezone = 'PST',\n withStringFormat = strformat))\n\n return dl", "def convert_datetime_objs(list_of_dates):\n datetime_list = []\n for date in list_of_dates:\n date_obj = datetime.datetime.strptime(date, '%d.%m.%Y')\n datetime_list.append(date_obj)\n return datetime_list", "def datetimefstr(date_list, datetimeformat, longdatetimeformat):\n try:\n # including year\n parts = longdatetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, longdatetimeformat)\n for _ in range(parts):\n date_list.pop(0)\n except ValueError:\n # without 
year\n parts = datetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, datetimeformat)\n if dtstart.timetuple()[0] == 1900:\n dtstart = datetime(date.today().timetuple()[0],\n *dtstart.timetuple()[1:5])\n # if start date lies in the past use next year\n #if dtstart < datetime.today():\n #dtstart = datetime(dtstart.timetuple()[0] + 1,\n #*dtstart.timetuple()[1:6])\n for _ in range(parts):\n date_list.pop(0)\n return dtstart", "def udf_str2sqldt(date_time):\n return udf_strftime(\"%Y-%m-%d %H:%M:%S.%f%z\", date_time)", "def fixDate(weatherRDDRecord):\n fieldList = weatherRDDRecord.split(\",\")\n fieldList = [i.replace('\\\"', '') for i in fieldList] #remove quotation marks\n fieldList[0] = fieldList[0].replace('-', '/')\n \n swapDateOrder = fieldList[0].split('/')\n fieldList[0] = swapDateOrder[2] + '/' + swapDateOrder[1] + '/' + swapDateOrder[0]\n \n return (fieldList[0],(fieldList[1:]))", "def datetimeToQuantLib(d):\n return ql.Date(d.day, d.month, d.year)", "def __CastDate(self, values):\n\n if len(values) == 1:\n value = self.__EncodeIfNeeded(values[0])\n if isinstance(value, str):\n try:\n time_tuple = time.strptime(value, '%Y-%m-%d')[0:6]\n except ValueError, err:\n self.__CastError('DATE', values, err)\n else:\n self.__CastError('DATE', values, 'Single input value not a string')\n elif len(values) == 3:\n time_tuple = (values[0], values[1], values[2], 0, 0, 0)\n else:\n self.__CastError('DATE', values,\n 'function takes 1 string or 3 integer values')\n\n try:\n return datetime.datetime(*time_tuple)\n except ValueError, err:\n self.__CastError('DATE', values, err)", "def Convert2date(T):\n\tif isinstance(T,list)==True:\n\t\tpass\n\telif isinstance(T,np.ndarray)==True:\n\t\tpass\n\telse:\n\t\tT=[T]\n\t# else:\n\t# \traise TypeError('scalar, list or array expected')\n\n\n\tfor i in range(len(T)):\n\t\tif isinstance(T[i],basestring):\n\t\t\ttry:\n\t\t\t\tT[i]=pd.to_datetime(T[i]).date()\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\n\t\telif type(T[i])==datetime.datetime or type(T[i])==pd.datetime: \n\t\t\tT[i]= T[i].date()\n\n\t\telif type(T[i])==pd.datetime.date or type(T[i])==datetime.date:\n\t\t\tpass\n\t\telse:\n\t\t\traise TypeError('No date found')\n\n\n\tif len(T)==1:\n\t\treturn T[0]\n\telse:\n\t\treturn T" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
=========================================================== dateformated(x) =========================================================== this function converts the date read from a list to a datetime format
def DateFormated(x): x1=[] for i in x: if len(i)==19: x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) )) # elif len(i)==13: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) )) # else: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) )) # del i,x return x1
[ "def change_format_from_input_to_datetime(list_d_t_t):\n data_output = []\n\n for row in list_d_t_t:\n data_output.append([datetime.datetime.strptime(row[0] + \" \" + row[1], \"%Y-%m-%d %H:%M:%S\"),\n datetime.datetime.strptime(row[0] + \" \" + row[2], \"%Y-%m-%d %H:%M:%S\")])\n\n return data_output", "def DateFormatedSQL(x):\n x=[i[0] for i in x]\n \n x1=[]\n for i in x:\n if len(i)==19:\n x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) ))\n# elif len(i)==13:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) ))\n# else:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) ))\n# del i,x\n return x1", "def convert_datetime_objs(list_of_dates):\n datetime_list = []\n for date in list_of_dates:\n date_obj = datetime.datetime.strptime(date, '%d.%m.%Y')\n datetime_list.append(date_obj)\n return datetime_list", "def datetimefstr(date_list, datetimeformat, longdatetimeformat):\n try:\n # including year\n parts = longdatetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, longdatetimeformat)\n for _ in range(parts):\n date_list.pop(0)\n except ValueError:\n # without year\n parts = datetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, datetimeformat)\n if dtstart.timetuple()[0] == 1900:\n dtstart = datetime(date.today().timetuple()[0],\n *dtstart.timetuple()[1:5])\n # if start date lies in the past use next year\n #if dtstart < datetime.today():\n #dtstart = datetime(dtstart.timetuple()[0] + 1,\n #*dtstart.timetuple()[1:6])\n for _ in range(parts):\n date_list.pop(0)\n return dtstart", "def handleDateList(dates, strformat):\n dl = []\n\n for d in dates:\n #convertedDate = datetime.datetime(*(time.strptime(d[:-7].replace('T', ' '), '%Y-%m-%d %H:%M:%S')[0:6]))\n convertedDate = datetime.datetime(*(time.strptime(d.replace('T', ' '), '%Y-%m-%d %H:%M:%S')[0:6]))\n dl.append(getDateTimeAsString(\n dt = convertedDate,\n inTimezone = 'PST',\n withStringFormat = strformat))\n\n return dl", "def _to_date(self, x):\n if isinstance(x, datetime.datetime):\n return x.date()\n return x", "def convert_str_list_to_date(self, str_date_list: List[str]) -> List[datetime.date]:\n date_list = [\n datetime.datetime.strptime(date, '%Y-%m-%d').date()\n for date in str_date_list\n ]\n return date_list", "def Convert2date(T):\n\tif isinstance(T,list)==True:\n\t\tpass\n\telif isinstance(T,np.ndarray)==True:\n\t\tpass\n\telse:\n\t\tT=[T]\n\t# else:\n\t# \traise TypeError('scalar, list or array expected')\n\n\n\tfor i in range(len(T)):\n\t\tif isinstance(T[i],basestring):\n\t\t\ttry:\n\t\t\t\tT[i]=pd.to_datetime(T[i]).date()\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\n\t\telif type(T[i])==datetime.datetime or type(T[i])==pd.datetime: \n\t\t\tT[i]= T[i].date()\n\n\t\telif type(T[i])==pd.datetime.date or type(T[i])==datetime.date:\n\t\t\tpass\n\t\telse:\n\t\t\traise TypeError('No date found')\n\n\n\tif len(T)==1:\n\t\treturn T[0]\n\telse:\n\t\treturn T", "def format_date(date_string): \n # Transforming into a list [jul, 8, 2020]\n date_list = date_string.replace(',', '').split(' ')\n date_list[0] = my_utils.month_pt_to_en(date_list[0])\n # Transforming 'Jul' into 7\n date_list[0] = datetime.datetime.strptime(date_list[0], \"%b\").month\n # Changing format from MM/DD/YYYY to be DD/MM/YYYY.\n day = date_list[1]\n month = date_list[0]\n date_list[0] = day\n date_list[1] = month\n\n return 
date_list", "def date_parser(dates):\n\n #splitting the dates(containing datetime data) list and returning only the datetime\n return([item.split()[0] for item in dates])\n pass", "def timestamp2dateDMY(value1, feature, parent):\n outdate = datetime.datetime.fromtimestamp(int(value1[0:10])).strftime('%d/%m/%Y')\n return outdate", "def datefstr_year(dtime_list, dateformat):\n parts = dateformat.count(' ') + 1\n dtstring = ' '.join(dtime_list[0:parts])\n dtstart = strptime(dtstring, dateformat)\n if dtstart.tm_mon == 2 and dtstart.tm_mday == 29 and not isleap(default_day.year):\n raise ValueError\n\n for _ in range(parts):\n dtime_list.pop(0)\n\n a_date = datetime(*(default_day.timetuple()[:1] + dtstart[1:5]))\n return a_date", "def _tr_cal_date(self, date):\n items = []\n for code in self._datefmt:\n if code == 'Y':\n items += [date.year_str]\n elif code == 'M':\n if '/' in self._datefmt or '.' in self._datefmt:\n month = date.month_num\n if month is not None:\n month = \"{:02d}\".format(month)\n else:\n month = self._monthName(date.month)\n if month is not None:\n items += [month]\n elif code == 'D':\n day = date.day\n if day is not None and ',' in self._datefmt:\n items += [str(\"{:02d},\".format(day))]\n elif day is not None:\n items += [\"{:02d}\".format(day)]\n if '/' in self._datefmt:\n sep = '/'\n elif '.' in self._datefmt:\n sep = '.'\n elif '-' in self._datefmt:\n sep = '-'\n else:\n sep = ' '\n return sep.join(items)", "def dateify(datestring):\r\n return denumify(datestring, \"XXXX-XX-XX XX:XX:XX\")", "def datemake(datestring):\n return dtt.datetime.strptime(datestring,'%m/%d/%Y')", "def txfDate(date):\n return date.strftime('%m/%d/%Y')", "def buildDate(date):\n parts = date.split(\"-\")\n yDate = parts[1] + \" \" + parts[2] + ', ' + parts[0]\n return yDate", "def create_listing_date(data):\n data['Data_annuncio'] = (data['Riferimento e data annuncio']\n .str.split('-')\n .str[-1]\n .str.strip()\n .astype('datetime64[D]'))\n return data", "def conver_to_date(list_of_strings):\n from datetime import datetime\n import pandas as pd\n import numpy as np\n \n first_date = datetime.strptime('0001-01-01', '%Y-%m-%d')\n list_of_date = []\n for string in list_of_strings:\n if type(string) == str:\n days = datetime.strptime(string, '%Y-%m-%d') - first_date\n list_of_date.append(days.days)\n else:\n list_of_date.append(np.nan)\n list_of_date = pd.DataFrame(list_of_date)\n return list_of_date" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a record exists matching the service pattern with the current host's ip
def record_exists(route53_zone, service_name, ip): # Match records belonging to the service for particular service and # environment. match_regex = "{}\d+\.{}\.?".format(service_name, route53_zone.name) for record in route53_zone.get_records(): match = re.match(match_regex, record.name) if match and ip in record.resource_records: return True return False
[ "def _record_exists(self, cusip, dt):\n where_str = \"(cusip == '{}') & (as_of_date == {})\"\n if not isinstance(dt,int):\n dt = time.mktime(dt.timetuple())\n p_data = self.pools_table.read_where(where_str.format(cusip, dt))\n return p_data.shape[0] > 0", "def check_if_ip_exists(com, server, ip):\n\n for vm in com.getNodeContainerIndex(server.proxmox_node_name)['data']:\n if vm['ip'] == ip:\n return False\n\n return True", "def check_ip(record):\n my_ip = requests.get(\"https://api.ipify.org/?format=json\").json()[\"ip\"]\n dns_ip = record.data['answers'][0]['answer'][0]\n if my_ip == dns_ip:\n log_print(\"Current IP ({ip}) matches DNS record for {record}\"\n .format(record=record.domain, ip=my_ip))\n return {'matches': True}\n else:\n log_print(\"Current IP ({my_ip}) does not match DNS record for {record} (record IP={dns_ip})\"\n .format(record=record.domain, my_ip=my_ip, dns_ip=dns_ip))\n return {'matches': False, 'my_ip': my_ip}", "def exists(self, **kwargs):\n requests_params = self._handle_requests_params(kwargs)\n self._check_load_parameters(**kwargs)\n kwargs['uri_as_parts'] = True\n session = self._meta_data['bigip']._meta_data['icr_session']\n base_uri = self._meta_data['container']._meta_data['uri']\n kwargs.update(requests_params)\n try:\n response = session.get(base_uri, **kwargs)\n except HTTPError as err:\n if err.response.status_code == 404:\n return False\n else:\n raise\n rdict = response.json()\n if \"address\" not in rdict:\n # We can add 'or' conditions to be more restrictive.\n return False\n # Only after all conditions are met...\n return True", "def checkDynamicEndpointExists(self,ipAddr,phoneNo):\n # Find whether an endpoints with the given ip address exists\n filterOutput1 = self.assertCommand(\"cli iedge lkup %s | egrep -e 'IpAddr|State'\" %ipAddr)\n if ((filterOutput1.find(ipAddr)!=-1) and (filterOutput1.find(\"Dynamic\")!=-1)):\n return True\n else:\n return False", "def exists(self, key, **fields):\n reqtime = time.time()\n findings = None\n while findings is None and time.time() - reqtime < self.RequestTimeout:\n findings = self.__buff.get(Record.match(key, **fields))\n # print(findings,fields)\n if len(findings) == 0:\n findings = self.readFor(key, blocking=False, **fields)\n time.sleep(0.01)\n\n return findings is not None and len(findings) > 0", "def record_name_exist(domain_id, server, name):\n record_name_query = \"SELECT * FROM records where domain_id={0} and name='{1}'\".format(domain_id, name)\n record_name = perform_query(server.name, server.port, server.user, server.password, server.database, record_name_query, 'query')\n if any(record_name):\n return True\n else:\n return False", "def name_matches_ip(name, ip, state):\n for client in state['clients']:\n if client['name'] == name:\n if client['ip'] == ip:\n return True\n else:\n return False\n return False", "def _exists(self, model, identity, time):\n query = Query(model).identity(identity).time(time)\n records = query.fetch()\n logger.debug(\"Found {} matches for time {}\".format(len(records), time))\n return len(records) > 0", "def searchRecordToLocalRecord() -> bool:\n recordList: list = connectToAPI.getRecordList(config.getTokenName(), config.getToken(), config.getDomain())\n if recordList is None:\n return False\n for record in recordList:\n if config.getHost() + \".\" + config.getDomain() + \".\" == record['fqdn']:\n global localRecord\n localRecord = record\n return True\n return False", "def _check_host_existence(self, hostname: str) -> bool:\n with self.lock:\n hosts = self.hosts.all()\n 
for host in hosts:\n if host['hostname'] == hostname:\n return True\n return False", "def test_fn_saintsxctf_com_api_route53_record_exists(self) -> None:\n try:\n a_record = Route53.get_record(f'saintsxctf.com.', f'{self.domain_name}.', 'A')\n except IndexError:\n self.assertTrue(False)\n return\n\n print(a_record)\n self.assertTrue(a_record.get('Name') == f'{self.domain_name}.' and a_record.get('Type') == 'A')", "def valid_addr(self, addr):\n if self.table.has_addr(addr):\n return Status(200, \"success\")\n else:\n return Status(420, \"Not registered address\" + str(addr))", "def member_exists(self, service, bigip):\n pool = self.service_adapter.get_pool(service)\n member = self.service_adapter.get_member(service)\n part = pool[\"partition\"]\n try:\n p = self.pool_helper.load(bigip,\n name=pool[\"name\"],\n partition=part)\n\n m = p.members_s.members\n if m.exists(name=urllib.quote(member[\"name\"]), partition=part):\n return True\n except Exception as e:\n # log error but continue on\n LOG.error(\"Error checking member exists: %s\", e.message)\n return False", "def hasIP(self,ip):\n\treturn ip in self.ip_list", "def request_exists(self):\r\n result = False\r\n query = \"SELECT idRequest FROM Request WHERE idRequest = %s AND idMember = %s \" \\\r\n \"AND idService = %s;\"\r\n param = [self.id_request,\r\n self.id_member_ate,\r\n self.id_service]\r\n response = self.connect.select(query, param)\r\n if response:\r\n result = True\r\n return result", "def check_service_unique(name):\n cursor.execute(\"SELECT * FROM service WHERE service_name = ? OR shorthand_name = ?;\", (name, name))\n\n return len(cursor.fetchall()) < 1", "def match_api_keys(key, ip):", "def canDo_url(self, url):\n hostname = urlparse.urlsplit(url)[1]\n for hostEnd in self.highwireHosts:\n if hostname.endswith(hostEnd):\n logging.log(5, 'url hostname %s ends with %s -> highwire' % (hostname, hostEnd))\n return True\n\n if hostname in self.hostCache:\n ipAddr = self.hostCache[hostname]\n else:\n logging.debug('Looking up IP for %s' % hostname)\n try:\n ipAddr = socket.gethostbyname(hostname)\n self.hostCache[hostname] = ipAddr\n except socket.gaierror:\n raise pubGetError('Illegal hostname %s in link' % hostname, 'invalidHostname', hostname)\n\n ipParts = ipAddr.split('.')\n ipParts = [ int(x) for x in ipParts ]\n result = ipParts[0] == 171 and ipParts[1] in range(64, 68)\n if result == True:\n logging.log(5, 'hostname %s is highwire host' % hostname)\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates record with record_name and ip; updates record if it already exists with a different ip; does nothing if record already exists with the same ip
def upsert_record(route53_zone, record_name, ip): # Only upsert the dns record if it doesn't resolve to us. try: record_ip = socket.gethostbyname(record_name) except socket.error: # Ignore if we can't connect to the host pass else: if ip == record_ip: return print str(dt.now()), "Registering host as", record_name record = route53_zone.get_a(record_name) if record and ip not in record.resource_records: route53_zone.update_a(record_name, ip) elif not record: route53_zone.add_a(record_name, ip)
[ "def _update_record(self, record_name, ip):\n if ((not hasattr(self, '_current_zone')) or (not self._current_zone)) or ((not hasattr(self, '_new_zone_version_number')) or (not self._new_zone_version_number)):\n raise GandiApiException(\"Can't update record, no cloned zone available.\")\n \n list_record = self._api.domain.zone.record.list(self._api_key, self._current_zone['id'], \n self._new_zone_version_number)\n for record in list_record:\n if record['name'] == record_name:\n myrecord = record\n # Create new record\n self._api.domain.zone.record.update(self._api_key, self._current_zone['id'], \n self._new_zone_version_number, {'id': myrecord['id']}, \n {\n 'name': myrecord['name'],\n 'type': myrecord['type'],\n 'value': ip,\n 'ttl': myrecord['ttl']\n })\n logging.info('Update record %s with ip %s successfully.' % (record_name, ip))", "def create(self, key, record, overwrite=False):\n if key in self.db and not overwrite:\n raise ValueError(\"A record for key \\\"%s\\\" already exists.\" % key)\n self.db[key] = copy(record)", "def insert_or_update(self, table, record):\n try:\n request = s.query(table=table, query={'sys_id': record['sys_id']})\n #request.get_single()\n response = request.update(record)\n print >> sys.stderr, 'update'\n except NoResults:\n # Record does not exist so create it\n response = self.snow.insert(table=table, payload=record)\n print >> sys.stderr, 'create'\n return response", "def write_record(self, req):\n\n with sql.connect(self.db) as connection:\n cur = connection.cursor()\n insert_sql = \"INSERT INTO requests (ip, timestamp) values(?, ?)\"\n data = (req.remote_addr, str(datetime.now(tz=None)))\n\n cur.execute(insert_sql, data)\n\n connection.commit()", "def __update_dns_record(self, endpoint, ip_address):\n self.conn.change_resource_record_sets(\n HostedZoneId=self.endpoint_zone,\n ChangeBatch={\n 'Changes': [\n {\n 'Action': 'UPSERT',\n 'ResourceRecordSet':\n {\n 'Name': endpoint,\n 'Type': 'A',\n 'TTL': 60,\n 'ResourceRecords': [\n {\n 'Value': ip_address\n },\n ],\n }\n },\n ]\n }\n )", "def add_record(self):\n if not self.record_exists(self.args.date):\n record = self.create_record()\n self.records.append(record)\n self.write_json_file(self.records_file, self.records)\n return True\n return False", "def new_ip(self, ip):\n if not ip in self.ip_list:\n self.ip_list.add(ip)\n host = self.hs.id_to_object(ip)\n host.add_tag('sniffer')\n host.save()\n print_success(\"New ip address: {}\".format(ip))", "def upload_record(self,\n record: Optional[Record] = None,\n style: Optional[str] = None,\n name: Optional[str] = None,\n model: Union[str, io.IOBase, DM, None] = None,\n workspace: Union[str, pd.Series, None] = None,\n overwrite: bool = False,\n verbose: bool = False):\n if record is None:\n record = load_record(style, model, name=name)\n \n try:\n self.remote_database.add_record(record=record, workspace=workspace,\n verbose=verbose) \n except ValueError as e:\n if overwrite:\n self.remote_database.update_record(record=record, workspace=workspace,\n verbose=verbose)\n else:\n raise ValueError('Matching record already exists: use overwrite=True to change it') from e", "def post_create(self, record):", "def _create_record(self, api_args):\n if self.create:\n if not self._fqdn.endswith('.'):\n self._fqdn += '.'\n if not self._record_type.endswith('Record'):\n self._record_type += 'Record'\n uri = '/{}/{}/{}/'.format(self._record_type, self._zone,\n self._fqdn)\n response = DynectSession.get_session().execute(uri, 'POST',\n api_args)\n 
self._build(response['data'])", "def create_ns_record(self, name, nameservers):\n self.get_hosted_zone_id()\n record = {\n \"Name\": name,\n \"Type\": \"NS\",\n \"TTL\": 172800,\n \"ResourceRecords\": [{\"Value\": x} for x in nameservers or []]\n }\n self.client.change_record(self._id, \"UPSERT\", record)", "def test_create_record_with_existing_key(self):\n key = 'test_key'\n value = {\"First Name\": \"Mahendra\", \"Last Name\": \"Gaur\", \"unique_key\": key}\n value = json.dumps(value)\n self.task_obj.create_record(key=key, value=value)\n\n value = {\"First Name\": \"Mahendra\", \"Last Name\": \"Gaur\", \"unique_key\": key}\n value = json.dumps(value)\n actual_response = self.task_obj.create_record(key=key, value=value)\n expected_response = \"ERROR: Data with key {} is already available\".format(key)\n self.assertEqual(actual_response, expected_response)", "def create_record(self, id, record, type, data, ttl=60):\n self.record.createObject({\n 'domainId': id,\n 'ttl': ttl,\n 'host': record,\n 'type': type,\n 'data': data})", "def update_record():\n if 'json' not in request.files:\n return \"no json file in the request!\", 400\n try:\n _record = json.loads(request.files['json'].read())\n except ValueError:\n return \"failed to parse JSON file correctly!\", 400\n if type(_record) is not dict or 'name' not in _record:\n return \"expecting a dictionary with a name, post failed!\", 400\n with RECORD_LOCK:\n for _index, _rec in enumerate(RECORDS):\n if _rec['name'] == _record['name']:\n RECORDS[_index] = _record\n return \"OK\"\n return \"Failed to update record!\", 500", "def add_remote_duplicate_entry(self, ip):\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite-remote')\n l3out = OutsideL3('l3out', tenant)\n other_epg = OutsideEPG('other', l3out)\n subnet = OutsideNetwork(ip, other_epg)\n subnet.ip = ip + '/32'\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)", "def add_host(self, name, ip):\n rdataa = dns.rdata.from_text(dns.rdataclass.IN,dns.rdatatype.A,str(ip))\n rdataseta = dns.rdataset.from_rdata(300,rdataa)\n self.update.add(name,rdataseta)\n return dns.query.tcp(self.update,self.server_address)", "def update_dns(self):\n if self.ptr:\n which_zone = None\n zones = dns.models.Zone.objects.all()\n for zone in zones:\n if self.ptr.endswith(zone.name) or self.ptr.endswith(zone.name + '.'):\n which_zone = zone\n break\n\n if which_zone:\n zone_name = which_zone.name\n record_name = self.ptr[:-len(zone_name)] if not self.ptr.endswith('.') else self.ptr[:-len(zone_name) - 1]\n if record_name.endswith('.'):\n record_name = record_name[:-1]\n record_type = 'A' if self.family == 4 else 'AAAA'\n\n dns.models.Record.objects.get_or_create(\n name=record_name,\n record_type=record_type,\n zone=which_zone,\n address=self\n )", "def _write_record(self, record):\n self.db.write_record(record)", "def add_route53_record(emr_internal_ips, cr):\n\n conn = connect_route53(aws_access_key_id = cr.get_config(\"aws_access_key\"), aws_secret_access_key = cr.get_config(\"aws_secret_key\"))\n\n zone = conn.get_zone(\"alpinenow.local\")\n\n print \"Adding DNS Records for: {0}\".format(emr_internal_ips)\n for ip in emr_internal_ips:\n internal_dns = \"ip-\" + ip.replace(\".\", \"-\") + \".alpinenow.local\"\n response = zone.add_a(internal_dns, ip) # TODO: Do something with response" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new EC2 instance with specific parameters. SecurityGroup (sg) and KeyPair (key) have to be previously created (see cassandgo initSG and cassandgo initKP)
def createInstance(ec2,ami,nb_nodes,placement,instance_type,key,sg,user_data=None): reservation = ec2.run_instances(ami,min_count=nb_nodes,max_count=nb_nodes,placement = placement,key_name=key,security_groups=[sg],instance_type=instance_type,user_data=user_data) instance = reservation.instances[0] return instance
[ "def create_instance(ami, sg_name):\n instance = None\n ec2 = boto3.resource('ec2',region_name=\"us-east-1\")\n # TODO: Create an EC2 instance\n # Wait for the instance to enter the running state\n # Reload the instance attributes\n\n try:\n instance = ec2.create_instances(\n ImageId=ami,\n InstanceType=INSTANCE_TYPE,\n KeyName=KEY_NAME,\n MaxCount=1,\n MinCount=1,\n SecurityGroupIds=[\n sg_name,\n ],\n TagSpecifications=[{\n 'ResourceType': 'instance',\n 'Tags': TAGS\n }, {\n 'ResourceType': 'volume',\n 'Tags': TAGS\n }]\n )[0]\n instance.wait_until_running()\n instance.reload()\n print(instance.state)\n except ClientError as e:\n print(e)\n\n return instance", "def create_ec2_instance(image_id, instance_type, keypair_name, user_data):\n\n # provision and launch the ec2 instance\n #ec2_resource = boto3.resource('ec2')\n ec2_client = boto3.client('ec2')\n #ec2_instance = ec2_client.instance('id')\n try:\n response = ec2_client.run_instances(imageid=image_id,\n instancetype=instance_type,\n keyname=keypair_name,\n mincount=1,\n maxcount=1,\n userdata=user_data,\n securitygroups=['allowsshandosb']\n )\n instance = response['instances'][0]\n #instance = response[0]\n #instance.wait_until_running()\n #instance.reload()\n print(\"inside create function:\",instance)\n except clienterror as e:\n logging.error(e)\n return none\n\n\n return instance\n #return response[0]", "def create_sec_group(ec2, sec_group_name):\n sec = ec2.create_security_group(sec_group_name, 'Jvivian Boto SecGroup')\n port = 22\n sec.authorize('tcp', port, port, '0.0.0.0/0')", "def init_region ( aws, region_name, aws_account_type, init_params ) :\n ec2_conn = aws.ec2_conn( )\n keypair_savedir = os.environ[ 'PWD' ]\n print \"Creating new keypairs for region \" + region_name\n for keytype in init_params.get( 'keypairs', [] ) :\n keypair_name = get_keypair_name( aws_account_type, region_name, keytype )\n keypair = ec2_conn.get_key_pair( keypair_name )\n if keypair :\n print 'Keypair ' + keypair_name + ' already exists. 
Skipping.'\n else :\n keypair = ec2_conn.create_key_pair( keypair_name )\n keypair.save( keypair_savedir )\n keypair_filename = keypair_savedir + '/' + keypair_name + '.pem'\n print 'Created keypair ' + keypair_filename\n store_keypair( s3_infra_conn = aws.s3_infrastructure_conn( ),\n region_name = region_name,\n aws_account_type = aws_account_type,\n keypair_name = get_keypair_keypath( aws_account_type ) + keypair_name,\n keypair_filename = keypair_filename )\n print 'Stored keypair in S3 at: ' + get_keypair_keypath( aws_account_type )\n os.remove( keypair_filename )\n\n if init_params.get( 'init-deployment', 'YES' ) == 'YES' :\n print \"Creating Deployment security group.\"\n deploy_secgrp = ec2_conn.create_security_group( get_deployment_secgrp_name( ),\n \"Used by the deployment server.\" )\n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = hbo_cidr_list ) \n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = hbo_cidr_list ) \n\n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = build_server_cidr ) \n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = build_server_cidr ) \n\n if init_params.get( 'init-ami-update', 'YES' ) == 'YES' :\n print \"Creating ami-update security group.\"\n amiupdate_secgrp = ec2_conn.create_security_group( get_amiupdate_secgrp_name( ),\n \"Used by the ami update instances.\" )\n amiupdate_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = hbo_cidr_list ) \n amiupdate_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = hbo_cidr_list )", "def create_ec2_with_eip(ec2, ec2_client, subnet_pub_ec2):\n ## create EC2 instance\n print(\"\\n===Creating an EC2 instance\")\n instances = ec2.create_instances(\n ImageId=AMI_ID,\n MinCount=1,\n MaxCount=1,\n InstanceType=EC2_TYPE,\n KeyName=KEY_PAIR_NAME,\n NetworkInterfaces=[{\n \"DeviceIndex\":0,\n \"SubnetId\": subnet_pub_ec2.id}],\n TagSpecifications=[{\n \"ResourceType\":\"instance\",\n \"Tags\":[{\"Key\": \"Name\", \"Value\": EC2_NAME}]\n }]\n )\n \n ## get instance ids\n instances_ids = [i.instance_id for i in instances]\n\n ## wait till instance is ready\n waiter = ec2_client.get_waiter(\"instance_running\")\n waiter.wait(InstanceIds=instances_ids)\n print(\"An EC2 instance is ready.\")\n\n ## create new EIP and attach it to existing EC2 instance\n instance_id = instances[0].instance_id\n try:\n allocation = ec2_client.allocate_address(Domain=\"vpc\")\n response = ec2_client.associate_address(AllocationId=allocation[\"AllocationId\"],\n InstanceId=instance_id)\n print(response)\n except ClientError as e:\n print(e)\n print(f\"===EIP {allocation['PublicIp']} has been assigned to the EC2 instance!\")\n return instances, allocation[\"PublicIp\"]", "def create_secgroup(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n sgid = args[\"Group-Name\"]\n desc = args[\"Description\"]\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n response = ec2.describe_vpcs()\n vpc_id = response.get('Vpcs', [{}])[0].get('VpcId', '')\n\n response = ec2.create_security_group(GroupName=sgid,\n 
Description=desc,\n VpcId=vpc_id)\n attachment = MessageAttachmentsClass()\n d = response[\"GroupId\"]\n attachment.title = d\n message.message_text = \"Security group created:\"\n message.attach(attachment)\n\n return message.to_json()", "def cloud_control_create_ec2(event, context):\n\n msg = \"\"\n validate_with_context_payload = {\n \"LastInstanceName\": event[\"body\"][\"InstanceName\"],\n \"LastSubnetName\": event[\"body\"][\"SubnetName\"],\n \"LastKeyPairName\": event[\"body\"][\"KeyName\"],\n \"LastSecGroupName\": event[\"body\"][\"SecGroupName\"],\n \"LastInstanceType\": event[\"body\"][\"InstanceType\"]\n }\n response = {}\n response = validate_with_dynamo(validate_with_context_payload)\n payload_response = json.loads(response)\n ValidatedInstanceName = payload_response[\"LastInstanceName\"]\n ValidatedSubnetName = payload_response[\"LastSubnetName\"]\n ValidatedKeyPairName = payload_response[\"LastKeyPairName\"]\n ValidatedSecGroupName = payload_response[\"LastSecGroupName\"]\n ValidatedInstanceType = payload_response[\"LastInstanceType\"]\n # Validate instance name\n ec2_client = boto3.client('ec2')\n response = ec2_client.describe_instances(\n Filters=[\n {\n 'Name': 'tag:Name',\n 'Values': [ValidatedInstanceName]\n }\n ]\n )\n instance_list = []\n for reservation in response['Reservations']:\n for instance in reservation['Instances']:\n instance_list.append(instance['InstanceId'])\n\n if instance_list:\n msg = \"Instance with name {} exists!\".format(ValidatedInstanceName)\n return {\"msg\": msg}\n\n# to refactor\n\n msg = \"Instance {} is created \".format(ValidatedInstanceName)\n #subnet_name = ValidatedSubnetName.lower()\n success_code, msg, subnet_id = ec2_find_subnet(ValidatedSubnetName.lower(), msg)\n if not success_code == 0:\n return {\"msg\": msg}\n\n success_code, msg, sg_id = ec2_find_sg(ValidatedSecGroupName, msg)\n if not success_code == 0:\n return {\"msg\": msg}\n\n success_code, msg, key_name = ec2_find_key(ValidatedKeyPairName, msg)\n if not success_code == 0:\n return {\"msg\": msg}\n\n # Prepare data\n # This should be improved.\n # It looks bad, but I do not have idea now, how to write it better.\n if not key_name == \"none\":\n response = ec2_client.run_instances(\n BlockDeviceMappings=[\n {\n 'DeviceName': '/dev/xvda',\n 'Ebs': {\n\n 'DeleteOnTermination': True,\n 'VolumeSize': 8,\n 'VolumeType': 'gp2'\n },\n },\n ],\n ImageId='ami-030dbca661d402413',\n InstanceType=ValidatedInstanceType,\n KeyName=key_name,\n MaxCount=1,\n MinCount=1,\n Monitoring={\n 'Enabled': False\n },\n SecurityGroupIds=[\n sg_id,\n ],\n SubnetId=subnet_id,\n TagSpecifications=[\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': ValidatedInstanceName\n },\n ]\n },\n ]\n )\n else:\n response = ec2_client.run_instances(\n BlockDeviceMappings=[\n {\n 'DeviceName': '/dev/xvda',\n 'Ebs': {\n\n 'DeleteOnTermination': True,\n 'VolumeSize': 8,\n 'VolumeType': 'gp2'\n },\n },\n ],\n ImageId='ami-030dbca661d402413',\n InstanceType=ValidatedInstanceType,\n MaxCount=1,\n MinCount=1,\n Monitoring={\n 'Enabled': False\n },\n SecurityGroupIds=[\n sg_id,\n ],\n SubnetId=subnet_id,\n TagSpecifications=[\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': ValidatedInstanceName\n },\n ]\n },\n ]\n )\n write_to_table_payload = {\n \"LastInstanceName\": ValidatedInstanceName,\n \"LastSubnetName\": ValidatedSubnetName,\n \"LastKeyPairName\": ValidatedKeyPairName,\n \"LastSecGroupName\": ValidatedSecGroupName,\n \"LastInstanceType\": 
ValidatedInstanceType\n }\n write_to_dynamo(write_to_table_payload)\n return {\"msg\": msg}", "def check_or_start_instance():\n\n MY_AMI = config.get('ec2', 'AMI')\n SECURITY_GROUP = config.get('ec2', 'SECURITY_GROUP')\n KEY_PATH = config.get('ec2', 'KEY_PATH')\n INSTANCE_TYPE = config.get('ec2', 'INSTANCE_TYPE')\n REGION = config.get('ec2', 'REGION')\n os.environ[\"AWS_ACCESS_KEY_ID\"] = config.get('ec2', 'AWS_ACCESS_KEY_ID')\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = config.get('ec2', 'AWS_SECRET_ACCESS_KEY')\n\n conn = boto.ec2.connect_to_region(REGION)\n\n security_groups = conn.get_all_security_groups()\n\n if 'own_ip' not in env:\n env.own_ip = get_own_ip()\n\n # check ssh access from own IP is allowed\n try:\n [ep_group] = [x for x in security_groups if x.name == SECURITY_GROUP]\n except ValueError:\n pass\n try:\n # iterate over rules (grouped by protocol et al)\n [own_cidr_ip_grant] = [rule for rule in ep_group.rules if\n # iterate over grants inside rules (IP ranges)\n filter(lambda grant: grant.cidr_ip == env.own_ip + '/32', rule.grants)]\n except ValueError:\n print(\n 'no rule for TCP/22 with own IP %(own_ip)s found in security group: %(sgroup)s' % {'own_ip': env.own_ip,\n 'sgroup': SECURITY_GROUP})\n # ep_group.authorize('tcp', 22, 22, env.own_ip + '/32')\n\n image = conn.get_image(MY_AMI)\n\n try:\n [ep_host_key_pair] = [x for x in conn.get_all_key_pairs() if x.name == 'ep-host']\n except ValueError:\n # this probably means the key is not defined\n # get the first one in the belt for now:\n print \"GeoNode file not found in the server\"\n ep_host_key_pair = conn.get_all_key_pairs()[0]\n\n reservations = conn.get_all_instances(filters={\"tag:Name\": \"ep\"})\n instances = [i for r in reservations for i in r.instances]\n\n instance = instances[0]\n\n instance.start()\n print \"Firing up instance\"\n\n # Give it 10 minutes to appear online\n for i in range(120):\n time.sleep(5)\n instance.update()\n print instance.state\n if instance.state == \"running\":\n break\n\n if instance.state == \"running\":\n dns = instance.dns_name\n print \"Instance up and running at %s\" % dns\n\n config.set('ec2', 'HOST', dns)\n config.set('ec2', 'INSTANCE', instance.id)\n env.hosts = [dns, ]\n env.user = config.get('ec2', 'USER')\n env.key_filename = KEY_PATH\n with open(CONFIG_FILE, 'wb') as configfile:\n config.write(configfile)\n\n print \"ssh -i %s ubuntu@%s\" % (KEY_PATH, dns)\n print \"Terminate the instance via the web interface %s\" % instance", "def create_security_group(self, sg_name):\n pass", "def test_deploy_instance_with_new_network_and_sec_group(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_sec_group_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 249\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n sec_group_name=sec_group_name)", "def create_security_group(ec2_client):\r\n\r\n print(f\"creating RDS security group with name {get_config_item('security_group_name')}\")\r\n\r\n return ec2_client.create_security_group(\r\n GroupName=get_config_item('security_group_name'),\r\n Description='RDS security group for public access',\r\n VpcId=get_config_item('security_group_vpc_id')\r\n )", "def launch_instance_vpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n subnet_id,\n security_group_id,\n machine_type = 'm3.medium',\n 
user_data = None,\n wait_for_running = True,\n public_ip = False,\n static_ip_address = None,\n monitor_params = None ) :\n interfaces = None\n subnet = None\n security_group_ids = None\n \n if static_ip_address is None:\n spec = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id = subnet_id,\n groups = [ security_group_id ],\n associate_public_ip_address = public_ip )\n interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( spec )\n else:\n subnet = subnet_id\n security_group_ids = [security_group_id]\n\n instance_r = ec2_conn.run_instances( image_id = ami.id,\n key_name = keypair,\n instance_type = machine_type,\n monitoring_enabled = True,\n network_interfaces = interfaces,\n subnet_id = subnet, \n user_data = user_data,\n security_group_ids = security_group_ids,\n private_ip_address = static_ip_address )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n \n print \"Waiting for instance to be ready\"\n \n if wait_for_running :\n running = wait_on_object_state( instance, 'running', max_wait = 600, failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n if monitor_params :\n print \"Adding monitoring to the instance.\"\n\n return instance", "def create_ec2_instance(name, instance_type=\"t2.micro\", ami=CONFIG[\"AWS_SPARK_AMI\"]):\n\n conn = ec2.connect_to_region(CONFIG[\"AWS_REGION\"])\n reservation = conn.run_instances(\n ami,\n key_name=CONFIG[\"AWS_KEYNAME\"],\n instance_type=instance_type,\n security_group_ids=[CONFIG[\"AWS_SECURITYGROUP\"]],\n subnet_id=CONFIG[\"AWS_SUBNET\"]\n )\n time.sleep(15)\n instance = reservation.instances[0]\n status = instance.update()\n\n while status == 'pending':\n time.sleep(10)\n status = instance.update()\n\n if status == 'running':\n instance.add_tag(\"Name\", name)\n else:\n raise \"Unknown instance status:\", status\n\n while not is_ssh_available(instance.public_dns_name):\n time.sleep(5)\n\n return instance", "def _create_instance(self, options):\n reservation = self.connection.run_instances(options['image'],\n key_name=options['key_name'], instance_type=options['instance_type'],\n security_groups=options['security_groups'])\n\n return reservation.instances[0]", "def create_instance(StackId=None, LayerIds=None, InstanceType=None, AutoScalingType=None, Hostname=None, Os=None, AmiId=None, SshKeyName=None, AvailabilityZone=None, VirtualizationType=None, SubnetId=None, Architecture=None, RootDeviceType=None, BlockDeviceMappings=None, InstallUpdatesOnBoot=None, EbsOptimized=None, AgentVersion=None, Tenancy=None):\n pass", "def create_keypair(econfig_file=None, region=None, keyname=\"bcbio\"):\n import boto\n import boto.ec2\n if econfig_file:\n keypair_dir = os.path.dirname(econfig_file).replace(\"elasticluster\", \"aws_keypairs\")\n else:\n keypair_dir = os.path.join(os.getcwd(), \"aws_keypairs\")\n if not os.path.exists(keypair_dir):\n os.makedirs(keypair_dir)\n private_key = os.path.join(os.path.join(keypair_dir, keyname))\n new_key = not os.path.exists(private_key)\n if new_key:\n cmd = [\"ssh-keygen\", \"-t\", \"rsa\", \"-N\", \"\", \"-f\", private_key, \"-C\", \"bcbio_aws_keypair\"]\n subprocess.check_call(cmd)\n public_key = private_key + \".pub\"\n if region:\n ec2 = boto.ec2.connect_to_region(region)\n else:\n ec2 = boto.connect_ec2()\n key = ec2.get_key_pair(keyname)\n if key and new_key:\n print(\"Non matching key 
%s found in AWS, removing.\" % keyname)\n ec2.delete_key_pair(keyname)\n key = None\n if not key:\n print(\"Key %s not found in AWS, importing created key\" % keyname)\n with open(public_key) as in_handle:\n body = in_handle.read()\n try:\n ec2.import_key_pair(keyname, body)\n except TypeError as e:\n body = body.encode('utf-8')\n ec2.import_key_pair(keyname, body)\n return {\"user_key_name\": keyname, \"user_key_private\": private_key,\n \"user_key_public\": public_key}", "def create_instances(region_name, app_name, image_name,\n storage_enckey=None,\n s3_logs_bucket=None,\n identities_url=None,\n ssh_key_name=None,\n company_domain=None,\n ldap_host=None,\n instance_type=None,\n security_group_ids=None,\n instance_profile_arn=None,\n subnet_type=SUBNET_PRIVATE,\n subnet_id=None,\n vpc_id=None,\n vpc_cidr=None,\n tag_prefix=None,\n dry_run=False,\n template_name=None,\n ec2_client=None,\n **kwargs):\n if not instance_type:\n instance_type = 't3a.micro'\n if not template_name:\n template_name = \"%s-cloud-init-script.j2\" % app_name\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n resp = ec2_client.describe_instances(\n Filters=[\n {'Name': 'tag:Name', 'Values': [\"*%s*\" % app_name]},\n {'Name': 'instance-state-name',\n 'Values': [EC2_RUNNING, EC2_STOPPED, EC2_PENDING]}])\n\n instances = None\n instance_ids = []\n stopped_instance_ids = []\n for reserv in resp['Reservations']:\n instances = reserv['Instances']\n for instance in reserv['Instances']:\n names = []\n for tag in instance['Tags']:\n if tag['Key'] == 'Name':\n names = [name.strip() for name in tag['Value'].split(',')]\n break\n if app_name not in names:\n continue\n instance_ids += [instance['InstanceId']]\n if instance['State']['Name'] == EC2_STOPPED:\n stopped_instance_ids += [instance['InstanceId']]\n if stopped_instance_ids:\n ec2_client.start_instances(\n InstanceIds=stopped_instance_ids,\n DryRun=dry_run)\n LOGGER.info(\"%s restarted instances %s for '%s'\",\n tag_prefix, stopped_instance_ids, app_name)\n if instance_ids:\n LOGGER.info(\"%s found instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n # If instances are running and there is a message queue,\n # we assume the infrastructure for this app is ready to accept\n # containers.\n return instances\n\n search_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'templates')\n template_loader = jinja2.FileSystemLoader(searchpath=search_path)\n template_env = jinja2.Environment(loader=template_loader)\n template = template_env.get_template(template_name)\n user_data = template.render(\n logs_storage_location=\"s3://%s\" % s3_logs_bucket,\n identities_url=identities_url,\n remote_drop_repo=\"https://github.com/djaodjin/drop.git\",\n company_domain=company_domain,\n ldap_host=ldap_host,\n **kwargs)\n\n # Find the ImageId\n image_id = _get_image_id(\n image_name, instance_profile_arn=instance_profile_arn,\n ec2_client=ec2_client, region_name=region_name)\n\n if not storage_enckey:\n # Always make sure the EBS storage is encrypted.\n storage_enckey = _get_or_create_storage_enckey(\n region_name, tag_prefix, dry_run=dry_run)\n\n block_devices = [\n {\n # `DeviceName` is required and must match expected name otherwise\n # an extra disk is created.\n 'DeviceName': '/dev/xvda', # XXX '/dev/sda1',\n #'VirtualName': 'string',\n # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html\n 'Ebs': {\n 'DeleteOnTermination': False,\n #'Iops': 100, # 'not supported for gp2'\n #'SnapshotId': 'string',\n 
'VolumeSize': 8,\n 'VolumeType': 'gp2'\n },\n #'NoDevice': 'string'\n },\n ]\n if storage_enckey:\n # XXX Haven't been able to use the key we created but the default\n # aws/ebs is OK...\n for block_device in block_devices:\n block_device['Ebs'].update({\n 'KmsKeyId': storage_enckey,\n 'Encrypted': True\n })\n\n network_interfaces = [{\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': security_group_ids\n }]\n if not subnet_id:\n if not vpc_id:\n vpc_id, _ = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, ec2_client=ec2_client, region_name=region_name)\n if subnet_type == SUBNET_PRIVATE:\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(app_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type == SUBNET_DBS:\n dbs_subnet_by_cidrs = _get_subnet_by_cidrs(\n dbs_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(dbs_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type in [SUBNET_PUBLIC_READY, SUBNET_PUBLIC]:\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(web_subnet_by_cidrs.values()))['SubnetId']\n if subnet_type == SUBNET_PUBLIC:\n network_interfaces = [{\n 'AssociatePublicIpAddress': True,\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': security_group_ids\n }]\n\n if not instances or not instance_ids:\n for _ in range(0, NB_RETRIES):\n # The IAM instance profile take some time to be visible.\n try:\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n resp = ec2_client.run_instances(\n BlockDeviceMappings=block_devices,\n ImageId=image_id,\n KeyName=ssh_key_name,\n InstanceType=instance_type,\n MinCount=1,\n MaxCount=1,\n#botocore.exceptions.ClientError: An error occurred (InvalidParameterCombination) when calling the RunInstances operation: Network interfaces and an instance-level subnet ID may not be specified on the same request\n# SubnetId=subnet_id,\n# SecurityGroupIds=security_group_ids,\n IamInstanceProfile={'Arn': instance_profile_arn},\n NetworkInterfaces=network_interfaces,\n TagSpecifications=[{\n 'ResourceType': \"instance\",\n 'Tags': [{\n 'Key': 'Name',\n 'Value': app_name\n }, {\n 'Key': 'Prefix',\n 'Value': tag_prefix\n }]\n }],\n UserData=user_data,\n DryRun=dry_run)\n instances = resp['Instances']\n instance_ids = [\n instance['InstanceId'] for instance in instances]\n break\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidParameterValue':\n raise\n LOGGER.info(\"%s waiting for IAM instance profile %s to be\"\\\n \" operational ...\", tag_prefix, instance_profile_arn)\n time.sleep(RETRY_WAIT_DELAY)\n LOGGER.info(\"%s started instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n for _ in range(0, NB_RETRIES):\n # It can take some time before the instances will appear\n # in a `describe_instances` call. 
We want to make sure\n # not to get errors later on if we execute too fast.\n try:\n resp = ec2_client.describe_instances(InstanceIds=instance_ids)\n break\n except botocore.exceptions.ClientError as err:\n err_code = err.response.get('Error', {}).get('Code', 'Unknown')\n LOGGER.error(\"XXX err_code=%s\", err_code)\n if not err_code == 'InvalidInstanceID.NotFound':\n raise\n LOGGER.info(\"%s waiting for EC2 instances %s to be\"\\\n \" operational ...\", tag_prefix, instance_ids)\n time.sleep(RETRY_WAIT_DELAY)\n\n return instances", "def create_default_sg(self):\n logger.debug(\"Creating default (very open) security group '%s' on VPC %s\", DEFAULT_SG_GROUP_NAME, self.vpc_id)\n try:\n if self.vpc_id is not None:\n _ = self.client.create_security_group(GroupName=DEFAULT_SG_GROUP_NAME,\n Description=\"Default security group for adam\",\n VpcId=self.vpc_id)\n else:\n _ = self.client.create_security_group(GroupName=DEFAULT_SG_GROUP_NAME,\n Description=\"Default security group for adam\")\n print(_)\n except ClientError as e:\n error_code = e.response[\"Error\"][\"Code\"]\n if error_code == \"InvalidGroup.Duplicate\":\n logger.debug(\"Default security group already exists\")\n else:\n raise e\n\n logger.debug(\"Setting up default values for the '%s' security group\", DEFAULT_SG_GROUP_NAME)\n security_group = self.get_security_groups(DEFAULT_SG_GROUP_NAME)[0]\n\n IpPermissions = [{\n \"IpProtocol\": \"tcp\",\n \"FromPort\": 0,\n \"ToPort\": 65535,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\"\n },\n ],\n }, {\n \"IpProtocol\": \"udp\",\n \"FromPort\": 0,\n \"ToPort\": 65535,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\"\n },\n ],\n }, {\n \"IpProtocol\": \"icmp\",\n \"FromPort\": -1,\n \"ToPort\": -1,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\"\n },\n ],\n }]\n\n try:\n security_group.authorize_egress(IpPermissions=IpPermissions)\n except ClientError as e:\n error_code = e.response[\"Error\"][\"Code\"]\n if error_code == \"InvalidPermission.Duplicate\":\n logger.debug(\"Outbound Permissions for default security group already set\")\n else:\n raise e\n\n try:\n security_group.authorize_ingress(IpPermissions=IpPermissions)\n except ClientError as e:\n error_code = e.response[\"Error\"][\"Code\"]\n if error_code == \"InvalidPermission.Duplicate\":\n logger.debug(\"Inbound Permissions for default security group already set\")\n else:\n raise e\n\n return security_group", "def ec2_start(resource, metadata):\n\n # do minimal provisioning of machine through cloud-init\n # this installs git and bootstraps puppet to provision the rest\n # requires recent ubuntu (14.04/16.04) or RHEL/CentOS 7\n userdata = \"\"\"#cloud-config\npackage_update: true\nhostname: {hostname}\nfqdn: {fqdn}\nmanage_etc_hosts: true\npackages:\n - git\nwrite_files:\n - path: /etc/facter/facts.d/hostgroup.txt\n content: hostgroup=aws\n - path: /etc/facter/facts.d/role.txt\n content: role={role}\nruncmd:\n - git clone {repo} /etc/puppet\n - /etc/puppet/support_scripts/bootstrap-puppet.sh\"\"\".format(\n hostname=metadata['hostname'], fqdn=metadata['fqdn'],\n role=metadata['role'], repo=metadata['repo'])\n\n instances = resource.create_instances(\n ImageId=metadata['ami'],\n MinCount=1,\n MaxCount=1,\n InstanceType=metadata['type'],\n SubnetId=metadata['subnet'],\n SecurityGroupIds=[metadata['secgroup']],\n KeyName=metadata['keypair'],\n UserData=userdata,\n BlockDeviceMappings=[\n {\n 'DeviceName': '/dev/sda1', # root so far, sometimes /dev/xvdh ?\n 'Ebs': {\n 'VolumeSize': 20,\n 'DeleteOnTermination': True,\n 
'VolumeType': 'gp2'\n },\n },\n ]\n )\n\n # not sure if we really need to sleep before tagging but\n # we wait until running anyway which takes much longer than 1 second\n time.sleep(1)\n for instance in instances:\n # first set tags, Name and Role\n instance.create_tags(\n Resources=[instance.id],\n Tags=[\n {\n 'Key': 'Role',\n 'Value': metadata['role']\n },\n {\n 'Key': 'Name',\n 'Value': metadata['fqdn']\n },\n ]\n )\n\n # ensure system is running before we print address to connect to\n instance.wait_until_running()\n # instance.load()\n ec2_status(resource, metadata)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List all instances for a specific region and zone
def listInstancesRegionZone(region,zone):
    print "-"*80
    print "# Region :",region," Zone", zone
    print "-"*80
    instances = getInstancesRegionZone(region,zone)
    if instances:
        for instance in instances:
            print "[",instance.ami_launch_index,"]",instance.ip_address," (",instance.private_ip_address,") ",instance.instance_type," key=",instance.key_name
[ "def instances_by_region(aws_region=config.AWS_AWS_REGION):\n instances = _get_instances(aws_region)\n formatter = InstanceFormatter(instances)\n formatter.display()", "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def all_instances():\n objects = _get_ec2_objects_across_all_regions(_get_instances)\n formatter = InstanceFormatter(objects)\n formatter.display()", "def list_instances(compute, project, zone):\n result = compute.instances().list(project=project, zone=zone).execute()\n if 'items' not in result:\n return []\n return result['items']", "def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations", "def get_region_instances(region, tables, auth):\n table = PrettyTable([\n \"Name\", \"Key-Name\", \"Type\", \"Placement\", \"Public-DNS\",\n \"Private-IP\", \"Instance-ID\", \"State\", \"Launch Time\"\n ])\n table.padding_width = 1\n ec2 = boto.ec2.connect_to_region(\n region.name, aws_access_key_id=auth.aws_access_key_id,\n aws_secret_access_key=auth.aws_secret_access_key)\n if ec2:\n reservations = ec2.get_all_instances()\n if reservations:\n for reservation in reservations:\n for i in reservation.instances:\n try:\n instance_name = i.tags['Name']\n except KeyError:\n instance_name = \"N/A\"\n if i.public_dns_name:\n accessname = i.public_dns_name\n elif i.ip_address:\n accessname = i.ip_address\n else:\n accessname = \"n/a\"\n table.add_row([\n instance_name,\n i.key_name,\n i.instance_type,\n i.placement,\n accessname,\n i.private_ip_address,\n i.id,\n i.state,\n i.launch_time\n ])\n tables[region.name] = table\n return", "def list_instances(self):\n\n response = self.client.service.instances().aggregatedList(\n project=self.client.project_id).execute()\n\n zones = response.get('items', {})\n instances = []\n for zone in zones.values():\n for instance in zone.get('instances', []):\n instances.append(instance)\n\n return instances", "def yield_instances_in_zone(self, zone, instance_filter=None):\n if instance_filter and set(\"\\\"\\\\'\").intersection(instance_filter):\n raise ValueError('Invalid instance filter: %s' % instance_filter)\n page_token = None\n while True:\n params = {'maxResults': 250}\n if instance_filter:\n params['filter'] = 'name eq \"%s\"' % instance_filter\n if page_token:\n params['pageToken'] = page_token\n try:\n resp = self.call_api(\n '/zones/%s/instances' % zone, params=params, deadline=120)\n except net.Error as exc:\n if not page_token and exc.status_code == 400:\n return # no such zone, this is fine...\n raise\n for instance in resp.get('items', []):\n yield instance\n page_token = resp.get('nextPageToken')\n if not page_token:\n break", "def GetAvailabilityZones(region):\r\n ec2 = _Connect(region)\r\n return [z.name for z in ec2.get_all_zones()]", "def get_all_instances(self):\n 
final_result = list()\n\n for region in AWSAccount.get_aws_account().regions.values():\n AWSAccount.set_aws_region(region)\n for instance in self.execute(self.client.describe_instances, \"Reservations\"):\n final_result.extend(instance['Instances'])\n return [EC2Instance(instance) for instance in final_result]", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def list_availability_zones(vm_=None):\n ret = {}\n\n params = {\n \"Action\": \"DescribeAvailabilityZones\",\n \"Filter.0.Name\": \"region-name\",\n \"Filter.0.Value.0\": get_location(vm_),\n }\n result = aws.query(\n params,\n location=get_location(vm_),\n provider=get_provider(),\n opts=__opts__,\n sigver=\"4\",\n )\n\n for zone in result:\n ret[zone[\"zoneName\"]] = zone[\"zoneState\"]\n\n return ret", "def list(self, zone=None):\n print(\"%-25s\\t%-15s\" % (_('host'), _('zone')))\n ctxt = context.get_admin_context()\n services = db.service_get_all(ctxt)\n if zone:\n services = [\n s for s in services if s['availability_zone']['name'] == zone]\n hosts = []\n for srv in services:\n if not [h for h in hosts if h['host'] == srv['host']]:\n hosts.append(srv)\n\n for h in hosts:\n print(\"%-25s\\t%-15s\" % (h['host'], h['availability_zone']['name']))", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def list_nodes(self, ex_zone=None):\n list_nodes = []\n # Use provided zone or default zone\n zone = ex_zone or self.zone\n # Setting ex_zone to 'all' overrides the default zone\n if zone == 'all':\n zone = None\n if zone is None:\n request = '/aggregated/instances'\n elif hasattr(zone, 'name'):\n request = '/zones/%s/instances' % zone.name\n else:\n request = '/zones/%s/instances' % zone\n\n response = self.connection.request(request, method='GET').object\n\n if 'items' in response:\n # The aggregated response returns a dict for each zone\n if zone is None:\n for v in response['items'].values():\n zone_nodes = [self._to_node(i) for i in\n v.get('instances', [])]\n list_nodes.extend(zone_nodes)\n else:\n list_nodes = [self._to_node(i) for i in response['items']]\n return list_nodes", "def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + 
entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr", "def get_all_vpc_instances ( ec2_conn, vpc ) :\n return ec2_conn.get_only_instances( filters = { \"vpc-id\" : vpc.id } )", "def yield_instances_in_zones(self, zones, instance_filter=None):\n for zone in zones:\n for instance in self.yield_instances_in_zone(zone, instance_filter):\n yield instance", "def list_ec2_instances():\n s = Session(**dict((k,v) for k, v in config.AWS_DEPLOY.items()\n if k in ('aws_access_key_id', 'aws_secret_access_key', 'region_name')))\n ec2 = s.resource('ec2')\n for i in ec2.instances.all():\n print(\n \"{0}: state: {1}\\n\\tdns: {2}\\n\\timage: {3}\\n\\tlaunched: {4}\"\n ).format(i.id, i.state, i.public_dns_name, i.image_id, i.launch_time)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create all Cassandra security groups in all regions
def createAllSG():
    for info in conf_HVM:
        ec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone'])
        createSG(ec2,'SG-Cassandra-'+info['region']+'-'+info['zone'],CASSANDRA_RULES)
[ "def _create_allow_all_security_group(self):\n pass", "def create_auth_groups ():\n auth_group_list = ['Cores', 'Coords', 'Vols', 'Super-Coords']\n for auth_group_name in auth_group_list:\n create_auth_group (auth_group_name)\n print 'Group %s created' %(auth_group_name)", "def create_groups ():\n group_list = ['Cores', 'Coords', 'Vols',]\n for group_name in group_list:\n create_group (group_name)", "def all_security_groups():\n objects = _get_ec2_objects_across_all_regions(_get_security_groups)\n formatter = SecurityGroupFormatter(objects)\n formatter.display()", "def create_groups(self, role):\n security_group_names = self._get_all_group_names()\n\n cluster_group_name = self.get_cluster_group_name()\n if not cluster_group_name in security_group_names:\n self.ec2Connection.create_security_group(cluster_group_name, \"Hadoop cluster (%s)\" % (self.name))\n self.ec2Connection.authorize_security_group(cluster_group_name, cluster_group_name)\n # Allow SSH from anywhere\n self.ec2Connection.authorize_security_group(cluster_group_name, ip_protocol=\"tcp\", from_port=22, to_port=22, cidr_ip=\"0.0.0.0/0\")\n\n role_group_name = self.group_name_for_role(role)\n if not role_group_name in security_group_names:\n self.ec2Connection.create_security_group(role_group_name, \"Hadoop %s (%s)\" % (role, self.name))", "def create_security_group(self, sg_name):\n pass", "def ResourceGroupManager_initialiseAllResourceGroups():\n ogre.ResourceGroupManager.getSingleton().initialiseAllResourceGroups()", "def _set_security_group(client, instance_id_list, security_groups):\n logging.info('Setting the security group of instances.')\n for instance_id in instance_id_list:\n client.modify_instance_attribute(InstanceId=instance_id, Groups=security_groups)", "def __create_all_groups(self):\n self.__create_group_red_eyes()\n self.__create_group_green_eyes()\n self.__create_group_blue_eyes()\n self.__create_group_red_chest()\n self.__create_group_green_chest()\n self.__create_group_blue_chest()", "def test_aws_service_api_security_groups_get(self):\n pass", "def createCIDGroups(self: object, body: dict) -> dict:\n # [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/mssp/createCIDGroups\n operation_id = \"createCIDGroups\"\n target_url = f\"{self.base_url}{[ep[2] for ep in ENDPOINTS if operation_id in ep[0]][0]}\".replace(\"?ids={}\", \"\")\n header_payload = self.headers\n body_payload = body\n returned = service_request(caller=self,\n method=\"POST\",\n endpoint=target_url,\n body=body_payload,\n headers=header_payload,\n verify=self.ssl_verify\n )\n return returned", "def get_all_security_groups(self, full_information=False):\n final_result = list()\n\n for region in AWSAccount.get_aws_account().regions.values():\n AWSAccount.set_aws_region(region)\n for ret in self.execute(self.client.describe_security_groups, \"SecurityGroups\"):\n obj = EC2SecurityGroup(ret)\n if full_information is True:\n raise NotImplementedError()\n\n final_result.append(obj)\n\n return final_result", "def get_create_authorize_security_group(client, vpc_id, seg_name, lst_ports, ingress_cidr, group_desc):\n group_setup = False\n ports_already_setup = []\n segs = client.describe_security_groups()\n for g in segs[\"SecurityGroups\"]:\n if g[\"GroupName\"] == seg_name:\n group_setup = True\n security_group_id = g[\"GroupId\"]\n for p in g[\"IpPermissions\"]:\n for port in lst_ports:\n if port.get(\"p_from\", None) == p[\"FromPort\"] and port.get(\"p_to\", None) == p[\"ToPort\"]:\n # ports already setup\n 
ports_already_setup.append(port)\n if len(lst_ports) == len(ports_already_setup):\n break\n\n if not group_setup:\n print(\"Creating security group {}\".format(seg_name))\n response = client.create_security_group(VpcId=vpc_id, GroupName=seg_name, Description=group_desc)\n security_group_id = response[\"GroupId\"]\n else:\n print(\"Security group {} exists, GroupId is {}\".format(seg_name, security_group_id))\n\n ports_to_setup = [x for x in lst_ports if x not in ports_already_setup]\n if len(ports_to_setup):\n for p in ports_to_setup:\n response = client.authorize_security_group_ingress(GroupId=security_group_id, IpPermissions=[\n {'FromPort': p[\"p_from\"], 'ToPort': p[\"p_to\"], 'IpProtocol': 'tcp',\n 'IpRanges': [{'CidrIp': ingress_cidr}]}])\n assert response[\"ResponseMetadata\"][\"HTTPStatusCode\"] == 200\n return security_group_id", "def list_secgroups(self, name=None):", "def module_create_all_tenant(self):\n self.test_runner.run_module_create_all_tenant()", "def init_valet_groups(self):\n\n for rk, r in self.stack.items():\n properties = r.get(\"properties\", {})\n metadata = properties.get(\"metadata\", {})\n\n if len(metadata) > 0:\n valet_rules = metadata.get(\"valet_groups\", None)\n\n if valet_rules is not None and valet_rules != \"\":\n rule_list = []\n if isinstance(valet_rules, six.string_types):\n rules = valet_rules.split(\",\")\n for gr in rules:\n rule_list.append(gr.strip())\n else:\n self.status = \"incorrect valet group metadata format\"\n self.logger.error(self.status)\n return\n\n # Check rule validation of valet_groups.\n self.status = self.resource.check_valid_rules(self.tenant_id,\n rule_list,\n use_ex=self.use_dha)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return\n\n self.status = self._make_valet_groups(properties.get(\"name\"),\n properties[\"availability_zone\"][0],\n rule_list)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return\n\n # Check and create server groups if they do not exist.\n scheduler_hints = properties.get(\"scheduler_hints\", {})\n if len(scheduler_hints) > 0:\n for hint_key in scheduler_hints.keys():\n if hint_key == \"group\":\n hint = scheduler_hints[hint_key]\n self.status = self._make_group(properties.get(\"name\"), hint)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return", "def create_security_group(ec2_client):\r\n\r\n print(f\"creating RDS security group with name {get_config_item('security_group_name')}\")\r\n\r\n return ec2_client.create_security_group(\r\n GroupName=get_config_item('security_group_name'),\r\n Description='RDS security group for public access',\r\n VpcId=get_config_item('security_group_vpc_id')\r\n )", "def do_secgroup_list(cs, args):\n wrapper = cs.security_groups.list()\n sec_grps = wrapper.items\n while (wrapper.next):\n wrapper = cs.security_groups.list()\n sec_grps += wrapper.items\n\n utils.print_list(sec_grps, ['id', 'name', 'rules', 'instance_id'])", "def _generate_ec2_instance_and_sg(resource):\n for instance in resource.instances.all():\n for security_group in instance.security_groups:\n yield instance, security_group" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create all key pairs in all regions
def createAllKP():
    if not os.path.exists(keysDir):
        os.makedirs(keysDir)
    for info in conf_HVM:
        keyName = 'Key-'+info['region']+'-'+info['zone']
        try:
            os.remove(keysDir+'/'+keyName+'.pem')
        except OSError:
            pass
        print "Key creation :",keyName
        ec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone'])
        # check if the key pair exists
        kps = [kp for kp in ec2.get_all_key_pairs() if kp.name == keyName]
        if kps:
            ec2.delete_key_pair(keyName)
        key = ec2.create_key_pair(keyName)
        key.save(keysDir)
[ "def createAllSG():\n\tfor info in conf_HVM:\n\t\tec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone'])\n\t\tcreateSG(ec2,'SG-Cassandra-'+info['region']+'-'+info['zone'],CASSANDRA_RULES)", "def setup_space_keys(cls):\n if cls.KEYS:\n return\n\n from pkg_resources import iter_entry_points\n\n for entry_point in iter_entry_points(group=cls.CATKIN_SPACES_GROUP):\n ep_dict = entry_point.load()\n cls.STORED_KEYS.append(entry_point.name + '_space')\n cls.SPACES[entry_point.name] = ep_dict\n cls._create_space_methods(entry_point.name)\n\n cls.KEYS = cls.STORED_KEYS + cls.EXTRA_KEYS", "def init_region ( aws, region_name, aws_account_type, init_params ) :\n ec2_conn = aws.ec2_conn( )\n keypair_savedir = os.environ[ 'PWD' ]\n print \"Creating new keypairs for region \" + region_name\n for keytype in init_params.get( 'keypairs', [] ) :\n keypair_name = get_keypair_name( aws_account_type, region_name, keytype )\n keypair = ec2_conn.get_key_pair( keypair_name )\n if keypair :\n print 'Keypair ' + keypair_name + ' already exists. Skipping.'\n else :\n keypair = ec2_conn.create_key_pair( keypair_name )\n keypair.save( keypair_savedir )\n keypair_filename = keypair_savedir + '/' + keypair_name + '.pem'\n print 'Created keypair ' + keypair_filename\n store_keypair( s3_infra_conn = aws.s3_infrastructure_conn( ),\n region_name = region_name,\n aws_account_type = aws_account_type,\n keypair_name = get_keypair_keypath( aws_account_type ) + keypair_name,\n keypair_filename = keypair_filename )\n print 'Stored keypair in S3 at: ' + get_keypair_keypath( aws_account_type )\n os.remove( keypair_filename )\n\n if init_params.get( 'init-deployment', 'YES' ) == 'YES' :\n print \"Creating Deployment security group.\"\n deploy_secgrp = ec2_conn.create_security_group( get_deployment_secgrp_name( ),\n \"Used by the deployment server.\" )\n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = hbo_cidr_list ) \n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = hbo_cidr_list ) \n\n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = build_server_cidr ) \n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = build_server_cidr ) \n\n if init_params.get( 'init-ami-update', 'YES' ) == 'YES' :\n print \"Creating ami-update security group.\"\n amiupdate_secgrp = ec2_conn.create_security_group( get_amiupdate_secgrp_name( ),\n \"Used by the ami update instances.\" )\n amiupdate_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = hbo_cidr_list ) \n amiupdate_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = hbo_cidr_list )", "def generate_all_keys(self) -> Iterator[StorageTestData]:\n yield from self.all_keys_for_lifetimes()\n yield from self.all_keys_for_usage_flags()\n yield from self.all_keys_for_types()\n yield from self.all_keys_for_algorithms()", "def create_key ():", "def _create_region_mapping():\n df_csa_region = df_csa.drop(\n columns=['OBJECTID', 'CITY_TYPE', 'LCITY', 'COMMUNITY', 'SOURCE',\n 'ShapeSTArea', 'ShapeSTLength']\n ).copy()\n df_csa_region.rename(columns={'LABEL': AREA}, inplace=True)\n df_csa_region[REGION] = df_csa_region.apply(\n lambda x: determine_region(x, df_spa, 'SPA_NAME', AREA,\n MANUAL_REGION, csa_scale=0.8),\n axis='columns',\n )\n df_csa_region.drop(columns='geometry', inplace=True)\n df_csa_region.sort_values(AREA, inplace=True)\n 
df_csa_region.set_index(AREA, inplace=True)\n output = {}\n for area, region in df_csa_region.loc[:, REGION].items():\n output[area] = region\n return output", "def _generate_helm_keyspaces(self):\n starting_cell_index = 0\n if len(self.app_options.cells) > 1:\n starting_cell_index = self.cell_epsilon\n keyspaces = []\n for ks_index, ks in enumerate(self.app_options.keyspaces):\n keyspace = dict(name=ks['name'], shards=[])\n keyspaces.append(keyspace)\n\n for shard_index, shard_name in enumerate(\n sharding_utils.get_shard_names(ks['shard_count'])):\n shard = dict(\n name=shard_name,\n tablets=[dict(\n type='replica',\n vttablet=dict(\n replicas=ks['replica_count'],\n ),\n )],\n )\n uid_base = (\n (100 + shard_index * self.shard_epsilon) + starting_cell_index + (\n ks_index * self.keyspace_epsilon))\n shard['tablets'][0]['uidBase'] = uid_base\n if ks['rdonly_count']:\n shard['tablets'].append(dict(\n type='rdonly',\n uidBase=uid_base + ks['replica_count'],\n vttablet=dict(\n replicas=ks['rdonly_count'],\n )))\n keyspace['shards'].append(shard)\n return keyspaces", "def create_keys(i):\n sk = elgamal.create_sk()\n secret_keys.append(sk)\n\n keys = [0, 0]\n\n keys[x[i]] = elgamal.gen(sk)\n keys[1 - x[i]] = elgamal.o_gen()\n\n public_keys.append(keys)", "def create_all_taxonomic_keys(point_locations: dict, location_species: dict, location_range_species: dict,\n trait_data: dict, all_taxa_data: dict) -> dict:\n\n all_keys = {}\n\n # find all unique sets of species\n species_sets = set()\n for p in point_locations:\n loc = point_locations[p]\n all_species = set()\n all_species |= location_species[loc.name]\n if loc.n_direct_children() > 0:\n for c in loc.direct_children():\n all_species |= fetch_child_data(c, location_species)\n\n range_species = set(find_species_by_name(s) for s in location_range_species[loc])\n all_species |= range_species\n if len(all_species) > 0:\n species_sets.add(frozenset(all_species))\n\n # create keys for each unique set of species\n warnings = set()\n for sp_set in species_sets:\n taxa_data = {}\n for s in sp_set:\n try:\n taxa_data[\"Male \" + s.binomial()] = all_taxa_data[\"โ™‚ Male {{\" + s.species + \"}}\"]\n taxa_data[\"Female \" + s.binomial()] = all_taxa_data[\"โ™€ Female {{\" + s.species + \"}}\"]\n except KeyError:\n report_error(\"Missing taxonomic key data: \" + s.species)\n\n all_keys[sp_set], new_warning = TMB_TaxKeyGen.generate_taxonomic_key(trait_data, taxa_data, verbose=False)\n warnings |= new_warning\n\n # global key for all species\n all_keys[\"all\"], new_warning = TMB_TaxKeyGen.generate_taxonomic_key(trait_data, all_taxa_data, verbose=False)\n warnings |= new_warning\n\n for w in sorted(warnings):\n report_error(w)\n\n return all_keys", "def attach_mappings(template):\n template.add_mapping(\n \"Region2Principal\",\n {u'ap-northeast-1': {u'EC2Principal': u'ec2.amazonaws.com',\n u'OpsWorksPrincipal': u'opsworks.amazonaws.com'},\n u'ap-northeast-2': {u'EC2Principal': u'ec2.amazonaws.com',\n u'OpsWorksPrincipal': u'opsworks.amazonaws.com'},\n u'ap-south-1': {u'EC2Principal': u'ec2.amazonaws.com',\n u'OpsWorksPrincipal': u'opsworks.amazonaws.com'},\n u'ap-southeast-1': {u'EC2Principal': u'ec2.amazonaws.com',\n u'OpsWorksPrincipal': u'opsworks.amazonaws.com'},\n u'ap-southeast-2': {u'EC2Principal': u'ec2.amazonaws.com',\n u'OpsWorksPrincipal': u'opsworks.amazonaws.com'},\n u'cn-north-1': {u'EC2Principal': u'ec2.amazonaws.com.cn',\n u'OpsWorksPrincipal': u'opsworks.amazonaws.com.cn'},\n u'eu-central-1': {u'EC2Principal': u'ec2.amazonaws.com',\n 
u'OpsWorksPrincipal': u'opsworks.amazonaws.com'},\n u'eu-west-1': {u'EC2Principal': u'ec2.amazonaws.com',\n u'OpsWorksPrincipal': u'opsworks.amazonaws.com'},\n u'sa-east-1': {u'EC2Principal': u'ec2.amazonaws.com',\n u'OpsWorksPrincipal': u'opsworks.amazonaws.com'},\n u'us-east-1': {u'EC2Principal': u'ec2.amazonaws.com',\n u'OpsWorksPrincipal': u'opsworks.amazonaws.com'},\n u'us-west-1': {u'EC2Principal': u'ec2.amazonaws.com',\n u'OpsWorksPrincipal': u'opsworks.amazonaws.com'},\n u'us-west-2': {u'EC2Principal': u'ec2.amazonaws.com',\n u'OpsWorksPrincipal': u'opsworks.amazonaws.com'}}\n )\n\n template.add_mapping(\n \"Region2KeyPair\",\n {u'ap-northeast-1': {u'key': 'tokyo'},\n u'ap-northeast-2': {u'key': 'seoul'},\n u'ap-south-1': {u'key': 'mumbai'},\n u'ap-southeast-1': {u'key': 'singapore'},\n u'ap-southeast-2': {u'key': 'sydney'},\n u'cn-north-1': {u'key': 'NO ACCESS TO CHINA FOR US USERS'},\n u'eu-central-1': {u'key': 'frankfurt'},\n u'eu-west-1': {u'key': 'ireland'},\n u'eu-west-2': {u'key': 'london'},\n u'sa-east-1': {u'key': 'sao-paulo'},\n u'ca-central-1': {u'key': 'central-canada'},\n u'us-east-1': {u'key': 'north-virginia'},\n u'us-east-2': {u'key': 'ohio'},\n u'us-west-1': {u'key': 'northern-california'},\n u'us-west-2': {u'key': 'oregon'}}\n )\n\n template.add_mapping(\n \"Region2ARNPrefix\",\n {u'ap-northeast-1': {u'ARNPrefix': u'arn:aws:'},\n u'ap-northeast-2': {u'ARNPrefix': u'arn:aws:'},\n u'ap-south-1': {u'ARNPrefix': u'arn:aws:'},\n u'ap-southeast-1': {u'ARNPrefix': u'arn:aws:'},\n u'ap-southeast-2': {u'ARNPrefix': u'arn:aws:'},\n u'cn-north-1': {u'ARNPrefix': u'arn:aws-cn:'},\n u'eu-central-1': {u'ARNPrefix': u'arn:aws:'},\n u'eu-west-1': {u'ARNPrefix': u'arn:aws:'},\n u'sa-east-1': {u'ARNPrefix': u'arn:aws:'},\n u'us-east-1': {u'ARNPrefix': u'arn:aws:'},\n u'us-west-1': {u'ARNPrefix': u'arn:aws:'},\n u'us-west-2': {u'ARNPrefix': u'arn:aws:'}}\n )\n\n template.add_mapping(\n \"AWSInstanceType2Arch\",\n {u'c1.medium': {u'Arch': u'PV64'},\n u'c1.xlarge': {u'Arch': u'PV64'},\n u'c3.2xlarge': {u'Arch': u'HVM64'},\n u'c3.4xlarge': {u'Arch': u'HVM64'},\n u'c3.8xlarge': {u'Arch': u'HVM64'},\n u'c3.large': {u'Arch': u'HVM64'},\n u'c3.xlarge': {u'Arch': u'HVM64'},\n u'c4.2xlarge': {u'Arch': u'HVM64'},\n u'c4.4xlarge': {u'Arch': u'HVM64'},\n u'c4.8xlarge': {u'Arch': u'HVM64'},\n u'c4.large': {u'Arch': u'HVM64'},\n u'c4.xlarge': {u'Arch': u'HVM64'},\n u'cc2.8xlarge': {u'Arch': u'HVM64'},\n u'cr1.8xlarge': {u'Arch': u'HVM64'},\n u'd2.2xlarge': {u'Arch': u'HVM64'},\n u'd2.4xlarge': {u'Arch': u'HVM64'},\n u'd2.8xlarge': {u'Arch': u'HVM64'},\n u'd2.xlarge': {u'Arch': u'HVM64'},\n u'g2.2xlarge': {u'Arch': u'HVMG2'},\n u'g2.8xlarge': {u'Arch': u'HVMG2'},\n u'hi1.4xlarge': {u'Arch': u'HVM64'},\n u'hs1.8xlarge': {u'Arch': u'HVM64'},\n u'i2.2xlarge': {u'Arch': u'HVM64'},\n u'i2.4xlarge': {u'Arch': u'HVM64'},\n u'i2.8xlarge': {u'Arch': u'HVM64'},\n u'i2.xlarge': {u'Arch': u'HVM64'},\n u'm1.large': {u'Arch': u'PV64'},\n u'm1.medium': {u'Arch': u'PV64'},\n u'm1.small': {u'Arch': u'PV64'},\n u'm1.xlarge': {u'Arch': u'PV64'},\n u'm2.2xlarge': {u'Arch': u'PV64'},\n u'm2.4xlarge': {u'Arch': u'PV64'},\n u'm2.xlarge': {u'Arch': u'PV64'},\n u'm3.2xlarge': {u'Arch': u'HVM64'},\n u'm3.large': {u'Arch': u'HVM64'},\n u'm3.medium': {u'Arch': u'HVM64'},\n u'm3.xlarge': {u'Arch': u'HVM64'},\n u'm4.10xlarge': {u'Arch': u'HVM64'},\n u'm4.2xlarge': {u'Arch': u'HVM64'},\n u'm4.4xlarge': {u'Arch': u'HVM64'},\n u'm4.large': {u'Arch': u'HVM64'},\n u'm4.xlarge': {u'Arch': u'HVM64'},\n u'r3.2xlarge': {u'Arch': 
u'HVM64'},\n u'r3.4xlarge': {u'Arch': u'HVM64'},\n u'r3.8xlarge': {u'Arch': u'HVM64'},\n u'r3.large': {u'Arch': u'HVM64'},\n u'r3.xlarge': {u'Arch': u'HVM64'},\n u't1.micro': {u'Arch': u'PV64'},\n u't2.large': {u'Arch': u'HVM64'},\n u't2.medium': {u'Arch': u'HVM64'},\n u't2.micro': {u'Arch': u'HVM64'},\n u't2.nano': {u'Arch': u'HVM64'},\n u't2.small': {u'Arch': u'HVM64'}}\n )\n template.add_mapping(\n \"AWSRegionArch2Centos7LinuxAMI\",\n {u'ap-northeast-1': {u'HVM64': u'ami-571e3c30',\n u'HVMG2': u'NOT_SUPPORTED',\n u'PV64': u'NOT_SUPPORTED'},\n u'ap-northeast-2': {u'HVM64': u'ami-97cb19f9',\n u'HVMG2': u'NOT_SUPPORTED',\n u'PV64': u'NOT_SUPPORTED'},\n u'ap-south-1': {u'HVM64': u'ami-11f0837e',\n u'HVMG2': u'NOT_SUPPORTED',\n u'PV64': u'NOT_SUPPORTED'},\n u'ap-southeast-1': {u'HVM64': u'ami-30318f53',\n u'HVMG2': u'NOT_SUPPORTED',\n u'PV64': u'NOT_SUPPORTED'},\n u'ap-southeast-2': {u'HVM64': u'ami-24959b47',\n u'HVMG2': u'NOT_SUPPORTED',\n u'PV64': u'NOT_SUPPORTED'},\n u'cn-north-1': {u'HVM64': u'NOT_SUPPORTED',\n u'HVMG2': u'NOT_SUPPORTED',\n u'PV64': u'NOT_SUPPORTED'},\n u'eu-central-1': {u'HVM64': u'ami-7cbc6e13',\n u'HVMG2': u'NOT_SUPPORTED',\n u'PV64': u'NOT_SUPPORTED'},\n u'eu-west-1': {u'HVM64': u'ami-0d063c6b',\n u'HVMG2': u'NOT_SUPPORTED',\n u'PV64': u'NOT_SUPPORTED'},\n u'sa-east-1': {u'HVM64': u'ami-864f2dea',\n u'HVMG2': u'NOT_SUPPORTED',\n u'PV64': u'NOT_SUPPORTED'},\n u'us-east-1': {u'HVM64': u'ami-ae7bfdb8',\n u'HVMG2': u'NOT_SUPPORTED',\n u'PV64': u'NOT_SUPPORTED'},\n u'us-west-1': {u'HVM64': u'ami-7c280d1c',\n u'HVMG2': u'NOT_SUPPORTED',\n u'PV64': u'NOT_SUPPORTED'},\n u'us-west-2': {u'HVM64': u'ami-0c2aba6c',\n u'HVMG2': u'NOT_SUPPORTED',\n u'PV64': u'NOT_SUPPORTED'}}\n )\n\n template.add_mapping(\n \"AWSRegionArch2AmazonLinuxAMI\",\n {u'ap-northeast-1': {u'HVM64': u'ami-374db956',\n u'HVMG2': u'ami-e0ee1981',\n u'PV64': u'ami-3e42b65f'},\n u'ap-northeast-2': {u'HVM64': u'ami-2b408b45',\n u'HVMG2': u'NOT_SUPPORTED',\n u'PV64': u'NOT_SUPPORTED'},\n u'ap-south-1': {u'HVM64': u'ami-ffbdd790',\n u'HVMG2': u'ami-f5b2d89a',\n u'PV64': u'NOT_SUPPORTED'},\n u'ap-southeast-1': {u'HVM64': u'ami-a59b49c6',\n u'HVMG2': u'ami-0cb5676f',\n u'PV64': u'ami-df9e4cbc'},\n u'ap-southeast-2': {u'HVM64': u'ami-dc361ebf',\n u'HVMG2': u'ami-a71c34c4',\n u'PV64': u'ami-63351d00'},\n u'cn-north-1': {u'HVM64': u'ami-8e6aa0e3',\n u'HVMG2': u'NOT_SUPPORTED',\n u'PV64': u'ami-77559f1a'},\n u'eu-central-1': {u'HVM64': u'ami-ea26ce85',\n u'HVMG2': u'ami-7f04ec10',\n u'PV64': u'ami-6527cf0a'},\n u'eu-west-1': {u'HVM64': u'ami-f9dd458a',\n u'HVMG2': u'ami-b9bd25ca',\n u'PV64': u'ami-4cdd453f'},\n u'sa-east-1': {u'HVM64': u'ami-6dd04501',\n u'HVMG2': u'NOT_SUPPORTED',\n u'PV64': u'ami-1ad34676'},\n u'us-east-1': {u'HVM64': u'ami-6869aa05',\n u'HVMG2': u'ami-2e5e9c43',\n u'PV64': u'ami-2a69aa47'},\n u'us-west-1': {u'HVM64': u'ami-31490d51',\n u'HVMG2': u'ami-fd76329d',\n u'PV64': u'ami-a2490dc2'},\n u'us-west-2': {u'HVM64': u'ami-7172b611',\n u'HVMG2': u'ami-83b770e3',\n u'PV64': u'ami-7f77b31f'}}\n )\n\n return template", "def test_aws_service_api_keypair_generate_post(self):\n pass", "def _create_keys(bucket_name, keys=[]):\n bucket = connection.create_bucket(bucket_name)\n\n for s in keys:\n key = bucket.new_key(s)\n key.set_contents_from_string(s)\n\n return bucket", "def get_regions(self, key):\n pass", "def createAllDictionaries(self):\n self.makeSentenceLengths()\n self.makeWords()\n self.makeStems()\n self.makeWordLengths()", "def make_consistent(self):\r\n\r\n for key in self.get_keys():\r\n 
self.eliminate_key(key)\r\n\r\n for i_temp in self.indexes(): #i will be a note index\r\n for j_temp in self.get_keys_from_note(i_temp):\r\n if self.key_dict_contains(j_temp):\r\n self.add_key(j_temp,Index(i_temp))\r\n## self.key_dict[j_temp].add(str(Index(i_temp)))\r\n else:\r\n self.initiate_new_key(j_temp,Index(i_temp))", "def Dictionary_create(nMarkers, markerSize):\n pass", "def _create_keys(bucket=None, keys=[]):\n if bucket is None:\n bucket = get_new_bucket()\n\n for s in keys:\n key = bucket.new_key(s)\n key.set_contents_from_string(s)\n\n return bucket", "def generateNewRegion(self):\n \n # regions related with \"near/within\" preposition\n for (regionName,dist) in self.regionNear:\n for region in self.proj.rfi.regions:\n if region.name == regionName:\n oldRegion = region\n newRegion = oldRegion.findRegionNear(dist,mode=\"overEstimate\",name='near$'+regionName+'$'+str(dist))\n self.proj.rfi.regions.append(newRegion)\n \n \n # regions related with \"between\" preposition\n for (regionNameA,regionNameB) in self.regionBetween:\n\n for region in self.proj.rfi.regions:\n if region.name == regionNameA:\n regionA = region\n elif region.name == regionNameB:\n regionB = region\n \n newRegion = findRegionBetween(regionA,regionB,name='between$'+regionNameA+'$and$'+regionNameB+\"$\")\n self.proj.rfi.regions.append(newRegion)", "def fill_container(self, key_val):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Interpret for left != right
def _op_ne(self, left: Any, right: Any) -> BoolOrIter:
    out = self._op_eq(left, right)
    if isinstance(out, (numpy.ndarray, Series)):
        neout = ~out
        # neout[pandas.isna(out)] = numpy.nan
        return neout
    # out is always a numpy.ndarray
    return not out  # pragma: no cover
[ "def nexact(cls, lhs, rhs):\n return lhs != rhs", "def match_LR(left,right):\n\n if left[1] == right[3][::-1]:\n return True\n\n return False", "def __ne__(self, other):\n return tf.math.not_equal(self._ordinals, other.ordinal())", "def same_level(left, right):\n for i in range(min(len(left), len(right))):\n l = left[-i-1]\n r = right[i]\n if l == '(' and r == ')':\n return True\n if l == r:\n return False", "def _isLeft(P0, P1, P2):\n return (P1.x - P0.x)*(P2.y - P0.y) - (P2.x - P0.x)*(P1.y - P0.y)", "def _isLeft(a, b, c):\n return ((b[0] - a[0])*(c[1] - a[1]) - (b[1] - a[1])*(c[0] - a[0])) > 0", "def is_left_of(self, other):\n return self.x < other.x", "def equal_mirror(t, s):\n if t is None and s is None:\n return True\n if t is None or s is None:\n return False\n if t.value != s.value:\n return False\n return equal_mirror(t.left, s.right) and equal_mirror(t.right, s.left)", "def test_not_equal(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"notEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::ne\"},\n )", "def opposite(self, other):\n return self.pth == other.pth and self.action != other.action", "def eq(left, right):\n return is_zero(dist(left, right))", "def __ne__(self, other):\r\n if isinstance(other, vec4):\r\n return self.x!=other.x or self.y!=other.y or self.z!=other.z\r\n else:\r\n return 1", "def __ne__(self, other):\r\n\t\treturn (self.type != other.type or self.value != other.value)", "def logical_oper(): # ๋…ผ๋ฆฌ ์—ฐ์‚ฐ์ž\n print ((\"=====๋…ผ๋ฆฌ์—ฐ์‚ฐ \"))\n\n #๋…ผ๋ฆฌ๊ณฑ (and): ๋‘˜ ๋‹ค true์ผ๋•Œ true\n #๋…ผ๋ฆฌํ•ฉ (or): ๋‘˜ ์ค‘ ํ•˜๋‚˜๋ฉด true์ด๋ฉด true\n #๋…ผ๋ฆฌ๋ถ€์ • (not): true <-> false\n\n a,b=20,30\n print (not a<b) #a<b์˜ ๋…ผ๋ฆฌ๋ฅผ ๋ถ€์ •\n print(a<b and a!=b) #a<b ์˜ ๋…ผ๋ฆฌ๊ฐ’๊ณผ a!=b์˜ ๋…ผ๋ฆฌ๊ฐ’์˜ ๋…ผ๋ฆฌ๊ณฑ\n print(a==b or a!=b) #a==b์˜ ๋…ผ๋ฆฌ๊ฐ’๊ณผ a!=b์˜ ๋…ผ๋ฆฌ๊ฐ’์˜ ๋…ผ๋ฆฌํ•ฉ", "def __ne__(self, other):\n return not self.vehid == other.vehid", "def __ne__(self, other):\n if isinstance(other, quat):\n return self.x!=other.x or self.y!=other.y or self.z!=other.z or self.w!=other.w\n else:\n return 1", "def NOR(A, B):\n if A==0 and B==0:\n return 1\n else:\n return 0", "def imright(h1, h2):\n return h1 - h2 == 1", "def not_equal(\n left_node: NodeInput,\n right_node: NodeInput,\n auto_broadcast: str = \"NUMPY\",\n name: Optional[str] = None,\n) -> Node:\n return _get_node_factory_opset1().create(\n \"NotEqual\",\n [left_node, right_node],\n {\"auto_broadcast\": auto_broadcast.upper()},\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recycle left right operands to each other
def _recycle_left_right(left: Any, right: Any) -> Tuple:
    try:
        left = recycle_value(left, length_of(right))
    except DataUnrecyclable:
        right = recycle_value(right, length_of(left))
    return left, right
[ "def right_func(self, left):\n if self.right is None:\n if self.left is None:\n new = copy(self)\n new.left = left\n return new\n else:\n raise SyntaxError(\"Infix operator already has its \"\n \"left argument\")\n else:\n return self.function(left, self.right)", "def __radd__(self, left):\n return self.value() + left", "def __rshift__(self, other: Any) -> ColumnOperators:\n return self.operate(rshift, other)", "def apply(self) -> Operation:\n op = self.popleft()\n op()\n return op", "def leftToRight(self):\n pass", "def commutator(left_operator, right_operator):\n if not isinstance(left_operator, type(right_operator)):\n raise TypeError('operator_a and operator_b are not of the same type.')\n valueable_type = (QubitOperator, FermionOperator, QubitExcitationOperator)\n if not isinstance(left_operator, valueable_type):\n raise TypeError(\n \"Operator should be QubitOperator, FermionOperator or QubitExcitationOperator.\"\n )\n\n result = left_operator * right_operator\n result -= right_operator * left_operator\n return result", "def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == \",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n # pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, 
rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value", "def _rconcat(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(concat_op, other)", "def left_func(self, right):\n\n if self.left is None:\n if self.right is None:\n new = copy(self)\n new.right = right\n return new\n else:\n raise SyntaxError(\"Infix operator already has its \"\n \"right argument\")\n else:\n return self.function(self.left, right)", "def __rshift__(self, other):\n return rule(self, other)", "def compose_left_unary(*args) -> base_types.GraphType:\n return compose_unary(*reversed(args))", "def re_model_trees(trees):\n left = trees.pop(0).evaluate(OPERATORS)\n for right in trees:\n left = left & right.evaluate(OPERATORS)\n return left", "def _CombineBinaryExpressions(self, operator):\n operator_lower = operator.lower()\n\n item_index = 1\n number_of_items = len(self._stack) - 1\n while item_index < number_of_items:\n item = self._stack[item_index]\n if (isinstance(item, expressions.BinaryExpression) and\n item.operator.lower() == operator_lower and not item.args):\n previous_item = self._stack[item_index - 1]\n next_item = self._stack[item_index + 1]\n\n if (isinstance(previous_item, expressions.Expression) and\n isinstance(next_item, expressions.Expression)):\n item.AddOperands(previous_item, next_item)\n\n self._stack.pop(item_index + 1)\n self._stack.pop(item_index - 1)\n\n item_index -= 2\n number_of_items -= 2\n\n item_index += 1\n if item_index == 0:\n item_index += 1", "def chain_left_right(left, right):\n\n # Glue together the left & right hand lists\n\n if not right:\n return left\n\n if not isinstance(left, list):\n left = [\n left,\n ]\n\n if not isinstance(right, list):\n right = [\n right,\n ]\n\n left.extend(right)\n return left", "def _binary_new_copy(self, left, right):\n return self._binary_evaluate(left, right)", "def __mul__(self,other):\n return compositeORGenerator(left = self, right = other)", "def reverse_distribute(node: NodeT) -> NodeT:\n\n def visitor(node: NodeT, left_distribute: bool) -> NodeT:\n \"\"\"Apply left- or right-distributive property in reverse, if possible\n\n Args:\n node: ir.Node to process.\n left_distribute: Whether to apply *left*-distributive property.\n\n Returns:\n Processed node.\n \"\"\"\n if isinstance(node, ir.AddSub):\n items = OrderedDict() # type: Dict[ir.Node, List[Tuple[str, ir.Node]]]\n new_operators = []\n new_operands = []\n for operator, operand in zip(('+',) + getattr(node, 'operator'),\n getattr(node, 'operand')):\n if (operator == '+' and isinstance(operand, ir.MulDiv) and\n getattr(operand, 'operator') == ('*',)):\n if left_distribute:\n coeff, item = getattr(operand, 'operand')\n else:\n item, coeff = getattr(operand, 'operand')\n items.setdefault(coeff, []).append((operator, item))\n 
else:\n new_operators.append(operator)\n new_operands.append(operand)\n for coeff, item in items.items():\n operator, operand = zip(*item)\n assert operator[0] == '+'\n new_operators.append(operator[0])\n if len(operand) > 1:\n new_item = ir.AddSub(operator=operator[1:], operand=operand)\n else:\n new_item = operand[0]\n if left_distribute:\n children = coeff, new_item\n else:\n children = new_item, coeff\n new_operands.append(ir.MulDiv(operator=('*',), operand=children))\n if len(new_operands) > 1:\n assert new_operators[0] == '+'\n new_node = ir.AddSub(operator=tuple(new_operators[1:]),\n operand=tuple(new_operands))\n if new_node != node:\n return new_node # type: ignore\n elif new_operands and new_operands[0] != node:\n return new_operands[0]\n return node\n\n return node.visit(visitor, True).visit(visitor, False)", "def right_shift(lhs, rhs):\n return _make.right_shift(lhs, rhs)", "def __rshift__(self, other):\n other.set_upstream(self)\n # return other so a >> b >> c works\n return other" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the sparsity penalty on network activations combined as a sum
def get_sparsity_penalty(nnet, inputs, sparsity, mode="mean", deterministic=False):
    assert mode in ("mean", "l1")
    rho = sparsity
    penalty = 0
    eps = 0.0001  # for numerical stability
    for layer in nnet.all_layers:
        if layer.isactivation:
            activation = lasagne.layers.get_output(layer, inputs=inputs, deterministic=deterministic)
            if mode == "mean":
                if layer.isrelu:
                    avg_activation = T.mean(T.gt(activation, T.zeros_like(activation)), axis=0, dtype='floatX')
                if layer.issigmoid:
                    avg_activation = T.mean(activation, axis=0, dtype='floatX')
                KL_div = T.sum((rho+eps) * (T.log(rho+eps) - T.log(avg_activation+eps)) + (1-rho+eps) * (T.log(1-rho+eps) - T.log(1-avg_activation+eps)), dtype='floatX')
                penalty = penalty + KL_div
            if mode == "l1":
                penalty = penalty + T.sum(abs(activation), dtype='floatX')
    return T.cast(penalty, dtype='floatX')
[ "def sparsity_penalty(rbm, hidden_units, v0_vmap, target):\n # complete units lists\n hidden_units = rbm.complete_units_list(hidden_units)\n \n # complete the supplied vmap\n v0_vmap = rbm.complete_vmap(v0_vmap)\n \n hidden_vmap = rbm.mean_field(hidden_units, v0_vmap)\n\n penalty_terms = []\n for hu in hidden_units:\n mean_activation = T.mean(hidden_vmap[hu], 0) # mean over minibatch dimension\n penalty_terms.append(T.sum(T.nnet.binary_crossentropy(mean_activation, target))) # sum over the features\n\n total_penalty = sum(penalty_terms)\n return total_penalty", "def l2_weight_penalty(self):\r\n #######################################################################\r\n # ** START OF YOUR CODE **\r\n #######################################################################\r\n params_list = [] \r\n for param in self.parameters():\r\n params_list.append(param.view(-1))\r\n torch_params = torch.cat(params_list)\r\n \r\n return (torch.sqrt(torch.pow(torch_params, 2).sum()))\r\n #######################################################################\r\n # ** END OF YOUR CODE **\r\n #######################################################################\r", "def params_penalty(self):\n params_penalty = 0\n if self.kernels_penalty is not None:\n params_penalty += self.kernels_penalty(self.kernels)\n if self.biases_penalty is not None:\n params_penalty += self.biases_penalty(self.biases)\n return params_penalty", "def sparsity(self):\n return 1 - self.inter_num / self.user_num / self.item_num", "def penalty(self):\n return 0", "def sparsity(self):\n nelem = self._nelem\n\n if nelem is None:\n self._logger.warning(\n \"this matrix will be considered as dense as it has not had its number of elements defined\")\n nelem = self._size\n\n return 1.0 - nelem / self._size", "def compute_sparsity(stim):\n sparsity = np.sum(stim)/(len(stim))\n \n return sparsity", "def test_calc_layer_sparsity():\n test_ndarray = np.array([[0, 2, 0], [1, 0, 1]])\n assert lu.calc_layer_sparsity(test_ndarray) == 3 / 6, 'correct sparsity value'\n\n test_ndarray = np.array([[0, 0, 0], [1, 0, 1]])\n assert abs(lu.calc_layer_sparsity(test_ndarray) - 4 / 6) < 10**-8, 'correct sparsity value'\n assert lu.calc_layer_sparsity(np.zeros((20, 20))) == 1.0, 'zero array should have 1.0 sparsity'\n assert lu.calc_layer_sparsity(\n np.random.rand(20, 20)) == 0.0, 'random array should have 0.0 sparsity'\n assert type(lu.calc_layer_sparsity(np.zeros((10, 10)))) is float, 'return value should be of type float'", "def calc_assn_weights():\n\t\n\t\t\t#\n\t\t\t#\n\ttext(\"\"\"INSERT INTO assignments (mentor_id, course_id, cost)\n\t\t\tSELECT M.mentor_id, C.course_id, SUM(COALESCE(PW.weight_value,PT.def_weight_val))\n\t\t\tFROM mentors M, courses C\n\t\t\tJOIN course2pref C2P ON C2P.course_id = C.course_id\n\t\t\tJOIN prefs P ON P.pref_id = C2P.pref_id\n\t\t\tJOIN pref_types PT ON PT.pref_type_id = P.pref_type_id\n\t\t\tJOIN pref_weights PW ON PW.pref_type_id = P.pref_type_id\n\t\t\tLEFT JOIN choices Ch ON Ch.mentor_id = M.mentor_id AND Ch.weight_id = PW.pref_id", "def get_strength(self):\n return 10 - self.get_agility()", "def cost(self) -> float:", "def test_sparsity(config):\n total_zeros = 0\n total_nonzeros = 0\n\n print ('<===sparsity type is {}'.format(config.sparsity_type))\n print ('<===layers to be pruned are {}'.format(config._prune_ratios))\n if config.masked_progressive and (config.sparsity_type == 'filter' or config.sparsity_type =='column'or config.sparsity_type == \"bn_filter\" ):\n ### test both column and row sparsity\n print 
(\"***********checking column sparsity*************\")\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n column_l2_norm = LA.norm(W2d,2,axis=0)\n zero_column = np.sum(column_l2_norm == 0)\n nonzero_column = np.sum(column_l2_norm !=0)\n\n print (\"column sparsity of layer {} is {}\".format(name,zero_column/(zero_column+nonzero_column)))\n print (\"***********checking filter sparsity*************\") \n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n row_l2_norm = LA.norm(W2d,2,axis=1)\n zero_row = np.sum(row_l2_norm == 0)\n nonzero_row = np.sum(row_l2_norm !=0)\n print (\"filter sparsity of layer {} is {}\".format(name,zero_row/(zero_row+nonzero_row)))\n print (\"************checking overall sparsity in conv layers*************\")\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy() \n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros))\n return\n \n if config.sparsity_type == \"irregular\":\n for name,W in config.model.named_parameters():\n if 'bias' in name:\n continue\n W = W.cpu().detach().numpy()\n zeros = np.sum(W==0)\n total_zeros+=zeros\n nonzeros = np.sum(W!=0)\n total_nonzeros+=nonzeros\n print (\"sparsity at layer {} is {}\".format(name,zeros/(zeros+nonzeros)))\n total_weight_number = total_zeros+total_nonzeros\n print ('overal compression rate is {}'.format(total_weight_number/total_nonzeros))\n elif config.sparsity_type == \"column\":\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n column_l2_norm = LA.norm(W2d,2,axis=0)\n zero_column = np.sum(column_l2_norm == 0)\n nonzero_column = np.sum(column_l2_norm !=0)\n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print (\"column sparsity of layer {} is {}\".format(name,zero_column/(zero_column+nonzero_column)))\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros)) \n elif config.sparsity_type == \"filter\":\n print ('inside if')\n print (config.prune_ratios)\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n row_l2_norm = LA.norm(W2d,2,axis=1)\n zero_row = np.sum(row_l2_norm == 0)\n nonzero_row = np.sum(row_l2_norm !=0)\n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print (\"filter sparsity of layer {} is {}\".format(name,zero_row/(zero_row+nonzero_row)))\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros))\n elif config.sparsity_type == \"bn_filter\":\n print ('inside bn_filter')\n print (config.prune_ratios)\n for i,(name,W) in enumerate(config.model.named_parameters()):\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n zeros = np.sum(W==0)\n nonzeros = np.sum(W!=0)\n print (\"sparsity at layer {} is {}\".format(name,zeros/(zeros+nonzeros)))", "def get_sparsity(self, exclude=[]):\n nnz = 0 # number of nonzero elements\n nz = 0 # number of zero 
elements\n for key in self.variables:\n if key in exclude:\n continue\n nnz += amath.sum(self.variables[key] != 0)\n nz += amath.sum(self.variables[key] == 0)\n sparsity = float(nz) / (nnz + nz)\n return sparsity", "def cost_func(weights)->float:\n\n cost = 0\n for ith_element in training_set:\n cost += math.pow(hypothesis_value(weights, ith_element[:-1]) - ith_element[-1], 2)\n return cost / 2", "def sparsity(model: keras.Model):\n zero = tf.constant(0, dtype=tf.float32)\n model_weight_size = 0\n model_zeros = 0\n sparsity_dict = {}\n\n for layer in model.layers:\n layer_sparsity_dict = {}\n\n for i, weight in enumerate(layer.trainable_weights):\n mask = tf.cast(tf.equal(weight, zero), tf.uint8)\n\n weight_size = tf.size(weight)\n zeros = tf.cast(tf.math.count_nonzero(mask), tf.int32)\n layer_sparsity_dict[weight.name] = zeros / weight_size\n\n model_weight_size += weight_size\n model_zeros += zeros\n\n sparsity_dict[layer.name] = layer_sparsity_dict\n\n model_sparsity = model_zeros / model_weight_size\n\n return model_sparsity, sparsity_dict", "def update_weights_sum(self):\n vals = self.nn.get_param_values()\n # only use the last layer for summation (w, b)\n self.w_sum = np.sum(vals[-2]) + np.sum(vals[-1])", "def get_penalty():\n return 3", "def get_weights(self):", "def strength(self) -> float:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the offset to balance the polynomial parameters made possible by the bias terms of the network.
def get_bias_offset(nnet):

    offset = 0
    L = len(nnet.trainable_layers)

    for l in range(L-1):
        layer = nnet.trainable_layers[l]
        if layer.b is not None:
            W_prod = T.eye(int(layer.b.shape.eval()[0]))
            for k in range(1, L-1):
                W_prod = T.dot(nnet.trainable_layers[k].W.T, W_prod)
            offset = offset + T.dot(W_prod, layer.b)

    offset = T.dot(nnet.ocsvm_layer.W.T, offset)

    return T.sum(offset)
[ "def _get_bias(self) -> JTensor:\n p = self.params\n b = self.local_theta().b\n if p.forget_gate_bias != 0.0:\n b = b + self.get_adjustment()\n\n return b", "def get_bias(self):", "def getbias(x, bias):\n return x / ((1.0 / bias - 2.0) * (1.0 - x) + 1.0 + 1e-6)", "def GetBiasVector(self):\n ...", "def bias(self):\n return self._bias", "def get_bias(self):\n return self.__bias", "def maximal_linear_bias_relative(self):\n return self.maximal_linear_bias_absolute()/(2.0**self.m)", "def offset(self):\n if self.valid:\n if self.type == 4:\n return ipv4_to_dec(self.addr) - ipv4_to_dec(self.network)\n else:\n return ipv6_to_dec(self.addr) - ipv6_to_dec(self.network)", "def bias(self) -> Optional[str]:\n return pulumi.get(self, \"bias\")", "async def get_focus_offset(self, **kwargs: Any) -> float:\n return 0", "def ub_offset(input_ub):\n ub_shape = input_ub.shape\n if len(ub_shape) in (0, 1):\n return 0\n\n return input_ub.offset", "def bias(self):\n if self._bias is None:\n with self:\n self._bias = nengo.Node([1], label='cortical bias')\n return self._bias", "def get_parameters(self):\n if self.add_bias:\n params = np.concatenate((self.bias, self.W), 0)\n else:\n params = self.W\n return params", "def bias_prior(self):", "def get_relative_offset(self):\n\n\t\treturn self.get_offset_1()", "def _get_flat_param_offsets(self) -> List[Tuple[int, int]]:\n cumulative_sum = list(accumulate(self.flat_param._numels))\n starts = [0] + cumulative_sum[:-1]\n ends = [end - 1 for end in cumulative_sum] # inclusive\n param_offsets = list(zip(starts, ends))\n return param_offsets", "def adjust_bias(self):\n self.bias += self.lr * self.delta", "def _get_kernel_bias(self):\n\n # get weights and bias of conv branches\n kernel_conv = 0\n bias_conv = 0\n for ix in range(self.num_conv_branches):\n _kernel, _bias = self._fuse_bn_tensor(self.rbr_conv[ix])\n _kernel = self._pad_tensor(_kernel, to_size=self.kernel_size)\n kernel_conv += _kernel\n bias_conv += _bias\n\n # get weights and bias of skip branch\n kernel_identity = 0\n bias_identity = 0\n if self.rbr_skip is not None:\n kernel_identity, bias_identity = self._fuse_bn_tensor(self.rbr_skip)\n\n kernel_final = kernel_conv + kernel_identity\n bias_final = bias_conv + bias_identity\n\n # get kx1 1xk branch\n hor_k, hor_b = self._fuse_bn_tensor(self.hor_branch)\n ver_k, ver_b = self._fuse_bn_tensor(self.ver_branch)\n self._add_to_square_kernel(kernel_final, hor_k)\n self._add_to_square_kernel(kernel_final, ver_k)\n\n bias_final += hor_b + ver_b\n\n return kernel_final, bias_final", "def Offset(self) -> int:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create an OC-SVM loss for the network given in the argument, with rho=1 fixed
def compile_update_ocsvm_rho_fixed(nnet, inputs, targets):

    floatX = Cfg.floatX
    C = Cfg.C
    nu = Cfg.nu

    if len(nnet.all_layers) > 1:
        feature_layer = nnet.all_layers[-2]
    else:
        feature_layer = nnet.input_layer
    final_layer = nnet.ocsvm_layer

    trainable_params = lasagne.layers.get_all_params(final_layer, trainable=True)

    # Regularization
    Wsvm_penalty = T.sum(abs(final_layer.W) ** Cfg.pow)
    l2_penalty = get_l2_penalty(nnet, include_bias=Cfg.include_bias, pow=Cfg.pow)
    l2_penalty += Wsvm_penalty
    l2_penalty *= (1/C)

    # Backpropagation
    prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=False)
    scores = T.ones_like(prediction) - prediction
    objective, train_acc = final_layer.objective(-scores, targets)

    # OC-SVM loss
    train_loss = T.cast(objective / (targets.shape[0] * nu), dtype='floatX')
    train_acc = T.cast(train_acc * 1. / targets.shape[0], dtype='floatX')
    train_obj = T.cast(floatX(0.5) * l2_penalty + train_loss, dtype='floatX')
    updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.solver)
    nnet.backprop = theano.function([inputs, targets], [train_obj, train_acc], updates=updates)

    # Forwardpropagation
    test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=True)
    test_scores = T.ones_like(prediction) - test_prediction
    objective, test_acc = final_layer.objective(-test_scores, targets)

    # Get network feature representation
    test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs, deterministic=True)
    test_rep_norm = test_rep.norm(L=2, axis=1)

    test_ball_penalty = T.cast(0, dtype='floatX')
    test_l2_output = T.cast(0, dtype='floatX')

    # OC-SVM test loss
    test_loss = T.cast(objective / (targets.shape[0] * nu), dtype='floatX')
    test_acc = T.cast(test_acc * 1. / targets.shape[0], dtype='floatX')
    test_obj = T.cast(floatX(0.5) * l2_penalty + test_loss, dtype='floatX')
    nnet.forward = theano.function([inputs, targets],
                                   [test_obj, test_acc, -test_scores,
                                    floatX(0.5) * l2_penalty,
                                    floatX(0.5) * test_l2_output,
                                    test_rep, test_rep_norm, test_loss,
                                    floatX(0.5) * test_ball_penalty])
[ "def loss_creator(config):\n return torch.nn.BCELoss()", "def heteroscedastic_loss(network, params, x):\n\n pred_mean, pred_var = network(x)\n logvar = tf.reduce_sum(0.5 * tf.math.log(pred_var), axis=-1)\n squared_error = tf.reduce_sum(0.5 * tf.math.square(params - pred_mean) / pred_var, axis=-1)\n loss = tf.reduce_mean(squared_error + logvar)\n return loss", "def compile_update_svdd(nnet, inputs, targets):\n\n floatX = Cfg.floatX\n B = Cfg.B\n C = Cfg.C\n nu = Cfg.nu\n\n # initialize R\n if nnet.R_init > 0:\n nnet.Rvar = shared(floatX(nnet.R_init), name=\"R\")\n else:\n nnet.Rvar = shared(floatX(1), name=\"R\") # initialization with R=1\n\n # Loss\n feature_layer = nnet.all_layers[-1]\n rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=False)\n\n # initialize c (0.5 in every feature representation dimension)\n rep_dim = feature_layer.num_units\n # nnet.cvar = shared(floatX(np.ones(rep_dim) * (1. / (rep_dim ** 0.5))),\n # name=\"c\")\n nnet.cvar = shared(floatX(np.ones(rep_dim) * 0.5), name=\"c\")\n\n dist = T.sum(((rep - nnet.cvar.dimshuffle('x', 0)) ** 2),\n axis=1, dtype='floatX')\n scores = dist - nnet.Rvar\n stack = T.stack([T.zeros_like(scores), scores], axis=1)\n loss = T.cast(T.sum(T.max(stack, axis=1)) / (inputs.shape[0] * nu),\n dtype='floatX')\n\n y_pred = T.argmax(stack, axis=1)\n acc = T.cast((T.sum(T.eq(y_pred.flatten(), targets), dtype='int32')\n * 1. / targets.shape[0]), 'floatX')\n\n # Network weight decay\n if Cfg.weight_decay:\n l2_penalty = (1/C) * get_l2_penalty(nnet,\n include_bias=Cfg.include_bias,\n pow=Cfg.pow)\n else:\n l2_penalty = T.cast(0, dtype='floatX')\n\n # Network activation sparsity regularization\n if Cfg.sparsity_penalty:\n sparsity_penalty = (1/B) * get_sparsity_penalty(nnet, inputs,\n Cfg.sparsity,\n mode=Cfg.sparsity_mode,\n deterministic=False)\n else:\n sparsity_penalty = T.cast(0, dtype='floatX')\n\n # Backpropagation (hard-margin: only minimizing everything to a ball\n # centered at c)\n trainable_params = lasagne.layers.get_all_params(feature_layer,\n trainable=True)\n if Cfg.gaussian_blob:\n avg_dist = T.mean(1-T.exp(-dist), dtype=\"floatX\")\n else:\n avg_dist = T.mean(dist, dtype=\"floatX\")\n obj_ball = T.cast(floatX(0.5) * l2_penalty + avg_dist + sparsity_penalty,\n dtype='floatX')\n updates_ball = get_updates(nnet, obj_ball, trainable_params,\n solver=nnet.solver)\n nnet.backprop_ball = theano.function([inputs, targets], [obj_ball, acc],\n updates=updates_ball)\n\n # Backpropagation (without training R)\n obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + loss + sparsity_penalty,\n dtype='floatX')\n updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver)\n nnet.backprop_without_R = theano.function([inputs, targets], [obj, acc],\n updates=updates)\n\n # Backpropagation (with training R)\n trainable_params.append(nnet.Rvar) # add radius R to trainable parameters\n updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver)\n nnet.backprop = theano.function([inputs, targets], [obj, acc],\n updates=updates)\n\n\n # Forwardpropagation\n test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=True)\n test_rep_norm = test_rep.norm(L=2, axis=1)\n\n test_dist = T.sum(((test_rep - nnet.cvar.dimshuffle('x', 0)) ** 2),\n axis=1, dtype='floatX')\n\n test_scores = test_dist - nnet.Rvar\n test_stack = T.stack([T.zeros_like(test_scores), test_scores], axis=1)\n test_loss = T.cast(T.sum(T.max(test_stack, axis=1)) / (inputs.shape[0]*nu),\n dtype='floatX')\n\n test_y_pred = 
T.argmax(test_stack, axis=1)\n test_acc = T.cast((T.sum(T.eq(test_y_pred.flatten(), targets),\n dtype='int32')\n * 1. / targets.shape[0]), dtype='floatX')\n\n # Network activation sparsity regularization (with determinisitc=True)\n if Cfg.sparsity_penalty:\n test_sparsity_penalty = ((1 / B) *\n get_sparsity_penalty(nnet, inputs,\n Cfg.sparsity,\n mode=Cfg.sparsity_mode,\n deterministic=True))\n else:\n test_sparsity_penalty = T.cast(0, dtype='floatX')\n\n test_obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + test_loss\n + test_sparsity_penalty, dtype='floatX')\n nnet.forward = theano.function([inputs, targets],\n [test_obj, test_acc, test_scores,\n floatX(0.5) * l2_penalty,\n test_sparsity_penalty, test_rep,\n test_rep_norm, test_loss, nnet.Rvar])", "def loss(params, a, m, cutoff):\n fidelity, _, normIn, normOut, _ = circuit(params, a, m, cutoff)\n loss = -fidelity + 10*(1-np.abs(normIn)) + 10*(1-np.abs(normOut))\n return loss", "def _create_objective(cu, co):\n\n def nv_objective(y_true, y_pred):\n residual = (y_true - y_pred).astype('float')\n grad = np.where(residual < 0, 2 * (co ** 2) * (y_pred - y_true), 2 * (cu ** 2) * (y_pred - y_true))\n hess = np.where(residual < 0, 2 * (co ** 2), 2 * (cu ** 2))\n return grad, hess\n\n return nv_objective", "def geo_loss_interface(pred_odo):\n def geo_loss_det(y_true, y_pred):\n odo_pose = pred_odo[:, 0:3]\n odo_orien = pred_odo[:, 3:]\n geo_pose = 0\n print('In Construction')\n return geo_loss_det", "def get_rpn_cls_loss(net):\n\t# ================== RPN classification loss =============================\n\trpn_cls_score = tf.reshape(net.get_output('rpn_cls_score_reshape'),[-1,2]) \n\t# [H*W*9,2], object or not\n\trpn_label = tf.reshape(net.get_output('rpn-data')[0],[-1])\n\t# anchor label, [H*W*9], 1 for object, 0 for background, -1 for do not care\n\tvalid_index = tf.where(tf.not_equal(rpn_label,-1))\n\trpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, valid_index), [-1,2]) #[num_box_reserved, 2]\n\trpn_label = tf.reshape( tf.gather(rpn_label, valid_index), [-1]) #[num_box_reserved,2]\n\trpn_cross_entropy = tf.reduce_mean(\n\t\t\t\t\t\ttf.nn.sparse_softmax_cross_entropy_with_logits(\n\t\t\t\t\t\t\tlogits = rpn_cls_score,\n\t\t\t\t\t\t\tlabels = rpn_label))\n\t\n\treturn rpn_cross_entropy", "def compute_loss(theta_vector, *args):\n\n psi = args[0]\n circ_depth = args[1]\n num_qbits = args[2]\n theta = np.reshape(theta_vector, (circ_depth, num_qbits))\n\n fidelity = get_fidelity(theta, psi)\n loss = get_loss(fidelity)\n return loss", "def build_loss(loss_type: str, hyper_params: DictConfig, use_cuda: bool) -> Loss:\n loss_cfg = DictConfig(dict())\n loss_cfg[\"class\"] = loss_type\n loss_cfg[\"params\"] = dict(hyper_params=hyper_params, use_cuda=use_cuda)\n loss_fn = hydra.utils.instantiate(loss_cfg)\n return loss_fn", "def _build_loss(self):\n def reconstruction_loss(y_true, y_hat):\n weight = y_true * (self._options.beta - 1.) 
+ 1.\n return tf.reduce_mean(tf.pow((y_true - y_hat) * weight, 2))\n\n self._recon_loss = reconstruction_loss", "def loss_der(network_y, real_y):\n return (network_y - real_y)", "def loss(self, coeffs):\n raise NotImplementedError('Must be instanciated')", "def loss(posterior, pars_to_penalize, c_rim):\n marginal = posterior.mean(axis=0)\n cond_entropy = misc.cat_entropy(posterior).mean()\n entropy = misc.cat_entropy(marginal.dimshuffle('x', 0)).sum()\n\n nmi = cond_entropy - entropy\n\n n_samples = posterior.shape[0]\n penalties = [(i ** 2).sum() / n_samples for i in pars_to_penalize]\n penalty = sum(penalties)\n\n loss = nmi + c_rim * penalty\n\n return get_named_variables(locals())", "def get_loss():\n ##################\n # YOUR CODE HERE #\n ##################", "def compute_loss(self, features, mode, params, precomputed):\n raise NotImplementedError(\"Model does not implement loss.\")", "def loss(self):\n pass", "def compute_loss(\n action_probs: tf.Tensor, values: tf.Tensor, returns: tf.Tensor\n) -> tf.Tensor:\n\n advantage = returns - values\n td = tf.subtract(returns, values)\n\n # actor\n # action_log_probs = tf.math.log(action_probs)\n # actor_loss = -tf.math.reduce_sum(action_log_probs * advantage)\n action_log_probs = tf.math.log(action_probs)\n actor_loss = -tf.math.reduce_mean(action_log_probs * td)\n\n # critic\n # td = tf.subtract(returns, values)\n # critic_loss = tf.reduce_mean(tf.square(td))\n critic_loss = huber_loss(values, returns)\n\n tf.print(\"a_loss:\", actor_loss, \"c_loss:\", critic_loss)\n\n return actor_loss + critic_loss", "def nnObjFunction(params, *args):\r\n \r\n n_input, n_hidden, n_class, training_data, training_label, lambdaval = args\r\n \r\n w1 = params[0:n_hidden * (n_input + 1)].reshape( (n_hidden, (n_input + 1)))\r\n w2 = params[(n_hidden * (n_input + 1)):].reshape((n_class, (n_hidden + 1)))\r\n\r\n data=(training_data)\r\n w1temp=np.transpose(w1)\r\n w2temp=np.transpose(w2)\r\n\r\n temp=np.ones(len(data))[...,None] #adding 1s to data\r\n x=np.append(data,temp,axis=1)\r\n a=np.dot(x,w1temp) #getting first sum-product at hidden node\r\n z=sigmoid(a) #applying sigma on every entry\r\n temp=np.ones(len(z))[...,None] #adding 1s to hidden node values\r\n z=np.append(z,temp,axis=1)\r\n b=np.dot(z,w2temp) #getting final sum-product at output node\r\n o=sigmoid(b) #applying sigma on every entry\r\n \r\n oneOfK = np.zeros((len(training_label), 10))\r\n for label in range(0,len(training_label)):\r\n oneOfK[label][math.floor(training_label[label])] = 1\r\n\r\n y=oneOfK\r\n onesarray=np.ones(y.shape)\r\n obj_val = 0 \r\n\r\n delta1= y-o\r\n delta2= onesarray-o\r\n delta=delta1*delta2\r\n delta=delta*o\r\n Jpw2=np.dot(np.transpose(delta),z)\r\n Jpw2=-Jpw2\r\n Jpw2=Jpw2+ w2*lambdaval\r\n grad_w2=Jpw2/len(y)\r\n grad_w2=grad_w2[...,None]\r\n\r\n newones=np.ones(z.shape)\r\n delta1=newones-z\r\n delta1=delta1*z\r\n delta1=-delta1\r\n delta2=np.dot(delta,w2)\r\n delta=delta1*delta2\r\n delta=np.delete(delta,delta.shape[1]-1,1)\r\n Jpw1=np.dot(np.transpose(delta),x)\r\n Jpw1=Jpw1+lambdaval*w1\r\n grad_w1=Jpw1/len(y)\r\n\r\n sca1=(y-o)*(y-o)\r\n sca2=np.sum(sca1,axis=1)[...,None]\r\n sca2=sca2/2\r\n sca3=np.sum(sca2,axis=0)[...,None]\r\n scalar1=np.asscalar(sca3)/50000\r\n \r\n sca1=w1*w1\r\n sca2=np.sum(sca1,axis=1)[...,None]\r\n sca3=np.sum(sca2,axis=0)[...,None]\r\n scalar2=np.asscalar(sca3)\r\n \r\n sca1=w2*w2\r\n sca2=np.sum(sca1,axis=1)[...,None]\r\n sca3=np.sum(sca2,axis=0)[...,None]\r\n scalar3=np.asscalar(sca3)\r\n\r\n scasum=scalar2+scalar3\r\n 
scasum=lambdaval*scasum\r\n scasum=scasum/(2*len(y))\r\n\r\n scalar=scalar1+scasum\r\n obj_val=scalar\r\n \r\n gr1=grad_w1.flatten()\r\n gr2=grad_w2.flatten()\r\n gr1=gr1[...,None]\r\n gr2=gr2[...,None]\r\n \r\n obj_grad = np.concatenate((grad_w1.flatten(), grad_w2.flatten()),0)\r\n \r\n return (obj_val,obj_grad)", "def netemDrop():\n return lossCmd(percent=\"100%\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create an SVDD loss for the network given in the argument
def compile_update_svdd(nnet, inputs, targets):

    floatX = Cfg.floatX
    B = Cfg.B
    C = Cfg.C
    nu = Cfg.nu

    # initialize R
    if nnet.R_init > 0:
        nnet.Rvar = shared(floatX(nnet.R_init), name="R")
    else:
        nnet.Rvar = shared(floatX(1), name="R")  # initialization with R=1

    # Loss
    feature_layer = nnet.all_layers[-1]
    rep = lasagne.layers.get_output(feature_layer, inputs=inputs, deterministic=False)

    # initialize c (0.5 in every feature representation dimension)
    rep_dim = feature_layer.num_units
    # nnet.cvar = shared(floatX(np.ones(rep_dim) * (1. / (rep_dim ** 0.5))),
    #                    name="c")
    nnet.cvar = shared(floatX(np.ones(rep_dim) * 0.5), name="c")

    dist = T.sum(((rep - nnet.cvar.dimshuffle('x', 0)) ** 2), axis=1, dtype='floatX')
    scores = dist - nnet.Rvar
    stack = T.stack([T.zeros_like(scores), scores], axis=1)
    loss = T.cast(T.sum(T.max(stack, axis=1)) / (inputs.shape[0] * nu), dtype='floatX')

    y_pred = T.argmax(stack, axis=1)
    acc = T.cast((T.sum(T.eq(y_pred.flatten(), targets), dtype='int32') * 1. / targets.shape[0]), 'floatX')

    # Network weight decay
    if Cfg.weight_decay:
        l2_penalty = (1/C) * get_l2_penalty(nnet, include_bias=Cfg.include_bias, pow=Cfg.pow)
    else:
        l2_penalty = T.cast(0, dtype='floatX')

    # Network activation sparsity regularization
    if Cfg.sparsity_penalty:
        sparsity_penalty = (1/B) * get_sparsity_penalty(nnet, inputs, Cfg.sparsity, mode=Cfg.sparsity_mode, deterministic=False)
    else:
        sparsity_penalty = T.cast(0, dtype='floatX')

    # Backpropagation (hard-margin: only minimizing everything to a ball
    # centered at c)
    trainable_params = lasagne.layers.get_all_params(feature_layer, trainable=True)
    if Cfg.gaussian_blob:
        avg_dist = T.mean(1-T.exp(-dist), dtype="floatX")
    else:
        avg_dist = T.mean(dist, dtype="floatX")
    obj_ball = T.cast(floatX(0.5) * l2_penalty + avg_dist + sparsity_penalty, dtype='floatX')
    updates_ball = get_updates(nnet, obj_ball, trainable_params, solver=nnet.solver)
    nnet.backprop_ball = theano.function([inputs, targets], [obj_ball, acc], updates=updates_ball)

    # Backpropagation (without training R)
    obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + loss + sparsity_penalty, dtype='floatX')
    updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver)
    nnet.backprop_without_R = theano.function([inputs, targets], [obj, acc], updates=updates)

    # Backpropagation (with training R)
    trainable_params.append(nnet.Rvar)  # add radius R to trainable parameters
    updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver)
    nnet.backprop = theano.function([inputs, targets], [obj, acc], updates=updates)

    # Forwardpropagation
    test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs, deterministic=True)
    test_rep_norm = test_rep.norm(L=2, axis=1)

    test_dist = T.sum(((test_rep - nnet.cvar.dimshuffle('x', 0)) ** 2), axis=1, dtype='floatX')

    test_scores = test_dist - nnet.Rvar
    test_stack = T.stack([T.zeros_like(test_scores), test_scores], axis=1)
    test_loss = T.cast(T.sum(T.max(test_stack, axis=1)) / (inputs.shape[0]*nu), dtype='floatX')

    test_y_pred = T.argmax(test_stack, axis=1)
    test_acc = T.cast((T.sum(T.eq(test_y_pred.flatten(), targets), dtype='int32') * 1. / targets.shape[0]), dtype='floatX')

    # Network activation sparsity regularization (with determinisitc=True)
    if Cfg.sparsity_penalty:
        test_sparsity_penalty = ((1 / B) * get_sparsity_penalty(nnet, inputs, Cfg.sparsity, mode=Cfg.sparsity_mode, deterministic=True))
    else:
        test_sparsity_penalty = T.cast(0, dtype='floatX')

    test_obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + test_loss + test_sparsity_penalty, dtype='floatX')
    nnet.forward = theano.function([inputs, targets],
                                   [test_obj, test_acc, test_scores,
                                    floatX(0.5) * l2_penalty,
                                    test_sparsity_penalty, test_rep,
                                    test_rep_norm, test_loss, nnet.Rvar])
[ "def tv_loss(x, name='tv_loss'):\n raise NotImplementedError(\"Please use tensorflow total_variation loss.\")", "def loss_der(network_y, real_y):\n return (network_y - real_y)", "def train_loss(wb_vect, unflattener):\n wb_struct = unflattener(wb_vect)\n n_graphs = 100\n\n samp_graphs, samp_inputs, samp_nodes_nbrs, samp_graph_idxs = batch_sample(\n graphs, input_shape, n_graphs)\n\n preds = predict(wb_struct, samp_inputs, samp_nodes_nbrs, samp_graph_idxs)\n graph_scores = np.array([float(score_func(g)) for g in samp_graphs]).\\\n reshape((len(samp_graphs), 1))\n\n mse = np.mean(np.power(preds - graph_scores, 2))\n return mse", "def ssm_vr_loss(self, x, v):\n x = x.unsqueeze(0).expand(self.n_slices, *x.shape) # (n_slices, b, ...)\n x = x.contiguous().view(-1, *x.shape[2:]) # (n_slices*b, ...)\n x = x.requires_grad_()\n score = self.model.score(x) # (n_slices*b, ...)\n sv = torch.sum(score * v) # ()\n loss1 = torch.norm(score, dim=-1) ** 2 * 0.5 # (n_slices*b,)\n gsv = torch.autograd.grad(sv, x, create_graph=True)[0] # (n_slices*b, ...)\n loss2 = torch.sum(v*gsv, dim=-1) # (n_slices*b,)\n loss = (loss1 + loss2).mean() # ()\n return loss", "def dsm_loss(self, x, v, sigma=0.1):\n x = x.requires_grad_()\n v = v * sigma\n x_ = x + v\n s = self.model.score(x_)\n loss = torch.norm(s + v/(sigma**2), dim=-1)**2\n loss = loss.mean()/2.\n return loss", "def loss(self):\n return 'mse'", "def deen_loss(self, x, v, sigma=0.1):\n x = x.requires_grad_()\n v = v * sigma\n x_ = x + v\n s = sigma ** 2 * self.model.score(x_)\n loss = torch.norm(s+v, dim=-1)**2\n loss = loss.mean()/2.\n return loss", "def svd_tool():\n\n parser = argparse.ArgumentParser(\n description=\"SVD tool for fc layers of caffe network model.\"\n )\n\n SVDTool.populate_argument_parser(parser)\n\n args = parser.parse_args()\n if args.caffe:\n sys.path.insert(0, args.caffe)\n\n # init logging\n init_logging(args.quiet)\n setup_glog_environ(args.quiet or args.quiet_caffe)\n\n if hasattr(args, 'config_file') and args.config_file:\n svd_tool_ins = SVDTool.load_from_config_file(args.config_file)\n else:\n svd_tool_ins = SVDTool(args)\n\n if svd_tool_ins is not None:\n # run the inner svd tool\n status = svd_tool_ins.run()\n else:\n status = False\n\n sys.exit(0 if status else 1)", "def loss(input_shape, inp, out_VAE, z_mean, z_var, e=1e-8, weight_L2=0.1, weight_KL=0.1):\n c, H, W, D = input_shape\n n = c * H * W * D\n\n #loss_L2 = mse(inp, out_VAE)\n loss_L2 = K.mean(K.square(inp - out_VAE), axis=(1, 2, 3, 4))\n\n loss_KL = (1 / n) * K.sum(\n K.exp(z_var) + K.square(z_mean) - 1. - z_var,\n axis=-1\n )\n\n def loss_(y_true, y_pred):\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(K.abs(y_true_f * y_pred_f), axis=-1)\n loss_dice = (2. 
* intersection) / (\n K.sum(K.square(y_true_f), -1) + K.sum(K.square(y_pred_f), -1) + e)\n\n return - loss_dice + weight_L2 * loss_L2 + weight_KL * loss_KL\n\n return loss_", "def loss_function(inst):\n return -np.mean(np.log(inst))", "def calculate_loss(self, v_online: Tensor, v_target: Tensor) -> Tensor:\n _, z1 = self.online_network(v_online)\n h1 = self.predictor(z1)\n with torch.no_grad():\n _, z2 = self.target_network(v_target)\n return -0.5 * F.cosine_similarity(h1, z2).mean()", "def loss(self):\n pass", "def heteroscedastic_loss(network, params, x):\n\n pred_mean, pred_var = network(x)\n logvar = tf.reduce_sum(0.5 * tf.math.log(pred_var), axis=-1)\n squared_error = tf.reduce_sum(0.5 * tf.math.square(params - pred_mean) / pred_var, axis=-1)\n loss = tf.reduce_mean(squared_error + logvar)\n return loss", "def get_loss():\n ##################\n # YOUR CODE HERE #\n ##################", "def loss(self, predictions, real_values):", "def svm_loss(x, y):\n loss, dx = None, None\n ###########################################################################\n # TODO: Implement loss and gradient for multiclass SVM classification. #\n # This will be similar to the svm loss vectorized implementation in #\n # cs231n/classifiers/linear_svm.py. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n y_temp = np.ones((x.shape[0], x.shape[1])) # 1๋กœ ๊ตฌ์„ฑ๋œ x์™€ ๊ฐ™์€ ์‰์ž…์˜ ๋งคํŠธ๋ฆญ์Šค๋ฅผ ๋งŒ๋“ ๋‹ค\n #print(y_temp)\n y_score = x[np.arange(x.shape[0]), y] # ์ •๋‹ต๋ ˆ์ด๋ธ”์˜ ์Šค์ฝ”์–ด๋กœ๋งŒ ๊ตฌ์„ฑ๋œ ํ•˜๋‚˜์˜ ์ปฌ๋Ÿผ ๋ฒกํ„ฐ๋ฅผ ๋งŒ๋“ ๋‹ค\n y_score = np.reshape(y_score, (x.shape[0], 1)) # ๋ธŒ๋กœ๋“œ์บ์ŠคํŒ…์„ ์œ„ํ•ด ๋ฆฌ์‰์ž… ํ•ด์ค€๋‹ค\n y_temp[np.arange(x.shape[0]), y] = 0 # 1๋กœ ๊ตฌ์„ฑ๋œ ํ…œํ”„๋งคํŠธ๋ฆญ์Šค์˜ ์ •๋‹ต ๋ ˆ์ด๋ธ”์— ํ•ด๋‹น๋˜๋Š” ์ธ๋ฑ์Šค์— 0์„ ํ• ๋‹นํ•œ๋‹ค\n #print(y_temp)\n loss_temp = (x - y_score) - 1\n loss_temp = (-loss_temp * y_temp) / x.shape[0]\n loss = (np.sum(loss_temp))\n #print(loss_temp)\n\n #print(np.sum(loss_temp, axis = 1))\n \n temp = loss_temp * x.shape[0]\n temp[loss_temp > 0] = 1\n row_sum = np.sum(temp, axis = 1)\n temp[np.arange(x.shape[0]), y] = -row_sum.T\n dx = -temp\n\n dx /= x.shape[0]\n\n\n #print(dx)\n\n\n\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return loss, dx", "def train(net):\n\n # Set SGD hyperparameters\n n_iter = 200 # number of iterations of SGD\n learning_rate = 1e-3 # learning rate for SGD\n momentum = .99 # momentum parameter for SGD\n batch_size = 100 # number of data points in each mini-batch\n\n # Initialize binary cross-entropy loss function\n loss_fn = nn.BCELoss()\n\n # Initialize SGD optimizer with momentum\n optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=momentum)\n\n # Placeholder to save loss at each iteration\n track_loss = []\n\n # Loop over iterations\n for i in range(n_iter):\n\n # Sample minibatch of oriented grating stimuli\n stimuli, tilt = sample_stimuli(batch_size)\n\n # Evaluate loss and update network weights\n out = net(stimuli) # predicted probability of tilt right\n loss = loss_fn(out, tilt) # evaluate loss\n optimizer.zero_grad() # clear gradients\n loss.backward() # compute gradients\n optimizer.step() # update weights\n \n # Keep track of loss at each iteration\n 
track_loss.append(loss.item())\n\n # Track progress\n if (i + 1) % (n_iter / 10) == 0:\n print('iteration %i | loss: %.3f | percent correct: %.2f%%' % (i + 1, loss.item(), 100 * pcorrect(out, tilt)))\n \n # Plot loss\n plt.plot(track_loss)\n plt.xlabel('iterations of SGD')\n plt.ylabel('binary cross-entropy loss')\n plt.xlim([0, None])\n plt.ylim([0, None])\n plt.show()", "def surprise_SVD(train_file,test_file):\n print(\"SVD\")\n fold = [(train_file, test_file)]\n reader = Reader(line_format='user item rating', sep=',')\n data = Dataset.load_from_folds(fold, reader=reader)\n pkf = PredefinedKFold()\n # Algorithm\n algo = SVD(n_epochs=30,lr_all=0.01,reg_all=0.1)\n for trainset, testset in pkf.split(data):\n # Train\n algo.fit(trainset)\n\n # Predict\n predictions = algo.test(testset)\n pred = np.zeros(len(predictions))\n for i in range(len(predictions)):\n val = predictions[i].est\n pred[i] = val\n return pred", "def loss (self,target):\n self.target = target \n lossval = self.loss_function(self.out, target)\n self.lossval = lossval\n self.lossval_array.append(lossval)\n return lossval" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create an autoencoder Theano update for the network given in the argument
def create_autoencoder(nnet):

    floatX = Cfg.floatX
    B = Cfg.ae_B
    C = Cfg.ae_C

    ndim = nnet.data._X_train.ndim
    if ndim == 2:
        inputs = T.matrix('inputs')
    elif ndim == 4:
        inputs = T.tensor4('inputs')

    final_layer = nnet.all_layers[-1]

    # Backpropagation
    trainable_params = lasagne.layers.get_all_params(final_layer, trainable=True)
    prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=False)

    # use l2 or binary crossentropy loss (features are scaled to [0,1])
    if Cfg.ae_loss == "l2":
        loss = lasagne.objectives.squared_error(prediction, inputs)
    if Cfg.ae_loss == "ce":
        loss = lasagne.objectives.binary_crossentropy(prediction, inputs)

    scores = T.sum(loss, axis=range(1, ndim), dtype='floatX')
    loss = T.mean(scores)

    # Regularization
    if Cfg.ae_weight_decay:
        l2_penalty = (floatX(0.5) / C) * regularize_network_params(final_layer, l2)
    else:
        l2_penalty = T.cast(0, dtype='floatX')

    # Network activation sparsity regularization
    if Cfg.ae_sparsity_penalty:
        sparsity_penalty = ((1 / B) * get_sparsity_penalty(nnet, inputs, Cfg.ae_sparsity, mode=Cfg.ae_sparsity_mode, deterministic=False))
    else:
        sparsity_penalty = T.cast(0, dtype='floatX')

    train_obj = loss + l2_penalty + sparsity_penalty
    updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.ae_solver)
    nnet.ae_backprop = theano.function([inputs], [loss, l2_penalty, sparsity_penalty, scores], updates=updates)

    # Forwardpropagation
    test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=True)

    # use l2 or binary crossentropy loss (features are scaled to [0,1])
    if Cfg.ae_loss == "l2":
        test_loss = lasagne.objectives.squared_error(test_prediction, inputs)
    if Cfg.ae_loss == "ce":
        test_loss = lasagne.objectives.binary_crossentropy(test_prediction, inputs)

    test_scores = T.sum(test_loss, axis=range(1, ndim), dtype='floatX')
    test_loss = T.mean(test_scores)

    # Network activation sparsity regularization (with determinisitc=True)
    if Cfg.ae_sparsity_penalty:
        test_sparsity_penalty = ((1 / B) * get_sparsity_penalty(nnet, inputs, Cfg.ae_sparsity, mode=Cfg.ae_sparsity_mode, deterministic=True))
    else:
        test_sparsity_penalty = T.cast(0, dtype='floatX')

    nnet.ae_forward = theano.function([inputs], [test_loss, l2_penalty, test_sparsity_penalty, test_scores, test_prediction])
[ "def build_full_conv_autoencoder():\n input_img = Input(shape=(84, 84, 3))\n\n x = Convolution2D(48, 8, 8, activation='relu', border_mode='same', name='c1')(input_img)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(32, 4, 4, activation='relu', border_mode='same', name='c2')(x)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(32, 3, 3, activation='relu', border_mode='same', name='c3')(x)\n encoded = MaxPooling2D((3, 3), border_mode='same')(x)\n\n x = Convolution2D(32, 3, 3, activation='relu', border_mode='same', name='c4')(encoded)\n x = UpSampling2D((3, 3))(x)\n x = Convolution2D(32, 4, 4, activation='relu', border_mode='same', name='c5')(x)\n x = UpSampling2D((2, 2))(x)\n x = Convolution2D(48, 8, 8, activation='relu', border_mode='same', name='c6')(x)\n x = UpSampling2D((2, 2))(x)\n decoded = Convolution2D(3, 4, 4, activation='sigmoid', border_mode='same', name='c7')(x)\n\n autoencoder = Model(input_img, decoded)\n autoencoder.compile(optimizer='adam', metrics=['mse'], loss='mse')\n autoencoder.summary()\n return autoencoder", "def autoencoder(dims, act = 'relu', init = 'glorot_uniform'):\n n_stacks = len(dims) - 1\n # input\n input_img = Input(shape = (dims[0],), name = 'input')\n x = input_img\n # internal layers in encoder\n for i in range(n_stacks-1):\n x = Dense(dims[i + 1], activation = act, kernel_initializer = init, name = 'encoder_%d' % i)(x)\n\n # hidden layer\n encoded = Dense(dims[-1], kernel_initializer = init, name = 'encoder_%d' % (n_stacks - 1))(x) # hidden layer, features are extracted from here\n\n x = encoded\n # internal layers in decoder\n for i in range(n_stacks-1, 0, -1):\n x = Dense(dims[i], activation = act, kernel_initializer = init, name = 'decoder_%d' % i)(x)\n\n # output\n x = Dense(dims[0], kernel_initializer = init, name = 'decoder_0')(x)\n decoded = x\n return Model(inputs = input_img, outputs = decoded, name = 'AE'), Model(inputs = input_img, outputs = encoded, name = 'encoder')", "def autoencoder(X, inp_dims=2048):\n drop = tf.keras.layers.Dropout(rate=0.2)\n FC1 = tf.layers.Dense(units=inp_dims // 2, activation=\"tanh\", name='fc1')\n FC2 = tf.layers.Dense(units=inp_dims // 4, activation=\"tanh\", name='fc2')\n FC3 = tf.layers.Dense(units=inp_dims // 8, activation=None, name='fc3')\n Act = tf.keras.layers.Activation(activation=\"tanh\")\n # FC4 = tf.layers.Dense(units=inp_dims // 8,activation=\"tanh\",name='fc4')\n FC5 = tf.layers.Dense(units=inp_dims // 4, activation=\"tanh\", name='fc5')\n FC6 = tf.layers.Dense(units=inp_dims // 2, activation=None, name='fc6')\n FC7 = tf.layers.Dense(units=inp_dims, activation=None, name='fc7')\n X = FC1(drop(X))\n X = FC2(drop(X))\n X = FC3(X)\n fea = X\n X_up = Act(X)\n X_up = FC5(X_up)\n X_up = FC6(drop(X_up))\n pred = FC7(drop(X_up))\n return pred, fea", "def build_autoencoder(self):\r\n\t\tinput_state = Input(shape = self.state_dim)\r\n\t\tconv = Conv2D(self.number_of_convolutions, (5,5), activation = 'relu')(input_state)\r\n\t\tmax_pool = MaxPooling2D((2,2))(conv)\r\n\t\t#max_pool2 = MaxPooling2D((2,2))(max_pool)\r\n\t\t#up_sample2 = UpSampling2D((2,2))(max_pool2) \r\n\t\tup_sample = UpSampling2D((2,2))(max_pool)\r\n\t\tconv_trans = Conv2DTranspose(1, (5,5), activation = 'sigmoid')(up_sample)\r\n\r\n\t\tself.encoder = Model(input_state, max_pool)\r\n\t\tself.autoencoder = Model(input_state, conv_trans)\r\n\t\tself.autoencoder.compile(optimizer=Adam(learning_rate=0.0001, clipnorm=1.0), loss='binary_crossentropy', 
metrics=['accuracy'])\r\n\t\tprint(self.autoencoder.summary())", "def build_autoencoder(input_dim):\r\n input_layer = Input(shape=(input_dim, 1))\r\n enc = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(input_layer)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Flatten()(enc)\r\n enc = Dense(64)(enc)\r\n\r\n dec = Dense(200704)(enc)\r\n dec = Reshape((3136, 64))(dec)\r\n dec = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=1, kernel_size=2, padding='same', activation='relu')(dec)\r\n\r\n autoencoder = Model(input_layer, dec)\r\n autoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])\r\n autoencoder.summary()\r\n encoder = Model(input_layer, enc)\r\n return autoencoder, encoder", "def autoencoder(input_dim, encoding_dim):\n from keras.layers import Input, Dense\n from keras.models import Model\n \n input_layer = Input(shape=(input_dim, ))\n\n encoder = Dense(encoding_dim, activation=\"tanh\",\n activity_regularizer=regularizers.l1(10e-5))(input_layer)\n encoder = Dense(int(encoding_dim / 2), activation=\"relu\")(encoder)\n\n decoder = Dense(int(encoding_dim / 2), activation='tanh')(encoder)\n decoder = Dense(input_dim, activation='relu')(decoder)\n\n autoencoder = Model(inputs=input_layer, outputs=decoder)\n \n return autoencoder", "def _define_encoder(self):\n self.encoder = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=32, kernel_size=4, stride=2, padding=1), # B, 32, 32, 32\n nn.SELU(),\n nn.Conv2d(32, 32, 4, 2, 1), # B, 32, 16, 16\n nn.SELU(),\n nn.Conv2d(32, 64, 4, 2, 1), # B, 64, 8, 8\n nn.SELU(),\n nn.Conv2d(64, 64, 4, 2, 1), # B, 64, 4, 4\n nn.SELU(),\n nn.Conv2d(64, 256, 4, 1), # B, 256, 1, 1\n nn.SELU(),\n View((-1, 256 * 1 * 1)), # B, 256\n nn.Linear(256, self.encoding_shape * 2), # B, z_dim*2\n )", "def train_autoencoder(data, n_iters=10, batch_size=100):\n tqdm.write(f'Training a fully-convolutional autoencoder for {n_iters} iterations.')\n (trainx, trainy), (valx, valy), (testx, testy) = data\n train_size, val_size, test_size = trainx.shape[0], valx.shape[0], testx.shape[0]\n train_batches = (train_size - 1) // batch_size + 1\n val_batches = (val_size - 1) // batch_size + 1\n test_batches = (test_size - 1) // batch_size + 1\n\n model = Network()\n model.add_layer(ConvLayer(10, (2, 2), (2, 2), 1)) \\\n .add_layer(ConvLayer(10, (2, 2), (2, 2), 1)) \\\n .add_layer(ConvLayer(15, (1, 1), (2, 2), 1)) \\\n .add_layer(TransposedConvLayer(10, (1, 1), (2, 2), 1)) \\\n .add_layer(TransposedConvLayer(10, (2, 2), (2, 2), 1)) \\\n .add_layer(TransposedConvLayer(1, (2, 2), (2, 2), 1)) \\\n .add_layer(SSELayer())\n for i in range(1, n_iters + 1):\n train_order = np.random.permutation(train_size)\n bar = trange(train_batches, file=sys.stdout)\n for j in bar:\n cost = model.forward(trainx[train_order[j * batch_size: (j + 1) * batch_size]],\n trainx[train_order[j * batch_size: (j + 1) * batch_size]])\n bar.set_description(f'Curr 
squared error: {cost}')\n model.backward()\n model.adam_trainstep()\n errors = []\n for j in range(val_batches):\n errors.append(model.forward(valx[j * batch_size:(j + 1) * batch_size],\n valx[j * batch_size:(j + 1) * batch_size]))\n tqdm.write(f'Validation squared error: {np.mean(errors)}')\n tqdm.write('-------------------------------------------------------')\n\n errors = []\n for i in range(test_batches):\n errors.append(model.forward(testx[i * batch_size:(i + 1) * batch_size],\n testx[i * batch_size:(i + 1) * batch_size]))\n tqdm.write(f'Test squared error: {np.mean(errors)}')\n tqdm.write('-------------------------------------------------------')", "def _define_encoder(self):\n self.encoder = nn.Sequential(View((-1, 64 * 64 * 3)),\n nn.Linear(64 * 64 * 3, 5120, bias=False), nn.SELU(),\n nn.BatchNorm1d(5120),\n nn.Linear(5120, 2560, bias=False), nn.SELU(),\n nn.BatchNorm1d(2560),\n nn.Linear(2560, 512, bias=False), nn.SELU(),\n nn.BatchNorm1d(512),\n nn.Linear(512, 128, bias=False), nn.SELU(),\n nn.BatchNorm1d(128),\n nn.Linear(128, self.encoding_shape, bias=False), nn.SELU(),\n )", "def UNet_wiener(height, width, initial_psf, initial_K, \n encoding_cs=[24, 64, 128, 256, 512, 1024], \n center_cs=1024,\n decoding_cs=[512, 256, 128, 64, 24, 24], \n skip_connections=[True, True, True, True, True, True]):\n\n inputs = tf.keras.Input((height, width, 1))\n \n x = inputs\n \n # Multi-Wiener deconvolutions\n x = WienerDeconvolution(initial_psf, initial_K)(x)\n \n skips = []\n \n # Contracting path\n for c in encoding_cs:\n x, x_skip = encoder_block(x, c, kernel_size=3, padding='same', dilation_rate=1, pooling='average')\n skips.append(x_skip)\n\n skips = list(reversed(skips))\n \n # Center\n x = conv2d_block(x, center_cs, kernel_size=3, padding='same')\n \n # Expansive path\n for i, c in enumerate(decoding_cs):\n if skip_connections[i]:\n x = decoder_block_resize(x, skips[i], c, kernel_size=3, padding='same', dilation_rate=1)\n else:\n x = decoder_block(x, None, c, kernel_size=3, padding='same', dilation_rate=1)\n \n # Classify\n x = layers.Conv2D(filters=1, kernel_size=1, use_bias=True)(x)\n outputs = tf.squeeze(x, axis=3)\n \n model = tf.keras.Model(inputs=[inputs], outputs=[outputs])\n \n return model", "def create_autoencoder():\n\n model = create_model()\n model.compile(optimizer=Adam(), loss=binary_crossentropy)\n model.summary()\n model.save('autoencoder.h5')", "def build_liae_autoencoder(self, inputs):\n for side in (\"a\", \"b\"):\n encoder = self.networks[\"encoder\"].network(inputs[0])\n if side == \"a\":\n intermediate = Concatenate()([self.networks[\"intermediate\"].network(encoder),\n self.networks[\"intermediate\"].network(encoder)])\n else:\n intermediate = Concatenate()([self.networks[\"intermediate_b\"].network(encoder),\n self.networks[\"intermediate\"].network(encoder)])\n output = self.networks[\"decoder\"].network(intermediate)\n autoencoder = KerasModel(inputs, output)\n self.add_predictor(side, autoencoder)", "def _create_network(self):\n layer_dim = np.append(\n np.array(self.net_arch[\"n_input\"]), self.net_arch[\"hidden_dim\"]\n )\n\n self.z, self.y, self.p_X_chain = self._autoencoder(self.x, layer_dim)", "def compile_model(net):\n # Prepare Theano variables for inputs and targets\n target_var = T.ivector('targets')\n # target_var = T.vector('targets') # Theano requires an ivector\n\n # Create a loss expression for training, i.e., a scalar objective we want\n # to minimize (for our multi-class problem, it is the cross-entropy loss):\n prediction = 
lasagne.layers.get_output(net['output'])\n loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)\n loss = loss.mean()\n\n # We could add some weight decay as well here, see lasagne.regularization.\n # TODO(Mohit): Add L2/Dropout regularization on all weights.\n # reg_params = lasagne.layers.get_all_params(net['output'],regularizable=True)\n\n\n # Create update expressions for training, i.e., how to modify the\n # parameters at each training step. Here, we'll use Stochastic Gradient\n # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.\n params = lasagne.layers.get_all_params(net['output'], trainable=True)\n\n print(\"Computing updates ...\")\n # TODO(Mohit): Use Adagrad maybe?\n updates = lasagne.updates.nesterov_momentum(loss, params,\n learning_rate=0.01, momentum=0.9)\n\n # Create a loss expression for validation/testing. The crucial difference\n # here is that we do a deterministic forward pass through the network,\n # disabling dropout layers.\n test_prediction = lasagne.layers.get_output(net['output'],\n deterministic=True)\n test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,\n target_var)\n test_loss = test_loss.mean()\n # As a bonus, also create an expression for the classification accuracy:\n test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),\n dtype=theano.config.floatX)\n\n # Compile a function performing a training step on a mini-batch (by giving\n # the updates dictionary) and returning the corresponding training loss:\n print(\"Compiling functions ...\")\n train_fn = theano.function([net['input'].input_var, target_var],\n [loss, prediction], updates=updates,\n #mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True)\n )\n\n print('===== DEBUG INFO =====')\n #theano.printing.debugprint(train_fn)\n print('===== END =====')\n\n # Compile a second function computing the validation loss and accuracy:\n val_fn = theano.function([net['input'].input_var, target_var],\n [test_loss, test_acc, test_prediction])\n\n return train_fn, val_fn", "def theano_update(self, local_params):\n raise NotImplementedError()", "def separate_encoder():\n\n model = create_model(encoder_only=True)\n model.compile(optimizer=Adam(), loss=binary_crossentropy)\n model.load_weights('trained_autoencoder.h5', by_name=True, skip_mismatch=True)\n model.summary()\n model.save('trained_encoder.h5')", "def __init__(self, ksi=10., latent_dimension=2, activation=nn.ReLU(),\n dropout=.1, reconstruction_criterion=nn.MSELoss()):\n assert 0 <= dropout < 1\n\n super(WassersteinAutoEncoder, self).__init__()\n\n self.ksi = ksi\n self.hidden_dimension = latent_dimension\n\n self.reconstruction_criterion = reconstruction_criterion\n\n self.activation = activation\n\n self.dropout = nn.Dropout(dropout)\n\n self.max = nn.MaxPool2d(2)\n\n self.encoder_conv32 = nn.Conv2d(3, 64, kernel_size=3, padding=1)\n self.encoder_conv16 = nn.Conv2d(64, 128, kernel_size=3, padding=1)\n self.encoder_conv8 = nn.Conv2d(128, 256, kernel_size=3, padding=1)\n self.encoder_conv4 = nn.Conv2d(256, 256, kernel_size=3, padding=1)\n self.encoder_lin_100 = nn.Linear(256 * 4 * 4, 100)\n self.encoder_lin_l = nn.Linear(100, latent_dimension)\n\n self.upsample = UpSample(scale_factor=2)\n\n self.decoder_lin_l = nn.Linear(latent_dimension, 100)\n self.decoder_lin_100 = nn.Linear(100, 256 * 4 * 4)\n self.decoder_conv4 = nn.Conv2d(256, 256, kernel_size=3, padding=1)\n self.decoder_conv8 = nn.Conv2d(256, 128, kernel_size=3, padding=1)\n self.decoder_conv16 = 
nn.Conv2d(128, 64, kernel_size=3, padding=1)\n self.decoder_conv32 = nn.Conv2d(64, 3, kernel_size=3, padding=1)", "def build_encoder_bi(tparams, options):\n\t# word embedding (source)\n\tembedding = tensor.tensor3('embedding', dtype='float32')\n\tembeddingr = embedding[::-1]\n\tx_mask = tensor.matrix('x_mask', dtype='float32')\n\txr_mask = x_mask[::-1]\n\n\t# encoder\n\tproj = get_layer(options['encoder'])[1](tparams, embedding, options,\n\t\t\t\t\t\t\t\t\t\t\tprefix='encoder',\n\t\t\t\t\t\t\t\t\t\t\tmask=x_mask)\n\tprojr = get_layer(options['encoder'])[1](tparams, embeddingr, options,\n\t\t\t\t\t\t\t\t\t\t\t prefix='encoder_r',\n\t\t\t\t\t\t\t\t\t\t\t mask=xr_mask)\n\n\tctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)\n\n\treturn embedding, x_mask, ctx", "def autoencoder_train(discriminator_loss, generator_loss, reconstruction_loss, global_step):\n # Variables that affect learning rate.\n decay_steps = NUM_ITERATIONS_PER_DECAY\n\n # Decay the learning rate exponentially based on the number of steps.\n lr = tf.train.exponential_decay(\n INITIAL_LEARNING_RATE,\n global_step,\n decay_steps,\n LEARNING_RATE_DECAY_FACTOR,\n staircase=True\n )\n\n tf.scalar_summary('learning_rate', lr)\n\n # Generate moving averages of all losses and associated summaries.\n loss_averages_op = _add_loss_summaries(\n [discriminator_loss, generator_loss, reconstruction_loss]\n )\n # Get total weight decay\n total_weight_loss = tf.add_n(tf.get_collection(\"losses\"), name=\"total_weight_loss\")\n\n # Get losses for each optimizer\n G_loss = generator_loss + total_weight_loss\n R_loss = reconstruction_loss + total_weight_loss\n D_loss = discriminator_loss + total_weight_loss\n\n # separate out the G and D variables\n trainable_vars = tf.trainable_variables()\n D_vars = [var for var in trainable_vars if \"discriminator\" in var.name]\n G_vars = [var for var in trainable_vars if not \"discriminator\" in var.name]\n\n # Compute gradients.\n with tf.control_dependencies([loss_averages_op]):\n # optimizer for Discriminator\n D_opt = tf.train.AdamOptimizer(lr, beta1=.5, name=\"D_optimizer\")\n D_grads = D_opt.compute_gradients(D_loss, D_vars)\n\n # optimizer for Reconstruction and generator\n R_opt = tf.train.AdamOptimizer(lr, name=\"R_optimizer\")\n R_grads = R_opt.compute_gradients(R_loss+G_loss, G_vars)\n\n\n # Apply gradients.\n R_apply_gradient_op = R_opt.apply_gradients(R_grads, global_step=global_step)\n D_apply_gradient_op = D_opt.apply_gradients(D_grads, global_step=global_step)\n\n\n # Add histograms for trainable variables.\n for var in trainable_vars:\n tf.histogram_summary(var.op.name, var)\n\n # Add histograms for gradients for each optimizer\n for grads, name in [(D_grads, '/D_gradients'), (R_grads, '/R_gradients')]:\n for grad, var in grads:\n if grad is not None:\n tf.histogram_summary(var.op.name + name, grad)\n\n # Track the moving averages of the batch norm variables.\n variable_averages = tf.train.ExponentialMovingAverage(\n MOVING_AVERAGE_DECAY, global_step)\n\n # average the batch norm variables\n variables_to_average = list(\n set(\n [v for v in tf.all_variables() if \"_mean\" in v.name or \"_variance\" in v.name]\n )\n )\n variables_averages_op = variable_averages.apply(variables_to_average)\n\n # generate training op for reconstruction\n with tf.control_dependencies([R_apply_gradient_op, variables_averages_op]):\n R_train_op = tf.no_op(name='R_train')\n # generate training op for discriminator\n with tf.control_dependencies([D_apply_gradient_op, variables_averages_op]):\n D_train_op 
= tf.no_op(name='D_train')\n\n #return G_train_op, R_train_op, D_train_op\n return R_train_op, D_train_op" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests gotoField when there is a mismatch between the MCP and the guider.
def test_goto_field_cartridge_mismatch(self):
    sopTester.updateModel('guider', TestHelper.guiderState['bossLoaded'])
    mcpState = TestHelper.mcpState['boss_science']
    mcpState.update({'instrumentNum': [15]})
    sopTester.updateModel('mcp', mcpState)
    cmdState = self.actorState.gotoField
    cmdState.reinitialize(self.cmd)
    masterThread.goto_field(self.cmd, cmdState, myGlobals.actorState)
    self._check_cmd(0, 14, 0, 0, finish=True, didFail=True)
[ "def field_reached(self):\r\n try:\r\n if abs(self.actual_field - self.goto_field) < 0.0001:\r\n return True\r\n else:\r\n return False\r\n except Exception,e:\r\n log(\"Couldn't determine if field reached!\",e)\r\n return False", "def _get_expected_field(self, watch):", "def test_fields(self):\n m = self.get_msg()\n m.interface_msg = 1\n self.assertFalse(\"interface_msg\" in m.fields)", "def test_defining_only_or_defer_on_nonexistant_fields_fails(self):", "def _validator_target(self, field, value):\n if not REG.match(value):\n self._error(field, \"{} is not a valid target\".format(value))", "def verifyField(self, pv, field, reference):\n full_pv = pv + \".\" + field\n if (caget(full_pv) != reference):\n msg = \"ERROR: \" + full_pv + \" not equal to \" + str(reference)\n raise Exception(__name__ + msg)\n\n return self.__g.SUCCESS", "def test_mutate_field(self):\n # Test adding a field\n with self.assertRaises(ValueError):\n self.email.add_field('', '')\n\n self.email.add_field(self.key, self.regex)\n\n found_key = False\n found_regex = r''\n for field in self.email.fields:\n if field['key'] == self.key:\n found_key = True\n found_regex = field['regex']\n\n self.assertTrue(found_key)\n self.assertEqual(found_regex, self.regex)\n\n # Test getting a field\n with self.assertRaises(LookupError):\n self.email.get_field('')\n\n field = self.email.get_field(self.key)\n self.assertEqual(\n field, {'key': self.key, 'regex': self.regex, 'value': []})\n\n # Test removing a field\n with self.assertRaises(LookupError):\n self.email.remove_field('')\n\n self.email.remove_field(self.key)\n\n found_key = False\n found_regex = r''\n for field in self.email.fields:\n if field['key'] == self.key:\n found_key = True\n found_regex = field['regex']\n\n self.assertFalse(found_key)\n self.assertNotEqual(found_regex, self.regex)", "def test_set_field(self):\n pass", "def test_wrong_field(self):\n msg = self._create_message(self.adt_a01)\n unkn_field = Field(version='2.6')\n msg.pid.add(unkn_field)\n self.assertRaises(ValidationError, msg.validate, report_file=self.report_file)\n self._test_report_file('ERROR')", "def samefield(a, b):\n if a.field != b.field:\n raise RuntimeError(\"field mismatch\")\n return True", "def checkField(fieldset, text=True):\n if text:\n print \"\\nFieldset contains the following fields:\"\n for i in range(len(fieldset.fields)):\n print fieldset.fields[i].name\n\n ulon = fieldset.U.grid.lon\n ulat = fieldset.U.grid.lat\n udep = fieldset.U.grid.depth\n vlon = fieldset.V.grid.lon\n vlat = fieldset.V.grid.lat\n vdep = fieldset.V.grid.depth\n\n if text:\n if np.all(ulon == vlon):\n print \"longitudes are the same for U and V\"\n else:\n print \"longitudes are not the same for U and V. Note that not all functions will work as intended.\"\n if np.all(ulat == vlat):\n print \"latitudes are the same for U and V\"\n else:\n print \"latitudes are not the same for U and V. Note that not all functions will work as intended.\"\n if np.all(udep == vdep):\n print \"depths are the same for U and V\"\n else:\n print \"depths are not the same for U and V. 
Note that not all functions will work as intended.\"\n\n return np.all(ulon == vlon) and np.all(ulat == vlat) and np.all(udep == vdep)", "def check_fields(fields, name):\n msg=[]\n #check that all the rapp or resp are filled in order\n index=check_order_fields(fields)\n if index!=True:\n msg.append(\"Please select a \"+var_name_data.var_name[name+\"_\"+index]+\" first.\")\n #check that all the rapp or resp are different\n if check_duplicates(fields):\n msg.append(var_name_data.var_name[name]+\" must be different.\")\n return msg", "def check_field_name(field_name):\n\n error_checking.assert_is_string(field_name)\n if field_name in ALL_PREDICTOR_NAMES + ALL_TARGET_NAMES:\n return\n\n error_string = (\n '\\nField \"{0:s}\" is not valid predictor or target variable. Valid '\n 'options listed below:\\n{1:s}'\n ).format(field_name, str(ALL_PREDICTOR_NAMES + ALL_TARGET_NAMES))\n\n raise ValueError(error_string)", "def _is_ifgoto(self, words):\n if words[0] == 'if-goto':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_IFGOTO command.\".format(self._file_line))\n return True\n else:\n return False", "def test_02_visit_again(self):", "def test_get_critical_from_existing_fields(self):\n name = 'generic_field'\n opts = {'names': (name, ), 'alt_field': '', 'computed': False}\n expected_field = self.form.fields.get(name, None)\n actual_name, actual_field = self.form.get_critical_field(opts['names'])\n self.assertEqual(name, actual_name)\n self.assertEqual(expected_field, actual_field)", "def _check_field_type(self, pkt, index):\n my_idx = pkt.fields_desc.index(self)\n try:\n next_field = pkt.fields_desc[my_idx + index]\n if type(next_field) is not LEBitField and \\\n next_field.__class__.__base__ is not LEBitField:\n raise LEBitFieldSequenceException('field after field {} must '\n 'be of type LEBitField or '\n 'derived classes'.format(self.name)) # noqa: E501\n except IndexError:\n # no more fields -> error\n raise LEBitFieldSequenceException('Missing further LEBitField '\n 'based fields after field '\n '{} '.format(self.name))", "def assert_known_field(self, name):\n if not (name == self.id_field_name or self.is_known_field(name)):\n raise ChipsError(\"Unknown field in model %s [%s]\", self.__class__.__name__, name)", "def check_validity(self, field_name, value):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }