query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | negatives (sequence, lengths 19-20) | metadata (dict) |
---|---|---|---|
Get features (for regression) based on this bikedata's weather data | def get_weather_features(self):
    if self.weather_features is None:
        raise Exception("Weather features not made yet.")
        ### self.make_weather_features()
    else:
        return self.weather_features | [
"def load_weather():\n filename = (\n \"https://api.featurelabs.com/datasets/daily-min-temperatures.csv?library=evalml&version=\"\n + evalml.__version__\n )\n X, y = load_data(filename, index=None, target=\"Temp\")\n return X, y",
"def compute_features(daily):\n\n features = {\n 'min_temp': float(daily['low']['fahrenheit']),\n 'max_temp': float(daily['high']['fahrenheit']),\n 'max_wind': daily['maxwind']['mph'],\n 'mean_wind': daily['avewind']['mph'],\n 'mean_humidity': daily['avehumidity'],\n }\n\n mean_temp = (features['min_temp'] + features['max_temp'])/2.0\n features['temp_squared'] = mean_temp**2\n features['temp_humidity'] = features['min_temp']*features['mean_humidity']\n features['temp_wind'] = features['min_temp']*features['max_wind']\n return features",
"def features_dataset(self):\n df = self.get_prepared_df()\n features= df.drop('price', axis = 1)\n features = np.array(features) \n \n return features",
"def extractFeatures(self, datum):\n abstract",
"def featurizer(self):\n features = []\n for i in range(0, len(self.x_traj)):\n point_feature = [self.x_traj[i], self.y_traj[i], self.t_traj[i]]\n features.append(point_feature)\n return features",
"def extract_features(self):\n\t\tfeatures = []\n\t\tfor feature_name in self.feature_names:\n\t\t\tfeatures.append(self.data_df[feature_name].values.astype(float))\n\t\tlabels = self.data_df[self.label_name].values.astype(float)\n\t\tproxy_groups_tensor = None\n\t\ttrue_groups_tensor = None\n\t\tif self.uniform_groups:\n\t\t\tproxy_groups_tensor = torch.tensor(generate_proxy_groups_uniform(\n\t\t\t\t\tlen(self.data_df), min_group_frac=self.min_group_frac)).long()\n\t\t\ttrue_groups_tensor = torch.tensor(generate_proxy_groups_uniform(\n\t\t\t\t\tlen(self.data_df), min_group_frac=self.min_group_frac)).long()\n\t\telse:\n\t\t\tproxy_groups = []\n\t\t\ttrue_groups = []\n\t\t\tfor group_name in self.proxy_columns:\n\t\t\t\tproxy_groups.append(self.data_df[group_name].values.astype(float))\n\t\t\tfor group_name in self.protected_columns:\n\t\t\t\ttrue_groups.append(self.data_df[group_name].values.astype(float))\n\t\t\tproxy_groups_tensor = torch.tensor(proxy_groups).T.long()\n\t\t\ttrue_groups_tensor = torch.tensor(true_groups).long()\n\t\treturn torch.tensor(features).T.float(), torch.tensor(labels).reshape(\n\t\t\t\t(-1, 1)).squeeze(), proxy_groups_tensor, true_groups_tensor",
"def extract_features(self):\n if not os.path.isfile(self.feature_filename):\n self.limit_order_df = pd.read_excel(self.limit_order_filename)\n # index starting from the valid level\n self.delimiter_indices = self.get_delimiter_indices()\n print(\"len delimiter_indices\", len(self.delimiter_indices))\n # index at the end of every interval\n self.time_interval_indices = (np.array(self.get_time_interval_indices()) - 1).tolist() \n basic_set, timestamps, mid_prices = self.extract_basic_set()\n time_insensitive_set = self.extract_time_insensitive_set(basic_set)\n labels = self.get_mid_price_labels(mid_prices)\n self.save_feature_json(self.feature_filename, timestamps, basic_set,\n time_insensitive_set, labels, mid_prices)\n df = pd.read_json(self.feature_filename, orient=\"records\", lines=\"True\")\n timestamps = df[\"timestamps\"].tolist()\n basic_set = df[\"basic_set\"].tolist()\n time_insensitive_set = df[\"time_insensitive_set\"].tolist()\n labels = df[\"labels\"].tolist()\n return np.array(timestamps), np.array(basic_set), \\\n np.array(time_insensitive_set), np.array(labels)",
"def feature_list(self):\n return self._dataset[\"features\"]",
"def get_features(self):\n return self.features",
"def _produce_features(self, data, variables):\n length = data.shape[1]\n features = []\n # iterate though all the data points\n for i, row in enumerate(data):\n # bring them into the format the tsfresh wants them to compute the\n # features\n if i == 0:\n for_tsfresh = pd.DataFrame({'id': np.ones(length),\n 'time': np.arange(length),\n 'value' + str(i): row})\n else:\n temp_df = pd.DataFrame({'value' + str(i): row})\n for_tsfresh = pd.concat([for_tsfresh, temp_df], axis=1)\n\n X = extract_features(for_tsfresh, column_id='id',\n column_sort='time', n_jobs=16)\n features = np.zeros((data.shape[0], self.tsfresh_num_features))\n for i in range(0, data.shape[0]):\n for j in range(0, self.tsfresh_num_features):\n idx = int(X.columns[self.tsfresh_num_features * i + j].split(\n '__')[0].replace('value', ''))\n features[idx, j] = X[X.columns[\n self.tsfresh_num_features * i + j]]\n temp_feature_keys = [value.replace('value0__', '') for value in\n X.columns.values[:self.tsfresh_num_features]]\n feature_keys = self._construct_feature_keys(\n temp_feature_keys, variables)\n\n # bring the features from format [num_data*num_features, len_winter] to\n # [num_data, num_features*len_winter]\n length = len(variables)\n new_features = [\n np.concatenate((features[i - 2], features[i - 1],\n features[i]), axis=0)\n for i, feature in enumerate(features)\n if i % length == 2\n ]\n\n features = new_features[:]\n return feature_keys, np.asarray(features)",
"def features(self) -> _Features:\n return np.array(self._df[self.feature_names],\n dtype=np.float32)",
"def extract_features(\n traffic: Traffic,\n features: List[str],\n init_features: List[str] = [],\n) -> np.ndarray:\n X = np.stack(list(f.data[features].values.ravel() for f in traffic))\n\n if len(init_features) > 0:\n init_ = np.stack(\n list(f.data[init_features].iloc[0].values.ravel() for f in traffic)\n )\n X = np.concatenate((init_, X), axis=1)\n\n return X",
"def features(data):\n\n return data[:,1:]",
"def get_features(self):\n feat = Features()\n self.fill_blanks_timeseries()\n feat.withdrawals = self.withdrawals\n feat.wd_dups = self.wd_dups\n feat.nlri_ann = self.nlri_ann\n feat.imp_wd_spath = self.implicit_withdrawals_spath\n feat.imp_wd_dpath = self.implicit_withdrawals_dpath\n feat.announcements = self.announcements\n feat.news = self.new_announcements\n feat.dups = self.dup_announcements\n feat.nadas = self.new_ann_after_wd\n feat.flaps = self.flap_announcements\n feat.origin = self.count_origin\n feat.origin_changes = self.count_origin_changes\n feat.as_path_max = self.as_path_max_length\n feat.as_path_avg = self.as_path_avg_length\n feat.unique_as_path_max = self.unique_as_path_max\n feat.unique_as_path_avg = self.unique_as_path_avg\n feat.rare_ases_max = self.rare_ases_max\n feat.rare_ases_avg = self.rare_ases_avg\n feat.number_rare_ases = self.number_rare_ases\n feat.edit_distance_max = self.edit_distance_max\n feat.edit_distance_avg = self.edit_distance_avg\n feat.edit_distance_dict = self.edit_distance_dict\n feat.edit_distance_unique_dict = self.edit_distance_unique_dict\n feat.ann_to_shorter = self.ann_to_shorter\n feat.ann_to_longer = self.ann_to_longer\n feat.imp_wd = self.imp_wd\n feat.timestamp = dict(zip(self.announcements.keys(), [dt.datetime.fromtimestamp(ts*self.bin_size + self.first_ts) for ts in self.announcements.keys()]))\n feat.timestamp2 = dict(zip(self.announcements.keys(), [(ts*self.bin_size + self.first_ts) for ts in self.announcements.keys()]))\n feat.class_traffic = self.class_traffic\n return feat",
"def extract_features(batches):\n pass",
"def extract_features(time_series, window):\n if not tsd_common.is_standard_time_series(time_series, window):\n # add your report of this error here...\n\n return []\n\n # spilt time_series\n split_time_series = tsd_common.split_time_series(time_series, window)\n # nomalize time_series\n normalized_split_time_series = tsd_common.normalize_time_series(split_time_series)\n max_min_normalized_time_series = tsd_common.normalize_time_series_by_max_min(split_time_series)\n s_features = statistical_features.get_statistical_features(normalized_split_time_series[4])\n f_features = fitting_features.get_fitting_features(normalized_split_time_series)\n c_features = classification_features.get_classification_features(max_min_normalized_time_series)\n # combine features with types\n features = s_features + f_features + c_features\n return features",
"def extract_features(self, inputs):\n pass",
"def gen_features(self, X):",
"def get_features_function(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Publish response to kafka topic | def publish_response(class_label):
    client = KProducer(config=publisher_config)
    client.produce(class_label, PUBLISHER_TOPIC) | [
"def publish_to_kafka(response, args):\n for persuasion in response:\n if args.get(\"push_to_es\", \"false\") == \"true\":\n watson_kafka_response = KafkaServices.publish_to_watson_kafka(persuasion)\n logger.info(watson_kafka_response)\n if args.get(\"push_to_inflow\", \"false\") == \"true\":\n inflow_kafka_response = KafkaServices.publish_to_inflow_kafka(persuasion)\n logger.info(inflow_kafka_response)",
"def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n pass",
"async def kafka_produce(msg: ProducerMessage, topicname: str):\n\n await aioproducer.send(topicname, json.dumps(msg.dict()).encode(\"ascii\"))\n response = ProducerResponse(\n name=msg.name, message_id=msg.message_id, topic=topicname\n )\n logger.info(response)\n return response",
"def publish (self, topic, msg):\n #connect/publish/disconnect safer, but _very_ slow.\n #self.client.connect() \n self.client.publish(topic, json.dumps(msg))\n #self.client.disconnect()\n self.log(\n level = 20,\n msg = \"published to %s: %s\" % (topic, msg),\n log_mqtt=False\n )",
"def kafka_publish_message(self, message):\n self.kf_sender = self.kf_producer.send(self.kf_topic, value=message.encode('utf-8'));",
"def reply(self, topic, callback):\n \n msg = self.topics[topic].recv()\n rep = callback(msg)\n self.topics[topic].send(rep)",
"def kafka_publish(self, kafka_publish):\n\n self._kafka_publish = kafka_publish",
"def publish(self, topic, data):\n raise NotImplementedError('You must override this')",
"def publish(self, topic, payload):\n self.q.put((topic, payload))",
"def publish(self, topic:str, data:bytes) -> None:\n\t\tself.mqttClient.publish(topic, data)",
"def publish(self, message, topic):\n raise NotImplementedError",
"def publish(self, topic, value):\n msg = self.topics[topic]['msg']\n msg.data = value\n self.topics[topic]['publisher'].publish(msg)\n print(\"published \\t{} \\t{}\".format(topic, value))",
"def publish(self, topic, payload, json_encode=True):\n if json_encode:\n payload = json.dumps(payload)\n\n return self.mqtt.publish(topic, payload)",
"def publish_message(client, topic, message):\n\n # Publish message.\n print(\"- Publishing message... \", end=\"\")\n client.publish(topic, message)\n print(\"[OK]\")",
"def output_topic_callback(self, msg):\n with self.callback_lock:\n if self._time_received_input != 0:\n # Get actual time from ROS\n time_now = self.node.get_clock().now().nanoseconds\n\n # Compute the amount of time elapsed from receiving the last\n # message in the input topic\n measure = time_now - self._time_received_input\n\n # Transform from nanoseconds to milliseconds\n measure = measure / (1000 * 1000)\n\n publish_msg = Int64()\n publish_msg.data = int(measure)\n\n # Publish the measurement\n self._publisher.publish(publish_msg)\n\n self._time_received_input = 0",
"def publish(self, dev_id, msg):\n topic = \"$USR/DevRx/{}\".format(dev_id)\n msg_byte = bytearray()\n msg_byte.extend(map(ord, msg))\n print(msg_byte)\n self.client.publish(topic, msg_byte, qos=1)",
"def publish(self, topic, payload):\n complete_topic = \"{}/{}\".format(self._base_topic, topic)\n self._client.publish(complete_topic, payload, qos=2)\n logger.info(\"On topic %s published: %s\", complete_topic, payload)",
"def publish_message(producer_instance, topic_name, key, value):\n key_serializer = repr(key).encode()\n value_serializer = repr(value).encode()\n\n producer_instance.send(topic_name, key=key_serializer, value=value_serializer)\n producer_instance.flush()\n print('Message published successfully.')",
"def publish_and_wait(self, node, topic, data={}):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the ``BatchStats`` for a specific batch. | def get_batch_stats(self, batch):
    return self.batch_stats[batch] | [
"def build_batch_stats():\n\n # We use the moving mean as an estimate of the mean in order to perform\n # a more numerically stable calculation of the batch mean.\n # Copy for better stability.\n shift = tf.add(self._moving_mean, 0)\n counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(\n input_batch,\n reduction_indices,\n keep_dims=True,\n shift=shift,\n name=\"batch_norm_ss\")\n\n mean, variance = tf.nn.normalize_moments(counts,\n shifted_sum_x,\n shifted_sum_x2,\n shift,\n name=\"normalize_moments\")\n\n return mean, variance",
"def test_get_batch_statistics_request(self):\n self.trans_details.get_batch_statistics(\n batch_id = 123456,\n )",
"def build_batch_stats():\n\n # Copy for better stability.\n # We use the moving mean as an estimate of the mean in order to perform\n # a more numerically stable calculation of the batch mean.\n shift = tf.add(self._moving_mean, 0)\n counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(\n input_batch,\n reduction_indices,\n keep_dims=True,\n shift=shift,\n name=\"batch_norm_ss\")\n\n mean, variance = tf.nn.normalize_moments(counts,\n shifted_sum_x,\n shifted_sum_x2,\n shift,\n name=\"normalize_moments\")\n second_moment = variance + tf.square(mean)\n\n return mean, variance, second_moment",
"def build_batch_stats():\n shift = tf.add(self._moving_mean, 0)\n counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(\n input_batch,\n reduction_indices,\n keep_dims=True,\n shift=shift,\n name=\"batch_norm_ss\")\n\n mean, variance = tf.nn.normalize_moments(counts,\n shifted_sum_x,\n shifted_sum_x2,\n shift,\n name=\"normalize_moments\")\n\n return mean, variance",
"def get_plant_batch_stats(db_path: str) -> int:\n return get_db_count(db_path, 'batches.db', 'batches')",
"def batch_sampler(self):\n return self._pipes[0].batch_sampler",
"def update_stats(self, batch: th.Tensor) ->None:\n batch_mean = th.mean(batch, dim=0)\n batch_var = th.var(batch, dim=0, unbiased=False)\n batch_count = batch.shape[0]\n delta = batch_mean - self.running_mean\n tot_count = self.count + batch_count\n self.running_mean += delta * batch_count / tot_count\n self.running_var *= self.count\n self.running_var += batch_var * batch_count\n self.running_var += th.square(delta) * self.count * batch_count / tot_count\n self.running_var /= tot_count\n self.count += batch_count",
"def status_batch(\n client,\n batch_dir):\n # construct important paths\n batch_dir_name, batch_dir_subpaths = settings.BATCH_DIR_STRUCTURE\n batchid_file_name, _ = batch_dir_subpaths['batchid']\n incomplete_file_name = settings.INCOMPLETE_FILE_NAME\n\n batchid_file_path = os.path.join(\n batch_dir, batchid_file_name)\n incomplete_file_path = os.path.join(\n batch_dir, settings.INCOMPLETE_FILE_NAME)\n\n with open(batchid_file_path) as batchid_file:\n batch_id = batchid_file.read().strip()\n\n if not os.path.isfile(incomplete_file_path):\n raise ValueError(\n f'No {incomplete_file_name} file was found in {batch_dir}.'\n f' Please make sure that the directory is a batch that has'\n f' HITs waiting for review.')\n with open(incomplete_file_path) as incomplete_file:\n hit_ids = json.load(incomplete_file)['hit_ids']\n\n logger.info(f'Retrieving status for batch {batch_id}.')\n\n hit_count = 0\n hit_status_counts = collections.defaultdict(int)\n for hit_id in hit_ids:\n hit = client.get_hit(HITId=hit_id)\n hit_count += 1\n hit_status_counts[hit['HIT']['HITStatus']] += 1\n\n logger.info(f'Retrieving status of batch {batch_id} is complete.')\n\n return {\n 'batch_id': batch_id,\n 'hit_count': hit_count,\n 'hit_status_counts': hit_status_counts\n }",
"def print_batch_stats(self):\n\n # current epoch time, numfiles, numbytes, trans secs, status\n print(f\"TRANS_STATS_BATCH: {time.time()} {self.batchvals['transfer_name']} {self.batchvals['numfiles']} {self.filevals['totbytes']} {self.filevals['end_time'] - self.filevals['start_time']} {self.filevals['status']}\")",
"def batch_to_dict(batch: BatchTrial) -> Dict[str, Any]:\n return {\n \"__type\": batch.__class__.__name__,\n \"index\": batch.index,\n \"trial_type\": batch.trial_type,\n \"ttl_seconds\": batch.ttl_seconds,\n \"status\": batch.status,\n \"status_quo\": batch.status_quo,\n \"status_quo_weight_override\": batch._status_quo_weight_override,\n \"time_created\": batch.time_created,\n \"time_completed\": batch.time_completed,\n \"time_staged\": batch.time_staged,\n \"time_run_started\": batch.time_run_started,\n \"abandoned_reason\": batch.abandoned_reason,\n \"run_metadata\": batch.run_metadata,\n \"stop_metadata\": batch.stop_metadata,\n \"generator_run_structs\": batch.generator_run_structs,\n \"runner\": batch.runner,\n \"abandoned_arms_metadata\": batch._abandoned_arms_metadata,\n \"num_arms_created\": batch._num_arms_created,\n \"optimize_for_power\": batch.optimize_for_power,\n \"generation_step_index\": batch._generation_step_index,\n \"properties\": batch._properties,\n }",
"def get_stat(self, idx):\n return self._stats[idx]",
"def get(cls, id):\n\n batch = cls.query.get(id)\n if batch is None:\n raise InvalidBatchId\n return batch",
"def get_batch(self) -> pyglet.graphics.Batch:\n\n return self._batch",
"def stats_batchwise(data_source, batch_size=1024):\n mean = np.zeros(data_source.dshape, dtype=np.float32)\n mean_xs = np.zeros_like(mean, dtype=np.float32)\n\n for x, _ in iterate_batches(data_source, batch_size, expand=False):\n corr_fact = float(x.shape[0]) / batch_size\n mean += x.mean(axis=0) * corr_fact\n mean_xs += (x ** 2).mean(axis=0) * corr_fact\n\n corr_fact = float(batch_size) / data_source.n_data\n mean *= corr_fact\n mean_xs *= corr_fact\n std = np.sqrt(mean_xs - mean ** 2)\n\n return mean, std",
"def get_batch(self, name):\n batches = self._meta['sets'].get('batches', {})\n if batches.get(name):\n b = name\n elif batches.get(name):\n b = name\n else:\n raise KeyError('No Batch found named {}.'.format(name))\n return qp.Batch(self, b)",
"def getStat(self, stat):\n return self.battleDelegate.stats[stat]*self.battleDelegate.status.getStatMod(stat)",
"def sample_batch(self, batch_size):\n batch = []\n\n # Sample using prorities\n if(self.with_per):\n T = self.buffer.total() // batch_size\n #print(\"T is \",T)\n for i in range(batch_size):\n a, b = T * i, T * (i + 1)\n s = random.uniform(a, b)\n idx, error, data = self.buffer.get(s)\n #print(\"sampled data \", s, \" \",data, end=\" \")\n batch.append((*data, idx))\n\n idx = np.array([i[2] for i in batch])\n #idx in the offline buffer\n \n # Sample randomly from Buffer\n elif self.count < batch_size:\n idx = None\n batch = random.sample(self.buffer, self.count)\n else:\n idx = None\n batch = random.sample(self.buffer, batch_size)\n\n # Return a batch of experience\n names_batch = np.array([i[1] for i in batch])\n\n return names_batch, idx",
"def get_keyword_stats(self, adgroup_id, batch=False):\n path = '%s/keywordstats' % adgroup_id\n return self.make_request(path, 'GET', batch=batch)",
"def getSampleBatch(self):\n for batch in self:\n return batch"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convenience method that sums up all the sentences across all batches. | def get_total_sentences(self):
    # loop through batches and add up all their individual sentence counts
    total_sentences = 0
    for batch in self.batch_stats:
        total_sentences += self.batch_stats[batch].total_sentences
    return total_sentences | [
"def calculate_texts(self) -> None:\n texts = []\n for text in self.texts:\n paragraphs = list(filter(lambda x: x != \"\", text.split(\"\\n\\n\")))\n for paragraph in paragraphs:\n text = paragraph.replace(\"\\n\", \" \").strip()\n if len(text) > self.split_threshold_min:\n text_sentences = nlp(text)\n sentences = []\n for sentence in text_sentences.sents:\n current = sentence.text\n sentences.append(current.strip())\n texts.extend(sentences)\n else:\n texts.append(text)\n self.texts = list(set(texts))",
"def combine(self, corpus: Corpus):\n for sentence in corpus.sentences:\n self.addSentence(sentence)",
"def calculate_stats(sentences: List[Text], summaries: List[Text]):\n\n print(\"-------Calculating statistics-------\")\n validate_dataset(sentences, summaries)\n\n # average, maximum, and minimum number of words in original sentences and their summaries\n count_words = []\n for sentence in sentences:\n count_words.append(len(sentence.split()))\n count_words = np.array(count_words)\n print(\"Average word count of original sentence is\",\n \"{:.2f}\".format(np.mean(count_words)), \"( std:\",\n \"{:.2f}\".format(np.std(count_words)), \")\")\n print(\"Max word count is\", np.max(count_words))\n print(\"Min word count is\", np.min(count_words))\n\n count_words = []\n for summary in summaries:\n count_words.append(len(summary.split()))\n count_words = np.array(count_words)\n print(\"Average word count of shortened sentence is\",\n \"{:.2f}\".format(np.mean(count_words)), \"( std:\",\n \"{:.2f}\".format(np.std(count_words)), \")\")\n print(\"Max Length is\", np.max(count_words))\n print(\"Min Length is\", np.min(count_words))\n\n # average number of sentences in each input sentences and summaries; average number of words that are in the summary\n # but not in the corresponding original sentence\n count_diff = []\n count_sentences = []\n for i in range(len(sentences)):\n tokens_sentence = nltk.word_tokenize(sentences[i])\n tokens_sentence = [x.lower() for x in tokens_sentence]\n tokens_headline = nltk.word_tokenize(summaries[i])\n tokens_headline = [x.lower() for x in tokens_headline]\n count_diff.append(len(list(set(tokens_headline) - set(tokens_sentence))))\n count_sentences.append(\n np.max([\n tokens_sentence.count(\".\") + tokens_sentence.count(\"!\") +\n tokens_sentence.count(\"?\"), 1\n ]))\n count_sentences = np.array(count_sentences)\n count_diff = np.array(count_diff)\n print(\"On average, there are\", \"{:.2f}\".format(np.mean(count_sentences)),\n \"sentences in each original text\", \"( std:\",\n \"{:.2f}\".format(np.std(count_sentences)), \")\")\n print(\n \"On average, there are\", \"{:.2f}\".format(np.mean(count_diff)),\n \"words in each shortened sentence that are not in the original sentence.\",\n \"( std:\", \"{:.2f}\".format(np.std(count_diff)), \")\")\n\n # average compression ratio\n compression_ratio = []\n for i in range(len(sentences)):\n compression_ratio.append(\n len(summaries[i].split()) / len(sentences[i].split()))\n compression_ratio = np.array(compression_ratio)\n print(\"The average compression ratio is\",\n \"{:.2f}\".format(np.mean(compression_ratio)), \"( std:\",\n \"{:.2f}\".format(np.std(compression_ratio)), \")\")",
"def aggregate_sent(preds):\n count = 0\n out = {\n 'eFreq_n': 0, 'eFreq_y': 0, 'eRatio_n': 0, 'eRatio_y': 0,\n 'nFreq_n': 0, 'nFreq_y': 0, 'nRatio_n': 0, 'nRatio_y': 0,\n 'aFreq_n': 0, 'aFreq_y': 0, 'aRatio_n': 0, 'aRatio_y': 0,\n 'cFreq_n': 0, 'cFreq_y': 0, 'cRatio_n': 0, 'cRatio_y': 0,\n 'oFreq_n': 0, 'oFreq_y': 0, 'oRatio_n': 0, 'oRatio_y': 0,\n 'count': 0\n }\n for key, val in preds.items():\n word = val[1]\n out['count'] += word.count\n out['eFreq_n'] += word.eFreq_n\n out['eFreq_y'] += word.eFreq_y\n out['nFreq_n'] += word.nFreq_n\n out['nFreq_y'] += word.nFreq_y\n out['aFreq_n'] += word.aFreq_n\n out['aFreq_y'] += word.aFreq_y\n out['cFreq_n'] += word.cFreq_n\n out['cFreq_y'] += word.cFreq_y\n out['oFreq_n'] += word.oFreq_n\n out['oFreq_y'] += word.oFreq_y\n\n if out['count'] != 0:\n for r_key, f_key in zip(RATIO_KEYS, FREQ_KEYS):\n out[r_key] = float( out[f_key] / out['count'] )\n\n return out",
"def tokenize_all_v2(sentences, batch_size=100):\r\n\r\n session = requests.Session()\r\n res = []\r\n end_idx = 0\r\n end_at = len(sentences)\r\n i = 1\r\n while end_idx < end_at:\r\n begin_idx = end_idx\r\n end_idx = begin_idx + batch_size\r\n\r\n escaped = [sent.encode(\"unicode_escape\") for sent in sentences[begin_idx:end_idx]]\r\n concat_sentences = b\"\\n\".join(escaped)\r\n result = api_call(session, concat_sentences)\r\n\r\n if len(result[\"sentences\"]) != len(sentences[begin_idx:end_idx]):\r\n print(\"something went wrong\")\r\n\r\n for res_sent in result['sentences']:\r\n sent = []\r\n for token in res_sent['tokens']:\r\n tok = token['originalText'] or token['word']\r\n sent.append(tok)\r\n res.append(sent)\r\n\r\n print(\"Batch {} done.\".format(i))\r\n i += 1\r\n\r\n return res",
"def normalizeTextSentences(self):\n #Get cluster per language\n lang2clusterDict = self._getLanguage2ClustersDict()\n\n bEmpty = True\n\n #Normalize text per language\n for languageId, clusterList in list(lang2clusterDict.items()):\n #Read all cluster texts\n textList = []\n for textCluster in clusterList:\n textList.append(textCluster.getTextSentence())\n\n #Join all text\n allText = self.MERGECLUSTERSEP.join(textList)\n\n #Normalize text\n allText = self.regexSubstitutionFormula.apply(allText, languageId)\n sentencesList = allText.split(self.MERGECLUSTERSEP)\n\n #Add and set language id\n self._addSentences(sentencesList, languageId, bEmpty)\n\n if bEmpty:\n bEmpty = False",
"def bag_of_words(batch, TEXT):\n V = len(TEXT.vocab)\n X = torch.zeros(batch.text.size(0), V)\n ones = torch.ones(batch.text.size(1))\n for b in range(batch.text.size(0)):\n X[b].index_add_(0, batch.text.data[b], ones)\n X[b][TEXT.vocab.stoi['<pad>']] = 0\n X = Variable(X, requires_grad=False)\n return X",
"def yield_batches(self, texts):\n batch = []\n for text in self._iter_texts(texts):\n batch.append(text)\n if len(batch) == self.batch_size:\n yield batch\n batch = []\n\n if batch:\n yield batch",
"def summarize(self, text, n):\n sents = sent_tokenize(text)\n assert n <= len(sents)\n word_sent = [word_tokenize(s.lower()) for s in sents]\n \n self._freq = self._compute_frequencies(word_sent)\n \n ranking = defaultdict(int)\n for i,sent in enumerate(word_sent):\n for w in sent:\n if w in self._freq:\n ranking[i] += self._freq[w]\n sents_idx = self._rank(ranking, n) \n return [sents[j] for j in sents_idx]",
"def compute_on_text(request: AnalysisRequest):\n text = request.text\n sents = text.split('\\n\\n')\n doc = nlp(text)\n sentence_propaganda = []\n sentences = list(doc.sents)\n print('number of sentences', len(sentences))\n print('number of sentences (newlines delimited)', len(sents))\n for sent in sents:\n sent = nlp(sent)\n sent_res = compute_on_sentence(sent)\n if sent_res:\n sentence_propaganda.append(sent_res)\n else:\n raise ValueError('Something is wrong')\n # more than 512 tokens\n for s in sent.sents:\n # try with sentencizer\n sent_res = compute_on_sentence(sent, trim=True)\n sentence_propaganda.append(sent_res)\n #\n return {\n # 'article_key'\n 'content': '\\n<br/>'.join(f'<div>{s}</div>' for s in sentences), # TODO span and sup annotations of techniques\n 'sentence_propaganda': sentence_propaganda,\n 'success': True\n }",
"def summarize(self, text, n):\r\n\t\tsentences = sent_tokenize(text)\r\n\t\tself.validate_summary_length(sentences, n)\r\n\t\tpreprText = self.preprocess_document(text)\r\n\t\tsentenceScores = self.text_rank(preprText)\r\n\t\treturn self.summary_from_sentence_scores(sentences, sentenceScores, n)",
"def summarize(self, doc_paths):\n\n\t\tsentences = self.preprocessor.sent_seg(doc_paths[0])\n\n\t\t# If the document already has word count at most the limit, than return\n\t\t# the whole document text as the summary.\n\t\tif sum(count_words(sent) for sent in sentences) <= self.limit:\n\t\t\treturn ' '.join(sentences)\n\n\t\tsummary, i = [], 0\n\t\t# Add sentences to the summary until word limit is exceeded.\n\t\twhile count_words(summary) <= self.limit:\n\t\t\tsummary.append(sentences[i])\n\t\t\ti += 1\n\n\t\t# Exclude the last sentence in the summary so the resulting summary is\n\t\t# in the word limit.\n\t\treturn ' '.join(summary[:-1])",
"def summarize(self, text, text_index, n):\r\n self.text_index = text_index\r\n sentences = sent_tokenize(text)\r\n if len(sentences) < n:\r\n raise ValueError(\"Cannot extract %s sentences from text with %s sentences\" % \\\r\n (n, len(sentences)))\r\n preprText = self.preprocess_document(text)\r\n words = self.word_tokenize_preprocessed(preprText)\r\n tfIdfTable = self._create_tf_idf_table(words)\r\n # print({k: v for k, v in sorted(freqTable.items(), key=lambda item: item[1], reverse=True)})\r\n sentenceScores = np.array(self._score_sentences(sentences, tfIdfTable))\r\n nBestIndexes = np.argpartition(sentenceScores, -n)[-n:] # indexes of sentences with n best scores\r\n nBestIndexes = sorted(nBestIndexes)\r\n\r\n summary = ''\r\n for index in nBestIndexes:\r\n summary += sentences[index] + \" \"\r\n\r\n self.text_index = None # reset text_index once completed\r\n return summary[:-1] # remove last space\r",
"def sentence_add_loop(vectors, sentences, S, B, L):\n total_length = sum([len(sent.split()) for sent in S])\n exceeded_length_count = 0\n\n for i in range(0, vectors.shape[0]):\n r = ortho_proj_vec(vectors, B)\n #print(DELIMITER)\n #print(\"Furthest sentence: \" + sentences[r])\n #print(\"Total words: {}\".format(total_length))\n #print(\"Length of sentence to add: {}\".format(len(sentences[r].split())))\n\n new_sentence_length = len(sentences[r].split())\n\n # Todo - norm may be zero if sentence only had stopwords\n norm = scipy.sparse.linalg.norm(vectors[r])\n\n if total_length + new_sentence_length <= L and norm != 0:\n b_r = np.divide(vectors[r], norm)\n\n S.add(sentences[r])\n B.add(b_r)\n\n total_length += new_sentence_length\n # Reset the exceeded_length_count\n\n exceeded_length_count = 0\n # Prevent us from adding this sentence again\n # Todo - original authors had this same problem?\n vectors[r] = np.zeros(vectors[r].shape)\n\n else:\n #print(\"Sentence too long to add to set, or sentence consists only of stopwords\")\n # Temporary hack to prevent us from choosing this vector again:\n vectors[r] = np.zeros(vectors[r].shape)\n\n exceeded_length_count += 1\n if exceeded_length_count >= 15:\n break\n\n #print(\"Final sentence count: \" + str(len(S)))\n return [str(e) for e in S]",
"def computeBagOfWordsForItems(self, corpus):\n\n for itemScore in self.itemScores:\n itemScore.sentence = corpus.doc2bow(\n self.preprocess(str(itemScore)))",
"def get_words_per_sentence(self, text: str) -> float:\n return np.mean(list(map(len, self._text_2_list_of_list_of_strings(text))))",
"def hf_summarizer(sentences):\n\n max_chunk = 512\n current_chunk = 0\n chunks = []\n\n for sentence in sentences:\n if len(chunks) == current_chunk +1 :\n if len(chunks[current_chunk]) + len(sentence.split()) <= max_chunk:\n chunks[current_chunk].extend(sentence.split())\n else:\n current_chunk += 1\n chunks.append(sentence.split())\n else:\n print(current_chunk)\n chunks.append(sentence.split())\n\n # print(chunks[0])\n\n for chunk_id in range(len(chunks)):\n chunks[chunk_id] = ' '.join(chunks[chunk_id])\n\n #print(len(chunks[0].split()))\n\n summarizer = pipeline(\"summarization\")\n summarized = summarizer(chunks, min_length = 50, max_length = 100, do_sample=False)\n\n text = ''.join([sum[\"summary_text\"] for sum in summarized])\n\n with open(\"static/files/book.txt\", \"w\",encoding=\"utf-8\") as f:\n f.write(text)\n \n return summarized",
"def sum_all_emissions(self):\n sum = Emission()\n for emission in self.emissions_by_step:\n sum += emission\n return sum",
"def train(self, corpus):\n for sentence in corpus.corpus:\n for datum in sentence.data: \n self.unigramCounts[datum.word] += 1\n self.totalCount += 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds ``documents`` to the document inventory, writing to disk in batches of 500,000. | def add_documents(self, documents):
    # flag for StopIteration exceptions
    more_documents = True
    # loop while there are still documents in the iterator
    while more_documents:
        # increment batch number
        batch = len(self.batch_stats) + 1
        # count sentences
        sentences_count = 0
        # create temporary batch data file in the version directory
        batch_file = os.path.join(self.file_base.get_version_path(self.version), "data.jl.gz.temp")
        # try to read the next batch of files, catch exception and stop if there are no more
        try:
            # get next document before opening the file just to make sure it's there
            document = documents.next()
            # open the data file
            with gzip.open(batch_file, "wb") as outfile:
                # loop through DOCUMENT_BATCH_SIZE documents
                for i in range(DocumentDatabase.DOCUMENT_BATCH_SIZE):
                    # count sentences in document
                    for paragraph in document["paragraphs"]:
                        sentences_count += len(paragraph["sentences"])
                    # write JSON to file one line at a time
                    outfile.write("%s\n" % json.dumps(document))
                    # if we are not done with this batch, retrieve the next document
                    if i < DocumentDatabase.DOCUMENT_BATCH_SIZE - 1:
                        document = documents.next()
        except StopIteration:
            # the end of the documents stream, set the flag to False
            more_documents = False
        # make sure the batch isn't empty
        if sentences_count > 0:
            # create the new batch in the file system
            self.version_batches.create_latest_version()
            # add the stats to the statistics hash
            self.batch_stats[batch] = BatchStats(sentences_count)
            # write the batch statistics to file
            with codecs.open(self._get_batch_stat_file(batch), "wb", "utf-8") as outfile:
                # write the JSON representation for the stats
                outfile.write(json.dumps(self.batch_stats[batch].to_json()))
            # move the temp data file to the correct location inside the version folder
            os.rename(batch_file, self._get_batch_file(batch)) | [
"def upload(self, documents: List[Document], vectorise_func) -> None:\n\n # Add doc_store to documents\n for d in documents:\n d.doc_store = self\n # Check ID uniqueness\n check_duplicate_documents(documents)\n # Check type consistency\n check_document_types(documents)\n # Batching\n batches = batch_items(documents)\n\n # Update document class conveniently\n if issubclass(type(documents[0]), ChunkedDocument):\n self._doc_class = ChunkedDocument\n\n for batch in batches:\n vectorise_func(batch, self)\n self.documents += batch",
"def insert_documents(connection: DBConnection, documents: Sequence[Document]) -> None:\n max_ = len(documents)\n current = 0\n print() # print an extra line, because we will delete lines with printing \\r\n for chunk in chunks(documents):\n connection.execute(\"BEGIN TRANSACTION\")\n for doc in chunk:\n # python doesn't support prepared statements, but instead has a builtin sql cache\n connection.execute(\n \"INSERT INTO docs(did, title, url) VALUES (?, ?, ?)\", doc.convert_to_tuple())\n current += 1\n print(f\"\\r[{current}/{max_}] doc done\", end='')\n connection.execute(\"COMMIT\")",
"def add_document_bulk(self, docs, commit=True, writer=None):\n if writer is None:\n writer = self.index.writer()\n with safe_write(writer, commit):\n for doc in docs:\n writer.add_document(**doc)",
"def add_documents(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)",
"async def put_documents(self, collection, documents):\n await self.ensure_collection(collection)\n try:\n if SOLR_COMMIT_WITHIN:\n params = {'commitWithin': SOLR_COMMIT_WITHIN}\n else:\n params = {'commit': 'true'}\n await self.post(\n '/v2/collections/{}/update'.format(collection),\n params=params, json_data=documents\n )\n logger.info('Successfully indexed {} documents to collection {}'\n .format(len(documents), collection))\n except SolrError:\n logger.warning('Failed to put {} documents to collection {}'\n .format(len(documents), collection))\n raise",
"def add(self, document_list):\n self.lock.acquire()\n for doc in document_list:\n heapq.heappush(self.documents, doc)\n self.index.add(doc)\n self.clustering.add(doc)\n self.lock.release()",
"def _persist_documents(\n client: firestore.Client,\n collection_name: str,\n documents: List[Dict],\n cleanup: Optional[Callable] = None,\n):\n for block in documents:\n col_ref = client.collection(collection_name)\n document_id: str = block[\"data\"][\"name\"]\n doc_ref = col_ref.document(document_id)\n doc_ref.set(block[\"data\"])\n if cleanup is not None:\n cleanup(doc_ref.delete)\n\n if \"subcollections\" in block:\n for subcollection_name, inner_blocks in block[\"subcollections\"].items():\n _persist_documents(\n client,\n f\"{collection_name}/{document_id}/{subcollection_name}\",\n inner_blocks,\n )",
"def insert_many(self, documents: Iterable[dict]) -> None:\n for i, document in enumerate(documents):\n if isinstance(document, dict):\n self._store_document(document)\n else:\n raise TypeError(\n f\"The document at index {i} was not a dictionary. All documents must be dictionaries.\"\n )\n self._dump()",
"def add_doc(document_id, total_count):\n global filecount\n if wf.size() + total_count > 1000:\n wf.write()\n wf.work_file(dirpath + \"results\" + str(\"{:0>4d}\").format(filecount) + \".txt\")\n filecount += 1\n wf.add_doc(document_id, total_count)",
"def documents(self, documents):\n\n self._documents = documents",
"def createDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # this is create method, no update allowed\n if \"_rev\" in document: del document[\"_rev\"]\n if \"_deleted\" in document: del document[\"_deleted\"]\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()",
"def add_docs(self, *args):\n self.documents.extend(args)",
"def scrape_documents(self, batch=10):\n cache = []\n statement = select(\n Document\n ).where(\n or_(Document.text == None, Document.text == \"\")\n ).where(\n Document.collected_by == self.name\n )\n docs = self.db.exec(statement)\n print(\"Scraping Document Text:\")\n for i, doc in enumerate(docs):\n text = self._scrape_document(doc.href).strip()\n doc.text = text\n cache.append(doc)\n if i % batch:\n self.db.create_all(cache)\n cache = []\n print(\"Total Documents\", i, \"//\", len(docs), end='\\r', flush=True)\n print(\"Total Documents\", i, \"//\", len(docs))",
"async def bulk_insert(self, documents, alias=None):\n\n is_valid = True\n docs_to_insert = []\n\n for document_index, document in enumerate(documents):\n self.update_field_on_save_values(document, document._id is not None)\n try:\n is_valid = is_valid and self.validate_document(document)\n except Exception:\n err = sys.exc_info()[1]\n raise ValueError(\n \"Validation for document %d in the documents you are saving failed with: %s\"\n % (document_index, str(err))\n )\n\n if not is_valid:\n return\n\n docs_to_insert.append(document.to_son())\n\n if not is_valid:\n return\n\n doc_ids = await self.coll(alias).insert(docs_to_insert)\n\n for object_index, object_id in enumerate(doc_ids):\n documents[object_index]._id = object_id\n\n return documents",
"def finish_documents():\n\n doc_ids = json.loads(request.form['doc_ids'])\n\n for docid in doc_ids:\n\n document = Document.query.filter_by(id=docid).first_or_404()\n\n document.status = \"OK\"\n\n db.session.add(document)\n\n db.session.commit()",
"def store_documents(self, partner, documents):\n for docs in documents:\n if docs and docs['type'] in DOCS_TYPES:\n document = DocumentDetails()\n document.partner_id = partner\n document.type = DOCS_TYPES[docs['type']]\n document.file_name = docs['file']\n document.file_data = os.path.join('documents/partner_doc', docs['file'])\n document.save()",
"def updateDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # these are required params\n if \"_id\" not in document or \"_rev\" not in document:\n raise Exception(\"Both _id & _rev fields are required!\")\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()",
"def save(self, batch_of_documents, destination=\"exports\", *args, **kwargs):\n raise NotImplementedError",
"def index(self, documents, batchsize=500):\n\n ids, dimensions, batches, stream = [], None, 0, None\n\n # Convert all documents to embedding arrays, stream embeddings to disk to control memory usage\n with tempfile.NamedTemporaryFile(mode=\"wb\", suffix=\".npy\", delete=False) as output:\n stream = output.name\n batch = []\n for document in documents:\n batch.append(document)\n\n if len(batch) == batchsize:\n # Convert batch to embeddings\n uids, dimensions = self.batch(batch, output)\n ids.extend(uids)\n batches += 1\n\n batch = []\n\n # Final batch\n if batch:\n uids, dimensions = self.batch(batch, output)\n ids.extend(uids)\n batches += 1\n\n return (ids, dimensions, batches, stream)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads a document database with the specified version from the directory. | def load(db_path="data/documents/trigrams", version=None):
    # create database at the desired path and with the desired version
    db = DocumentDatabase(db_path, version)
    # loop through batches
    for batch in db._get_batches():
        # get the path to the stats file
        stats_file = db._get_batch_stat_file(batch)
        # load the stats
        stats_json = json.loads(codecs.open(stats_file, "rb", "utf-8").read())
        # save in the batch statistics hash
        db.batch_stats[batch] = BatchStats(stats_json["total_sentences"])
    # return the database
    return db | [
"def load_db():\n return",
"def db_load(file_path=Path(\"./song_database.pkl\")):\n with open(file_path, mode=\"rb\") as database:\n return pickle.load(database)",
"def load_database(db_file):\n f = Path(db_file)\n if f.is_file():\n filehandle = open(db_file, 'rb')\n db = pickle.load(filehandle)\n filehandle.close()\n else:\n db = Database()\n return db",
"def load_file(filename, database_format=None):\n\n with open(filename, 'r') as fin:\n return load(fin, database_format)",
"def loadComicDB(self):\n \n dbPathname = self.filePrefix + \"/resource/db.json\"\n if not os.path.exists(dbPathname):\n # if the database did not already exist, create an empty one \n self.db = []\n else:\n self.db = json.load(open(dbPathname, \"r\"))",
"def load(self, path):\n\n # Load an existing terms database\n self.connection = self.connect(path)\n self.cursor = self.connection.cursor()\n self.path = path\n\n # Load document attributes\n self.ids, self.deletes, self.lengths = [], [], array(\"q\")\n\n self.cursor.execute(Terms.SELECT_DOCUMENTS)\n for indexid, uid, deleted, length in self.cursor:\n # Index id - id\n self.ids.append(uid)\n\n # Deleted flag\n if deleted:\n self.deletes.append(indexid)\n\n # Index id - length\n self.lengths.append(length)\n\n # Cast ids to int if every id is an integer\n if all(uid.isdigit() for uid in self.ids):\n self.ids = [int(uid) for uid in self.ids]\n\n # Clear cache\n self.weights.cache_clear()",
"def openDatabase(self, parentPath: unicode, itemName: unicode, version: int, minChangeDataVer: int) -> db.buffers.ManagedBufferFileHandle:\n ...",
"def openDB(self, dbpath, updateOnIdle=True):\n\t\tself.openDBFile( last_file_in_directory(dbpath, \"*sqlite\"), updateOnIdle )",
"def db_loader(path):\n\n if os.path.isfile(path):\n f = open(path, 'r')\n db = json.load(f)\n else:\n db = {}\n return db",
"def get_db():\n db = load()\n return db",
"def load_database(self):\n self.database = json.load(open(self.json_file, \"r\"))",
"def attach_db(self, path, schema_version=1):\n\n error = ffi.new(\"char *\")\n path = _Data(path)\n if not libllbuild.llb_buildengine_attach_db(\n self._engine, path.key, schema_version, error):\n raise IOError(\"unable to attach database; %r\" % (\n ffi.string(error),))",
"def reload_database(self):\n self.db = self.load_database()",
"def load_db(dbpath):\n\n if not os.path.exists(dbpath):\n print(\"Cannot find %s directory, rerun from MacInfoPkg directory!\" % dbpath)\n sys.exit(1)\n\n db = []\n\n for root, dirs, files in os.walk(dbpath):\n for file in fnmatch.filter(files, '*.yaml'):\n path = os.path.join(root, file)\n with open(path, 'r') as fh:\n try:\n db.append(yaml.safe_load(fh))\n except yaml.YAMLError as e:\n print(\"Failed to parse file %s - %s\" % (path, e))\n sys.exit(1)\n\n if len(db) == 0:\n print(\"Empty database!\")\n sys.exit(1)\n\n # Sorting is required for fast lookup.\n return sorted(db, key=operator.itemgetter('SystemProductName'))",
"def _getLoadDB(self, fileName):\n if fileName is not None:\n # only yield 1 database if the file name is specified\n if self._db is not None and fileName == self._db._fileName:\n yield self._db\n elif os.path.exists(fileName):\n yield Database3(fileName, \"r\")\n else:\n if self._db is not None:\n yield self._db\n if os.path.exists(self.cs[\"reloadDBName\"]):\n yield Database3(self.cs[\"reloadDBName\"], \"r\")",
"def database(db):\n if type(db) is str:\n # Database name\n if db.endswith('.py'):\n # Python source, exec it\n globals = {}\n exec(compile(open(db).read(), db, 'exec'), globals)\n if 'DB' in globals:\n db = globals['DB']\n else:\n storage = globals['Storage']\n from ZODB.DB import DB\n db = DB(storage, cache_size=4000)\n elif db.endswith(\".fs\"):\n from ZODB.DB import DB\n from ZODB.FileStorage import FileStorage\n storage = FileStorage(db)\n db = DB(storage, cache_size=4000)\n\n # The following will fail unless the application has been configured.\n from zope.event import notify\n notify(zope.processlifetime.DatabaseOpened(db))\n\n return db",
"def load(self, path, old=False):\n path = os.path.abspath(path)\n logging.info('Loading functional group frequency database from %s...' % path)\n\n if old:\n dict_path = os.path.join(path, 'Dictionary.txt')\n tree_path = os.path.join(path, 'Tree.txt')\n libr_path = os.path.join(path, 'Library.txt')\n self.__loadDatabase(dict_path, tree_path, libr_path)\n\n logging.info('')",
"def db_file():\n return abspath('vmchecker.db')",
"def db_version(self, db_version):\n self._db_version = db_version"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the latest version of the documents inventory at the specified path. | def get_latest_version(db_path):
    # create a file system and return latest version
    return VersionedFile(db_path).get_latest_version() | [
"def current_version(self):\n try:\n return self.versions.latest()\n except DocumentVersion.DoesNotExist:\n return None",
"def get_current_version(file_path):\n\n raise RuntimeError('get_current_version function not implemented in Artella Abstract API!')",
"def get_latest_file(self):\n cur = self.current_version\n if cur:\n res = cur.files.order_by('-created')\n if res:\n return res[0]",
"def _get_latest(self, title):\n document = self.document_repository.get_by_title(title)\n try:\n revision = document.get_latest_revision()\n document.revisions = [revision]\n except ValueError:\n document.revisions = []\n document = self.document_schema.dump(document)\n return document",
"async def get_local_version(self, path):\n return_value = ''\n if os.path.isfile(path):\n with open(path, 'r') as local:\n ret = re.compile(\n r\"^\\b(VERSION|__version__)\\s*=\\s*['\\\"](.*)['\\\"]\")\n for line in local.readlines():\n matcher = ret.match(line)\n if matcher:\n return_value = str(matcher.group(2))\n return return_value",
"def check_version(self, path=None):\n versions_list = os.listdir(self.file_directory+\"\\\\versions\")\n versions = []\n for file in versions_list:\n remove_ext = file.replace(\".\", \"_\")\n split_version = remove_ext.split(\"_\")\n for item in split_version:\n if item.isdigit():\n versions.append(item)\n if versions:\n self.cur_version = max(versions)\n #print \"finished with check_version, version is %s\"%self.cur_version\n return",
"def get_version(path=VERSION_PATH):\n namespace = {}\n exec(read(path), namespace)\n return namespace['get_version'](short=True)",
"def getRowFromPath(self, path):\n query = \"SELECT resource_id, etag, title FROM docs WHERE local_path = ?\"\n res = self.db.execute(query, (path,)).fetchone()\n return res",
"def find_latest_cache_version(self, path):\n\n # An eventual list of cache version ints.\n cache_versions = []\n\n # Convert all cache dirs to ints.\n for version_dir in os.listdir(path):\n try:\n version_num = int(version_dir)\n except (TypeError, ValueError):\n # This was probably .DS_Store, or some other non-relevant dir.\n continue\n cache_versions.append(version_num)\n\n if not cache_versions:\n return None\n\n cache_versions.sort()\n return str(cache_versions[-1])",
"def archive_info_from_iso(path):\n log.debug(\"Reading archive version from %r\", path)\n\n cmd = \" \".join(\n [\n \"isoinfo\",\n \"-x\",\n r\"/PRODUCT.TXT\\;1\",\n \"-i\",\n '\"{}\"'.format(path),\n ]\n )\n result = __salt__[\"cmd.run_all\"](cmd=cmd)\n log.debug(\"Result: %r\", result)\n\n if result[\"retcode\"] != 0:\n raise CommandExecutionError(\n \"Failed to run isoinfo: {}\".format(result.get(\"stderr\", result[\"stdout\"]))\n )\n\n return _get_archive_info(result[\"stdout\"])",
"def get_latest_version(file_path, check_validity=True):\n\n raise RuntimeError('get_latest_version function not implemented in Artella Abstract API!')",
"def latest_version(self):\r\n return self.versions.get(latest=True)",
"def _get_next_version_info(self, path, item):\n\n if not path:\n self.logger.debug(\"Path is None. Can not determine version info.\")\n return None, None\n\n publisher = self.parent\n\n # if the item has a known work file template, see if the path\n # matches. if not, warn the user and provide a way to save the file to\n # a different path\n work_template = item.properties.get(\"work_template\")\n work_fields = None\n\n if work_template:\n if work_template.validate(path):\n work_fields = work_template.get_fields(path)\n\n # if we have template and fields, use them to determine the version info\n if work_fields and \"version\" in work_fields:\n\n # template matched. bump version number and re-apply to the template\n work_fields[\"version\"] += 1\n next_version_path = work_template.apply_fields(work_fields)\n version = work_fields[\"version\"]\n\n # fall back to the \"zero config\" logic\n else:\n next_version_path = publisher.util.get_next_version_path(path)\n cur_version = publisher.util.get_version_number(path)\n if cur_version is not None:\n version = cur_version + 1\n else:\n version = None\n\n return next_version_path, version",
"def __queryLatest(versionsPath, versionPattern):\n version = 0\n patternParts = __splitVersionPattern(versionPattern)\n versionRegEx = \"^\"+patternParts['prefix']+\"[0-9]{\"+str(len(patternParts['padding']))+\",}\"+patternParts['suffix']+\"$\"\n\n # finding the latest version\n if os.path.exists(versionsPath):\n for directory in os.listdir(versionsPath):\n if re.match(versionRegEx, directory):\n version = max(\n int(verNumber(directory, versionPattern)),\n version\n )\n return version",
"def get_file(self, path):\n return self.client._perform_raw(\n \"GET\", \"/projects/%s/managedfolders/%s/contents/%s\" % (self.project_key, self.odb_id, utils.quote(path)))",
"def get_latest_revison():\n # Now get the latest revision\n db = SQLgetConnection()\n c = db.cursor()\n query = \"SELECT (revision) FROM Revisions ORDER BY revision DESC LIMIT 1;\" \n c.execute(query)\n rows = c.fetchall()\n if (len(rows)>0):\n return int(rows[0][0])\n else:\n return 0",
"def get_versions(self, path):\n with os.scandir(path) as iter_:\n version_dirs = [d.name for d in iter_ if \\\n d.is_dir() and not (d.name.startswith('.') or d.name == 'docs')]\n return self.v_cleanup(version_dirs)",
"def get_version(course_path):\r\n format_file = course_path / EXPORT_VERSION_FILE\r\n if not format_file.isfile():\r\n return 0\r\n with open(format_file, \"r\") as f:\r\n data = json.load(f)\r\n if EXPORT_VERSION_KEY in data:\r\n return data[EXPORT_VERSION_KEY]\r\n\r\n return None",
"def get_version_details(self, project_id, document_id, version=None):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/documents/' + str(document_id) + '/'\n if version is not None: \n param = {\n 'version': version\n }\n else:\n param = None\n response = zoho_http_client.get(url, self.details, param)\n return parser.get_documents(response)[0]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method that scans the users for whom an A.K.I has been created and deletes stale users | def delete_staleuser():
    # Because this function is used in users.models, a module-level import would cause a conflict at load time.
    from users.models import ActivationKeyInfo
    for aki in ActivationKeyInfo.objects.all():
        if aki.user.is_active is False and aki.expires_at < timezone.now():
            aki.delete() | [
"def forget(self, uid):",
"def delete_user(self):\n\n \tUser.user_list.remove(self)",
"def cleanup(self, lifetime):\n c = self.db.cursor()\n cur = c.execute(\"select login from users where created < date('now', ? || ' days')\",\n (-lifetime,))\n for (login,) in cur:\n d = self.user_dir(login)\n shutil.rmtree(d)\n self.db.execute(\"delete from users where login = ?\", (login,))\n self.db.commit()",
"def clear_lockout_counter(cls, user):\r\n try:\r\n entry = LoginFailures.objects.get(user=user)\r\n entry.delete()\r\n except ObjectDoesNotExist:\r\n return",
"def recover(self):\n self.deleted = False\n self.save()\n self.history.create(user_id=self.pk, action=user_history.RECOVERY)",
"def tearDown(self):\n User.user_list = []",
"def removeHost(self):\n if not self.user:\n return self\n self.user.host_for = []\n self.user.put()\n return self.user",
"def purge_last_active(self):\n time_ago = time.time() - UserManager.ACTIVE_TIME\n for nick, last in self.last_active.items():\n if last < time_ago:\n del self.last_active[nick]",
"def deep_user_remove(self, username): \n self.remove_user_from_escalations(username)\n self.remove_user_from_schedules(username)\n self.remove_user_from_teams(username)\n self.delete_user(username)",
"def _ClearUserHistory():\r\n _history.clear()",
"def delete_user():",
"def connection_lost(self, exc):\n if isinstance(self.current, Session):\n self.current.removeUser(self)\n elif self.current == self:\n del super.clients[self]\n else:\n anon.remove(self)",
"def test_user_deletion(self):\n User.objects.filter(username=self.user.username).delete()\n self.assertTrue(AuditTrail.objects.count() >= 2)\n self.assertEqual(\n AuditTrail.objects.last().level, AuditTrail.LEVEL_WARNING)",
"def db_delete_user_data(self):\n util.log(\"Clearing all user data\", util.LogLevel.Info)\n self.db.db_clear_data_user()\n util.log(\"Done\", util.LogLevel.Info)",
"def tearDown(self):\n del self.user",
"def backwards(apps, schema_editor):\n Referral = apps.get_model(\"core\", \"Referral\")\n\n for referral in Referral.objects.all():\n referral.users.clear()\n referral.save()",
"def reload(self):\n self.sessionactivity_set.all().delete()\n for i in Activity.objects.filter(parent=self):\n i.sessionactivity_set.all().delete()",
"def delete_leader(self):",
"def _ClearUserHistory():\n _history.clear()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns OAuth2 credentials if we have valid credentials in the session. This is a 'truthy' value. Return None if we don't have credentials, or if they have expired or are otherwise invalid. This is a 'falsy' value. | def valid_credentials():
    if 'credentials' not in flask.session:
        return None
    credentials = client.OAuth2Credentials.from_json(
        flask.session['credentials'])
    if (credentials.invalid or
            credentials.access_token_expired):
        return None
    return credentials | [
"def valid_credentials():\n if 'credentials' not in flask.session:\n return None\n\n credentials = client.OAuth2Credentials.from_json(\n flask.session['credentials'])\n\n if credentials.invalid or credentials.access_token_expired:\n return None\n return credentials",
"def valid_credentials():\n if 'credentials' not in flask.session:\n return None\n\n credentials = client.OAuth2Credentials.from_json(\n flask.session['credentials'])\n\n if (credentials.invalid or\n credentials.access_token_expired):\n return None\n return credentials",
"def get_credentials():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists(TOKEN_FILE):\n with open(TOKEN_FILE, \"rb\") as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRET_FILE, SCOPES)\n creds = flow.run_local_server()\n # Save the credentials for the next run\n with open(TOKEN_FILE, \"wb\") as token:\n pickle.dump(creds, token)\n return creds",
"def auth_credentials():\n creds = None\n if session.get('credentials'):\n creds = Credentials(**session.get('credentials'))\n\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_config(\n GOOGLE_CREDENTIALS,\n SCOPES\n )\n # flow.redirect_uri = 'http://localhost:8000/google-callback'\n creds = flow.run_local_server()\n return creds",
"def _aws_credentials(session):\n try:\n creds = session.get_credentials()\n access_key = creds.access_key\n secret_key = creds.secret_key\n token = creds.token\n\n # The presence of a token indicates the credentials are short-lived and as such are risky\n # to be used as they might expire while running.\n # Long-lived credentials are available either through\n # 1. boto session\n # 2. EC2 Metadata Service (SageMaker Notebook instances or EC2 instances with roles\n # attached them)\n # Short-lived credentials available via boto session are permitted to support running on\n # machines with no EC2 Metadata Service but a warning is provided about their danger\n if token is None:\n logger.info(\"Using the long-lived AWS credentials found in session\")\n return [\n \"AWS_ACCESS_KEY_ID=%s\" % (str(access_key)),\n \"AWS_SECRET_ACCESS_KEY=%s\" % (str(secret_key)),\n ]\n if _use_short_lived_credentials() or not _aws_credentials_available_in_metadata_service():\n logger.warning(\n \"Using the short-lived AWS credentials found in session. They might expire while \"\n \"running.\"\n )\n return [\n \"AWS_ACCESS_KEY_ID=%s\" % (str(access_key)),\n \"AWS_SECRET_ACCESS_KEY=%s\" % (str(secret_key)),\n \"AWS_SESSION_TOKEN=%s\" % (str(token)),\n ]\n logger.info(\n \"No AWS credentials found in session but credentials from EC2 Metadata Service are \"\n \"available.\"\n )\n return None\n except Exception as e: # pylint: disable=broad-except\n logger.info(\"Could not get AWS credentials: %s\", e)\n\n return None",
"def get_credentials():\n store = Storage(CREDENTIAL_PATH)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, None)\n return credentials",
"def get_http_auth():\n if 'credentials' not in flask.session:\n raise Unauthorized('Unable to get valid credentials')\n try:\n credentials = client.OAuth2Credentials.from_json(\n flask.session['credentials'])\n http_auth = credentials.authorize(httplib2.Http())\n if credentials.access_token_expired:\n credentials.refresh(http_auth)\n except Exception as ex:\n logger.exception(ex.message)\n del flask.session['credentials']\n raise Unauthorized('Unable to get valid credentials.')\n flask.session['credentials'] = credentials.to_json()\n return http_auth",
"def load_session_credentials(request_handler):\n session = sessions.LilCookies(request_handler, SESSION_SECRET)\n userid = session.get_secure_cookie(name='userid')\n if userid:\n return userid, StorageByKeyName(Credentials, userid, 'credentials').get()\n else:\n return None, None",
"def get_credentials():\n try:\n netrc_path = netrc.path()\n auths = netrc(netrc_path).authenticators(\n urlparse(solvebio.api_host).netloc)\n except (IOError, TypeError, NetrcParseError) as e:\n raise CredentialsError(\n 'Could not open credentials file: ' + str(e))\n\n if auths:\n return (auths[0], auths[2])\n else:\n return None",
"def get_spot_credentials():\n\n if not current_user.is_authenticated:\n return None\n\n user_creds = SpotCredentials.query.get(current_user.get_id())\n\n if not user_creds:\n return None\n\n sp = create_spot_oauth()\n\n token_info = {\n \"access_token\": user_creds.token,\n \"refresh_token\": user_creds.refresh_token,\n \"expires_at\": int(user_creds.expires_at),\n \"scope\": spot_scope\n }\n\n if 'spot_credentials' not in session:\n session['spot_credentials'] = token_info\n\n t = sp.validate_token(token_info)\n user_creds.token = t[\"access_token\"]\n db.session.commit()\n\n return t",
"def load(self):\n access_key = self.session.get_config_variable('access_key', methods=('config',))\n secret_key = self.session.get_config_variable('secret_key', methods=('config',))\n token = self.session.get_config_variable('token', ('config',))\n if access_key and secret_key:\n logger.info('Found credentials in config file.')\n return Credentials(access_key, secret_key, token,\n method=self.method)\n return None",
"def authorize_credentials():\n credentials = STORAGE.get()\n # If the credentials doesn't exist in the storage location then run the flow\n if credentials is None or credentials.invalid:\n flow = flow_from_clientsecrets(CREDENTIAL_JSON, scope=SCOPE)\n http = httplib2.Http()\n credentials = run_flow(flow, STORAGE, http=http)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,'credentials.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n boto_session = boto3.session.Session()\n\n try:\n secret_name = os.environ['DOCUMENTDB_SECRET']\n\n logger.debug('Retrieving secret {} from Secrets Manger.'.format(secret_name))\n\n secrets_client = boto_session.client(service_name='secretsmanager',\n region_name=boto_session.region_name)\n secret_value = secrets_client.get_secret_value(SecretId=secret_name)\n\n secret = secret_value['SecretString']\n secret_json = json.loads(secret)\n username = secret_json['username']\n password = secret_json['password']\n\n logger.debug('Secret {} retrieved from Secrets Manger.'.format(secret_name))\n\n return (username, password)\n\n except Exception as ex:\n logger.error('Failed to retrieve secret {}'.format(secret_name))\n raise",
"def test_retrieve_client_credentials_returns_none(self):\n s = self.build_session()\n assert s.retrieve_client_credentials() == (None, None)",
"def getsessionpasswd(cls, session):\n sessionkey = cls.sessionkey(session)\n if sessionkey in sessionmgr.keys():\n return True, sessionmgr[sessionkey]['password']\n return False, None",
"def _session_auth_credentials_defined(self):\n _defined = False\n if 'session_auth' in self.core_settings:\n _defined = True if ('username' in self.core_settings.get('session_auth') and\n 'password' in self.core_settings.get('session_auth')) else _defined\n return _defined",
"def _credentials():\n from oauth2client.client import SignedJwtAssertionCredentials\n return SignedJwtAssertionCredentials",
"def get_credentials():\n # add option to pass profile name\n try:\n config = ConfigParser()\n config.read(os.getenv(\"HOME\") + \"/.aws/credentials\")\n return (\n config.get(\"default\", \"aws_access_key_id\"),\n config.get(\"default\", \"aws_secret_access_key\"),\n )\n except:\n ACCESS = os.getenv(\"AWS_ACCESS_KEY_ID\")\n SECRET = os.getenv(\"AWS_SECRET_ACCESS_KEY\")\n if not ACCESS and SECRET:\n raise AttributeError(\"No AWS credentials found.\")\n return (ACCESS, SECRET)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read time in a human-compatible format and interpret as ISO format with local timezone. May throw exception if time can't be interpreted. In that case it will also flash a message explaining accepted formats. | def interpret_time( text ):
app.logger.debug("Decoding time '{}'".format(text))
time_formats = ["ha", "h:mma", "h:mm a", "H:mm"]
try:
as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())
as_arrow = as_arrow.replace(year=2016) #HACK see below
app.logger.debug("Succeeded interpreting time")
except:
app.logger.debug("Failed to interpret time")
flask.flash("Time '{}' didn't match accepted formats 13:30 or 1:30pm"
.format(text))
raise
return as_arrow.isoformat()
#HACK #Workaround
# isoformat() on raspberry Pi does not work for some dates
# far from now. It will fail with an overflow from time stamp out
# of range while checking for daylight savings time. Workaround is
# to force the date-time combination into the year 2016, which seems to
# get the timestamp into a reasonable range. This workaround should be
# removed when Arrow or Dateutil.tz is fixed.
# FIXME: Remove the workaround when arrow is fixed (but only after testing
# on raspberry Pi --- failure is likely due to 32-bit integers on that platform) | [
"def interpret_time( text ):\n app.logger.debug(\"Decoding time '{}'\".format(text))\n time_formats = [\"ha\", \"h:mma\", \"h:mm a\", \"H:mm\"]\n try: \n as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())\n as_arrow = as_arrow.replace(year=2016) #HACK see below\n app.logger.debug(\"Succeeded interpreting time\")\n except:\n app.logger.debug(\"Failed to interpret time\")\n flask.flash(\"Time '{}' didn't match accepted formats 13:30 or 1:30pm\"\n .format(text))\n raise\n return as_arrow.isoformat()",
"def interpret_time(text):\n app.logger.debug(\"Decoding time '{}'\".format(text))\n time_formats = [\"ha\", \"h:mma\", \"h:mm a\", \"H:mm\"]\n try:\n as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())\n # Workaround for raspberry Pi because isoformat doesn't work on some dates:\n as_arrow = as_arrow.replace(year=2016)\n app.logger.debug(\"Succeeded interpreting time\")\n except:\n app.logger.debug(\"Failed to interpret time\")\n flask.flash(\"Time '{}' didn't match accepted formats 13:30 or 1:30pm\"\n .format(text))\n raise\n return as_arrow.isoformat()",
"def TryReadTime(self):\n splitVals = self.ReadString().replace(\" \", \"\").split(':')\n if len(splitVals) < 2 or len(splitVals) > 3:\n return None\n try:\n tData = [None] * 3\n for i, item in enumerate(splitVals):\n tData[i] = int(item)\n if tData[i] < 0:\n return None\n elif i == 0 and tData[i] > 24:\n return None\n elif i > 0 and tData[i] > 60:\n return None\n result = Project.Time(tData[0], tData[1], tData[2])\n if result > Project.Time(24, 0, 0):\n return None\n else:\n return result\n except:\n return None",
"def interpret_time(text):\n app.logger.debug(\"Decoding time '{}'\".format(text))\n time_formats = [\"ha\", \"h:mma\", \"h:mm a\", \"H:mm\"]\n try:\n as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())\n as_arrow = as_arrow.replace(year=2016) # HACK see below\n app.logger.debug(\"Succeeded interpreting time\")\n except:\n app.logger.debug(\"Failed to interpret time\")\n flask.flash(\"Time '{}' didn't match accepted formats 13:30 or 1:30pm\"\n .format(text))\n raise\n return as_arrow.isoformat()\n # HACK Workaround\n # isoformat() on raspberry Pi does not work for some dates\n # far from now. It will fail with an overflow from time stamp out\n # of range while checking for daylight savings time. Workaround is\n # to force the date-time combination into the year 2016, which seems to\n # get the timestamp into a reasonable range. This workaround should be\n # removed when Arrow or Dateutil.tz is fixed.\n # FIXME: Remove the workaround when arrow is fixed (but only after testing\n # on rasp Pi failure is likely due to 32-bit integers on that platform)",
"def test_parseTimeInvalidFormat(self):\n self.assertRaises(ValueError, imap4.parseTime, u\"invalid\")",
"def _validate_format(date: str, time: str) -> None:\n\tif \"-\" in date and \":\" not in time and len(time) != 2 or \"-\" not in date and \":\" in time:\n\t\traise ValueError(\"Cannot mix extended and basic format\")",
"def _parse_time(time_string: str, source: str = \"input\") -> Optional[datetime.datetime]:\n if not time_string:\n return None\n\n format_string = \"%Y-%m-%d\" if source == \"input\" else \"%Y-%m-%dT%H:%M:%SZ\"\n try:\n return datetime.datetime.strptime(time_string, format_string)\n except ValueError:\n raise AnalyzerError(\"Incorrect date format\")",
"def _time_fromisoformat(string):\r\n # Try to create a datetime object from the string.\r\n try:\r\n # First try with microseconds.\r\n return datetime.datetime.strptime(string, \"%H:%M:%S.%f\").time()\r\n except ValueError:\r\n try:\r\n # Then try without microseconds.\r\n return datetime.datetime.strptime(string, \"%H:%M:%S\").time()\r\n except ValueError:\r\n pass\r\n # end try\r\n return string\r\n # end try\r",
"def iso_time_converter(time):\n if type(time) == datetime.datetime:\n converted_time = time.isoformat()\n elif type(time) in [str, unicode]:\n converted_time = dateutil.parser.parse(time)\n else:\n raise UserWarning('Invalid time type.')\n return converted_time",
"def parseTime(self, line):\n timestr = line.split(\" \", 1)[0][1:-1]\n if len(timestr) == 8:\n return datetime.datetime.strptime(timestr, \"%H:%M:%S\").time()\n elif len(timestr) == 5:\n return datetime.datetime.strptime(timestr, \"%H:%M\").time()\n else:\n raise ValueError(\"Unknown time format: '{}'\".format(timestr))",
"def process_time_input(time_input):\n if type(time_input) == datetime:\n ts = time_input\n elif type(time_input) == str:\n time_input = time_input.strip()\n if len(time_input) == 10:\n time_input += \" 00:00:00\"\n ts = datetime.strptime(time_input, '%Y-%m-%d %H:%M:%S')\n else:\n raise ValueError(\"time input must be str or datetime\")\n\n return ts",
"def check_valid_time_format(section, dataset, text, var_name):\n if var_name in dataset.variables:\n s = str(netCDF4.chartostring(dataset.variables[var_name][:]))\n try:\n time.strptime(s, \"%Y-%m-%dT%H:%M:%SZ\")\n except:\n tup = (text, var_name, s, \"yyyy-mm-ddThh:mm:ssZ\")\n t = \"%s '%s' has an invalid format: %s should be %s\" % (tup)\n log_error(section, t)",
"def parse_time(text):\n formats = ['%I:%M %p','%I:%M:%S %p','%H:%M:%S']\n out_format = '%H:%M:%S'\n for fmt in formats:\n try:\n dt = pydt.strptime(text, fmt)\n return pydt.strftime(dt,out_format)\n except ValueError:\n pass\n try:\n dt = pydt.strptime(text, '%H:%M %p') \n return pydt.strftime(dt,out_format)\n except ValueError:\n pass\n return None",
"def format_time(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_time(data)\r\n\r\n return data.isoformat()",
"def checktime(self, timestr):\n rfdtregex = \"\\\\d\\\\d\\\\d\\\\d-\\\\d\\\\d-\\\\d\\\\dT\\\\d\\\\d:\\\\d\\\\d:\\\\d\\\\dZ?\"\n if not re.match(rfdtregex, timestr):\n raise InvalidCommandLineError(\n \"Invalid redfish date-time format. \"\n \"Accepted formats: YYYY-MM-DDThh:mm:ss, YYYY-MM-DDThh:mm:ssZ\"\n )",
"def properTimeInput(time_):\r\n if not time_.isdigit() or len(time_) > 4 or len(time_) < 4 or int(time_) > 2400 or int(time_) < 0 or int(time_[2])>5:\r\n print(\"'\",time_, \"' is an invalid input for the time. Use 24 hr format.\\nExamples: 8 a.m = 0800, 1 p.m = 1300, 2:30 = 1430, 12:50 a.m = 0050\\n\")\r\n return False\r\n return True",
"def excel_time_parser(t):\n EXCEL_TIME_FORMAT = '%H:%M:%S'\n return datetime.datetime.strptime(t, EXCEL_TIME_FORMAT)",
"def read_time(time_string):\n factors = {\n \"n\": 1e-9,\n \"u\": 1e-6,\n \"m\": 1e-3,\n \"s\": 1\n }\n \n # Check that the time string is properly formatted, e. g. time part\n # is followed by the unit part. The string should contain at least two\n # character, otherwise splitting it into two parts will raise an IndexError.\n try:\n number, unit = time_string[:-1], time_string[-1]\n except (IndexError, TypeError):\n raise ValueError(\"Invalid time string given.\")\n\n # If the 'time part' cannot be converted to float, this raises a ValueError.\n number = float(number)\n \n if number < 0:\n raise ValueError(\"Negative time values are not allowed.\")\n \n # Check that a valid time unit was specified. If no unit was specified,\n # then what we call 'unit' will in fact be the last digit of the time value\n # and as we do not use numeric unit symbols, we still get an error.\n try:\n factor = factors[unit]\n except KeyError:\n raise ValueError(\"Invalid time unit given.\")\n\n time = number * factor\n return time",
"def fromisoformat(string):\n string = string.replace(\"T\", \" \")\n if \".\" in string:\n return datetime.strptime(string, \"%Y-%m-%d %H:%M:%S.%f\")\n return datetime.strptime(string, \"%Y-%m-%d %H:%M:%S\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert text of date to ISO format used internally, with the local time zone. | def interpret_date( text ):
try:
as_arrow = arrow.get(text, "MM/DD/YYYY").replace(
tzinfo=tz.tzlocal())
except:
flask.flash("Date '{}' didn't fit expected format 12/31/2001")
raise
return as_arrow.isoformat() | [
"def date_to_iso(string):\r\n\r\n # disregard tokenisation, if it's there, to make this an easier conversion for GUTime\r\n string = re.sub(r'<([^~]*)~.+?>', r'\\1 ', string)\r\n\r\n # Defaults\r\n d = None\r\n m = None\r\n y = None\r\n h = None\r\n min = None\r\n s = None\r\n fs = None\r\n zone = None\r\n\r\n # ACE format\r\n match = re.search(r'(\\d\\d\\d\\d\\d\\d\\d\\d:\\d\\d\\d\\d)', re.sub('\\s', '', string))\r\n if match is not None:\r\n d = match.group(1)\r\n d = re.sub(r':', r'T', d)\r\n return d\r\n\r\n # Already in ISO format\r\n match = re.search(r'(\\d\\d\\d\\d-?\\d\\d-?\\d\\d)(-?(T\\d\\d(:?\\d\\d)?(:?\\d\\d)?([+-]\\d{1,4})?))?', re.sub('\\s', '', string))\r\n if match is not None:\r\n d = match.group(1)\r\n d = re.sub(r'-', r'', d)\r\n h = match.group(3)\r\n if h is not None:\r\n h = re.sub(r':', r'', h)\r\n return d + h\r\n else:\r\n return d\r\n\r\n # some pre-processing\r\n match = re.search('T\\d\\d(:?\\d\\d)?(:?\\d\\d)?([+-]\\d{1,4})?', re.sub('\\s', '', string))\r\n if match is not None:\r\n return re.sub(r':', r'', re.sub('\\s', '', string))\r\n\r\n # extract date\r\n if re.search(\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\s+'\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s*,?\\s+(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I) is not None:\r\n match = re.search(\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\s+'\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s*,?\\s+(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I)\r\n d = ordinal_to_num(match.group(1))\r\n m = month_to_num(match.group(5))\r\n y = match.group(7)\r\n\r\n elif re.search(\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s+'\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\b,?\\s*(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I) is not None:\r\n match = re.search(\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s+'\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\b,?\\s*(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I)\r\n d = ordinal_to_num(match.group(4))\r\n m = month_to_num(match.group(1))\r\n y = match.group(7)\r\n\r\n elif re.search(r'(\\d\\d\\d\\d)(\\/|\\-)(\\d\\d?)\\2(\\d\\d?)', re.sub('\\s', '', string)) is not None:\r\n match = re.search(r'(\\d\\d\\d\\d)(\\/|\\-)(\\d\\d?)\\2(\\d\\d?)', re.sub('\\s', '', string))\r\n m = match.group(3)\r\n d = match.group(4)\r\n y = match.group(1)\r\n\r\n elif re.search(r'(\\d\\d?)(\\/|\\-|\\.)(\\d\\d?)\\2(\\d\\d(\\d\\d)?)', re.sub('\\s', '', string)) is not None:\r\n match = re.search(r'(\\d\\d?)(\\/|\\-|\\.)(\\d\\d?)\\2(\\d\\d(\\d\\d)?)', re.sub('\\s', '', string))\r\n m = match.group(1)\r\n d = match.group(3)\r\n y = match.group(4)\r\n\r\n if y is not None:\r\n # check for European style date\r\n if 12 < int(m) <= 31 and int(d) <= 12:\r\n new_d = m\r\n m = d\r\n d = new_d\r\n\r\n # check for 2 digit year\r\n y = normalise_two_digit_year(str(y))\r\n\r\n iso = \"%4d%02d%02d\" % (int(y), int(m), int(d))\r\n\r\n else:\r\n iso = \"XXXXXXXX\"\r\n\r\n # Extract time\r\n match = re.search(r'(\\d?\\d):(\\d\\d)(:(\\d\\d)(\\.\\d+)?)?(([AP])\\.?M\\.?)?(([+\\-]\\d+|[A-Z][SD]T|GMT([+\\-]\\d+)?))?',\r\n re.sub('\\s', '', string), re.I)\r\n if match is not None:\r\n h = match.group(1)\r\n min = match.group(2)\r\n s = match.group(4)\r\n fs = match.group(5)\r\n ampm = match.group(7)\r\n zone = 
match.group(9)\r\n\r\n if ampm is not None and ampm[0].lower() == 'p':\r\n h = str(int(h) + 12)\r\n\r\n if zone is not None:\r\n zm = re.search(r'(GMT)([+\\-]\\d+)', zone)\r\n if zm is not None:\r\n zone = zm.group(2)\r\n elif zone.lower().find('gmt') > -1:\r\n zone = 'Z'\r\n elif re.search(r'([A-Z])([SD])T', zone) is not None:\r\n zm = re.search(r'([A-Z])([SD])T', zone)\r\n # Timezone offsets from GMT\r\n timezones = {\r\n \"R\": 1,\r\n \"E\": -5,\r\n \"C\": -6,\r\n \"M\": -7,\r\n \"P\": -8\r\n }\r\n if zm.group(1).upper() in timezones:\r\n zone = timezones[zm.group(1).upper()]\r\n if zm.group(2).lower() == 'd':\r\n zone += 1\r\n if zone < 0:\r\n zone = '-%02d00' % (-1 * zone)\r\n else:\r\n zone = '+%02d00' % zone\r\n elif re.search(r'(\\d\\d)(\\d\\d)\\s+(h(ou)?rs?|(on\\s+)?\\d\\d?\\/\\d)', string, re.I) is not None:\r\n match = re.search(r'(\\d\\d)(\\d\\d)\\s+(h(ou)?rs?|(on\\s+)?\\d\\d?\\/\\d)', string, re.I)\r\n h = match.group(1)\r\n min = match.group(2)\r\n\r\n if h is not None:\r\n if fs is not None:\r\n fs = re.sub(r'\\.', r'', fs)\r\n iso += 'T%02d%02d%02d.%02d' % (int(h), int(min), int(s), int(fs))\r\n elif s is not None:\r\n iso += 'T%02d%02d%02d' % (int(h), int(min), int(s))\r\n elif min is not None:\r\n iso += 'T%02d%02d' % (int(h), int(min))\r\n\r\n if zone is not None:\r\n iso += zone.lstrip()\r\n\r\n return iso",
"def date_to_iso(date):\n return date.isoformat()",
"def formatISO2str(self, iso_time, timezone=8):\n # iso8601 读取\n datetime_UTC = iso8601.parse_date(iso_time)\n # 时区转换\n datetime_real = datetime_UTC + datetime.timedelta(hours=timezone)\n # 格式化\n date = datetime_real.strftime(\"%Y年%m月%d日\")\n return date",
"def _isoformat(d):\n if isinstance(d, datetime) and not d.tzinfo:\n d = d.replace(tzinfo=tzlocal())\n return d.isoformat()",
"def convertFromISODate(date):\n if date:\n try:\n datetime_object = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ')\n except ValueError:\n return date\n else:\n return datetime_object.strftime('%Y-%m-%d')\n else:\n return None",
"def format_date(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_date(data)\r\n\r\n return data.isoformat()",
"def to_local(self, txt=False):\n rv = self.local_tz.normalize(self.dt.astimezone(self.local_tz))\n if txt:\n return rv.strftime(self.default_fmt)\n else:\n return rv",
"def date_str_to_iso(date_str):\n return datetime.strptime(date_str, \"%m/%d/%Y\").isoformat()",
"def datetime_to_isoformat(obj: datetime.datetime) -> str:\n return obj.replace(tzinfo=datetime.timezone.utc).isoformat().replace(\"+00:00\", \"Z\")",
"def format_iso(dt, default_tzinfo=local_timezone):\n dt = dt if dt.tzinfo else dt.replace(tzinfo=default_tzinfo)\n return dt.astimezone(utc_timezone).replace(tzinfo=None).isoformat()+'Z'",
"def iso_date(self, t=None):\n if t is None:\n t = time.time()\n time_str = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(t))\n\n return time_str",
"def _format_date_for_filenames(self, date):\n if date.tzinfo is not None and self.source == 'owm':\n return date.astimezone(pytz.utc)\n else:\n return date",
"def convert_from_iso(s):\n # TODO: Allow for more timezones than just -6 GMT\n return datetime.datetime.strptime(s, \"%Y-%m-%dT%H:%M:%S-06:00\")",
"def to_iso(dt):\n return dt.strftime(ISO_FORMAT)",
"def format_datetime(self, data):\r\n data = make_naive(data)\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_datetime(data)\r\n\r\n return data.isoformat()",
"def date_string_to_iso(string):\n date=None\n if string is not None:\n try:\n #if separator is \"-\"\n if \"-\" in string:\n strings=string.split(\"-\")\n else:\n strings=string.split(\"/\")\n\n #~ print \"strings\"\n #~ print strings\n \n #if year is first\n if len(strings[0])==4:\n year, month, day=strings[0], strings[1], strings[2]\n #if year is last\n else:\n #the year must be coded on 4 digits\n year, month, day=strings[2], strings[1], strings[0]\n date=date_split_to_iso(year, month, day)\n except Exception, e:\n print \"pb\", string\n print \"wrong date format\", e\n\n #return None if date string is None\n return date",
"def isoformat(dt):\n if not dt.utcoffset():\n dt = dt.replace(tzinfo=None)\n return dt.isoformat(\"T\") + \"Z\"\n return dt.isoformat(\"T\")",
"def _IsoDate(cls, timestamp):\r\n return datetime.datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d')",
"def _date_fromisoformat(string):\r\n # Try to create a datetime object from the string.\r\n try:\r\n return datetime.datetime.strptime(string, \"%Y-%m-%d\").date()\r\n except ValueError:\r\n return string\r\n # end try\r"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a google 'service' object, return a list of calendars. Each calendar is represented by a dict. The returned list is sorted to have the primary calendar first, and selected (that is, displayed in Google Calendars web app) calendars before unselected calendars. | def list_calendars(service):
app.logger.debug("Entering list_calendars")
calendar_list = service.calendarList().list().execute()["items"]
result = [ ]
for cal in calendar_list:
kind = cal["kind"]
id = cal["id"]
if "description" in cal:
desc = cal["description"]
else:
desc = "(no description)"
summary = cal["summary"]
# Optional binary attributes with False as default
selected = ("selected" in cal) and cal["selected"]
primary = ("primary" in cal) and cal["primary"]
result.append(
{ "kind": kind,
"id": id,
"summary": summary,
"selected": selected,
"primary": primary
})
return sorted(result, key=cal_sort_key) | [
"def list_calendars(service):\n app.logger.debug(\"Entering list_calendars with service\")\n calendar_list = service.calendarList().list().execute()[\"items\"]\n app.logger.debug(\"Got calendar list\")\n result = []\n for cal in calendar_list:\n kind = cal[\"kind\"]\n id = cal[\"id\"]\n if \"description\" in cal:\n desc = cal[\"description\"]\n else:\n desc = \"(no description)\"\n summary = cal[\"summary\"]\n # Optional binary attributes with False as default\n selected = (\"selected\" in cal) and cal[\"selected\"]\n primary = (\"primary\" in cal) and cal[\"primary\"]\n\n result.append(\n {\"kind\": kind, \"id\": id, \"summary\": summary, \"selected\": selected,\n \"primary\": primary})\n app.logger.debug(\"About to return from list_calendars with: \", result)\n return sorted(result, key=cal_sort_key)",
"def list_calendars(service):\n app.logger.debug(\"Entering list_calendars\") \n calendar_list = service.calendarList().list().execute()[\"items\"]\n result = [ ]\n for cal in calendar_list:\n id = cal[\"id\"]\n summary = cal[\"summary\"]\n selected = (\"selected\" in cal) and cal[\"selected\"]\n primary = (\"primary\" in cal) and cal[\"primary\"]\n \n result.append(\n { \"id\": id,\n \"summary\": summary,\n \"selected\": selected,\n \"primary\": primary\n })\n return sorted(result, key=cal_sort_key)",
"def list_calendars(service):\n app.logger.debug(\"Entering list_calendars\")\n calendar_list = service.calendarList().list().execute()[\"items\"]\n result = []\n for cal in calendar_list:\n kind = cal[\"kind\"]\n cal_id = cal[\"id\"]\n summary = cal[\"summary\"]\n # Optional binary attributes with False as default\n selected = (\"selected\" in cal) and cal[\"selected\"]\n primary = (\"primary\" in cal) and cal[\"primary\"]\n\n result.append(\n {\"kind\": kind,\n \"id\": cal_id,\n \"summary\": summary,\n \"selected\": selected,\n \"primary\": primary})\n return sorted(result, key=cal_sort_key)",
"def list_calendars(service):\n app.logger.debug(\"Entering list_calendars\")\n calendar_list = service.calendarList().list().execute()[\"items\"]\n result = []\n for cal in calendar_list:\n kind = cal[\"kind\"]\n id = cal[\"id\"]\n if \"description\" in cal:\n desc = cal[\"description\"]\n else:\n desc = \"(no description)\"\n summary = cal[\"summary\"]\n # Optional binary attributes with False as default\n selected = (\"selected\" in cal) and cal[\"selected\"]\n primary = (\"primary\" in cal) and cal[\"primary\"]\n result.append(\n {\"kind\": kind,\n \"id\": id,\n \"summary\": summary,\n \"selected\": selected,\n \"primary\": primary})\n return sorted(result, key=cal_sort_key)",
"def mock_calendars_list(\n google_service: GoogleCalendarService,\n) -> ApiResult:\n\n def _put_result(response: dict[str, Any]) -> None:\n google_service.return_value.get.return_value.calendarList.return_value.list.return_value.execute.return_value = (\n response\n )\n return\n\n return _put_result",
"def getUsrCals(self, service):\n return self.service.calendarList().list().execute()",
"def get_events(calendars, service):\n app.logger.debug(\"Entering get_events\")\n time_min = arrow.get(flask.session['begin_datetime'])\n time_max = arrow.get(flask.session['end_datetime']).shift(days=+1)\n event_list = []\n calendars = ast.literal_eval(calendars)\n\n for calendar in calendars: # Per calendar\n calendar = ast.literal_eval(calendar)\n calendar_events = service.events().list(calendarId=calendar['id'],\n singleEvents=True,\n timeMin=time_min,\n timeMax=time_max,\n orderBy='startTime').execute()['items']\n for calendar_event in calendar_events:\n if 'transparency' in calendar_event and calendar_event['transparency'] is \"transparent\":\n continue\n event_list.append(({\"calendar\": calendar['id']},\n {\"summary\": calendar_event['summary']},\n {\"start\": calendar_event['start']},\n {\"end\": calendar_event['end']}))\n return event_list",
"def getCal(calendar_service,calendar_name,prefix = 'https://www.google.com/calendar/feeds/'):\n feed = calendar_service.GetAllCalendarsFeed()\n for cal in feed.entry:\n if cal.title.text == calendar_name:\n cal.gcalcli_altLink = cal.GetAlternateLink().href\n match = re.match('^'+ prefix + '(.*?)/(.*?)/(.*)$',cal.gcalcli_altLink)\n cal.gcalcli_username = urllib.unquote(match.group(1))\n cal.gcalcli_visibility = urllib.unquote(match.group(2))\n cal.gcalcli_projection = urllib.unquote(match.group(3))\n return cal",
"def calendar(self):\n\n c = self.db.cursor()\n unique = [] # Dict of unique self.service.trip_id entries\n trips = [] # Corresponding lists of trip_ids matching each unique\n\n for trip_id in self.service:\n seek = self.service[trip_id]\n if seek in unique:\n trips[unique.index(seek)].append(trip_id)\n else:\n unique.append(seek)\n trips.append([trip_id])\n\n for i in range(len(unique)):\n match_id = -1\n c.execute(\n \"\"\"SELECT service_id FROM calendar WHERE monday=? AND\n tuesday=? AND wednesday=? AND thursday=? AND friday=? AND\n saturday=? AND sunday=? AND start_date=? AND end_date=?\"\"\",\n unique[i][\"calendar\"],\n )\n calendars = c.fetchall() # 1+ may match, some with calendar_dates\n\n for result in calendars:\n c.execute(\n \"\"\"SELECT date, exception_type FROM\n calendar_dates WHERE service_id=? ORDER BY date DESC,\n exception_type DESC\"\"\",\n result,\n )\n dates = c.fetchall()\n if (\n \"calendar_dates\" in unique[i]\n and unique[i][\"calendar_dates\"] == dates\n ) or (\"calendar_dates\" not in unique[i] and len(dates) == 0):\n match_id = result[0]\n break\n\n if match_id == -1:\n c.execute(\"\"\"SELECT COUNT(*) FROM calendar\"\"\")\n match_id = 1 + (c.fetchone()[0])\n c.execute(\n \"\"\"INSERT INTO calendar (service_id, monday,\n tuesday, wednesday, thursday, friday, saturday, sunday,\n start_date, end_date) VALUES (?,?,?,?,?,?,?,?,?,?)\"\"\",\n ([match_id] + unique[i][\"calendar\"]),\n )\n if \"calendar_dates\" in unique[i]:\n for dates in unique[i][\"calendar_dates\"]:\n c.execute(\n \"\"\"INSERT INTO calendar_dates (service_id,\n date, exception_type) VALUES (?,?,?)\"\"\",\n ([match_id] + dates),\n )\n\n for trip_id in trips[i]:\n c.execute(\n \"\"\"UPDATE trips SET service_id=? WHERE trip_id=?\"\"\",\n (\n match_id,\n trip_id,\n ),\n )\n\n self.db.commit()",
"def get_calendar_id_list(service):\n print('Getting list of calendars...')\n list_result = service.calendarList().list(showHidden=True, maxResults=250).execute().get('items', [])\n return [cal[\"id\"] for cal in list_result if \"@hubspot.com\" in cal[\"id\"]]",
"def calendars(self):\r\n return c.Calendars(self)",
"def get_calendar(gtfs_info):\n # Parse calendar\n use_cols = ['service_id', 'weekdays', 'start_date', 'end_date']\n calendar = gtfs_info.drop_duplicates(subset=use_cols)\n calendar = calendar[use_cols].copy()\n calendar = calendar.reset_index(drop=True)\n\n # Container for final results\n gtfs_calendar = pd.DataFrame()\n\n # Parse weekday columns\n for idx, row in calendar.iterrows():\n # Get dayinfo\n dayinfo = row['weekdays']\n\n # Parse day information\n dayrow = parse_day_range(dayinfo)\n\n # Add service and operation range info\n dayrow['service_id'] = row['service_id']\n dayrow['start_date'] = row['start_date']\n dayrow['end_date'] = row['end_date']\n\n # Add to container\n gtfs_calendar = gtfs_calendar.append(dayrow, ignore_index=True, sort=False)\n\n # Fix column order\n col_order = ['service_id', 'monday', 'tuesday', 'wednesday',\n 'thursday', 'friday', 'saturday', 'sunday',\n 'start_date', 'end_date']\n gtfs_calendar = gtfs_calendar[col_order].copy()\n\n # Ensure correct datatypes\n int_types = ['monday', 'tuesday', 'wednesday',\n 'thursday', 'friday', 'saturday', 'sunday']\n for col in int_types:\n gtfs_calendar[col] = gtfs_calendar[col].astype(int)\n\n return gtfs_calendar",
"def get_calendars(self):\n service = build('calendar', 'v3', credentials=self._get_creds())\n\n return service.calendarList().list().execute()",
"def calendars(self):\n return self.calendar_home_set.calendars()",
"def calendar_list(self, calendar_id):\r\n return CalendarList(self, calendar_id)",
"def pull_calendar_events(calendar_service, calendarId, lastUpdated):\n\n\t# Edit to set the upper bound for start times for which events should be returned.\n\t# This prevents us from pulling events in the distant future, for which there are no invitees save the organizer.\n\ttimeMax = (datetime.datetime.now() + datetime.timedelta(days=14)).isoformat('T')\n\n\t# Edit to determine what is returned by API \n\tfieldString = \"\"\"items(attendees(displayName,email,optional,resource,responseStatus),creator(displayName,email),description,htmlLink,id,location,organizer(displayName,email),recurrence,start/dateTime,status,summary,updated),nextPageToken\"\"\"\n\n\t# Edit to determine the upper bound of event start time that should be fetched\n\ttimeString = timeMax + 'z' # for some reason the Google APIs really need that 'z'\n\n\teventList = [] # List of events returned by API.\n\tnextPageToken = \"\"\n\twhile True:\n\t\tprint(\"------NEW REQUEST------\")\n\t\ttry:\n\t\t\tresponse = calendar_service.events().list(calendarId=calendarId, updatedMin=lastUpdated,orderBy=\"updated\", pageToken = nextPageToken, timeMax=timeString, fields = fieldString).execute()\n\t\texcept Exception, e:\n\t\t\traise e\n\t\tif 'items' in response:\n\t\t\teventList += response['items'] # if the response has events, add them\n\n\t\tif \"nextPageToken\" not in response:\n\t\t\tprint(\"DONE\")\n\t\t\tbreak \n\t\t\t# If there's no nextPageToken in response body, we break out and return what we have\n\t\telse:\n\t\t\tprint(\"NEXT PAGE TOKEN: \" + response['nextPageToken'])\n\t\t\tnextPageToken = response['nextPageToken'] # Otherwise, make another request for next page\n\n\treturn eventList",
"def calendars(self):\n return self.properties.get('calendars',\n CalendarCollection(self.context, ResourcePath(\"calendars\", self.resource_path)))",
"def get_events_in_calendar(calendarName, calendarMap, service, startDate, endDate=None):\n\n print '\\nGet ' + calendarName + ' Events Since the beginning of the year.............'\n startDate = get_start_date(startDate)\n endDate = get_now_date()\n # service = build_service()\n # calendarMap = get_calendar_list_map(service)\n calendarId = calendarMap.get(calendarName)\n eventsResult = (service.events().list(calendarId = calendarId, timeMin = startDate, timeMax = endDate, maxResults = 1000, singleEvents = True, orderBy = 'startTime').execute())\n \n events = eventsResult.get('items', [])\n events_list = []\n\n if not events:\n print 'No events found.'\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n end = event['end'].get('dateTime', event['end'].get('date'))\n startTS, endTS = get_ts_from_datetime(start, end)\n \n duration = (endTS - startTS) / 60\n event_type = calendarName\n \n eventTitle = event['summary']\n event_name = ''.join([i if ord(i) < 128 else '' for i in eventTitle.replace(',','')])\n\n #print start[:10], duration, event_type, event_name\n \n events_list.append([start[:10], duration, event_type, event_name])\n return events_list",
"def get_gcal_service(credentials):\n app.logger.debug(\"Entering get_gcal_service\")\n http_auth = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http_auth)\n plusService = discovery.build('plus', 'v1', http=http_auth)\n app.logger.debug(\"Returning service\")\n return [service, plusService]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A helper method that generates a dictionary of arguments needed to instantiate a BaseBoto object. The purpose of this method is to abstract out the code to handle optional CLI arguments and not duplicate the None handling code. | def __get_arguments(args=None, logger=None, stats=None):
if not args:
parser = get_parser()
add_boto_cli_arguments(parser)
# Parse only the known arguments added by add_boto_cli_arguments().
# We only need those arguments to create Boto object, nothing else.
# parse_known_args() return (Namespace, list of unknown arguments),
# we only care about the Namespace object here.
args = parser.parse_known_args()[0]
if not logger:
logger = get_logger(name=NAME)
if not stats:
stats = get_stats(prefix=NAME)
return {
'log_level': getattr(args, 'boto_log_level', DEFAULT['log_level']()),
'access_key': getattr(args, 'boto_access_key', DEFAULT['access_key']()),
'secret_key': getattr(args, 'boto_secret_key', DEFAULT['secret_key']()),
'region': getattr(args, 'boto_region', DEFAULT['region']()),
'logger': logger,
'stats': stats,
} | [
"def get_default_arg_dict(customizable_class: Any) -> Dict[str, Any]:\n init_arg_dicts = get_extra_argument_dicts(customizable_class)\n found_opts: Dict[str, Any] = {}\n for arg_group in init_arg_dicts:\n for arg_name, arg_attributes in arg_group[\"args\"].items():\n found_opts[arg_name] = arg_attributes[\"default\"]\n filtered_opts = {k: v for k, v in found_opts.items() if v is not None}\n return filtered_opts",
"def MakeArgKwargs(self):\n return {\n 'help': self.BuildHelpText(),\n 'required': self.IsArgRequired(),\n 'hidden': self.hidden,\n }",
"def parse_generate_arguments(arguments):\n return_value = {}\n for key in arguments:\n return_value[key] = CONFIG_KEY_PARSER[key](arguments[key])\n\n return return_value",
"def get_params_from_args(self, args):\n if not isinstance(args, dict):\n args = vars(args)\n\n client_secrets = os.path.expanduser(args['client_secrets'])\n if not os.path.exists(client_secrets):\n raise AgentConfigurationException(\"Couldn't find your client secrets \" + \\\n \"file at {0}\".format(client_secrets))\n shutil.copy(client_secrets, LocalState.get_CLIENT_SECRETS_LOCATION(\n args['keyname']))\n\n params = {\n self.PARAM_GROUP : args['group'],\n self.PARAM_IMAGE_ID : args['machine'],\n self.PARAM_INSTANCE_TYPE : args['instance_type'],\n self.PARAM_KEYNAME : args['keyname'],\n self.PARAM_PROJECT : args['project'],\n self.PARAM_SECRETS : self.CLIENT_SECRETS_LOCATION\n }\n\n return params",
"def generate_command_args_with_additional_fields(additional_fields):\n command_args: Dict[str, str] = {}\n actual_additional_fields: Dict[str, str] = {}\n for each_field in additional_fields:\n if each_field in DEFAULT_ARGS:\n command_args[each_field] = additional_fields[each_field]\n else:\n actual_additional_fields[each_field] = additional_fields[each_field]\n command_args[\"additional_fields\"] = remove_null_fields_and_convert_additional_fields_in_string(\n actual_additional_fields)\n return command_args",
"def test_add_cli_arguments(self):\n self.app = Application()\n\n args = vars(self.app.args)\n\n self.assertIn('boto_access_key', args)\n self.assertIn('boto_secret_key', args)\n self.assertIn('boto_region', args)\n self.assertIn('boto_log_level', args)",
"def _get_init_args(self):\n\n return dict(enum=self.enum, dflt=self._defname,\n base=self.base, shape=self.shape)",
"def _generate_args():\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-c\", \"--conf\", required=True,\n help=\"path to the .ini configuration file\")\n ap.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n return ap.parse_args()",
"def _arg_parse(self, **options) -> Dict[str, Any]:\n extra_options = dict()\n for key, value in options.items():\n private_key = f\"__{key}\"\n if hasattr(self, private_key):\n setattr(self, private_key, value)\n else:\n extra_options[key] = value\n\n return extra_options",
"def init_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--image_path', type=str, help='The image path or the src image save dir')\n parser.add_argument('--weights_path', type=str, help='The model weights path')\n\n return parser.parse_args()",
"def as_kwargs(self) -> Dict[str, Any]:\n ret = {}\n for arg in self.args.values():\n ret[arg.name] = arg.value\n return ret",
"def get_defaults(self):\n default_dict = {}\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n if defaults:\n default_dict = dict(zip(args[-len(defaults):], defaults))\n return default_dict",
"def _initiate_meta(kwargs, activity, ignores=()):\n meta = {AssociatedObjectId.ACTIVITY_ID: str(_retrieve_object_id(activity))}\n # also add the keys' in their snake case appearance so noPadding and no_padding, customHeight and custom_height\n keys_in_kwargs = KECARD_COMMON_KEYS + [snakecase(k) for k in KECARD_COMMON_KEYS]\n\n # initiate the meta based on known kwarg arguments\n for key in list(set(keys_in_kwargs)):\n if key in kwargs:\n meta[camelcase(key)] = kwargs.pop(key)\n\n # we check for custom_height specifically and deal with it.\n if snakecase(MetaWidget.CUSTOM_HEIGHT) in kwargs:\n meta[MetaWidget.CUSTOM_HEIGHT] = kwargs.pop(snakecase(MetaWidget.CUSTOM_HEIGHT))\n\n # remove the 'ignores' from the meta\n for key in ignores:\n if key in meta:\n del meta[key]\n\n return meta",
"def init_json_args(cls):\n defaults = cls._base_json_args()\n\n if cls.__json_args__ is None:\n cls.__json_args__ = defaults\n else:\n cls.__json_args__ = mapping_extend(defaults, cls.__json_args__)",
"def __init__(self, *args, **kwargs):\n\t\tconfig_file = None\n\t\tif len(args) > 0:\n\t\t\tconfig_file = args[0]\n\t\t\targs = args[1:]\n\t\targparse.ArgumentParser.__init__(self, *args, **kwargs)\n\t\tinit(config_file)\n\t\tfor name in parameters:\n\t\t\ttyp = type(parameters[name][\"value\"])\n\t\t\tself.add_argument(\"--\"+name, type=str if typ is bool else typ)",
"def _extract_params(self, kwargs, hyperparameters):\n init_params = dict()\n fit_params = dict()\n produce_params = dict()\n\n for name, param in hyperparameters.get('fixed', dict()).items():\n if name in kwargs:\n value = kwargs.pop(name)\n\n elif 'default' in param:\n value = param['default']\n\n else:\n raise TypeError(\"{} required argument '{}' not found\".format(self.name, name))\n\n init_params[name] = value\n\n for name, param in hyperparameters.get('tunable', dict()).items():\n if name in kwargs:\n init_params[name] = kwargs.pop(name)\n\n if not isinstance(self.fit_args, str):\n fit_args = [arg['name'] for arg in self.fit_args]\n else:\n fit_args = []\n\n if not isinstance(self.produce_args, str):\n produce_args = [arg['name'] for arg in self.produce_args]\n else:\n produce_args = []\n\n for name in list(kwargs.keys()):\n if name in fit_args:\n fit_params[name] = kwargs.pop(name)\n\n elif name in produce_args:\n produce_params[name] = kwargs.pop(name)\n\n if kwargs:\n error = \"Unexpected hyperparameters '{}'\".format(', '.join(kwargs.keys()))\n raise TypeError(error)\n\n return init_params, fit_params, produce_params",
"def _get_input_args(bam_file, data, out_base, background):\n if dd.get_genome_build(data) in [\"hg19\"]:\n return [\"--PileupFile\", _create_pileup(bam_file, data, out_base, background)]\n else:\n return [\"--BamFile\", bam_file]",
"def _get_save_args(self, attributes=True, null_check=True):\r\n kwargs = collections.OrderedDict()\r\n serialized = self._serialize(null_check=null_check)\r\n hash_key = serialized.get(HASH)\r\n range_key = serialized.get(RANGE, None)\r\n args = (hash_key, )\r\n if range_key:\r\n kwargs[pythonic(RANGE_KEY)] = range_key\r\n if attributes:\r\n kwargs[pythonic(ATTRIBUTES)] = serialized[pythonic(ATTRIBUTES)]\r\n return args, kwargs",
"def _base_json_args(cls):\n relationships = cls.__mapper__.relationships.keys()\n relationship_options = dict([(x, False) for x in relationships])\n\n defaults = {'relationships': relationship_options,\n 'exclude_attrs': [],\n 'include_attrs': []}\n return defaults"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a usable Boto object without creating a class around it. In the context of a krux.cli (or similar) interface the 'args', 'logger' and 'stats' objects should already be present. If you don't have them, however, we'll attempt to provide usable ones for the boto setup. (If you omit the add_boto_cli_arguments() call during other cli setup, the Boto object will still work, but its cli options won't show up in help output) | def get_boto(args=None, logger=None, stats=None):
return Boto(**__get_arguments(args, logger, stats)) | [
"def get_boto3(args=None, logger=None, stats=None):\n return Boto3(**__get_arguments(args, logger, stats))",
"def __get_arguments(args=None, logger=None, stats=None):\n\n if not args:\n parser = get_parser()\n add_boto_cli_arguments(parser)\n # Parse only the known arguments added by add_boto_cli_arguments().\n # We only need those arguments to create Boto object, nothing else.\n # parse_known_args() return (Namespace, list of unknown arguments),\n # we only care about the Namespace object here.\n args = parser.parse_known_args()[0]\n\n if not logger:\n logger = get_logger(name=NAME)\n\n if not stats:\n stats = get_stats(prefix=NAME)\n\n return {\n 'log_level': getattr(args, 'boto_log_level', DEFAULT['log_level']()),\n 'access_key': getattr(args, 'boto_access_key', DEFAULT['access_key']()),\n 'secret_key': getattr(args, 'boto_secret_key', DEFAULT['secret_key']()),\n 'region': getattr(args, 'boto_region', DEFAULT['region']()),\n 'logger': logger,\n 'stats': stats,\n }",
"def get_elb(args=None, logger=None, stats=None):\n if not args:\n parser = get_parser()\n add_elb_cli_arguments(parser)\n args = parser.parse_args()\n if not logger:\n logger = get_logger(name=NAME)\n\n if not stats:\n stats = get_stats(prefix=NAME)\n\n boto = Boto3(\n log_level=args.boto_log_level,\n access_key=args.boto_access_key,\n secret_key=args.boto_secret_key,\n region=args.boto_region,\n logger=logger,\n stats=stats,\n )\n return ELB(\n boto=boto,\n logger=logger,\n stats=stats,\n )",
"def _instance_from_boto_instance(boto_instance, driver, owner_id):\n return Instance(\n boto_instance.id,\n driver,\n owner_id,\n boto_instance.image_id,\n boto_instance.image_id,\n boto_instance.placement,\n boto_instance.state,\n boto_instance.public_dns_name,\n boto_instance.private_dns_name,\n )",
"def createaws() -> my_aws_api_library.MyAws:\r\n aws_cred_file_path = os.environ['AWS_CRED_FILE']\r\n comp_pubkey = os.environ['COMPANY_PUBKEY']\r\n my_aws = my_aws_api_library.MyAws(aws_cred_file_path, comp_pubkey)\r\n return my_aws",
"def aws_cli(args: List[str]):\n\n try:\n text_output = subprocess.check_output(['aws'] + args, text=True)\n except subprocess.CalledProcessError as e:\n raise Exception(f\"failed to call AWS CLI ({e.returncode}): \\n{e.stdout}\\n\\n{e.stderr}\") from e\n\n try:\n json_obj = json.loads(text_output)\n except json.JSONDecodeError as e:\n raise Exception(f\"AWS CLI did not output JSON as expected ({e.msg}). Output was:\\n{text_output}\") from e\n\n return json_obj",
"def __init__(\n self,\n service_name: str,\n account_id: str,\n region_name: Optional[str] = None,\n aws_creds: Optional[Dict[str, str]] = None,\n profile_name: Optional[str] = None,\n placebo: Optional[Any] = None,\n placebo_data_path: Optional[str] = None,\n placebo_mode: Optional[str] = \"record\",\n max_attempts: int = 20,\n config: Optional[Config] = None,\n max_attempts_on_client_error: int = 10,\n ):\n self._service_name = service_name\n self._region_name = region_name\n self._account_id = account_id\n self._max_attempts_on_client_error = max_attempts_on_client_error\n\n # Build a clojure in order to recreate boto3 client if needed\n\n def _create_client(service: str = None):\n return get_client(\n session=get_session(\n aws_creds=aws_creds,\n profile_name=profile_name,\n placebo=placebo,\n placebo_data_path=placebo_data_path,\n placebo_mode=placebo_mode,\n ),\n service_name=service if service else service_name,\n region_name=region_name,\n max_attempts=max_attempts,\n config=config,\n )\n\n # set client factory\n self.create_client = _create_client\n\n # Build boto3 client\n self._client = self.create_client()",
"def _get_client(self):\n if self._client is None:\n self._client = self.boto.client(service_name='elb', region_name=self.boto.cli_region)\n\n return self._client",
"def with_boto3():\n pytest.importorskip(\"boto3\")\n yield",
"def bcbio_s3_instance_profile(conn, args):\n import boto\n if hasattr(args, \"nocreate\") and args.nocreate:\n return {\"instance_profile\": \"\"}\n base_name = args.cluster if hasattr(args, \"cluster\") and args.cluster else \"bcbio\"\n name = \"%s_full_s3_access\" % (base_name)\n try:\n ip = conn.get_instance_profile(name)\n except boto.exception.BotoServerError:\n print(\"Instance profile %s doesn't exist, creating\" % name)\n ip = conn.create_instance_profile(name)\n try:\n conn.get_role(name)\n except boto.exception.BotoServerError:\n print(\"Role %s doesn't exist, creating\" % name)\n conn.create_role(name)\n conn.put_role_policy(name, name, S3_POLICY)\n if not tz.get_in([\"get_instance_profile_response\", \"get_instance_profile_result\", \"instance_profile\", \"roles\"],\n ip):\n conn.add_role_to_instance_profile(name, name)\n print(\"Instance profile: %s\" % name)\n return {\"instance_profile\": name}",
"def _image_from_boto_image(boto_instance, driver):\n return Image(\n boto_instance.id,\n driver,\n boto_instance.ownerId,\n boto_instance.name,\n boto_instance.description,\n boto_instance.architecture,\n )",
"def init():\n formatter = cli.make_formatter('aws_image')\n\n @click.group()\n def image():\n \"\"\"Manage image configuration\"\"\"\n pass\n\n @image.command(name='list')\n @click.option(\n '--account', required=False,\n help='Image account, defaults to current.'\n )\n @click.option(\n '--match',\n type=aws_cli.IMAGE,\n required=False,\n help='image name, id, or tag (key=value)'\n )\n @click.argument(\n 'image',\n required=False,\n type=aws_cli.IMAGE\n )\n @aws_cli.admin.aws.ON_AWS_EXCEPTIONS\n def _list(account, image, match):\n \"\"\"List images\"\"\"\n ec2_conn = awscontext.GLOBAL.ec2\n if not account:\n account = 'self'\n\n if not image:\n image = {}\n\n if not match:\n match = {}\n\n images = ec2client.list_images(ec2_conn,\n owners=[account],\n **match)\n cli.out(formatter(images))\n\n @image.command()\n @click.option(\n '--account', required=False,\n help='Image account, defaults to current.'\n )\n @click.argument(\n 'image',\n required=False,\n type=aws_cli.IMAGE\n )\n def configure(account, image):\n \"\"\"Configure AMI image.\"\"\"\n if not image:\n image = {'ids': [metadata.image_id()]}\n\n ec2_conn = awscontext.GLOBAL.ec2\n\n owners = []\n if not account:\n account = 'self'\n\n image_obj = ec2client.get_image(ec2_conn, owners=[account], **image)\n cli.out(formatter(image_obj))\n\n @image.command(name='create')\n @click.option(\n '--base-image',\n required=True,\n type=aws_cli.IMAGE,\n help='Base image.'\n )\n @click.option(\n '--base-image-account',\n required=False,\n help='Base image account.'\n )\n @click.option(\n '--userdata',\n required=True,\n type=click.Path(exists=True),\n multiple=True,\n help='Cloud-init user data.'\n )\n @click.option(\n '--instance-profile',\n required=True,\n help='Instance profile with create image privs.'\n )\n @click.option(\n '--secgroup',\n required=True,\n type=aws_cli.SECGROUP,\n help='Security group'\n )\n @click.option(\n '--subnet',\n required=True,\n type=aws_cli.SUBNET,\n help='Subnet'\n )\n @click.option(\n '--key',\n help='SSH key'\n )\n @click.argument('image', required=True, type=str)\n @aws_cli.admin.aws.ON_AWS_EXCEPTIONS\n def create(base_image, base_image_account, userdata, instance_profile,\n secgroup, subnet, image, key):\n \"\"\"Create image\"\"\"\n ec2_conn = awscontext.GLOBAL.ec2\n\n cloud_init = ud.CloudInit()\n for filename in userdata:\n with io.open(filename, 'rb') as f:\n content = f.read()\n if filename.endswith('.gz'):\n content = gzip.decompress(content)\n\n cloud_init.add(content.decode())\n\n cloud_init.add_cloud_config({\n 'image_description': '',\n 'image_name': image,\n })\n\n base_image_id = aws_cli.admin.image_id(\n ec2_conn, base_image, account=base_image_account)\n secgroup_id = aws_cli.admin.secgroup_id(ec2_conn, secgroup)\n subnet_id = aws_cli.admin.subnet_id(ec2_conn, subnet)\n tags = [{'ResourceType': 'instance',\n 'Tags': [{'Key': 'Name',\n 'Value': 'ImageBuild-{}'.format(image)}]}]\n\n instance = ec2client.create_instance(\n ec2_conn,\n user_data=cloud_init.userdata(),\n image_id=base_image_id,\n instance_type='t2.small',\n key=key,\n tags=tags,\n secgroup_ids=secgroup_id,\n subnet_id=subnet_id,\n instance_profile=instance_profile,\n disk=10\n )\n click.echo(instance['Instances'][0]['InstanceId'])\n\n @image.command(name='create-from-snapshot')\n @click.option('--snapshot',\n type=aws_cli.SNAPSHOT,\n required=True)\n @click.argument('image', required=True)\n def create_from_snapshot(snapshot, image):\n \"\"\"Create image from snapshot.\"\"\"\n\n ec2_conn = awscontext.GLOBAL.ec2\n\n snapshot = 
ec2client.get_snapshot(ec2_conn, **snapshot)\n\n snapshot_tag = {}\n for kv in snapshot['Tags']:\n key = kv['Key']\n value = kv['Value']\n if value == 'True':\n value = True\n if value == 'False':\n value = False\n snapshot_tag[key] = value\n\n kwargs = {}\n kwargs['Name'] = image\n kwargs['Architecture'] = snapshot_tag['Architecture']\n kwargs['EnaSupport'] = snapshot_tag['EnaSupport']\n kwargs['RootDeviceName'] = snapshot_tag['Device']\n kwargs['BlockDeviceMappings'] = [\n {\n 'DeviceName': snapshot_tag['Device'],\n 'Ebs': {\n 'SnapshotId': snapshot['SnapshotId'],\n }\n }\n ]\n kwargs['VirtualizationType'] = snapshot_tag['VirtualizationType']\n\n image = ec2_conn.register_image(**kwargs)\n print(image['ImageId'])\n\n @image.command(name='share')\n @click.option(\n '--account',\n required=True,\n help='Account ID.'\n )\n @click.argument('image', required=True, type=str)\n @aws_cli.admin.aws.ON_AWS_EXCEPTIONS\n def share(account, image):\n \"\"\"Share Image\"\"\"\n ec2_conn = awscontext.GLOBAL.ec2\n ec2client.get_image(ec2_conn, ids=[image])\n\n share_image = ec2_conn.modify_image_attribute(\n ImageId=image,\n Attribute='launchPermission',\n OperationType='add',\n UserIds=[\n account\n ]\n )\n\n click.echo('%s has been shared with %s' % (image, account))\n\n @image.command(name='delete')\n @click.argument('image', required=True, type=aws_cli.IMAGE)\n @aws_cli.admin.aws.ON_AWS_EXCEPTIONS\n def delete(image):\n \"\"\"Delete Image\"\"\"\n ec2_conn = awscontext.GLOBAL.ec2\n ec2client.delete_images(\n ec2_conn=ec2_conn,\n ids=image['ids']\n )\n\n click.echo(image)\n\n del _list\n del configure\n del create\n del delete\n\n return image",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Bucket':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = BucketArgs.__new__(BucketArgs)\n\n __props__.__dict__[\"acl\"] = None\n __props__.__dict__[\"autoclass\"] = None\n __props__.__dict__[\"billing\"] = None\n __props__.__dict__[\"cors\"] = None\n __props__.__dict__[\"custom_placement_config\"] = None\n __props__.__dict__[\"default_event_based_hold\"] = None\n __props__.__dict__[\"default_object_acl\"] = None\n __props__.__dict__[\"encryption\"] = None\n __props__.__dict__[\"etag\"] = None\n __props__.__dict__[\"iam_configuration\"] = None\n __props__.__dict__[\"kind\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"lifecycle\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"location_type\"] = None\n __props__.__dict__[\"logging\"] = None\n __props__.__dict__[\"metageneration\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"owner\"] = None\n __props__.__dict__[\"predefined_acl\"] = None\n __props__.__dict__[\"predefined_default_object_acl\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"project_number\"] = None\n __props__.__dict__[\"projection\"] = None\n __props__.__dict__[\"retention_policy\"] = None\n __props__.__dict__[\"rpo\"] = None\n __props__.__dict__[\"satisfies_pzs\"] = None\n __props__.__dict__[\"self_link\"] = None\n __props__.__dict__[\"storage_class\"] = None\n __props__.__dict__[\"time_created\"] = None\n __props__.__dict__[\"updated\"] = None\n __props__.__dict__[\"user_project\"] = None\n __props__.__dict__[\"versioning\"] = None\n __props__.__dict__[\"website\"] = None\n return Bucket(resource_name, opts=opts, __props__=__props__)",
"def xray_botocore_api_call(wrapped, instance, args, kwargs):\n return generic_xray_wrapper(\n wrapped,\n instance,\n args,\n kwargs,\n name=get_service_name,\n namespace=\"aws\",\n metadata_extractor=extract_aws_metadata,\n error_handling_type=ERROR_HANDLING_BOTOCORE,\n )",
"def client() -> botocore.client.BaseClient:\n global _client\n if _client is None:\n endpoint_url = os.environ.get('LOCALSTACK_S3_URL')\n # If endpoint_url is None, botocore constructs the default AWS URL\n _client = boto3.client('s3', endpoint_url=endpoint_url)\n return _client",
"def get_connection():\n try:\n ec2 = boto3.resource('ec2')\n except (botocore.exceptions.NoRegionError,\n botocore.exceptions.NoCredentialsError) as e:\n # TODO(rushiagr): instead of telling people to run credentials, ask\n # credentials here itself\n print('Credentials and region not configured? Run \"aws configure\" to configure it.')\n # TODO(rushiagr): let people provide singapore, and guess region name from\n # that.\n print('Provide region as \"ap-southeast-1\" for Singapore.')\n return None\n return ec2",
"def make_sdk(options=None, **kwargs):\n from openstack import connection\n cloud = get_config(options=options, **kwargs)\n return connection.from_config(cloud_config=cloud, options=options)",
"def aws(ctx): # pylint: disable=unused-argument\n pass # pylint: disable=unnecessary-pass",
"def create_boto3_client(config, service):\n session = boto3.Session(profile_name=config.get('AWS_ACCESS', 'AWS_PROFILE'))\n return session.client(service, region_name=config.get('AWS_ACCESS', 'AWS_REGION'))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
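The row ending above pairs its query with a large click-based `init()` function for AMI management. The sketch below is written for this dump rather than taken from it: it shows only the group/sub-command skeleton that function builds on, using the standard boto3 `describe_images` call. Command and option names here are illustrative.

import boto3
import click

@click.group()
def image():
    """Manage AMI images."""

@image.command(name='list')
@click.option('--account', default='self', help='Image owner, defaults to the current account.')
def list_images(account):
    """List images owned by ACCOUNT."""
    ec2 = boto3.client('ec2')
    for img in ec2.describe_images(Owners=[account])['Images']:
        # Name can be missing on some AMIs, so fall back to an empty string.
        click.echo('%s\t%s' % (img['ImageId'], img.get('Name', '')))

if __name__ == '__main__':
    image()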
Return a usable Boto3 object without creating a class around it. In the context of a krux.cli (or similar) interface the 'args', 'logger' and 'stats' objects should already be present. If you don't have them, however, we'll attempt to provide usable ones for the boto setup. (If you omit the add_boto_cli_arguments() call during other cli setup, the Boto object will still work, but its cli options won't show up in help output) | def get_boto3(args=None, logger=None, stats=None):
return Boto3(**__get_arguments(args, logger, stats)) | [
"def get_boto(args=None, logger=None, stats=None):\n return Boto(**__get_arguments(args, logger, stats))",
"def __get_arguments(args=None, logger=None, stats=None):\n\n if not args:\n parser = get_parser()\n add_boto_cli_arguments(parser)\n # Parse only the known arguments added by add_boto_cli_arguments().\n # We only need those arguments to create Boto object, nothing else.\n # parse_known_args() return (Namespace, list of unknown arguments),\n # we only care about the Namespace object here.\n args = parser.parse_known_args()[0]\n\n if not logger:\n logger = get_logger(name=NAME)\n\n if not stats:\n stats = get_stats(prefix=NAME)\n\n return {\n 'log_level': getattr(args, 'boto_log_level', DEFAULT['log_level']()),\n 'access_key': getattr(args, 'boto_access_key', DEFAULT['access_key']()),\n 'secret_key': getattr(args, 'boto_secret_key', DEFAULT['secret_key']()),\n 'region': getattr(args, 'boto_region', DEFAULT['region']()),\n 'logger': logger,\n 'stats': stats,\n }",
"def with_boto3():\n pytest.importorskip(\"boto3\")\n yield",
"def client() -> botocore.client.BaseClient:\n global _client\n if _client is None:\n endpoint_url = os.environ.get('LOCALSTACK_S3_URL')\n # If endpoint_url is None, botocore constructs the default AWS URL\n _client = boto3.client('s3', endpoint_url=endpoint_url)\n return _client",
"def get_s3_client(args: argparse.Namespace) -> botocore.clients.s3:\n\n assert args.s3_region_name is not None, \"set COMPSYN_S3_REGION_NAME\"\n assert args.s3_access_key_id is not None, \"set COMPSYN_S3_ACCESS_KEY_ID\"\n assert args.s3_secret_access_key is not None, \"set COMPSYN_S3_SECRET_ACCESS_KEY\"\n assert args.s3_bucket is not None, \"set COMPSYN_S3_BUCKET\"\n\n return boto3.session.Session().client(\n \"s3\",\n region_name=args.s3_region_name,\n endpoint_url=args.s3_endpoint_url,\n aws_access_key_id=args.s3_access_key_id,\n aws_secret_access_key=args.s3_secret_access_key,\n )",
"def s3_get_instance__(self, object_key : str, bucket : str) -> any:\r\n\r\n _keys = self.s3_objects__(bucket)\r\n if object_key not in _keys:\r\n result = None\r\n else:\r\n _data = self.s3_engine.get_object(Bucket=bucket, Key=object_key)\r\n _bytes = _data.get('Body')\r\n if bytes is None:\r\n result = None\r\n else:\r\n result = pickle.loads(_bytes.read())\r\n return result",
"def aws_cli(args: List[str]):\n\n try:\n text_output = subprocess.check_output(['aws'] + args, text=True)\n except subprocess.CalledProcessError as e:\n raise Exception(f\"failed to call AWS CLI ({e.returncode}): \\n{e.stdout}\\n\\n{e.stderr}\") from e\n\n try:\n json_obj = json.loads(text_output)\n except json.JSONDecodeError as e:\n raise Exception(f\"AWS CLI did not output JSON as expected ({e.msg}). Output was:\\n{text_output}\") from e\n\n return json_obj",
"def create_boto3_client(config, service):\n session = boto3.Session(profile_name=config.get('AWS_ACCESS', 'AWS_PROFILE'))\n return session.client(service, region_name=config.get('AWS_ACCESS', 'AWS_REGION'))",
"def bcbio_s3_instance_profile(conn, args):\n import boto\n if hasattr(args, \"nocreate\") and args.nocreate:\n return {\"instance_profile\": \"\"}\n base_name = args.cluster if hasattr(args, \"cluster\") and args.cluster else \"bcbio\"\n name = \"%s_full_s3_access\" % (base_name)\n try:\n ip = conn.get_instance_profile(name)\n except boto.exception.BotoServerError:\n print(\"Instance profile %s doesn't exist, creating\" % name)\n ip = conn.create_instance_profile(name)\n try:\n conn.get_role(name)\n except boto.exception.BotoServerError:\n print(\"Role %s doesn't exist, creating\" % name)\n conn.create_role(name)\n conn.put_role_policy(name, name, S3_POLICY)\n if not tz.get_in([\"get_instance_profile_response\", \"get_instance_profile_result\", \"instance_profile\", \"roles\"],\n ip):\n conn.add_role_to_instance_profile(name, name)\n print(\"Instance profile: %s\" % name)\n return {\"instance_profile\": name}",
"def get_boto3_session():\n # profile_name = util.get_connection_profile(connection, \"aws\")\n # try:\n # session = boto3.Session(profile_name=profile_name)\n # except botocore.exceptions.ProfileNotFound:\n # raise RuntimeError(f\"[AWS] No such profile: {profile_name} (aws configure --profile {profile_name})\")\n # return session\n\n return boto3.Session()",
"def get_aioboto3_version() -> str:\n try:\n from aioboto3 import __version__ as version # type: ignore\n except ImportError:\n raise RuntimeError(\"aioboto3 is not installed\")\n\n return version",
"def get_s3_client():\n return boto3.resource('s3')",
"def client():\n global _environment, _client\n\n if _environment is None:\n raise EnvironmentNotInitializedError()\n\n if _client is None:\n logger.debug(\"Initializing AWS boto3 client in %s\", _environment)\n sandbox: bool = _environment is Environment.sandbox\n _client = mturk.get_client(sandbox)\n\n return _client",
"def get_bucket_boto3(name, create=False, versioning=True):\n s3 = get_s3_resource()\n # try to fetch the specified bucket -- may return an empty list\n bucket = [b for b in s3.buckets.all() if b.name == name]\n\n try:\n assert len(bucket) > 0\n return bucket[0]\n\n # bucket not found\n except AssertionError:\n if create:\n bucket = s3.create_bucket(Bucket=name)\n print(f'New bucket {name} was created')\n else:\n print(f'Bucket {name} not found')\n return None\n\n # enable versioning\n if versioning:\n bucket_versioning = s3.BucketVersioning(name)\n bucket_versioning.enable()\n\n print(f\"Versioning: {bucket_versioning.status}\")\n\n return bucket",
"def get_s3_args(\n parser: Optional[argparse.ArgumentParser] = None,\n) -> argparse.ArgumentParser:\n\n if parser is None:\n parser = argparse.ArgumentParser()\n\n s3_parser = parser.add_argument_group(\"s3\")\n\n s3_parser.add_argument(\n \"--s3-bucket\",\n type=str,\n action=env_default(\"COMPSYN_S3_BUCKET\"),\n required=False,\n help=\"bucket where img data is stored in S3\",\n )\n s3_parser.add_argument(\n \"--s3-region-name\",\n type=str,\n required=False,\n action=env_default(\"COMPSYN_S3_REGION_NAME\"),\n help=\"S3 region\",\n )\n s3_parser.add_argument(\n \"--s3-endpoint-url\",\n action=env_default(\"COMPSYN_S3_ENDPOINT_URL\"),\n required=False,\n help=\"S3 endpoint URL (only required for non-AWS S3)\",\n )\n s3_parser.add_argument(\n \"--s3-access-key-id\",\n type=str,\n action=env_default(\"COMPSYN_S3_ACCESS_KEY_ID\"),\n required=False,\n )\n s3_parser.add_argument(\n \"--s3-secret-access-key\",\n type=str,\n action=env_default(\"COMPSYN_S3_SECRET_ACCESS_KEY\"),\n required=False,\n )\n\n return parser",
"def __init__(\n self,\n service_name: str,\n account_id: str,\n region_name: Optional[str] = None,\n aws_creds: Optional[Dict[str, str]] = None,\n profile_name: Optional[str] = None,\n placebo: Optional[Any] = None,\n placebo_data_path: Optional[str] = None,\n placebo_mode: Optional[str] = \"record\",\n max_attempts: int = 20,\n config: Optional[Config] = None,\n max_attempts_on_client_error: int = 10,\n ):\n self._service_name = service_name\n self._region_name = region_name\n self._account_id = account_id\n self._max_attempts_on_client_error = max_attempts_on_client_error\n\n # Build a clojure in order to recreate boto3 client if needed\n\n def _create_client(service: str = None):\n return get_client(\n session=get_session(\n aws_creds=aws_creds,\n profile_name=profile_name,\n placebo=placebo,\n placebo_data_path=placebo_data_path,\n placebo_mode=placebo_mode,\n ),\n service_name=service if service else service_name,\n region_name=region_name,\n max_attempts=max_attempts,\n config=config,\n )\n\n # set client factory\n self.create_client = _create_client\n\n # Build boto3 client\n self._client = self.create_client()",
"def boto3_stubber_path():\n return \"pcluster.utils.boto3\"",
"def __init__(self):\n print(\"initializing s3 client\")\n try:\n self.s3_client = boto3.client(\"s3\", region_name='us-east-1')\n except Exception as error:\n print('client - Error initializing boto client for s3: ' + repr(error))\n raise",
"def createaws() -> my_aws_api_library.MyAws:\r\n aws_cred_file_path = os.environ['AWS_CRED_FILE']\r\n comp_pubkey = os.environ['COMPANY_PUBKEY']\r\n my_aws = my_aws_api_library.MyAws(aws_cred_file_path, comp_pubkey)\r\n return my_aws"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
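The `get_boto3` row above returns a krux `Boto3` wrapper built from CLI args, a logger and a stats object, none of which are included in this dump. As a rough, hypothetical analogue, the sketch below fills in default args and a logger when the caller has none, then hands back a plain `boto3.Session` instead of the krux wrapper.

import argparse
import logging
import os

import boto3

def get_session(args=None, logger=None):
    if args is None:
        # Mirror the "build usable defaults" behaviour when no CLI args were parsed.
        parser = argparse.ArgumentParser()
        parser.add_argument('--boto-region', default=os.environ.get('AWS_DEFAULT_REGION'))
        args = parser.parse_known_args()[0]
    if logger is None:
        logger = logging.getLogger('boto-helper')
    logger.debug('Creating boto3 session for region %s', args.boto_region)
    return boto3.Session(region_name=args.boto_region)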
Extract plastic class label from Image Name and return it | def ExtractLabel(ImgName):
# Each img has name notation "*****a0X*" where X is PlasticType
PlasticType = ImgName[7]
return {
'1': 0, # PET
'2': 1, # HDPE
'4': 2, # LDPE
'5': 3, # PP
'6': 4, # PS
'7': 5, # Other
}[PlasticType] | [
"def true_class_label(img_path):\n for i in img_path.split('/'):\n if i in classlabels:\n return i",
"def name_to_label(self, name):\n\t\t\treturn self.classes[name]",
"def find_label_from_image(image):\n print(\"detecting label\")\n image = Image.open(BytesIO(base64.b64decode(image)))\n model = find_label_from_image.model\n try:\n label= show_inference(model, image)\n except:\n return None\n return label",
"def imageName(self):\n return self.name.split(\".\")[0]",
"def class_name_from_file(self, image_path) -> str:\n\n return self._convert(image_path, self.classes)",
"def classify_label(classes, label):\n for cl in classes:\n for name in classes[cl][\"names\"]:\n if name.lower() == label.lower():\n return cl\n raise Exception(\"Unknown Label Class: %s\" % label)",
"def get_label(client, label):\n image_name = get_image_name()\n image = client.images.get(image_name)\n try:\n return image.labels[label]\n except KeyError:\n raise Exception(f\"Image should have a label '{label}'\")",
"def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')",
"def get_label(img_path):\n img_name = img_path.stem\n label_name = img_name + \".txt\"\n label_path = img_path.parent / label_name\n with open(label_path) as f:\n label = json.load(f)\n return label",
"def getNameForImage(self):\n return self.toShortString()",
"def _parse_classification(self, name):\n if \"committee\" in name.lower():\n return COMMITTEE\n if \"hearing\" in name.lower():\n return FORUM\n return BOARD",
"def load_single_image():\n #return image, label",
"def save_classnames_in_image_sufficientpx(\n rgb_img: np.ndarray,\n label_img: np.ndarray,\n id_to_class_name_map: Dict[int, str],\n font_color=(0, 0, 0),\n save_to_disk: bool = False,\n save_fpath: str = \"\",\n min_conncomp_px: int = 4000,\n font_scale: int = 1,\n):\n H, W, C = rgb_img.shape\n class_to_conncomps_dict = scipy_conn_comp(label_img)\n\n for class_idx, conncomps_list in class_to_conncomps_dict.items():\n for conncomp in conncomps_list:\n if conncomp.sum() < min_conncomp_px:\n continue\n text = id_to_class_name_map[class_idx]\n\n y, x = get_mean_mask_location(conncomp)\n x -= 55 # move the text so approx. centered over mask.\n x = max(0, x)\n x = min(W - 1, x)\n\n # jitter location if nonconvex object mean not within its mask\n if conncomp[y, x] != 1:\n x, y = search_jittered_location_in_mask(x, y, conncomp)\n\n # print(f'Class idx: {class_idx}: (x,y)=({x},{y})')\n rgb_img = add_text_cv2(\n rgb_img, text, coords_to_plot_at=(x, y), font_color=font_color, font_scale=font_scale, thickness=2\n )\n\n if save_to_disk:\n cv2_write_rgb(save_fpath, rgb_img)\n\n return rgb_img",
"def getLabel(file_name):\n category = file_name.split('/')[-2]\n return categories.get(category, categories['_background_noise_'])",
"def get_image_name(self): # type: (...) -> str\n return self.metadata_dict['img_name']",
"def get_name_image(url):\n name_image = url.split(u'/')[-1]\n return name_image",
"def getImageName(self):\n # ˅\n return int(self.config(['image']['img_name']))\n # ˄",
"def label(self):\n\n return self.clean_name(self.release.data['labels'][0]['name'])",
"def classify(self, image_name, pred_idx=0):\n user_dataset = UserImageDataSet(self.img_folder_path, transform=self.transform)\n image = user_dataset[int(image_name.split('_')[0])]\n image = image.unsqueeze(0)\n output = self.net(image.float())\n if pred_idx == 0:\n _, predicted = torch.max(output, 1)\n return self.classes[predicted[0]]\n else:\n output = output.tolist()[0]\n for i in range(pred_idx):\n max_idx = output.index(max(output))\n output[max_idx] = min(output)-1\n predicted = output.index(max(output))\n return self.classes[predicted]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
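The `ExtractLabel` row above maps the character at index 7 of an image name to a plastic-class index. A defensive variant with a clearer failure mode is sketched below; the sample file name is invented for the example.

PLASTIC_CLASSES = {'1': 0, '2': 1, '4': 2, '5': 3, '6': 4, '7': 5}

def extract_label(img_name):
    # Position 7 carries the resin code in the "*****a0X*" naming scheme.
    code = img_name[7]
    if code not in PLASTIC_CLASSES:
        raise ValueError('Unknown plastic type %r in %r' % (code, img_name))
    return PLASTIC_CLASSES[code]

print(extract_label('imgAAa01x.jpg'))  # -> 0 (PET)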
Support the following DHCP DeviceManager calls. self.plugin.release_dhcp_port(network.id, self.get_device_id(network)) | def release_dhcp_port(self, network_id, device_id):
LOG.debug("release_dhcp_port: %s %s", network_id, device_id) | [
"def release_dhcp_port(self, network_id, device_id):\n return self.call(self.context,\n self.make_msg('release_dhcp_port',\n network_id=network_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)",
"def _handle_dhcp_release(self, packet, source_address, port):",
"def destroy(self, network, device_name):\n if self.conf.use_namespaces:\n namespace = NS_PREFIX + network.id\n else:\n namespace = None\n\n self.driver.unplug(device_name, namespace=namespace)\n\n self.plugin.release_dhcp_port(network.id,\n self.get_device_id(network))",
"def ReleaseDHCP(self):\n if not self.iface: return False\n cmd = self.DHCP_RELEASE + \" \" + self.iface + \" 2>/dev/null\"\n misc.Run(cmd)",
"def drop_dhcp(self, dhcp_type: str, iface: str, mac_addr: str = None):\n dhcp_types = {\n \"discover\": \"0x01\",\n \"offer\": \"0x02\",\n \"request\": \"0x03\",\n \"decline\": \"0x04\",\n \"ack\": \"0x05\",\n \"nak\": \"0x06\",\n \"release\": \"0x07\",\n \"inform\": \"0x08\",\n }\n\n if dhcp_type not in dhcp_types:\n raise InvalidArgumentException(f\"{dhcp_type} not acceptable.\")\n dhcp_bit = dhcp_types.get(dhcp_type)\n if mac_addr:\n mac_binary = bin(int(mac_addr, 16))[2:]\n # add leading zeroes\n mac_binary = mac_binary.zfill(48)\n # tc u32 can only match on 32 bits at a time\n first_mac_binary, second_mac_binary = mac_binary[:32], mac_binary[32:]\n bitmask1, bitmask2 = \"0xffffffff\", \"0xffff\"\n else:\n first_mac_binary, second_mac_binary = f\"{0x0:0>32b}\", f\"{0x0:0>16b}\"\n bitmask1, bitmask2 = \"0x00000000\", \"0x0000\"\n\n self._setup_classes(iface)\n\n # where the magic happens\n try:\n self._machine[\"sudo\"](\n \"tc\",\n \"filter\",\n \"add\",\n \"dev\",\n iface,\n \"protocol\",\n \"ip\",\n \"parent\",\n \"1:\",\n \"pref\",\n self._pref,\n \"u32\",\n \"match\",\n \"ip\",\n \"protocol\",\n \"17\",\n \"0xff\",\n \"match\",\n \"u32\",\n first_mac_binary,\n bitmask1,\n \"at\",\n \"56\",\n \"match\",\n \"u16\",\n second_mac_binary,\n bitmask2,\n \"at\",\n \"60\",\n \"match\",\n \"u8\",\n dhcp_bit,\n \"0xff\",\n \"at\",\n \"270\",\n \"flowid\",\n \"1:11\",\n \"action\",\n \"drop\",\n )\n except ProcessExecutionError as err:\n self._log.warning(\"Plumbum failed\")\n raise CommandFailure(str(err))\n self._log.info(\"Qdisc deleted\")\n log_msg = \"Dropping DHCP\"\n if mac_addr:\n log_msg += f\" to {mac_addr}\"\n log_msg += f\" on {iface}\"\n self._log.info(log_msg)",
"def dhcp_release(ifname):\n\n logging.debug('Releasing %s...', ifname)\n\n try:\n subprocess.call(['dhcpcd', '--release', ifname])\n except OSError, err:\n if err.errno != errno.ENOENT:\n raise err\n\n try:\n call_timeout(['dhclient', '-1', '-r', ifname], timeout=5)\n except OSError, err:\n if err.errno == errno.ENOENT:\n logging.critical('Neither dhcpcd nor dhclient were found.')\n else:\n raise err",
"def StopDHCP():\n cmd = 'killall dhclient dhclient3 pump dhcpcd-bin'\n misc.Run(cmd)",
"def release(self) -> None:\n if not is_local_host(self.location):\n self.api.perform(\"ReleasePort\", portList=self.obj_ref())",
"def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])",
"def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)",
"def release_port_fixed_ip(self, network_id, device_id, subnet_id):\n return self.call(self.context,\n self.make_msg('release_port_fixed_ip',\n network_id=network_id,\n subnet_id=subnet_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)",
"def _handle_dhcp_lease_query(self, packet, source_address, port):",
"def setup_dhcp_config(self, board_config):\n raise NotImplementedError",
"def disable_dhcp_helper(self, network_id):\n network = self.cache.get_network_by_id(network_id)\n if network:\n if self.call_driver('disable', network):\n self.cache.remove(network)",
"def dhcp(self, dhcp):\n\n self._dhcp = dhcp",
"def adb_down(self, port):\n self.adb_transport = None\n self.check_adb([\"disconnect\", \"localhost:%d\" % port])\n\n # Wait until QEMU's forward has expired\n CONNECT_MAX_TRIES = 15\n connect_tries = 0\n while True:\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((\"localhost\", port))\n sock.close()\n connect_tries += 1\n if connect_tries >= CONNECT_MAX_TRIES:\n raise Timeout(\"Wait for port forward to go away\",\n CONNECT_MAX_TRIES)\n time.sleep(1)\n except IOError:\n break",
"def dhcp_free(self, dhcp_free):\n\n self._dhcp_free = dhcp_free",
"def deldhcp_cmd(args):\n\n if VERSION_LIVEBOX == 'lb28':\n dhcpv4_object = 'NMC'\n else:\n dhcpv4_object = 'DHCPv4.Server.Pool.default'\n\n if len(args) >= 1:\n leases = requete(dhcpv4_object + ':getStaticLeases')\n if args[0] == \"all\":\n for lease in leases['status']:\n mac = lease['MACAddress']\n requete_print(dhcpv4_object + ':deleteStaticLease', {\"MACAddress\": mac})\n\n else:\n for i in args:\n for lease in leases['status']:\n mac = lease['MACAddress']\n if str.upper(mac) == str.upper(i):\n print(\"del dhcp\", mac)\n requete_print(dhcpv4_object + ':deleteStaticLease', {\"MACAddress\": mac})\n else:\n error(\"Usage: %s -deldchp MACAddress...\" % sys.argv[0])",
"def dhcp_agent_network_remove(self, dhcp_net_info):\n self.turn_on_dhcp_check()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
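The `release_dhcp_port` row above is effectively a logged no-op that satisfies part of the DHCP DeviceManager plugin interface. A self-contained stub of that shape, purely for illustration:

import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger('dhcp-plugin-stub')

class NoOpDhcpPlugin(object):
    def release_dhcp_port(self, network_id, device_id):
        # Nothing to free in this backend; just record the call for debugging.
        LOG.debug('release_dhcp_port: %s %s', network_id, device_id)

NoOpDhcpPlugin().release_dhcp_port('net-1', 'dhcp-host-1')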
Construct and return an empty network model. | def empty_network(network_id=NETWORK_ID):
return make_net_model({"id": network_id,
"subnets": [],
"ports": [],
"tenant_id": "calico",
"mtu": neutron_constants.DEFAULT_NETWORK_MTU}) | [
"def create_model_one(self):\n self.network_1 = Model(inputs=self.shared.input, outputs=self.shared.output)\n self.network_1.compile(loss='sparse_categorical_crossentropy',\n optimizer='adam',\n metrics=['acc'])\n self.network_1.summary()",
"def create_network(self):\n\t\tn = IMNN.IMNN(parameters=self.parameters)\n\t\ttf.reset_default_graph()\n\t\tn.setup(η = eta)\n\t\t\n\t\treturn n",
"def create_network(self):\n\n print ('Creating network, changing data will have no effect beyond this point.')\n n = IMNN.IMNN(parameters=self.parameters)\n\n if self.load_network:\n n.restore_network()\n else:\n n.setup(network = self.network, load_data = self.data)\n\n return n",
"def new_network():\n new_names = Names()\n new_devices = Devices(new_names)\n return Network(new_names, new_devices)",
"def make_model(self):\n onnx_graph = onnx.helper.make_graph(\n self._nodes, self._name, self._inputs, self._outputs, self._initializers\n )\n kwargs = {}\n kwargs[\"opset_imports\"] = self._get_opsets()\n kwargs[\"producer_name\"] = \"TVM Relay\"\n kwargs[\"producer_version\"] = tvm.__version__\n\n return onnx.helper.make_model(onnx_graph, **kwargs)",
"def _create_network(self, *args):\n return self._network_class()",
"def create_network(self):\n #Create the network\n self.network = Network(\"50.19.23.117\", 8080)",
"def build_empty_graph(input_dim, output_dim, num_intermediate):\n from .models import DAG\n num_emit, num_rec = num_intermediate + input_dim, num_intermediate + output_dim\n activations = torch.zeros(num_rec, dtype=torch.long)\n connections = torch.zeros(num_rec, num_emit, dtype=torch.long)\n\n return DAG(input_dim, output_dim, num_intermediate, connections, activations, check_valid=True)",
"def to_model(self, network_provider):\r\n network = Network.objects.get_or_create(\r\n name=self.name,\r\n description=self.description,\r\n subnet_mask=self.subnet_mask,\r\n public_gateway=self.public_gateway,\r\n local_gateway=self.local_gateway,\r\n network_provider=network_provider,\r\n )[0]\r\n \r\n return network",
"def build_model(self):\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n fdim = self.model.fdim\n self.classifier = nn.Linear(fdim, self.num_classes)\n print('# params: {:,}'.format(count_num_param(self.classifier)))\n self.classifier.to(self.device)\n self.optim_classifier = build_optimizer(self.classifier, cfg.OPTIM)\n self.sched_classifier = build_lr_scheduler(self.optim_classifier, cfg.OPTIM)\n self.register_model('classifier', self.classifier, self.optim_classifier, self.sched_classifier)",
"def create_model(self):\n # Makes sure the backend is clean\n keras.backend.clear_session()\n\n model = keras.models.Sequential()\n\n if self.layers is None:\n model.add(keras.layers.Conv1D(32, 3, activation='relu'))\n model.add(keras.layers.MaxPooling1D((2)))\n model.add(keras.layers.Conv1D(64, 3, activation='relu'))\n\n model.add(keras.layers.Flatten())\n model.add(keras.layers.Dense(64, activation='relu'))\n model.add(keras.layers.Dense(2, activation='sigmoid'))\n \n else:\n for l in self.layers:\n model.add(l)\n \n model.compile(optimizer=self.optimizer,\n loss=self.loss,\n metrics=self.metrics)\n\n self.model = model",
"def test_create_network_without_router(self):\n # Create Network\n self.net_creator = OpenStackNetwork(\n self.os_creds, self.net_config.network_settings)\n network = self.net_creator.create()\n\n # Validate network was created\n self.assertTrue(neutron_utils_tests.validate_network(\n self.neutron, self.keystone,\n self.net_creator.network_settings.name, True,\n self.os_creds.project_name, mtu=999))\n\n # Validate subnets\n self.assertTrue(neutron_utils_tests.validate_subnet(\n self.neutron, network,\n self.net_creator.network_settings.subnet_settings[0].name,\n self.net_creator.network_settings.subnet_settings[0].cidr, True))",
"def create_network(model_file=DEFAULT_MODEL_FILE, pretrained=DEFAULT_PRETRAINED, *args, **kwargs):\n net = imagenet_classifier(*args,**kwargs)\n net.set_phase_test()\n net.set_mode_cpu()\n return net",
"def create_neural_network():\n network_input = keras.layers.Input((NETWORK_INPUT_SIZE,))\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_input)\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_layer)\n network_output = keras.layers.Dense(NETWORK_OUTPUT_SIZE, kernel_initializer='random_uniform', activation='linear')(network_layer)\n network = keras.models.Model(inputs=network_input, outputs=network_output)\n network.compile(loss=\"mse\", optimizer=\"Adam\")\n return network",
"def create_model(self):\n # Create the generator and discriminators\n self.generator_lungs = self.generator_model()\n self.generator_organs = self.generator_model()\n\n self.disc_lungs = self.discriminator_model_lungs()\n self.disc_organs = self.discriminator_model_organs()\n\n # Initialize the optimizer and backend\n self.generator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.set_backend = tf.keras.backend.set_floatx('float32')\n\n # Create the summary writer\n self.create_summary_writer()\n print('Models are created.')\n return self",
"def create_nueral_network(X, y, epochs=8):\n model = Sequential()\n model.add(layers.Dense(500, input_dim=X.shape[1]))\n model.add(layers.Dropout(.2))\n model.add(layers.Dense(128, activation='relu'))\n model.add(layers.Dropout(.2))\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dropout(.2))\n model.add(layers.Dense(32, activation='relu'))\n model.add(layers.Dense(5,activation='softmax'))\n\n model.compile(optimizer='adam', loss='categorical_crossentropy',metrics=['accuracy'])\n print(model.summary())\n model.fit(X, y, epochs=epochs, batch_size=500)\n return model",
"def _generate_init_model(self):\n model_init = self._new_model_init()\n chn_mask = self._init_chn_node_mask()\n if vega.is_torch_backend():\n import torch\n checkpoint = torch.load(self.config.init_model_file + '.pth')\n model_init.load_state_dict(checkpoint)\n model = PruneMobileNet(model_init).apply(chn_mask)\n model.to(self.device)\n elif vega.is_tf_backend():\n import tensorflow as tf\n model = model_init\n with tf.compat.v1.Session(config=self.trainer._init_session_config()) as sess:\n saver = tf.compat.v1.train.import_meta_graph(\"{}.meta\".format(self.config.init_model_file))\n saver.restore(sess, self.config.init_model_file)\n all_weight = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.VARIABLES)\n all_weight = [t for t in all_weight if not t.name.endswith('Momentum:0')]\n PruneMobileNet(all_weight).apply(chn_mask)\n save_file = FileOps.join_path(self.trainer.get_local_worker_path(), 'prune_model')\n saver.save(sess, save_file)\n elif vega.is_ms_backend():\n from mindspore.train.serialization import load_checkpoint, load_param_into_net\n parameter_dict = load_checkpoint(self.config.init_model_file)\n load_param_into_net(model_init, parameter_dict)\n model = PruneMobileNet(model_init).apply(chn_mask)\n return model",
"def create_model():\n\n # Create a sequential model (a simple NN is created) adding a softmax activation at the end with 10 units:\n model = Sequential()\n model.add(Dense(units=128, activation=\"relu\", input_shape=(784,)))\n model.add(Dense(units=128, activation=\"relu\"))\n model.add(Dense(units=128, activation=\"relu\"))\n model.add(Dense(units=10, activation=\"softmax\"))\n\n # Compile the model using the loss function \"categorical_crossentropy\" and Stocastic Gradient Descent optimizer:\n model.compile(optimizer=SGD(0.001), loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\n # Return the created model\n return model",
"def create(self) -> tf.keras.models.Model:\n raise NotImplementedError()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
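The `empty_network` row above builds its result via `make_net_model`, which is not included in this dump. The stand-in below only shows the shape of the data, approximating attribute access with `types.SimpleNamespace`; the MTU value is an assumption standing in for `neutron_constants.DEFAULT_NETWORK_MTU`.

from types import SimpleNamespace

DEFAULT_NETWORK_MTU = 1500  # assumed stand-in for neutron_constants.DEFAULT_NETWORK_MTU

def empty_network(network_id='calico-net'):
    # Same fields as the NetModel built above, minus the real make_net_model wrapper.
    return SimpleNamespace(id=network_id, subnets=[], ports=[],
                           tenant_id='calico', mtu=DEFAULT_NETWORK_MTU)

net = empty_network('net-1')
print(net.id, len(net.subnets), net.mtu)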
Ensure that the cache has a NetModel and subnets for PORT. | def _ensure_net_and_subnets(self, port):
# Gather the subnet IDs that we need for this port, and get the
# NetModel if we already have it in the cache.
needed_subnet_ids = set()
net = None
for fixed_ip in port['fixed_ips']:
subnet_id = fixed_ip.get('subnet_id')
if subnet_id:
needed_subnet_ids.add(subnet_id)
if not net:
net = self.agent.cache.get_network_by_subnet_id(subnet_id)
LOG.debug("Needed subnet IDs: %s", needed_subnet_ids)
LOG.debug("Existing network model by subnet ID: %s", net)
# For each subnet that we need, get its data from SubnetWatcher and
# hold for adding into the cache.
new_subnets = {}
for subnet_id in needed_subnet_ids:
# Get data for this subnet from the SubnetWatchers.
subnet = (self.subnet_watcher.get_subnet(subnet_id) or
self.v1_subnet_watcher.get_subnet(subnet_id))
if subnet is None:
LOG.warning("No data for subnet %s", subnet_id)
raise SubnetIDNotFound()
new_subnets[subnet_id] = subnet
if not net:
# We don't already have a NetModel, so look for a cached NetModel
# with the right network ID. (In this case we must have new
# subnets to add into the cache, and the cached NetModel must have
# subnets other than the ones that we're adding in this iteration;
# otherwise we would have already found it when searching by
# subnet_id above.)
assert new_subnets
network_id = list(new_subnets.values())[0]['network_id']
net = self.agent.cache.get_network_by_id(network_id)
LOG.debug("Existing network model by network ID: %s", net)
if not net:
# We still have no NetModel for the relevant network ID, so create
# a new one. In this case we _must_ be adding new subnets.
assert new_subnets
net = empty_network(network_id)
LOG.debug("New network %s", net)
elif new_subnets:
# We have a NetModel that was already in the cache and are about to
# modify it. Cache replacement only works if the new NetModel is a
# distinct object from the existing one, so make a copy here.
net = copy_network(net)
LOG.debug("Copied network %s", net)
if new_subnets:
# Add the new subnets into the NetModel.
assert net
net.subnets = [s for s in net.subnets
if s.id not in new_subnets]
net.subnets += list(new_subnets.values())
# Add (or update) the NetModel in the cache.
LOG.debug("Net: %s", net)
_fix_network_cache_port_lookup(self.agent, net.id)
self.agent.cache.put(net)
return net.id | [
"def _check_config(self):\n\n if self._stb_ip == None:\n raise RuntimeError('Cannot do HTTP request without setting STB IP')",
"def _fix_network_cache_port_lookup(agent, network_id):\n\n # If there is an existing NetModel for this network ID, ensure that all\n # its ports are in the port_lookup dict.\n if network_id in agent.cache.cache:\n for port in agent.cache.cache[network_id].ports:\n agent.cache.port_lookup[port.id] = network_id",
"def test_get_valid_networks_for_virtualization_realm(self):\n pass",
"def check_port_validity(self):\n # Check if ports provided are already present in VPLEX\n if self.ports:\n LOG.info(\"Validating the ports\")\n for port in self.ports:\n obj = None\n try:\n obj = self.storageview.get_port(self.cl_name, port)\n except (utils.ApiException, ValueError, TypeError) as err:\n msg = \"Could not get port {0} details in {1} due to\"\n err_msg = msg.format(port, self.cl_name) + \" error {0}\"\n e_msg = utils.display_error(err_msg, err)\n LOG.error(\"%s\\n%s\\n\", e_msg, err)\n self.module.fail_json(msg=e_msg)\n\n if obj is None:\n msg = (\"Could not get port {0} details in {1}\"\n .format(port, self.cl_name))\n LOG.error(msg)\n self.module.fail_json(msg=msg)",
"def _check_and_set_network(self) -> None:\n from hathor.transaction.storage.exceptions import WrongNetworkError\n\n network = settings.NETWORK_NAME\n stored_network = self.get_network()\n\n if stored_network is None:\n # no network is set, let's try to infer it\n self._checked_set_network(network)\n elif stored_network != network:\n # the stored network does not match, something is wrong\n raise WrongNetworkError(f'Databases created on {stored_network}, expected {network}')\n else:\n # the network is what is expected, nothing to do here\n pass",
"def _validatePortConfig(self):\n if config.BindHTTPPorts:\n if config.HTTPPort == 0:\n raise UsageError(\n \"HTTPPort required if BindHTTPPorts is not empty\"\n )\n elif config.HTTPPort != 0:\n config.BindHTTPPorts = [config.HTTPPort]\n if config.BindSSLPorts:\n if config.SSLPort == 0:\n raise UsageError(\n \"SSLPort required if BindSSLPorts is not empty\"\n )\n elif config.SSLPort != 0:\n config.BindSSLPorts = [config.SSLPort]",
"def testConfiguredCacheViaConstraint(self):\n self.assertEqual(self.portal.portal_memcached.checkConsistency(), [])",
"def test_network_not_in_dc_with_mapping(self):\n mac, network, nic = helper.get_vm_params(vm=self.vm)\n testflow.step(\n \"Check the network %s of imported VM %s changed to %s\",\n network, self.vm, self.dst_net\n )\n assert ll_vms.check_vnic_on_vm_nic(\n vm=self.vm, nic=nic, vnic=self.dst_net\n )",
"def check_network(self):\n LOG.info(\"Checking network...\")\n success = True\n for configuration, provider in zip(self.configurations, self.providers):\n network_name_or_id = configuration.get(\"network\")\n if network_name_or_id:\n network = provider.get_network_by_id_or_name(network_name_or_id)\n if not network:\n LOG.warning(f\"Network '{network_name_or_id}' not found\", network_name_or_id)\n success = False\n else:\n LOG.info(f\"Network '{subnet_name_or_id}' found\")\n subnet_name_or_id = configuration.get(\"subnet\")\n if subnet_name_or_id:\n subnet = provider.get_subnet_by_id_or_name(subnet_name_or_id)\n if not subnet:\n LOG.warning(f\"Subnet '{subnet_name_or_id}' not found\")\n success = False\n else:\n LOG.info(f\"Subnet '{subnet_name_or_id}' found\")\n return bool(success and (network_name_or_id or subnet_name_or_id))",
"def test_networking_project_network_update(self):\n pass",
"def _check_port_available(hostname, port):\n for config_file in config_files:\n network_config = networkConfig(config_file)\n for name, host in network_config.hostDict.items():\n if port == host.port:\n return False\n\n return _check_socket_is_free(hostname, port)",
"def _validate_fixed_ips_for_port(self, context, network_id, fixed_ips):\n\n fixed_ip_set = []\n for fixed in fixed_ips:\n found = False\n if 'subnet_id' not in fixed:\n if 'ip_address' not in fixed:\n msg = _('IP allocation requires subnet_id or ip_address')\n raise exc.InvalidInput(error_message=msg)\n\n filter = {'network_id': [network_id]}\n subnets = self.get_subnets(context, filters=filter)\n for subnet in subnets:\n if super(\n NeutronPluginContrailCoreV2, self)._check_subnet_ip(\n subnet['cidr'], fixed['ip_address']):\n found = True\n subnet_id = subnet['id']\n break\n if not found:\n msg = _('IP address %s is not a valid IP for the defined '\n 'networks subnets') % fixed['ip_address']\n raise exc.InvalidInput(error_message=msg)\n else:\n subnet = self._get_subnet(context, fixed['subnet_id'])\n if subnet['network_id'] != network_id:\n msg = (_(\"Failed to create port on network %(network_id)s\"\n \", because fixed_ips included invalid subnet \"\n \"%(subnet_id)s\") %\n {'network_id': network_id,\n 'subnet_id': fixed['subnet_id']})\n raise exc.InvalidInput(error_message=msg)\n subnet_id = subnet['id']\n\n if 'ip_address' in fixed:\n # Ensure that the IP is valid on the subnet\n if (not found and\n not super(\n NeutronPluginContrailCoreV2, self)._check_subnet_ip(\n subnet['cidr'], fixed['ip_address'])):\n msg = _('IP address %s is not a valid IP for the defined '\n 'subnet') % fixed['ip_address']\n raise exc.InvalidInput(error_message=msg)\n\n fixed_ip_set.append({'subnet_id': subnet_id,\n 'ip_address': fixed['ip_address']})\n else:\n fixed_ip_set.append({'subnet_id': subnet_id})\n if len(fixed_ip_set) > cfg.CONF.max_fixed_ips_per_port:\n msg = _('Exceeded maximim amount of fixed ips per port')\n raise exc.InvalidInput(error_message=msg)\n return fixed_ip_set",
"def test_netport_get(self):\n\n # the function to be tested:\n port1 = self.urihandler.get(self.hmc,\n '/api/adapters/1/network-ports/1', True)\n\n exp_port1 = {\n 'element-id': '1',\n 'element-uri': '/api/adapters/1/network-ports/1',\n 'class': 'network-port',\n 'parent': '/api/adapters/1',\n 'name': 'osa_1_port_1',\n 'description': 'Port #1 of OSA #1',\n }\n assert port1 == exp_port1",
"def check_neighbour_set(self):\n if self.requires_neighbours():\n if self.neighbours is None:\n self.log.exception(\"neighbours attribute must be set\")\n raise ValueError()",
"def test_get_networks(self):\n pass",
"def test_patch_host_subnet(self):\n pass",
"def test_read_host_subnet(self):\n pass",
"def check_routable(self, from_subnets: List[Subnet], to_subnets: List[Subnet]) -> dict:\n # check what ports from subnets allow to any to subnets\n ports = {} # port: (to_subnet, from_subnet)\n for from_subnet in from_subnets:\n for to_subnet in to_subnets:\n # check if traffic from subnet is stopped by to subnet nacl\n if from_subnet.name in to_subnet.nacls:\n if 'ICMP' not in ports:\n ports['ICMP'] = (from_subnet.cidr, to_subnet.cidr)\n if 'all' in to_subnet.nacls[from_subnet.name]['in']:\n # if all ports accepted in then set ports to all and we are done\n return {'all': (from_subnet.cidr, to_subnet.cidr)}\n elif 'None' in to_subnet.nacls[from_subnet.name]['in']:\n # If you don't have access to Enteprise network, you can't act on Operational Host\n # TODO refactor this hacky fix\n permission = self.check_for_enterprise_sessions()\n ports = {'all': (from_subnet.cidr, to_subnet.cidr)} if permission else {}\n return ports\n \n else:\n # we only add the ports in rules to our accepted ports\n for rule in to_subnet.nacls[from_subnet.name]['in']:\n if rule['PortRange'] is int and rule['PortRange'] not in ports:\n ports[rule[\"PortRange\"]] = (from_subnet.cidr, to_subnet.cidr)\n else:\n for p in range(rule[\"PortRange\"][0], rule[\"PortRange\"][1]):\n if p not in ports:\n ports[p] = (from_subnet.cidr, to_subnet.cidr)\n elif 'all' in to_subnet.nacls:\n if 'ICMP' not in ports:\n ports['ICMP'] = (from_subnet.cidr, to_subnet.cidr)\n # if all ports accepted out then use inbound rules only\n if 'all' in to_subnet.nacls['all']['in']:\n # if all ports accepted in then set ports to all and we are done\n return {'all': (from_subnet.cidr, to_subnet.cidr)}\n else:\n # we only add the ports in rules to our accepted ports\n for rule in to_subnet.nacls['all']['in']:\n if rule['PortRange'] is int and rule['PortRange'] not in ports:\n ports[rule[\"PortRange\"]] = (from_subnet.cidr, to_subnet.cidr)\n else:\n for p in range(rule[\"PortRange\"][0], rule[\"PortRange\"][1]):\n if p not in ports:\n ports[p] = (from_subnet.cidr, to_subnet.cidr)\n else:\n # this means that traffic cannot reach move between these 2 subnets\n continue\n\n return ports",
"def test_generate_subnetworks_allports(self):\n ntwk = rf.Network(os.path.join(self.test_dir,'ntwk.s32p'))\n for m in range(ntwk.nports):\n for n in range(ntwk.nports):\n npy.testing.assert_array_almost_equal(\n ntwk.s[:,m,n],\n getattr(ntwk, f's{m+1}_{n+1}').s[:,0,0]\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
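The `_ensure_net_and_subnets` row above stresses that cache replacement only works when the new NetModel is a distinct object from the cached one. The toy cache below (not Neutron code) isolates that copy-modify-put pattern.

import copy

cache = {}

def put(net):
    cache[net['id']] = net

def add_subnet(network_id, subnet):
    cached = cache[network_id]
    net = copy.deepcopy(cached)      # distinct object, as the cache replacement requires
    net['subnets'] = [s for s in net['subnets'] if s['id'] != subnet['id']]
    net['subnets'].append(subnet)
    put(net)                         # replace the cached entry; the old object is untouched
    return net

put({'id': 'net-1', 'subnets': []})
add_subnet('net-1', {'id': 'subnet-1', 'cidr': '10.65.0.0/24'})
print(cache['net-1']['subnets'])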
Start/stop/restart Dnsmasq for NETWORK_ID. | def _update_dnsmasq(self, network_id):
# Check whether we should really do the following processing.
if self.suppress_dnsmasq_updates:
LOG.debug("Don't update dnsmasq yet;"
" must be processing a snapshot")
self.dirty_networks.add(network_id)
return
self.dnsmasq_updater.update_network(network_id) | [
"def restart_dnsmasq(self, config):\n\n # TODO currently only supports systemd, add upstart support\n command = ['systemctl', 'restart', 'dnsmasq.service']\n try:\n self.run_command(command, config)\n except Exception, e:\n raise e",
"def startServices():\n # dnsmasq\n out_dnsmasq = subprocess.run([\"systemctl\", \"restart\", \"dnsmasq\"], stdout=subprocess.PIPE)\n if out_dnsmasq.returncode == 0:\n logging.info(\"dnsmasq service started/restarted successfully\")\n else:\n logging.error(\"dnsmasq service start restart error\")\n # 3proxy\n out_3proxy = subprocess.run([\"systemctl\", \"restart\", \"3proxy\"], stdout=subprocess.PIPE)\n if out_3proxy.returncode == 0:\n logging.info(\"3proxy service started/restarted successfully\")\n else:\n logging.error(\"3proxy service start restart error\")",
"def launch (no_flow = False,\n network = \"192.168.0.0/24\", # Address range\n first = 1, last = None, count = None, # Address range\n ip = \"192.168.0.254\",\n router = (), # Auto\n dns = (), # Auto\n dpid = None, # All\n ports = None, # All\n __INSTANCE__ = None):\n def fixint (i):\n i = str(i)\n if i.lower() == \"none\": return None\n if i.lower() == \"true\": return None\n return int(i)\n def fix (i):\n i = str(i)\n if i.lower() == \"none\": return None\n if i.lower() == \"true\": return None\n if i == '()': return ()\n return i\n first,last,count = map(fixint,(first,last,count))\n router,dns = map(fix,(router,dns))\n\n if ports is not None:\n ports = ports.split(\",\")\n ports = set(int(p) if p.isdigit() else p for p in ports)\n\n pool = SimpleAddressPool(network = network, first = first, last = last,\n count = count)\n\n inst = DHCPD(install_flow = not no_flow, pool = pool,\n ip_address = ip, router_address = router,\n dns_address = dns, dpid = dpid, ports = ports)\n\n if __INSTANCE__[0] == 0:\n # First or only instance\n core.register(inst)\n\n log.debug(\"DHCP serving a%s\", str(pool)[2:-1])",
"def enable_dhcp_helper(self, network_id):\n try:\n network = self.plugin_rpc.get_network_info(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Network %s RPC info call failed.') % network_id)\n return\n\n if not network.admin_state_up:\n return\n\n for subnet in network.subnets:\n if subnet.enable_dhcp:\n if self.call_driver('enable', network):\n self.cache.put(network)\n break",
"def _configure_dns(self, tr: 'TestRun') -> 'Process':\n self._dns = tr.MakeDNServer(\"dns\", default=['127.0.0.1'])\n return self._dns",
"def startservers():\n try:\n dns = subprocess.Popen(['python', FAKE_LOC, '-c', DNS_LOC])\n except IOError:\n sys.exit('>> Unable to locate FakeDns')\n\n try:\n httpd = MyTCPServer(('', 80), MyHandler)\n except socket.error:\n dns.kill()\n sys.exit('>> Port 80 already in use')\n try:\n print '>> Starting HTTP Server...'\n httpd.serve_forever()\n except KeyboardInterrupt:\n httpd.shutdown()\n httpd.server_close()\n dns.kill()\n sys.exit()",
"def restartHTTPd(htconf):\n parentpid = pidHTTPd(htconf)\n if parentpid <= 1:\n return\n# hopefulle killing the parent proc. will do the trick\n print >> FileKeyUtils.WMSlog, 'restartHTTPd> kill parentpid:', parentpid\n os.system('kill -TERM '+repr(parentpid))\n apache = '/devstore/apache2/bin/httpd -f /devstore/apache2/conf/' + htconf\n print >> FileKeyUtils.WMSlog, 'restartHTTPd> via:', apache\n time.sleep(0.5) # give it time to complete proc. termination\n os.system('/devstore/apache2/bin/httpd -f /devstore/apache2/conf/' + htconf)",
"def start_srv(start, process):\n if not \"conf_option\" in world.cfg:\n world.cfg[\"conf_option\"] = \"\"\n\n world.cfg['log_file'] = build_log_path()\n fabric_sudo_command('cat /dev/null >' + world.cfg['log_file'])\n world.cfg[\"dhcp_log_file\"] = world.cfg['log_file']\n\n log = \"local7\"\n if world.f_cfg.isc_dhcp_log_facility != \"\":\n log = world.f_cfg.isc_dhcp_log_facility\n\n world.cfg['log_facility'] = '''\\nlog-facility {log};\\n'''.format(**locals())\n\n add_defaults()\n cfg_write()\n log.debug(\"Start ISC-DHCP with generated config:\")\n convert_cfg_file(world.cfg[\"cfg_file\"])\n fabric_send_file(world.cfg[\"cfg_file\"] + '_processed', world.cfg[\"cfg_file\"] + '_processed')\n copy_configuration_file(world.cfg[\"cfg_file\"] + '_processed')\n remove_local_file(world.cfg[\"cfg_file\"])\n #set_ethernet_interface()\n stop_srv()\n\n world.cfg['leases'] = build_leases_path()\n\n #fabric_sudo_command('echo y |rm ' + world.cfg['leases'])\n fabric_sudo_command('touch ' + world.cfg['leases'])\n\n result = fabric_sudo_command('(' + os.path.join(world.f_cfg.software_install_path, 'sbin/dhcpd') + ' -cf server.cfg_processed'\n + ' -lf ' + world.cfg['leases']\n + '&); sleep ' + str(world.f_cfg.sleep_time_1) + ';')\n\n check_process_result(start, result, process)\n\n # clear configs in case we would like make couple configs in one test\n world.cfg[\"conf_time\"] = \"\"\n world.cfg[\"log_facility\"] = \"\"\n world.cfg[\"custom_lines\"] = \"\"\n world.cfg[\"conf_option\"] = \"\"\n world.cfg[\"conf_vendor\"] = \"\"",
"def StopDHCP():\n cmd = 'killall dhclient dhclient3 pump dhcpcd-bin'\n misc.Run(cmd)",
"def start_srv(start, process):\n if not \"conf_option\" in world.cfg:\n world.cfg[\"conf_option\"] = \"\"\n\n world.cfg['log_file'] = build_log_path()\n fabric_sudo_command('cat /dev/null >' + world.cfg['log_file'])\n world.cfg[\"dhcp_log_file\"] = world.cfg['log_file']\n\n log = \"local7\"\n if world.f_cfg.isc_dhcp_log_facility != \"\":\n log = world.f_cfg.isc_dhcp_log_facility\n\n world.cfg['log_facility'] = '''\\nlog-facility {log};\\n'''.format(**locals())\n\n add_defaults()\n cfg_write()\n get_common_logger().debug(\"Start ISC-DHCPv6 with generated config:\")\n convert_cfg_file(world.cfg[\"cfg_file\"])\n fabric_send_file(world.cfg[\"cfg_file\"] + '_processed', world.cfg[\"cfg_file\"] + '_processed')\n copy_configuration_file(world.cfg[\"cfg_file\"] + '_processed')\n remove_local_file(world.cfg[\"cfg_file\"])\n #set_ethernet_interface()\n stop_srv()\n\n world.cfg['leases'] = build_leases_path()\n #fabric_run_command('echo y |rm ' + world.cfg['leases'])\n fabric_sudo_command('touch ' + world.cfg['leases'])\n\n result = fabric_sudo_command('(' + os.path.join(world.f_cfg.software_install_path, 'sbin/dhcpd') + ' -6 -cf server.cfg_processed'\n + ' -lf ' + world.cfg['leases']\n + '&); sleep ' + str(world.f_cfg.sleep_time_1) + ';')\n\n check_process_result(start, result, process)\n\n # clear configs in case we would like make couple configs in one test\n world.cfg[\"conf_time\"] = \"\"\n world.cfg[\"log_facility\"] = \"\"\n world.cfg[\"custom_lines\"] = \"\"\n world.cfg[\"conf_option\"] = \"\"\n world.cfg[\"conf_vendor\"] = \"\"",
"def start_dhclient(self, ifname):\n # Emulates vyatta-interfaces.pl's behavior\n cf, pf, lf = self.dhclient_pathnames(ifname)\n killpidfile(pf, signal.SIGTERM)\n safe_unlink(pf)\n self.run(\"/sbin/dhclient\", \"-q\", \"-nw\", \"-cf\", cf, \"-pf\", pf, \"-lf\", lf, ifname)",
"def refresh_dhcp_helper(self, network_id):\n old_network = self.cache.get_network_by_id(network_id)\n if not old_network:\n # DHCP current not running for network.\n return self.enable_dhcp_helper(network_id)\n\n try:\n network = self.plugin_rpc.get_network_info(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Network %s RPC info call failed.') % network_id)\n return\n\n old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)\n new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)\n\n if new_cidrs and old_cidrs == new_cidrs:\n self.call_driver('reload_allocations', network)\n self.cache.put(network)\n elif new_cidrs:\n if self.call_driver('restart', network):\n self.cache.put(network)\n else:\n self.disable_dhcp_helper(network.id)",
"def run_restart_apache():\n run('{0}restart'.format(settings.FAB_SETTING('SERVER_APACHE_BIN_DIR')))",
"def run(self):\n try:\n self.ssh_connection.connect()\n dns_response = self.query_dns_server()\n result = self.process_dns_response(dns_response)\n self.handle_result(result)\n\n except Exception as e:\n print(f\"Error: {str(e)}\")\n sys.exit(2)",
"def sync_dns(self,):\n\n for server_name, server_ip in self.get_instances():\n self.dnsmanager.ensure_a_record(server_name, server_ip)",
"def restart_dcrpd_service(self):\n self.start_dcrpd_service(restart=True)",
"def restartNetworkManager(cls):\n nms = 'network-manager'\n nm_is_running = os.system('service %s status 2>&1 | grep '\n '-ic running >/dev/null 2>&1' % nms)\n if nm_is_running != 256:\n info('Mac address(es) added into %s\\n' % cls.nm_conf_file)\n info('Restarting %s...\\n' % nms)\n os.system('sudo service network-manager restart')\n #os.system('nmcli general reload')\n sleep(2)",
"def restart(self, cleanup=False): \n params = {'command':'restartNetwork',\n 'id':self.id,\n 'cleanup':cleanup}\n\n name = self.name\n self.logger.debug('Restart network %s' % name)\n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['restartnetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'restartNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)",
"def configureDHCP():\n dhcpStart = config.get(\"hotspot\", \"dhcpstart\")\n dhcpEnd = config.get(\"hotspot\", \"dhcpend\")\n dnsmasqConfig = f\"\"\"#PI Hotspot config\ndomain-needed\nbogus-priv\ndhcp-option=option:dns-server\ndhcp-authoritative\ndhcp-range={dhcpStart},{dhcpEnd},1h\n\"\"\"\n confFile = open(\"/etc/dnsmasq.conf\", \"w\")\n confFile.write(dnsmasqConfig)\n confFile.close()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
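The `_update_dnsmasq` row above defers per-network updates into a `dirty_networks` set while a snapshot is being processed. A minimal, stand-alone version of that suppress-and-flush pattern, with a print standing in for the real dnsmasq updater:

class DnsmasqBatcher(object):
    def __init__(self):
        self.suppress_updates = True
        self.dirty_networks = set()

    def update_network(self, network_id):
        if self.suppress_updates:
            # Still processing a snapshot: remember the network, act later.
            self.dirty_networks.add(network_id)
            return
        print('updating dnsmasq for %s' % network_id)

    def finish_snapshot(self):
        # Snapshot done: lift suppression and flush everything recorded meanwhile.
        self.suppress_updates = False
        for network_id in sorted(self.dirty_networks):
            self.update_network(network_id)
        self.dirty_networks.clear()

batcher = DnsmasqBatcher()
batcher.update_network('net-1')
batcher.update_network('net-2')
batcher.finish_snapshot()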
Fix NetworkCache before removing or replacing a network. neutron.agent.dhcp.agent is bugged in that it adds the DHCP port into the cache without updating the cache's port_lookup dict, but then NetworkCache.remove() barfs if there is a port in network.ports but not in that dict... NetworkCache.put() implicitly does a remove() first if there is already a NetModel in the cache with the same ID. So a put() to update or replace a network also hits this problem. This method avoids that problem by ensuring that all of a network's ports are in the port_lookup dict. A caller should call this immediately before a remove() or a put(). | def _fix_network_cache_port_lookup(agent, network_id):
# If there is an existing NetModel for this network ID, ensure that all
# its ports are in the port_lookup dict.
if network_id in agent.cache.cache:
for port in agent.cache.cache[network_id].ports:
agent.cache.port_lookup[port.id] = network_id | [
"def _ensure_net_and_subnets(self, port):\n\n # Gather the subnet IDs that we need for this port, and get the\n # NetModel if we already have it in the cache.\n needed_subnet_ids = set()\n net = None\n for fixed_ip in port['fixed_ips']:\n subnet_id = fixed_ip.get('subnet_id')\n if subnet_id:\n needed_subnet_ids.add(subnet_id)\n if not net:\n net = self.agent.cache.get_network_by_subnet_id(subnet_id)\n LOG.debug(\"Needed subnet IDs: %s\", needed_subnet_ids)\n LOG.debug(\"Existing network model by subnet ID: %s\", net)\n\n # For each subnet that we need, get its data from SubnetWatcher and\n # hold for adding into the cache.\n new_subnets = {}\n for subnet_id in needed_subnet_ids:\n # Get data for this subnet from the SubnetWatchers.\n subnet = (self.subnet_watcher.get_subnet(subnet_id) or\n self.v1_subnet_watcher.get_subnet(subnet_id))\n if subnet is None:\n LOG.warning(\"No data for subnet %s\", subnet_id)\n raise SubnetIDNotFound()\n new_subnets[subnet_id] = subnet\n\n if not net:\n # We don't already have a NetModel, so look for a cached NetModel\n # with the right network ID. (In this case we must have new\n # subnets to add into the cache, and the cached NetModel must have\n # subnets other than the ones that we're adding in this iteration;\n # otherwise we would have already found it when searching by\n # subnet_id above.)\n assert new_subnets\n network_id = list(new_subnets.values())[0]['network_id']\n net = self.agent.cache.get_network_by_id(network_id)\n LOG.debug(\"Existing network model by network ID: %s\", net)\n\n if not net:\n # We still have no NetModel for the relevant network ID, so create\n # a new one. In this case we _must_ be adding new subnets.\n assert new_subnets\n net = empty_network(network_id)\n LOG.debug(\"New network %s\", net)\n elif new_subnets:\n # We have a NetModel that was already in the cache and are about to\n # modify it. Cache replacement only works if the new NetModel is a\n # distinct object from the existing one, so make a copy here.\n net = copy_network(net)\n LOG.debug(\"Copied network %s\", net)\n\n if new_subnets:\n # Add the new subnets into the NetModel.\n assert net\n net.subnets = [s for s in net.subnets\n if s.id not in new_subnets]\n net.subnets += list(new_subnets.values())\n\n # Add (or update) the NetModel in the cache.\n LOG.debug(\"Net: %s\", net)\n _fix_network_cache_port_lookup(self.agent, net.id)\n self.agent.cache.put(net)\n\n return net.id",
"def update_host_routes(self, config, cache):\n db = cache.get_or_create('host_routes', lambda: {})\n for net in config.networks:\n\n # For each subnet...\n for subnet in net.subnets:\n cidr = str(subnet.cidr)\n\n # determine the set of previously written routes for this cidr\n if cidr not in db:\n db[cidr] = set()\n\n current = db[cidr]\n\n # build a set of new routes for this cidr\n latest = set()\n for r in subnet.host_routes:\n latest.add((r.destination, r.next_hop))\n\n # If the set of previously written routes contains routes that\n # aren't defined in the new config, run commands to delete them\n for x in current - latest:\n if self._alter_route(net.interface.ifname, 'del', *x):\n current.remove(x)\n\n # If the new config contains routes that aren't defined in the\n # set of previously written routes, run commands to add them\n for x in latest - current:\n if self._alter_route(net.interface.ifname, 'add', *x):\n current.add(x)\n\n if not current:\n del db[cidr]\n\n cache.set('host_routes', db)",
"def _update_network_config(port_config, allow_multiple=False):\n # Get network id from port config\n network_id = port_config.get('network_id')\n\n # Get the network id from relationship if any\n rel_network_ids = find_openstack_ids_of_connected_nodes_by_openstack_type(\n ctx, NETWORK_OPENSTACK_TYPE)\n\n rel_network_id = rel_network_ids[0] if rel_network_ids else None\n # Check if network config comes from two sources or not\n if network_id and rel_network_id and not allow_multiple:\n raise NonRecoverableError('Port can\\'t both have the '\n '\"network_id\" property and be '\n 'connected to a network via a '\n 'relationship at the same time')\n\n port_config['network_id'] = network_id or rel_network_id",
"def _update_network_update_port(self, obj, networks):\n nc = self.network(obj)\n internal_ports = obj.data.get('internal_ports', [])\n\n # process each network to be updated\n for n in networks:\n # verify network properties and resolve names into ids\n net = self._validate_network(obj, n, 'update')\n\n # find existing port that matches network\n candidate_ports = self._find_port_by_net_spec(\n obj, net, internal_ports)\n port = candidate_ports[0]\n try:\n # set updated security groups for port\n port_attr = {\n 'security_groups': net.get(self.PORT_SECURITY_GROUPS, []),\n }\n LOG.debug(\"Setting security groups %s for port %s\",\n port_attr, port['id'])\n nc.port_update(port['id'], **port_attr)\n except exc.InternalError as ex:\n raise exc.EResourceUpdate(type='server', id=obj.physical_id,\n message=str(ex))",
"def test_update_network_no_policy_change(self):\n for qos_policy_id in (self.qos_policies[0].id, None):\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': qos_policy_id}\n port_ids, fip_ids, router_ids = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network)\n self.assertEqual(set([]), port_ids)\n self.assertEqual(set([]), fip_ids)\n self.assertEqual(set([]), router_ids)\n self.mock_rules.assert_not_called()",
"def refresh_dhcp_helper(self, network_id):\n old_network = self.cache.get_network_by_id(network_id)\n if not old_network:\n # DHCP current not running for network.\n return self.enable_dhcp_helper(network_id)\n\n try:\n network = self.plugin_rpc.get_network_info(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Network %s RPC info call failed.') % network_id)\n return\n\n old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)\n new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)\n\n if new_cidrs and old_cidrs == new_cidrs:\n self.call_driver('reload_allocations', network)\n self.cache.put(network)\n elif new_cidrs:\n if self.call_driver('restart', network):\n self.cache.put(network)\n else:\n self.disable_dhcp_helper(network.id)",
"def update_network(self, link_list):\n for couple in link_list:\n if couple[0] not in self.__network:\n self.__network[couple[0]] = Article(couple[0])\n if couple[1] not in Article(couple[0]).get_neighbors():\n self.__network[couple[0]].add_neighbor(Article(couple[1]))\n if couple[1] not in self.__network:\n self.__network[couple[1]] = Article(couple[1])",
"def update_network(self, context, id, network):\n LOG.debug(\"NECPluginV2.update_network() called, \"\n \"id=%(id)s network=%(network)s .\",\n {'id': id, 'network': network})\n\n if 'admin_state_up' in network['network']:\n network['network']['status'] = self._net_status(network)\n\n session = context.session\n with session.begin(subtransactions=True):\n old_net = super(NECPluginV2, self).get_network(context, id)\n new_net = super(NECPluginV2, self).update_network(context, id,\n network)\n self._process_l3_update(context, new_net, network['network'])\n\n changed = (old_net['admin_state_up'] != new_net['admin_state_up'])\n if changed and not new_net['admin_state_up']:\n # disable all active ports of the network\n filters = dict(network_id=[id], status=[const.PORT_STATUS_ACTIVE])\n ports = super(NECPluginV2, self).get_ports(context,\n filters=filters)\n for port in ports:\n # If some error occurs, status of errored port is set to ERROR.\n # This is avoids too many rollback.\n # TODO(amotoki): Raise an exception after all port operations\n # are finished to inform the caller of API of the failure.\n self.deactivate_port(context, port, raise_exc=False)\n elif changed and new_net['admin_state_up']:\n # enable ports of the network\n filters = dict(network_id=[id], status=[const.PORT_STATUS_DOWN],\n admin_state_up=[True])\n ports = super(NECPluginV2, self).get_ports(context,\n filters=filters)\n for port in ports:\n self.activate_port_if_ready(context, port, new_net)\n\n return new_net",
"def port_update_end(self, payload):\n port = DictModel(payload['port'])\n network = self.cache.get_network_by_id(port.network_id)\n if network:\n self.cache.put_port(port)\n self.call_driver('reload_allocations', network)",
"def reconfigure_priority(cls):\n # Reorder the existing networks\n all_networks = sorted(cls.all(), key=attrgetter('priority'))\n network_num = 0\n for network in all_networks:\n\n old_priority = network.priority\n\n # print(\"Network: \"+network.ssid+\" Priority: (\"+str(old_priority)+\" -> \"+str(network_num)+\")\")\n\n # Update the networks priority\n network.add_option(\"priority\", network_num)\n network.save()\n\n # Only increment priority for non-ambiguous networks\n if old_priority > 0:\n network_num += 1",
"def reset_network(self, req, id):\n context = req.environ['nova.context']\n try:\n self.compute_api.reset_network(context, id)\n except:\n readable = traceback.format_exc()\n LOG.exception(_(\"Compute.api::reset_network %s\"), readable)\n return faults.Fault(exc.HTTPUnprocessableEntity())\n return exc.HTTPAccepted()",
"def fin1():\n results.append(\n (\n ll_networks.update_network(\n positive=True, network=network, dns=list(), data_center=dc\n ), \"fin1: ll_networks.update_network (remove DNS checkbox)\"\n )\n )",
"def reset_network():\n _execute(\"sudo service network-manager stop\")\n _execute(\"sudo rm -f /var/lib/NetworkManager/NetworkManager.state\")\n _execute(\"sudo service network-manager start\")",
"def test_update_network_external_ports(self):\n policies_ports = [(self.qos_policies[0].id, {self.ports[0].id})]\n self.ports[2].qos_policy_id = self.qos_policies[0].id\n self.ports[2].update()\n port_obj.PortBinding(self.ctx, port_id=self.ports[1].id, host='host',\n profile={}, vif_type='',\n vnic_type=portbindings_api.VNIC_DIRECT).create()\n with mock.patch.object(self.qos_driver._driver._nb_idl,\n 'get_lswitch_port') as mock_lsp:\n mock_lsp.side_effect = [\n mock.Mock(type=ovn_const.LSP_TYPE_LOCALNET),\n mock.Mock(type=ovn_const.LSP_TYPE_EXTERNAL)]\n for qos_policy_id, reference_ports in policies_ports:\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n reviewed_port_ids, _, _ = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network, reset=True)\n self.assertEqual(reference_ports, reviewed_port_ids)\n calls = [mock.call(\n mock.ANY, self.ports[0].id, self.ports[0].network_id,\n qos_policy_id, None)]\n self.mock_rules.assert_has_calls(calls)\n self.mock_rules.reset_mock()",
"def __fillCache(self):\n assert (not self.__modelCache)\n\n # Assemble a list of model IDs to look up\n numModelIDs = len(self.__modelIDs) if self.__modelIDs else 0\n\n if self.__nextIndex >= numModelIDs:\n return\n\n idRange = self.__nextIndex + self.__CACHE_LIMIT\n if idRange > numModelIDs:\n idRange = numModelIDs\n\n lookupIDs = self.__modelIDs[self.__nextIndex:idRange]\n\n self.__nextIndex += (idRange - self.__nextIndex)\n\n # Query Nupic for model info of all models in the look-up list\n # NOTE: the order of results may not be the same as lookupIDs\n infoList = _clientJobsDB().modelsInfo(lookupIDs)\n assert len(infoList) == len(lookupIDs), \\\n \"modelsInfo returned %s elements; expected %s.\" % \\\n (len(infoList), len(lookupIDs))\n\n # Create _NupicModelInfo instances and add them to cache\n for rawInfo in infoList:\n modelInfo = _NupicModelInfo(rawInfo=rawInfo)\n self.__modelCache.append(modelInfo)\n\n assert len(self.__modelCache) == len(lookupIDs), \\\n \"Added %s elements to modelCache; expected %s.\" % \\\n (len(self.__modelCache), len(lookupIDs))\n\n if self.debug:\n _emit(Verbosity.DEBUG,\n \"MODELITERATOR: Leaving __fillCache(); modelCacheLen=%s\" % \\\n (len(self.__modelCache),))",
"def sync_state(self):\n LOG.info(_('Synchronizing state'))\n known_networks = set(self.cache.get_network_ids())\n\n try:\n active_networks = set(self.plugin_rpc.get_active_networks())\n for deleted_id in known_networks - active_networks:\n self.disable_dhcp_helper(deleted_id)\n\n for network_id in active_networks:\n self.refresh_dhcp_helper(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Unable to sync network state.'))",
"def test_update_external_network(self):\n network_policies = [(self.qos_policies[1].id,\n {self.fips[1].id},\n {self.router_fips.id}),\n (None,\n {self.fips[1].id},\n {self.router_fips.id})]\n\n self.fips[0].qos_policy_id = self.qos_policies[0].id\n self.fips[0].update()\n for qos_policy_id, ref_fips, ref_routers in network_policies:\n self.fips_network.qos_policy_id = qos_policy_id\n self.fips_network.update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n _, reviewed_fips_ids, reviewed_router_ids = (\n self.qos_driver.update_network(\n mock.Mock(), self.fips_network, original_network))\n self.assertEqual(ref_fips, reviewed_fips_ids)\n self.assertEqual(ref_routers, reviewed_router_ids)",
"def dcnm_network_delete_event(self, network_info):\n seg_id = network_info.get('segmentation_id')\n if not seg_id:\n LOG.error(_LE('Failed to delete network. Invalid network '\n 'info %s.'), network_info)\n query_net = self.get_network_by_segid(seg_id)\n if not query_net:\n LOG.info(_LI('dcnm_network_delete_event: network %(segid)s '\n 'does not exist.'), {'segid': seg_id})\n return\n if self.fw_api.is_network_source_fw(query_net, query_net.name):\n LOG.info(_LI(\"Service network %s, returning\"), query_net.name)\n return\n # Send network delete request to neutron\n try:\n del_net = self.network.pop(query_net.network_id)\n self.neutronclient.delete_network(query_net.network_id)\n self.delete_network_db(query_net.network_id)\n except Exception as exc:\n # Failed to delete network.\n # Put back the entry to the local cache???\n self.network[query_net.network_id] = del_net\n LOG.exception(_LE('dcnm_network_delete_event: Failed to delete '\n '%(network)s. Reason %(err)s.'),\n {'network': query_net.name, 'err': str(exc)})",
"def fusion_api_edit_network_set(self, body=None, uri=None, api=None, headers=None):\n return self.network_set.update(body, uri, api, headers)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run the EtcdWatcher loop. | def run(self):
self.etcd.start() | [
"def watch_forever(self):\n\n while True:\n try:\n self.do_tick()\n if self.etcd_elector:\n self.etcd_elector.wait_until_elected()\n self.do_watch()\n except Exception:\n LOG.exception('%s: etcd threw exception',\n self.name)\n # In case of a dead etcd causing continuous\n # exceptions, the pause here avoids eating all the\n # CPU\n time.sleep(self.DANGER_PAUSE)",
"def run_forever(self):\n self.__event_loop__.run_forever()",
"def run(self):\n idx = 0\n if self._verbose:\n self._log.info('(%s) Starting AlertWatcher' % self._label)\n self._log.debug('(%s) Sleep interval: %s' % (self._label, str(self._tick_interval)))\n while self._run:\n if self._loop_limit > 0 and idx >= self._loop_limit:\n if self._verbose:\n self._log.info('(%s) Watch loop limit reached, exiting.' % self._label)\n break\n\n alerts = self._get_alerts(self._session)\n for alert in alerts:\n self._match_alert(alert)\n\n match_done = self.is_fully_matched()\n if match_done:\n if self._verbose:\n self._log.info('(%s) Fully matched, exiting.' % self._label)\n break\n\n # TODO: purge expired here\n\n if self._watch_loop_callback is not None:\n self._watch_loop_callback()\n\n time.sleep(self._tick_interval)\n if self._loop_limit > 0:\n idx += 1",
"def run_inner(self):\n for event in self.inotify.event_gen():\n self.process_inotify_event(event)",
"async def start_watcher(self):\n allwatcher = client.AllWatcherFacade.from_connection(self.model.connection())\n\n change = await allwatcher.Next()\n\n while True:\n units = self.model.units\n await asyncio.sleep(2)\n change = await allwatcher.Next()\n for delta in change.deltas:\n delta_entity = delta.entity\n\n if delta_entity == \"unit\":\n if self.governor in delta.data[\"name\"]:\n continue\n\n if delta.type == \"change\":\n if delta.data[\"name\"] not in units:\n logging.warning(\"New unit was added\")\n\n event_data = {\n \"event_name\": \"unit_added\",\n \"event_data\": {\"unit_name\": delta.data[\"name\"]},\n }\n\n self.event_list.append(event_data)\n\n workload_status = delta.deltas[2][\"workload-status\"][\"current\"]\n if workload_status in self.status_changes:\n status_change_function = self.status_changes[\n workload_status\n ]\n status_change_function(delta)\n\n if delta.type == \"remove\":\n logging.warning(\"Unit was removed\")\n event_data = {\n \"event_name\": \"unit_removed\",\n \"event_data\": {\"unit_name\": delta.data[\"name\"]},\n }\n self.event_list.append(event_data)\n logging.warning(\"Action executed\")\n\n if self.event_list:\n await self.events_to_storage()",
"def run(self):\n self._connect()\n msg = 'monitoring %s at %d second intervals' % (self.host,\n self.interval)\n #self.log('CoreWatcher: %s' % msg)\n while self.state == self.CONNECTED:\n try:\n core = self._checkCoreFile()\n #14000\n except OSError,e:\n # For some reason, after tests are finished, the sendline()\n # in checkCoreFile() fails with:\n #\n # File \"...site-packages/pexpect.py\", line 439, in send\n # return os.write(self.child_fd, str)\n # OSError: [Errno 9] Bad file number\n #\n # The ssh connection is still up but we've lost track of it.\n # This may be something to do with NexTest tearing down the\n # logicial IP interfaces on Solaris? Anyway, hack for now:\n\t # get out of the loop and shut down the connection.\n #14000 \n self.log(\"CoreWatcher: Exception: %s - caught OSError, shutting down\" % str(e))\n break\n \n if core:\n #self.log('CoreWatcher: releasing semaphore')\n self.caller.put('CORE')\n msg = self.q.get()\n # wait for caller message\n if msg == 'GO':\n #34036 - Removed clear core method for not to remove the \n #corefile\n self.log('CoreWatcher: continue')\n continue\n elif msg == 'STOP':\n #34036 - Removed clear core method for not to remove the\n #corefile\n print \"All done\"\n self.state=self.STOPPING\n else:\n print \"got unknown message from caller\"\n try:\n msg = self.q.get_nowait()\n except Empty:\n \"\"\"\n Nothing to do\n \"\"\"\n #self.log(\"CoreWatcher: no messages to process\")\n else:\n # only one kind of message for now: STOP\n if msg == 'GO':\n #print \"CoreWatcher: deleting core file\"\n self.ssh.sendline\n if msg == 'STOP':\n self.log('stopping')\n self.state = self.STOPPING\n #self.log('sleeping')\n time.sleep(self.interval)\n\n # Loop complete. Clean up.\n self._disconnect()",
"def listen(self):\n self.init_delete_batch_processing()\n self.init_file_batch_processing()\n self.init_symlink_batch_processing()\n\n self.loop.create_task(self.start_watching_roots())\n\n self.revisit_cond = asyncio.Condition()\n self.loop.create_task(self.start_polling_revisits())\n\n self.start_polling_changes()\n self.loop.run_forever()\n self.stop_polling_changes()",
"def main():\n channel_watcher = ChannelWatcher()\n channel_watcher.create_threads()\n for thread in channel_watcher.threads:\n thread.join()\n return",
"def run_forever():\n asyncio.get_event_loop().run_forever()",
"def run(self):\n self.cmdloop()",
"def run(self):\n while True:\n try:\n if not self._read_new_entries(False):\n time.sleep(0.1)\n self._update_all_tasks()\n except KeyboardInterrupt:\n break",
"def main(dir_to_watch):\n event_handler = AudioCreatedHandler()\n observer = Observer()\n observer.schedule(event_handler, dir_to_watch, recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(1) \n except KeyboardInterrupt:\n print \"Stopping...\"\n observer.stop()\n observer.join()",
"def loop(self):\n keys.mode = 'main'\n for line in client.readlines('/event'):\n if not self.alive:\n break\n self.dispatch(*line.split(' ', 1))\n self.alive = False",
"def _run_loop(self):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n self._server = websockets.serve(self._log_message, self._host, self._port)\n\n loop.run_until_complete(self._server)\n loop.run_forever()",
"def run(self):\n while True:\n try:\n self._run()\n except:\n time.sleep(2)",
"def _run_cycle(self):\n \n print(self.example_echo(\"Hello World\"))\n time.sleep(1)",
"def run(self):\n self._start_servers()\n monitor = KodiMonitor(self.nx_common, self.nx_common.log)\n while not monitor.abortRequested():\n monitor.update_playback_progress()\n try:\n if self.library_update_scheduled() and self._is_idle():\n self.update_library()\n except RuntimeError as exc:\n self.nx_common.log(\n 'RuntimeError: {}'.format(exc), xbmc.LOGERROR)\n if monitor.waitForAbort(5):\n break\n self._shutdown()",
"def run(self) -> None:\n\n while self.stayAlive:\n\n time.sleep(self.cfg.MCtrl[\"LoopTime\"])",
"def run_version(watcher: inotify.adapters.Inotify, config: Config) -> None:\n for directory in config[\"directories\"]:\n make_dir(directory)\n\n for watch_job in config[\"watches\"]:\n if watch_job[\"type\"] == \"simple\":\n watch_job_simple = cast(ConfigSimpleValue, watch_job)\n watch_directory_recursively(watcher, watch_job_simple[\"path\"])\n elif watch_job[\"type\"] == \"regex\":\n watch_job_regex = cast(ConfigRegexValue, watch_job)\n watch_directory_recursively(watcher, watch_job_regex[\"base_path\"])\n else:\n logging.warning(f\"Unknown watch job type: {watch_job['type']}\")\n\n for _, type_names, path, filename in watcher.event_gen(yield_nones=False):\n filepath = os.path.join(path, filename)\n\n # print(f\"event: {type_names}, {path}, {filename}\")\n if \"IN_CREATE\" in type_names and \"IN_ISDIR\" in type_names: # Directory was created\n watcher.add_watch(filepath)\n logging.warning(f\"Watching new directory {filepath}\")\n continue\n\n if \"IN_CLOSE_WRITE\" not in type_names: # Skip anything else as we're after events after a file has been written\n continue\n\n simple_conf, regex_conf = get_watch_job(filepath, config)\n\n if simple_conf: # Process simple files put in directory\n slug = simple_conf[\"slug\"]\n blob_name = f\"{slug}/{filename}\"\n\n upload_file(filepath, simple_conf[\"dsn\"], simple_conf[\"container\"], blob_name)\n\n elif regex_conf: # Check if filepath matches regex\n local_path = filepath.replace(regex_conf[\"base_path\"], \"\").lstrip(\"/\")\n match = re.match(regex_conf[\"regex\"], local_path)\n if not match:\n logging.warning(f\"No watches to cover file: {filename}\")\n continue\n\n match_data = match.groupdict()\n match_data[\"filename\"] = filename\n blob_name = regex_conf[\"dest_path\"].format(**match_data)\n upload_file(filepath, regex_conf[\"dsn\"], regex_conf[\"container\"], blob_name)\n\n else:\n logging.warning(f\"No watches to cover file: {filename}\")\n\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compiles robot from given file and returns class object | def compile_robot(file_name, module_name = "contestant_module"):
global counter_module
module_name += str(counter_module)
counter_module += 1
mod = importCode(file_name, module_name)
compiled_class = None
for symbol in dir(mod):
if hasattr(getattr(mod, symbol), "act") and getattr(mod, symbol).__name__ != "RobotController":
compiled_class = getattr(mod, symbol)
print compiled_class
globals()[compiled_class.__name__] = compiled_class
if compiled_class is None:
raise KrakrobotException("Not found class with act() function named different than RobotController in provided .py")
return compiled_class, mod | [
"def compile(self, filename=None):\n\n code = CodeGen()\n code.append(\"\"\"\n # generated by Construct, this source is for inspection only! do not import!\n\n from construct import *\n from construct.lib import *\n from io import BytesIO\n import struct\n import collections\n import itertools\n\n def restream(data, func):\n return func(BytesIO(data))\n def reuse(obj, func):\n return func(obj)\n\n linkedinstances = {}\n linkedparsers = {}\n linkedbuilders = {}\n\n len_ = len\n sum_ = sum\n min_ = min\n max_ = max\n abs_ = abs\n \"\"\")\n code.append(f\"\"\"\n def parseall(io, this):\n return {self._compileparse(code)}\n def buildall(obj, io, this):\n return {self._compilebuild(code)}\n compiled = Compiled(parseall, buildall)\n \"\"\")\n source = code.toString()\n\n if filename:\n with open(filename, \"wt\") as f:\n f.write(source)\n\n modulename = hexlify(hashlib.sha1(source.encode()).digest()).decode()\n module_spec = importlib.machinery.ModuleSpec(modulename, None)\n module = importlib.util.module_from_spec(module_spec)\n c = compile(source, '', 'exec')\n exec(c, module.__dict__)\n\n module.linkedinstances = code.linkedinstances\n module.linkedparsers = code.linkedparsers\n module.linkedbuilders = code.linkedbuilders\n compiled = module.compiled\n compiled.source = source\n compiled.module = module\n compiled.modulename = modulename\n compiled.defersubcon = self\n return compiled",
"def createInstanceSource(pcol, path, nr_robots, smallest_robot_id):\n\n # prevent alphabet related bugs by including e and f objects in alphabet\n if (\"e\" not in pcol.A):\n pcol.A.append(\"e\")\n if (\"f\" not in pcol.A):\n pcol.A.append(\"f\")\n\n with open(path + \".c\", \"w\") as fout:\n fout.write(\"\"\"#include \"%s.h\"\n\n#ifdef NEEDING_WILDCARD_EXPANSION\n #include \"wild_expand.h\"\n#endif\n\n#ifdef PCOL_SIM\"\"\" % path.split(\"/\")[-1]) #only filename\n\n fout.write(\"\"\"\\n char* objectNames[] = {[NO_OBJECT] = \"no_object\", \"\"\")\n for obj in pcol.A:\n fout.write(\"\"\"[OBJECT_ID_%s] = \"%s\", \"\"\" % (obj.upper(), obj))\n\n fout.write(\"\"\"};\n char* agentNames[] = {\"\"\")\n for ag_name in pcol.B:\n fout.write(\"\"\"[AGENT_%s] = \"%s\", \"\"\" % (ag_name.upper(), ag_name))\n fout.write(\"\"\"};\n#endif\n\n//the smallest kilo_uid from the swarm\nconst uint16_t smallest_robot_uid = %d;\n//the number of robots that make up the swarm\nconst uint16_t nr_swarm_robots = %d;\n\nvoid lulu_init(Pcolony_t *pcol) {\"\"\" % (smallest_robot_id, nr_robots) )\n\n # call initPcolony()\n fout.write(\"\"\"\\n //init Pcolony with alphabet size = %d, nr of agents = %d, capacity = %d\n initPcolony(pcol, %d, %d, %d);\"\"\" % (len(pcol.A), len(pcol.B), pcol.n, len(pcol.A), len(pcol.B), pcol.n))\n fout.write(\"\"\"\\n //Pcolony.alphabet = %s\"\"\" % pcol.A)\n\n # init environment\n fout.write(\"\"\"\\n\\n //init environment\"\"\")\n counter = 0;\n for obj, nr in pcol.env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->env.items[%d].nr = %d;\\n\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init environment\"\"\")\n\n fout.write(\"\"\"\\n\\n //init global pswarm environment\"\"\")\n if (pcol.parentSwarm == None or len(pcol.parentSwarm.global_env) == 0):\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[0].id = OBJECT_ID_E;\"\"\")\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[0].nr = 1;\"\"\")\n else:\n counter = 0\n for obj, nr in pcol.parentSwarm.global_env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[%d].nr = %d;\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init global pswarm environment\"\"\")\n\n fout.write(\"\"\"\\n\\n //init INPUT global pswarm environment\"\"\")\n if (pcol.parentSwarm == None or len(pcol.parentSwarm.in_global_env) == 0):\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[0].id = OBJECT_ID_E;\"\"\")\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[0].nr = 1;\"\"\")\n else:\n counter = 0\n for obj, nr in pcol.parentSwarm.in_global_env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[%d].nr = %d;\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init INPUT global pswarm environment\"\"\")\n\n fout.write(\"\"\"\\n\\n //init OUTPUT global pswarm environment\"\"\")\n if (pcol.parentSwarm == None or len(pcol.parentSwarm.out_global_env) == 0):\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[0].id = OBJECT_ID_E;\"\"\")\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[0].nr = 1;\"\"\")\n else:\n counter 
= 0\n for obj, nr in pcol.parentSwarm.out_global_env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[%d].nr = %d;\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init OUTPUT global pswarm environment\"\"\")\n\n for ag_name in pcol.B:\n fout.write(\"\"\"\\n\\n //init agent %s\"\"\" % ag_name)\n #fout.write(\"\"\"\\n\\n initAgent(&pcol->agents[AGENT_%s], pcol, %d);\"\"\" % (ag_name.upper(), len(pcol.agents[ag_name].programs)))\n fout.write(\"\"\"\\n\\n initAgent(&pcol->agents[AGENT_%s], pcol, %d);\"\"\" % (ag_name.upper(), getNrOfProgramsAfterExpansion(pcol.agents[ag_name], nr_robots- 1)))\n\n fout.write(\"\"\"\\n //init obj multiset\"\"\")\n counter = 0;\n for obj, nr in pcol.agents[ag_name].obj.items():\n #replace %id and * with $id and $ respectively\n\n for i in range(nr):\n fout.write(\"\"\"\\n pcol->agents[AGENT_%s].obj.items[%d] = OBJECT_ID_%s;\"\"\" % (ag_name.upper(), counter, obj.upper()))\n counter += 1\n\n fout.write(\"\"\"\\n\\n //init programs\"\"\")\n for prg_nr, prg in enumerate(pcol.agents[ag_name].programs):\n fout.write(\"\"\"\\n\\n initProgram(&pcol->agents[AGENT_%s].programs[%d], %d);\"\"\" % (ag_name.upper(), prg_nr, getNrOfRulesWithoutRepetitions(prg)))\n fout.write(\"\"\"\\n //init program %d: < %s >\"\"\" % (prg_nr, prg.print()))\n\n rule_index = 0\n for rule_nr, rule in enumerate(prg):\n # skip rules that contain identical operands and thus have no effect\n if (rule.lhs == rule.rhs and rule.lhs == 'e' and rule.main_type != sim.RuleType.conditional):\n continue\n\n fout.write(\"\"\"\\n //init rule %d: %s\"\"\" % (rule_nr, rule.print(toString=True)) )\n if (rule.main_type != sim.RuleType.conditional):\n fout.write(\"\"\"\\n initRule(&pcol->agents[AGENT_%s].programs[%d].rules[%d], RULE_TYPE_%s, OBJECT_ID_%s, OBJECT_ID_%s, NO_OBJECT, NO_OBJECT);\"\"\" % (ag_name.upper(), prg_nr, rule_index, rule.type.name.upper(), rule.lhs.upper(), rule.rhs.upper()))\n else:\n fout.write(\"\"\"\\n initRule(&pcol->agents[AGENT_%s].programs[%d].rules[%d], RULE_TYPE_CONDITIONAL_%s_%s, OBJECT_ID_%s, OBJECT_ID_%s, OBJECT_ID_%s, OBJECT_ID_%s);\"\"\" % (ag_name.upper(), prg_nr, rule_index, rule.type.name.upper(), rule.alt_type.name.upper(), rule.lhs.upper(), rule.rhs.upper(), rule.alt_lhs.upper(), rule.alt_rhs.upper()))\n\n #increase rule_index\n rule_index += 1\n fout.write(\"\"\"\\n //end init program %d\n pcol->agents[AGENT_%s].init_program_nr++;\"\"\" % (prg_nr, ag_name.upper()))\n fout.write(\"\"\"\\n //end init programs\"\"\")\n\n fout.write(\"\"\"\\n //end init agent %s\"\"\" % ag_name)\n\n fout.write(\"\"\"\\n}\"\"\")\n fout.write(\"\"\"\\n\\nvoid lulu_destroy(Pcolony_t *pcol) {\n //destroys all of the subcomponents\n destroyPcolony(pcol);\n}\"\"\")\n fout.write(\"\"\"\\n\n#ifdef NEEDING_WILDCARD_EXPANSION\nuint16_t expand_pcolony(Pcolony_t *pcol, uint16_t my_id) {\n //used for a cleaner iteration through the P colony\n //instead of using agents[i] all of the time, we use just agent\n Agent_t *agent;\n\"\"\")\n\n fout.write(\"\"\"\\n uint8_t obj_with_id[] = {\"\"\")\n obj_with_id_size = 0\n for obj in pcol.A:\n if (\"_W_ID\" in obj):\n fout.write(\"OBJECT_ID_%s, \" % obj.upper())\n obj_with_id_size += 1\n fout.write(\"\"\"};\n uint8_t obj_with_id_size = %d;\"\"\" % (obj_with_id_size))\n\n fout.write(\"\"\"\\n uint8_t obj_with_any[] = {\"\"\")\n obj_with_any_size = 0\n 
is_obj_with_any_followed_by_id = []\n for i, obj in enumerate(pcol.A):\n if (obj.endswith(\"_W_ALL\")):\n fout.write(\"OBJECT_ID_%s, \" % obj.upper())\n # if we are at least 2 objects before the end of the list\n if (i < len(pcol.A) - 1):\n # check if this _$ wildcarded object is followed by a _$id object\n if (\"_W_ID\" in pcol.A[i+1]):\n is_obj_with_any_followed_by_id.append(1)\n else:\n is_obj_with_any_followed_by_id.append(0)\n else:\n # this (_$) object is the last one in the list\n is_obj_with_any_followed_by_id.append(0)\n obj_with_any_size += 1\n fout.write(\"\"\"};\n uint8_t obj_with_any_size = %d;\n uint8_t is_obj_with_any_followed_by_id[] = {%s};\"\"\" % (obj_with_any_size,\n str(is_obj_with_any_followed_by_id).replace(\"[\", \"\").replace(\"]\", \"\")))\n\n fout.write(\"\"\"\\n\\n uint16_t my_symbolic_id = my_id - smallest_robot_uid;\n\n //replace W_ID wildcarded objects with the object corresponding to the symbolic id\n // e.g.: B_W_ID -> B_0 for my_symbolic_id = 0\n replacePcolonyWildID(pcol, obj_with_id, obj_with_id_size, my_symbolic_id);\n\n //expand each obj_with_any[] element into nr_swarm_robots objects except my_symbolic id.\n // e.g.: B_W_ALL -> B_0, B_2 for nr_swarm_robots = 3 and my_symbolic_id = 1\n expandPcolonyWildAny(pcol, obj_with_any, is_obj_with_any_followed_by_id, obj_with_any_size, my_symbolic_id, nr_swarm_robots);\n\n return my_symbolic_id;\n}\n#endif\"\"\")",
"def generate_classes():\n import_url = 'https://raw.githubusercontent.com/{}/{}/{}/{}'.format(\n github_user, github_repo, github_branch, github_path)\n print(\"Downloading {} for last API version.\".format(import_url))\n data = json.loads(urlopen(import_url).read().decode('utf-8'))\n print(\"Download OK. Generating python files...\")\n\n for kinds in ['requests', 'events']:\n if kinds not in data:\n raise Exception(\"Missing {} in data.\".format(kinds))\n kind = kinds.rstrip('s').title()\n with open('pyobs/{}.py'.format(kinds), 'w') as f:\n\n f.write(\"#!/usr/bin/env python\\n\")\n f.write(\"# -*- coding: utf-8 -*-\\n\")\n f.write(\"\\n\")\n f.write(\"# THIS FILE WAS GENERATED BY generate_classes.py - \"\n \"DO NOT EDIT #\\n\")\n f.write(\"# (Generated on {}) #\\n\".format(\n datetime.now().isoformat(\" \")))\n f.write(\"\\n\")\n f.write(\"from .base_classes import Base{}\\n\".format(kind))\n f.write(\"\\n\\n\")\n for sec in data[kinds]:\n for i in data[kinds][sec]:\n f.write(\"class {}(Base{}):\\n\".format(i['name'], kind))\n f.write(\" \\\"\\\"\\\"\\n\")\n f.write(\" {}\\n\".format(\n i['description'].replace('\\n', '\\n ')))\n\n if (\n ('returns' in i) and (len(i['returns']) > 0)\n ) or (\n ('params' in i) and (len(i['params']) > 0)\n ):\n f.write(\"\\n\")\n\n arguments_default = []\n arguments = []\n try:\n if len(i['params']) > 0:\n f.write(\" :Arguments:\\n\")\n for a in i['params']:\n f.write(\" *{}*\\n\".format(\n clean_var(a['name'])))\n f.write(\" type: {}\\n\".format(\n a['type']))\n f.write(\" {}\\n\".format(\n a['description']))\n if '.' in a['name']:\n # If the name contains a . it is\n # describing a field of an object, we do\n # not need to create variables, storage\n # or accessors, just document it\n continue\n if '[]' in a['name']:\n # If the name contains a [] it is\n # describing the items of an array, we do\n # not need to create variables, storage\n # or accessors, just document it\n continue\n if 'optional' in a['type']:\n arguments_default.append(a['name'])\n else:\n arguments.append(a['name'])\n except KeyError:\n pass\n\n returns = []\n try:\n if len(i['returns']) > 0:\n f.write(\" :Returns:\\n\")\n for r in i['returns']:\n f.write(\" *{}*\\n\".format(\n clean_var(r['name'])))\n f.write(\" type: {}\\n\".format(\n r['type']))\n f.write(\" {}\\n\".format(\n r['description']))\n if '.' in r['name']:\n # If the name contains a . it is\n # describing a field of an object, we do\n # not need to create variables, storage\n # or accessors, just document it\n continue\n # .*. are used to describe arrays that are\n # already being captured by the above filter\n returns.append(r['name'])\n except KeyError:\n pass\n\n f.write(\" \\\"\\\"\\\"\\n\")\n f.write(\" def __init__({}):\\n\".format(\n \", \".join(\n [\"self\"] +\n [clean_var(a) for a in arguments] +\n [clean_var(a) + \"=None\" for a in arguments_default]\n )\n ))\n f.write(\" Base{}.__init__(self)\\n\".format(kind))\n f.write(\" self._name = '{}'\\n\".format(i['name']))\n for r in returns:\n f.write(\" self._returns['{}'] = None\\n\".format(\n r))\n for a in arguments:\n f.write(\" self._params['{}'] = {}\\n\".format(\n a, clean_var(a)))\n for a in arguments_default:\n f.write(\" self._params['{}'] = {}\\n\".format(\n a, clean_var(a)))\n f.write(\"\\n\")\n for r in returns:\n f.write(\" @property\\n\")\n f.write(\" def {}(self):\\n\".format(clean_var(r)))\n f.write(\" return self._returns['{}']\\n\".format(\n r))\n f.write(\"\\n\")\n f.write(\"\\n\")\n\n print(\"API classes have been generated.\")",
"def compile_contract(file: str, class_call: str) -> str:\n print(f\"Compiling {file}.py ....\")\n exit_code = os.system(\n f\"~/smartpy-cli/SmartPy.sh compile contract/contracts/{file}.py \\\"{class_call}\\\" contract/build\")\n if exit_code != 0:\n raise Exception(f\"Failed to compile Contract : {file}.py\")",
"def Build(self, out_file):\n raise NotImplementedError",
"def default_robot(): #py:default_robot\n class Robot(UsedRobot):\n def __init__(self):\n self.body = RUR._default_robot_body_()\n return Robot()",
"def main():\n file_or_dir = sys.argv[-1]\n # print file_or_dir\n # If the argv is file\n if os.path.isfile(file_or_dir):\n # print file_or_dir\n with open(file_or_dir, 'r') as vm_file_code:\n file_path = (os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))) + \"/\" + file_or_dir\n parser = Parser(vm_file_code=\"vm_file_code\", file_path=\"file_path\")\n parsed_file, command_type_list = parser.parse_asm()\n code_writer = Code_Writer(file_path, parsed_file, command_type_list)\n code_writer.open_file_and_write()\n vm_file_code.close()\n\n # If the argv is a dir\n elif os.path.isdir(file_or_dir):\n dir_path = os.path.abspath(file_or_dir)\n print 'IS a directory', file_or_dir\n\n dir_name = file_or_dir.split('/')[-1]\n\n parser = Parser(dir_path=dir_path)\n class_dict = parser.parse_directory()\n asm_file_path = (os.getcwd() + \"/\" + dir_name + \"/\" + dir_name + '.asm')\n print asm_file_path\n code_writer = Code_Writer()\n code_writer.write_to_directory_file(class_dict, asm_file_path)\n # print class_dict\n # pprint(class_dict)\n\n\n else:\n print \"Pass a valid file or dir\"\n\n # for file in os.walk(file_or_dir):\n # print file",
"def NewRobot(self,module,s,d,x,y,xT,yT,rgb): #Add robot\n\t\tsys.path.insert(0, './studentRobots')\n\t\tmname = module[:-3]\n\t\t\n\t\tspec = importlib.util.find_spec(mname)\n\t\tif spec is None:\n\t\t\tprint(\"can't find the module\")\n\t\telse:\n\t\t\t# the actual import ...\n\t\t\tmodule = importlib.util.module_from_spec(spec)\n\t\t\tspec.loader.exec_module(module)\n\t\tprint(mname,module)\n\t\trbot = module.s1Robot(s,d,x,y,xT,yT,rgb)\n\t\tself.__robotList.append(rbot)",
"def main():\n toy_robot = toys.ToyRobot()\n filename = 'input.txt'\n try:\n with open(filename) as f:\n lines = f.readlines()\n except IOError:\n print \"%s not found. Exiting\" % filename\n sys.exit()\n\n lines = [line.rstrip() for line in lines]\n for line in lines:\n try:\n print line\n line = line.rstrip().split(' ')\n try:\n command, args = line[0].lower(), line[1].split(',')\n output = getattr(toy_robot, command)(*args)\n except IndexError:\n command = line[0].lower()\n output = getattr(toy_robot, command)()\n if output:\n print output\n\n except TypeError:\n print \"Invalid command syntax\"",
"def get_robots_parser(robots_url):\n try:\n robot_parser = robotparser.RobotFileParser()\n robot_parser.set_url(robots_url)\n robot_parser.read()\n return robot_parser\n except Exception as e:\n print('Error finding robots url:', robots_url, e)",
"def compile(self, tokens):\n tokens.reverse()\n compiled_cmds = []\n if tokens[-1][1] == 'class':\n compiled_cmds.append('<class>')\n compiled_cmds += self._compile_class(tokens)\n compiled_cmds.append('</class>')\n return compiled_cmds",
"def compile(self, compilationParams, ownDir, sourceFiles, depFiles, evaluationContext, name='executable'):\n raise Exception(\"Can't compile files from language %s.\" % self.lang)",
"def compile_class(self):\r\n self.tokenizer.advance() # ignore 'class' keyword\r\n self.class_name = self.tokenizer.identifier()\r\n self.tokenizer.advance()\r\n self.tokenizer.advance() # ignore '{' symbol\r\n while self.tokenizer.curtok < len(self.tokenizer.tokens) - 1:\r\n dec = self.tokenizer.key_word()\r\n if dec == \"field\" or dec == \"static\":\r\n self.compile_var_dec()\r\n else:\r\n self.compile_subroutine()\r\n self.tokenizer.advance()",
"def _compile_class(self, tokens):\n cmds = []\n cmds.append( self._generate_cmd(tokens) )\n cond = tokens[-1][0] == 'identifier'\n cmds.append( self._generate_cmd_if(cond, tokens, \"invalid class name\") )\n cond = tokens[-1][1] == '{'\n cmds.append( self._generate_cmd_if(cond, tokens, \"missing '{' after the class name\"))\n cmds += self._compile_classVarDec(tokens)\n cmds += self._compile_subroutineDec(tokens)\n cond = tokens[-1][1] == '}'\n cmds.append( self._generate_cmd_if(cond, tokens, \"missing '}' after the class\"))\n return cmds",
"def __init__(self, a_config_file, a_target_name=None):\n self.set_config_file(a_config_file)\n self.set_target_name(a_target_name)\n self._is_compile = True\n self._has_executable = True\n self._error_list = []\n self._warning_list = []\n self._init_error_warning_regex()\n self._init_tools_regex()",
"def compile(c_file: File) -> File:\n os.system(\"gcc -c {c_file}\".format(c_file=c_file.path))\n return File(c_file.path.replace('.c', '.o'))",
"def makeTestProcessor(test_processor_path):\r\n\r\n className = splitext(basename(test_processor_path))[0]\r\n\r\n with open(test_processor_path, 'w') as f:\r\n f.write(\"\"\"\\\r\n'''\r\nTest processor class - should be deleted upon completion of test\r\n'''\r\n\r\n'''___Built-In Modules___'''\r\nimport sys\r\nfrom os.path import dirname\r\n\r\n'''___Third-Party Modules___'''\r\n\r\n'''___NPL Modules___'''\r\ndataProcessing_directory = dirname(dirname(__file__))\r\nsys.path.append(dataProcessing_directory)\r\nfrom AbstractProcessor import AbstractProcessor\r\n\r\nclass %s(AbstractProcessor):\r\n processor_directory = dirname(__file__)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n\"\"\" % (className))\r\n\r\n return 0",
"def _get_codeobj(pyfile):\n from imp import PY_COMPILED, PY_SOURCE\n\n result, fileobj, fullpath = _check_if_pyc(pyfile)\n\n # WARNING:\n # fp.read() can blowup if the module is extremely large file.\n # Lookout for overflow errors.\n try:\n data = fileobj.read()\n finally:\n fileobj.close()\n\n # This is a .pyc file. Treat accordingly.\n if result is PY_COMPILED:\n # .pyc format is as follows:\n # 0 - 4 bytes: Magic number, which changes with each create of .pyc file.\n # First 2 bytes change with each marshal of .pyc file. Last 2 bytes is \"\\r\\n\".\n # 4 - 8 bytes: Datetime value, when the .py was last changed.\n # 8 - EOF: Marshalled code object data.\n # So to get code object, just read the 8th byte onwards till EOF, and\n # UN-marshal it.\n import marshal\n code_obj = marshal.loads(data[8:])\n\n elif result is PY_SOURCE:\n # This is a .py file.\n code_obj = compile(data, fullpath, 'exec')\n\n else:\n # Unsupported extension\n raise Exception(\"Input file is unknown format: {0}\".format(fullpath))\n\n # Return code object\n return code_obj",
"def createClassFile( p ):\n create_modules( p[\"package\"] )\n name = p[\"protocol\"][\"name\"]\n name.lower()\n path = os.path.join( *p[\"package\"].split( \".\" ) )\n with open( \"./%s/%s.py\" % ( path, name ), \"w\" ) as f:\n for i in p[\"imports\"]:\n createClassFile( i )\n\n c = Klass( package=p[\"package\"], includes=p[\"imports\"], **p[\"protocol\"] )\t\n\n f.write( c.generate() )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepare normalized image and label. | def _prepare_image_and_label(self, data):
image = tf.io.decode_image(data['image/encoded'], channels=3)
label = tf.io.decode_image(data['image/segmentation/class/encoded'],
channels=1)
height = data['image/height']
width = data['image/width']
image = tf.reshape(image, (height, width, 3))
label = tf.reshape(label, (1, height, width))
label = tf.cast(label, tf.float32)
# Normalizes image with mean and std pixel values.
image = input_utils.normalize_image(image)
return image, label | [
"def prep_images(self):\n self.prep_score()\n self.prep_high_score()\n self.prep_level()\n self.prep_rockets()",
"def prep_image_data(arg_dict):\n cat_df = pd.read_csv(arg_dict['category_file'],\n skiprows=1,\n sep='\\s+')\n bbox_df = pd.read_csv(arg_dict['bbox_file'],\n skiprows=1,\n sep='\\s+')\n img_dir = arg_dict['image_dir']\n\n combo_df = pd.merge(cat_df, bbox_df, how='outer', on='image_name')\n combo_df['image_name'] = combo_df['image_name'].apply(\n lambda x: x[len('img'):-len('.jpg')])\n labels = Labels(combo_df, img_dir, n_images_loaded=-1)\n labels.set_data_target('raw_image', chunksize=3000)\n return labels",
"def imagenet_preprocess(image, label):\n i = image\n i = tf.cast(i, tf.float32)\n i = tf.image.resize_with_crop_or_pad(i, 224, 224)\n if model_name == 'ResNet50' or model_name == 'ResNet152':\n i = tf.keras.applications.resnet.preprocess_input(i)\n else:\n i = tf.keras.applications.densenet.preprocess_input(i)\n return (i, label)",
"def prepare(self):\n def clean_name(name):\n \"\"\"Returns a shorter version of object names for cleaner display.\"\"\"\n return \",\".join(name.split(\",\")[:1])\n\n # Prepare additional properties for the dataset\n self.num_classes = len(self.class_info)\n self.class_ids = np.arange(self.num_classes)\n self.class_names = [clean_name(c[\"name\"]) for c in self.class_info]\n self.num_images = len(self.image_info)\n self._image_ids = np.arange(self.num_images)",
"def test_normalize(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n assert np.abs(np.mean(_image)-0) < 1e-8\n assert np.abs(np.std(_image)-1) < 1e-8",
"def preprocess(img):\n \n scaler=StandardScaler() ## scaler object to perform preprocessing\n img=scaler.fit_transform(img) ## zero-center and normalize\n \n return img",
"def normalise(image):",
"def prep_data(labels, image_root):\n labels = split_description(labels)\n labels = convert_plastics(labels)\n\n # Encoding shape and color data\n labels['Shape'] = encode_column(labels[['Shape']])\n labels['Color'] = encode_column(labels[['Color']])\n labels['isPlastic'] = encode_column(labels[['isPlastic']])\n labels = add_filenames(labels, image_root)\n labels = labels.dropna().reset_index()\n\n return labels",
"def preprocess_images(self, images):\n raise NotImplementedError",
"def normalize_labels(self):\n self.y_mean, self.y_std = du.get_mean_std(self.y_train)\n self.y_train = du.normalize(self.y_train, self.y_mean, self.y_std)\n if self.x_test is not None and self.y_test is not None:\n self.y_test = du.normalize(self.y_test, self.y_mean, self.y_std)",
"def _prepare_im(self, im):\n # Train and test setups differ\n train_size = cfg.TRAIN.IM_SIZE\n if \"train\" in self._split:\n # Scale and aspect ratio then horizontal flip\n im = transforms.random_sized_crop(im=im, size=train_size, area_frac=0.08)\n im = transforms.horizontal_flip(im=im, p=0.5, order=\"HWC\")\n else:\n # Scale and center crop\n im = transforms.scale(cfg.TEST.IM_SIZE, im)\n im = transforms.center_crop(train_size, im)\n # HWC -> CHW\n im = im.transpose([2, 0, 1])\n # [0, 255] -> [0, 1]\n im = im / 255.0\n # PCA jitter\n if \"train\" in self._split:\n im = transforms.lighting(im, 0.1, _EIG_VALS, _EIG_VECS)\n # Color normalization\n im = transforms.color_norm(im, _MEAN, _SD)\n return im",
"def _reshape_and_normalize_img(self, image):\n\t\timage = skimage.transform.resize(image, (self.height, self.width, self.channels), mode='reflect', preserve_range=True)\n\t\timage = np.reshape(image, (1, self.height, self.width, self.channels))\n\t\timage = image - VGG_MEANS\n\t\treturn image",
"def preprocess(self, img):\n img_ = image.load_img(img, target_size=(299, 299))\n img_ = image.img_to_array(img_)\n img_ = np.expand_dims(img_, axis=0)\n img_ = preprocess_input(img_)\n return img_",
"def preprocess():\r\n # preload image data to speed up processing for testing\r\n # vehicle_images = utils.unpickle_data(c.vehicles_train_data_p)\r\n # non_vehicle_images = utils.unpickle_data(c.non_vehicles_train_data_p)\r\n\r\n vehicle_images = make_image_pickle(c.vehicles_train_data_folder, c.vehicles_train_data_p, (64,64,3))\r\n non_vehicle_images = make_image_pickle(c.non_vehicles_train_data_folder, c.non_vehicles_train_data_p, (64,64,3))\r\n vehicle_hists = histogram.multispace_histograms_images(vehicle_images, c.vehicles_histograms_p)\r\n non_vehicle_hists = histogram.multispace_histograms_images(non_vehicle_images, c.non_vehicles_histograms_p)\r\n vehicle_sbins = binning.multispace_spatial_bin_images(vehicle_images, c.vehicles_spatial_bins_p)\r\n non_vehicle_sbins = binning.multispace_spatial_bin_images(non_vehicle_images, c.non_vehicles_spatial_bins_p)\r\n vehicle_hogs = hog.multispace_hog_images(vehicle_images, c.vehicles_hog_p)\r\n non_vehicle_hogs = hog.multispace_hog_images(non_vehicle_images, c.non_vehicles_hog_p)",
"def prepare_image(image):\n image, _, ratio = resize_and_pad_image(image, jitter=None)\n image = tf.keras.applications.resnet.preprocess_input(image)\n return tf.expand_dims(image, axis=0), ratio",
"def normalized_images(image, config):\n image = tf.image.per_image_standardization(image)\n return image",
"def _transform_fn(*data):\n img, label = data\n img = img.astype('float32') # deepcopy\n label = label.astype('float32')\n\n # validation 数据集没有 color jitter, crop 等操作,只有 resize\n aug_img, aug_label = myutils.data_augment(img, label, size=self.model_img_size)\n norm_img = mx.img.color_normalize(mx.nd.array(aug_img),\n mean=mx.nd.array(myutils.mean),\n std=mx.nd.array(myutils.std))\n mx_img = myutils.to_tensor(norm_img)\n aug_label[:, 1:] = myutils.bbox_abs_to_rel(aug_label[:, 1:], mx_img.shape[-2:])\n mx_label = mx.nd.array(aug_label)\n\n return mx_img, mx_label",
"def _prepare_image(self, image, initial_shape, gt_shape=None):\n image.landmarks['initial_shape'] = initial_shape\n image = image.rescale_to_reference_shape(\n self.reference_shape, group='initial_shape',\n interpolator=self.interpolator)\n\n if gt_shape:\n image.landmarks['gt_shape'] = initial_shape\n\n if self.n_levels > 1:\n if self.scaled_levels:\n pyramid = image.gaussian_pyramid(\n n_levels=self.n_levels, downscale=self.downscale)\n else:\n pyramid = image.smoothing_pyramid(\n n_levels=self.n_levels, downscale=self.downscale)\n images = [compute_features(i, self.feature_type)\n for i in pyramid]\n images.reverse()\n else:\n images = [compute_features(image, self.feature_type)]\n\n return images",
"def _transform_fn(*data):\n img, label = data\n img = img.astype('float32') # deepcopy\n label = label.astype('float32')\n\n aug_img, aug_label = myutils.data_augment(img, label, size=self.model_img_size, rb=0.0, rc=0.0, rh=0.0, rs=0.0, \n rflr=False, re=True, rcp=False)\n norm_img = mx.img.color_normalize(mx.nd.array(aug_img),\n mean=mx.nd.array(myutils.mean),\n std=mx.nd.array(myutils.std))\n mx_img = myutils.to_tensor(norm_img)\n aug_label[:, 1:] = myutils.bbox_abs_to_rel(aug_label[:, 1:], mx_img.shape[-2:])\n mx_label = mx.nd.array(aug_label)\n\n return mx_img, mx_label"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses data for prediction. | def _parse_predict_data(self, data):
image, labels = self._parse_eval_data(data)
return {
'images': image,
'labels': labels
} | [
"def predict(self, datafile):",
"def predict(self, data):\n pass",
"def _parse_fit_and_predict_result(result):\n if len(result) > 1 and result[1] and not isinstance(result[1], str):\n # Scores object does not resemble a label prediction (always string)\n y = result[0]\n scores = result[1]\n else:\n y = result\n scores = None\n return y, scores",
"def _parse_prediction_result(self, pred) -> List[ClassificationResult]:\n raw = pred\n label, proba = sorted(pred.items(), key=lambda kv: kv[1], reverse=True)[0]\n return ClassificationResult(label=label, proba=proba, raw=raw)",
"def predict(self, input_data):\n pass",
"def prediction():\r\n\r\n\r\n\tpredictVal = []\r\n\taccuracy = 0.0\r\n\r\n\t# Calculate accuracy for each class in testData\r\n\tfor item in testData:\r\n\t\tclass0Prediction = posProb / 100\r\n\t\tclass1Prediction = negProb / 100\r\n\t\t\r\n\t\t# Multiply the prior probablities for negative and positive reviews by their feature likelihoods \r\n\t\tfor word in item[2]:\r\n\t\t\tclass0Prediction *= class0Dict[word]\r\n\t\t\tclass1Prediction *= class1Dict[word]\r\n\r\n\t\t# Give every item in testData a predicted value\r\n\t\tif(class0Prediction > class1Prediction):\r\n\t\t\tpredictVal.append('0')\r\n\t\telse:\r\n\t\t\tpredictVal.append('1')\r\n\r\n\tfor i in range(len(testData)):\r\n\t\tif(testData[i][1] == predictVal[i]):\r\n\t\t\taccuracy += 1\r\n\r\n\t\t\t\r\n\taccuracy = 100 * (accuracy / len(testData))\r\n\treturn(predictVal, accuracy)",
"def parse_predict(predictions):\n label_classes = cfg['labels_list']\n\n bbox_regressions, confs = tf.split(predictions[0], [4, -1],-1)\n boxes = decode_bbox_tf(bbox_regressions, priors, cfg['variances'])\n ##classifications shape :(num_priors,num_classes)\n\n confs = tf.math.softmax(confs, axis=-1)\n\n out_boxes = []\n out_labels = []\n out_scores = []\n\n for i in range(1, len(label_classes)):\n cls_scores = confs[:, i]\n\n score_idx = cls_scores > DETECTION_THRESHOLD\n\n cls_boxes = boxes[score_idx]\n cls_scores = cls_scores[score_idx]\n\n nms_idx = compute_nms(\n cls_boxes,\n cls_scores,\n cfg['nms_threshold'],\n cfg['max_number_keep']\n )\n\n cls_boxes = tf.gather(cls_boxes, nms_idx, axis=None)\n cls_scores = tf.gather(cls_scores, nms_idx, axis=None)\n\n cls_labels = [i] * cls_boxes.shape[0]\n\n out_boxes.append(cls_boxes)\n out_labels.extend(cls_labels)\n out_scores.append(cls_scores)\n\n out_boxes = tf.concat(out_boxes, 0)\n out_scores = tf.concat(out_scores, 0)\n\n boxes = tf.clip_by_value(out_boxes, 0.0, 1.0).numpy()\n classes = np.array(out_labels)\n\n return boxes, classes, out_scores.numpy()",
"def predictSet(self, testData=\"\"):\n rawTestDataDump = self._read_file(testData)\n formattedTestData = [line.split(' ') for line in rawTestDataDump.split('\\n')]\n for test in formattedTestData:\n self._predictions.append(self.predict(test))\n return self._predictions",
"def parseArray(self, data):\n self.title = data[0]\n self.director = data[1]\n self.cast = data[2]\n self.producer = data[3]\n self.writer = data[4]\n self.country = data[5]\n self.language = data[6]\n self.year = data[7]\n self.genres = data[8]\n self.votes = data[9]\n self.rating = float(data[10])\n self.runtime = data[11]\n self.plot = data[12]\n self.coverUrl = data[13]",
"def parse_prediction(self, predictions):\n\t\tusers = list()\n\t\tprint(predictions)\n\t\tfor prediction in predictions:\n\t\t\tfor email in prediction:\n\t\t\t\tusers.append(email)\n\t\t\t\t\n\t\treturn users",
"def load_data(self):\r\n self.logger.log(self.log_file, 'Loading prediction data!')\r\n try:\r\n prediction_data = self.aws_operations.read_csv(self.prediction_file_path)\r\n if prediction_data is None:\r\n return None\r\n self.logger.log(self.log_file, 'Prediction data loaded successfully!')\r\n return prediction_data\r\n except Exception as e:\r\n self.logger.log(self.log_file, 'Error occurred while loading prediction data: %s' % e)\r\n raise e",
"def postprocess(self, data):\n if self.error is not None:\n return [self.error]\n\n # Iterating over inference results to render the normalized probabilities\n response = []\n for inference_result in data:\n softmax_result = inference_result.softmax().asnumpy()\n for idx, label in enumerate(self.labels):\n response.append({label: float(softmax_result[0][idx])})\n return [response]",
"def _preprocess_data(data):\n # Convert the json string to a python dictionary object\n feature_vector_dict = json.loads(data)\n \n # Load the dictionary as a Pandas DataFrame.\n predict_vector = pd.DataFrame.from_dict([feature_vector_dict])\n\n # ---------------------------------------------------------------\n # NOTE: You will need to swap the lines below for your own data\n # preprocessing methods.\n #\n # The code below is for demonstration purposes only. You will not\n # receive marks for submitting this code in an unchanged state.\n # ---------------------------------------------------------------\n\n # ----------- Replace this code with your own preprocessing steps --------\n \n # 1. Select variables for model\n predict_vector = predict_vector[list_predictors]\n\n # 2. Impute Missing Data\n predict_vector = impute_request_data_median(list_predictors, response, predict_vector)\n\n # 3. Data Scaling\n predict_vector = scale_request_data(list_predictors, response, predict_vector)\n\n # ------------------------------------------------------------------------\n\n return predict_vector",
"def validate(self, validate_data):\n with open(validate_data, 'r') as validate_data:\n true_positive = 0\n true_negative = 0\n false_positive = 0\n false_negative = 0\n result = {}\n for type in self.label_type_map:\n result[type] = []\n while True:\n tokens = validate_data.readline().split()\n pos = validate_data.readline().split()\n labels = validate_data.readline().split()\n if not tokens or not pos or not labels:\n break\n # Classify all named entities in a sentence 85\n curr_results = self.viterbi(tokens)\n for i in range(0, len(labels)):\n if curr_results[i] != 'O':\n if labels[i] == 'O':\n false_positive += 1 # Not 'O', but should be 'O'\n else:\n if self.label_type_map[labels[i]] == self.label_type_map[curr_results[i]]:\n true_positive += 1 # Correct prediction\n else:\n if labels[i] == 'O':\n true_negative += 1 # Correct prediction of 'O'\n else:\n false_negative += 1 # Predicted 'O', not 'O'\n # Calculate precision - TP / (TP + FP)\n precision = float(true_positive) / float(true_positive + false_positive)\n # Calculate recall - TP / (TP + FN)\n recall = float(true_positive) / float(true_positive + false_negative)\n # Calculate F-Score - 2 * P * R / (P + R)\n f_score = float(2*precision * recall) / float(precision + recall)\n print \"Precision: \" + str(precision)\n print \"Recall: \" + str(recall)\n print \"F-score: \" + str(f_score)",
"def _import_prediction(self, prediction_filename):\n with open(prediction_filename, 'r') as fobj:\n data = json.load(fobj)\n # Checking format...\n if not all([field in data.keys() for field in self.pred_fields]):\n raise IOError('Please input a valid prediction file.')\n\n # Read predictions.\n video_lst, t_start_lst, t_end_lst = [], [], []\n label_lst, score_lst = [], []\n for videoid, v in data['results'].items():\n if videoid in self.blocked_videos:\n continue\n for result in v:\n label = self.activity_index[result['label']]\n video_lst.append(videoid)\n t_start_lst.append(float(result['segment'][0]))\n t_end_lst.append(float(result['segment'][1]))\n label_lst.append(label)\n score_lst.append(result['score'])\n prediction = pd.DataFrame({'video-id': video_lst,\n 't-start': t_start_lst,\n 't-end': t_end_lst,\n 'label': label_lst,\n 'score': score_lst})\n return prediction",
"def make_and_format_predictions(self, data_file, X_data, parameters):\n tag_to_num = parameters['tag_to_num']\n predictions = []\n prediction_probabilities = []\n num_to_tags = build_num_to_tags(tag_to_num)\n\n # format y_preds\n print 'Making predictions...'\n train_predictions, probabilities, vectors = train.get_crf_proba(data_file, parameters)\n\n print 'Formatting predictions...'\n p_tags = []\n y_preds = []\n p_preds = [] # for each byte: {tag: prob, tag2: prob2, ...}\n for train_idx, train_sample in enumerate(train_predictions):\n tag_ids = train_sample.argmax(axis=1)\n y_preds.append(tag_ids)\n train_sample_tags = []\n train_sample_preds = []\n for tag_id in tag_ids:\n train_sample_tags.append(num_to_tags[tag_id])\n if parameters['tag_scheme'] == 'iobes':\n train_sample_tags = iobes_iob(train_sample_tags)\n p_tags.append(train_sample_tags)\n for sample in train_sample: # train_sample is 2D, sample is 1D\n tag_to_pred = []\n for tag_idx in range(len(sample)):\n tag_to_pred.append((num_to_tags[tag_idx], sample[tag_idx]))\n train_sample_preds.append(str(tag_to_pred))\n p_preds.append(train_sample_preds)\n\n del train_predictions\n\n # clean X_data, p_tags, y_preds, r_tags, y_reals so that there are no duplicate data\n # if entity starts in one sample and ends in another, overwrite earlier sample prediction\n print 'Combine overlapping data...'\n X_data, _, _, p_tags, y_preds, probabilities, vectors = self.combine_overlapping_data(X_data, None, None, p_tags,\n y_preds, probabilities, vectors, parameters)\n\n # combine vector to be per sample\n combined_vectors = []\n for vector in vectors: # (# samples, x)\n vector = np.sum(vector, axis=0)\n combined_vectors.append(vector)\n vectors = combined_vectors\n\n print 'Gathering and writing predictions...'\n for i, y_pred_sample in enumerate(y_preds):\n X_sample_nums = []\n for j, y_pred in enumerate(y_pred_sample):\n feat_id = str(X_data[i][j][0]).split(',')[0]\n if int(feat_id): # not the byte for padding\n X_sample_num = ','.join(X_data[i][j]) # byte features of this sample\n predictions.append(' '.join([str(X_sample_num), p_tags[i][j]]))\n # format prediction_probabilities\n X_sample_nums.append(int(feat_id))\n predictions.append('')\n\n if parameters['get_probs'] or parameters['get_vectors']:\n X_sample_nums = ''.join([chr(x) for x in X_sample_nums])\n X_sample_nums = X_sample_nums.replace('\\t', ' ')\n curr_line = [X_sample_nums]\n if parameters['get_probs']:\n curr_line.append(str(probabilities[i]))\n if parameters['get_vectors']:\n vector_strs = []\n for vector in vectors:\n vector_strs.append(' '.join([str(v) for v in vector]))\n curr_line.append(vector_strs[i])\n prediction_probabilities.append('\\t'.join(curr_line))\n\n del X_data\n del p_tags\n del y_preds\n\n # Write predictions to disk\n with codecs.open(parameters['output'], 'wb') as f:\n print 'writing...'\n f.write(\"\\n\".join(predictions)) # write feature ids (e.g., byte ids, bpe ids)\n\n # Write probabilities file\n if parameters['get_probs'] or parameters['get_vectors']:\n with codecs.open(parameters['output'] + '.probs_vectors', 'wb') as f:\n print 'writing probabilities...'\n f.write('\\n'.join(prediction_probabilities) + '\\n')",
"def load_predictions(preds_path):\n y_stack = np.load(preds_path)\n if y_stack.ndim <= 2:\n y_stack = get_pos_neg_probs(y_stack, axis=-1)\n if y_stack.ndim <= 2:\n y_stack = np.expand_dims(y_stack, axis=-2)\n\n y_mean, y_label = get_y_mean_label(y_stack)\n print(f\"y_stack shape: \\t{y_stack.shape}\")\n print(f\"y_mean shape: \\t{y_mean.shape}\")\n print(f\"y_label shape: \\t{y_label.shape}\")\n\n return y_stack, y_mean, y_label",
"def _parse_data(self):\n for i, val in enumerate(self.values.keys()):\n x_, y_ = [], []\n xy = self.values[val]\n for value in self.values.index:\n x_.append(xy[value][0])\n y_.append(xy[value][1])\n\n self.set_and_get(\"x_\", val, x_)\n self.set_and_get(\"y_\", val, y_)",
"def postprocess(prediction):\n\n # pred, uncertainty = prediction\n pred = prediction\n\n # Validate. As an example, if the output is an int, check that it is positive.\n try:\n int(pred[0]) > 0\n except:\n pass\n\n # Make strings\n pred = str(pred[0])\n\n # Return\n return_dict = {'pred': pred}\n\n return return_dict"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If an iteriter_op is given an iterator as input, no exception should be thrown, and we should return the wrapped function's output. | def test_iteriter_op_1():
    @ops.iteriter_op
    def f(x):
        return iter([4, 5, 6])
    result = f(iter([1, 2, 3])) # Passing in an iterator, as expected
    assert(isinstance(result, collections.abc.Iterator)), f"{result}"
    assert(list(result) == [4, 5, 6]) | [
"def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))",
"def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator",
"def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list",
"def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])",
"def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def mymap(func, iter):\r\n\r\n if hasattr(iter, '__iter__'):\r\n for i in iter:\r\n yield func(i)\r\n else:\r\n raise TypeError(f'<{iter}> can not iterable')",
"def eval_infix_iter(iterator):\n\treturn eval_infix_sum(Peekable(iterator))",
"def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])",
"def iter(space, w_collection_or_callable, w_sentinel=None):\n if w_sentinel is None:\n return space.iter(w_collection_or_callable)\n else:\n return iter_sentinel(space, w_collection_or_callable, w_sentinel)",
"def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list",
"def generator(func):\n\n @fn\n @wraps(func)\n def gen(*args, **kwargs):\n return Iter(func(*args, **kwargs))\n\n return gen",
"def next(space, w_iterator, w_default=None):\n try:\n return space.next(w_iterator)\n except OperationError as e:\n if w_default is not None and e.match(space, space.w_StopIteration):\n return w_default\n raise",
"def unwrapping_iterator(wrapped_iterator: Iterator['BinItem[KeyType, PayloadType]']) \\\n -> Iterator[PayloadType]:\n return (item.payload for item in wrapped_iterator)",
"def ireduce(f, it):\n acc = it.next()\n yield acc\n for x in it:\n acc = f(acc, x)\n yield acc",
"def toiter(x):\n if iterable(x):\n return x\n else:\n return [x]",
"def _iterator_codegen(resty):\n\n def codegen(context, builder, sig, args):\n [d] = args\n [td] = sig.args\n iterhelper = context.make_helper(builder, resty)\n iterhelper.parent = d\n iterhelper.state = iterhelper.state.type(None)\n return impl_ret_borrowed(\n context,\n builder,\n resty,\n iterhelper._getvalue(),\n )\n\n return codegen",
"def iter_except(function, exception):\r\n try:\r\n while True:\r\n yield function()\r\n except exception:\r\n return",
"def check_iter(iter_obj):\n try:\n res = next(iter_obj)\n except StopIteration:\n res = None\n return res",
"def intercept(iterable, function):\n\n def intercepting(iterable_):\n for item in iterable_:\n function(item)\n yield item\n\n return intercepting(iterable)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If an iteriter_op is given something besides an iterator as input, raise a ValueError. | def test_iteriter_op_2():
    @ops.iteriter_op
    def f(x):
        return iter([4, 5, 6])
    with pytest.raises(ValueError):
        f([1, 2, 3]) # Passing in a list instead of an iterator | [
"def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))",
"def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list",
"def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])",
"def check_valid_iterator(self):\n\n test_iterator = self._creator(*self._args, **self._kwargs)\n\n try:\n next(test_iterator)\n except StopIteration:\n raise BadIteratorException('Iterator doesnt yield anything')",
"def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list",
"def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])",
"def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])",
"def test_is_iterable(self):\n self.assertTrue(utils.is_iterable(xrange(10)))\n self.assertTrue(utils.is_iterable(range(10)))\n self.assertTrue(utils.is_iterable(np.arange(10)))\n self.assertTrue(utils.is_iterable(dict(x=3)))\n\n self.assertFalse(utils.is_iterable(17))\n self.assertFalse(utils.is_iterable('fleek'))",
"def _validate_iterable(iterable_type: Callable[[Any], T], value: Any) -> T:\n if isinstance(value, str):\n msg = \"Invalid iterable of type(%s): %s\"\n raise ValidationError(msg % (type(value), value))\n\n try:\n return iterable_type(value)\n except TypeError:\n raise ValidationError(\"Invalid iterable: %s\" % (value))",
"def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def test_no_iterable():\n with pytest.raises(ValueError):\n for _ in jabbar(total=10):\n pass",
"def test_generator_without_iterable_len(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks(iter([]), n_splits=1))",
"def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. \"\n f\"Iterable data cannot be empty.\")",
"def is_iterable(x):\n try:\n iter(x)\n except TypeError:\n return False\n else:\n return True",
"def test_adjacent_error(iterable, n1, n2):\n g = Graph(iterable)\n with pytest.raises(KeyError):\n g.adjacent(n1, n2)",
"def isiterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False",
"def eval_infix_iter(iterator):\n\treturn eval_infix_sum(Peekable(iterator))",
"def test_raises_exception_if_no_next_value(self):\n iterator = Matrix_Iterator([])\n \n def result():\n return iterator.next()\n\n self.assertRaises(Exception, result)",
"def is_iterator(obj):\n if isinstance(obj, (list, tuple)):\n return True\n try:\n iter(obj)\n return True\n except TypeError:\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If an iteriter_op returns something besides an iterator as output, raise a ValueError. | def test_iteriter_op_3():
    @ops.iteriter_op
    def f(x):
        return [4, 5, 6] # Returning a list instead of an iterator
    with pytest.raises(ValueError):
        result = f(iter([1, 2, 3])) | [
"def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator",
"def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list",
"def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])",
"def check_valid_iterator(self):\n\n test_iterator = self._creator(*self._args, **self._kwargs)\n\n try:\n next(test_iterator)\n except StopIteration:\n raise BadIteratorException('Iterator doesnt yield anything')",
"def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list",
"def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])",
"def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])",
"def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def check_iter(iter_obj):\n try:\n res = next(iter_obj)\n except StopIteration:\n res = None\n return res",
"def test_raises_exception_if_no_next_value(self):\n iterator = Matrix_Iterator([])\n \n def result():\n return iterator.next()\n\n self.assertRaises(Exception, result)",
"def test_returns_multiple_values_and_then_raises_exception(self):\n iterator = Matrix_Iterator([[2, 3, 4], [], [6]])\n iterator.next()\n iterator.next()\n iterator.next()\n iterator.next()\n\n def result():\n return iterator.next()\n\n self.assertRaises(Exception, result)",
"def testNextRaiseStopIteration(self):\n rotator = RotateAroundIterator(self.array)\n self.assertRaises(StopIteration, rotator.__next__)",
"def test_generator_without_iterable_len(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks(iter([]), n_splits=1))",
"def test_to_iterable(self):\n self.assertEqual(utils.to_iterable(17, length=0), [])\n self.assertEqual(utils.to_iterable(17, length=3), [17] * 3)\n self.assertEqual(utils.to_iterable([17] * 3, length=3), [17] * 3)\n with self.assertRaises(ValueError):\n print utils.to_iterable([17] * 3, length=4)",
"def test_assert_iterator(self):\n iterator = iter([1,2,3,4])\n # Should pass\n self.assert_iterator(iterator,\n count=4,\n assert_item_function=lambda i: i>0)",
"def test_no_iterable():\n with pytest.raises(ValueError):\n for _ in jabbar(total=10):\n pass",
"def _validate_iterable(iterable_type: Callable[[Any], T], value: Any) -> T:\n if isinstance(value, str):\n msg = \"Invalid iterable of type(%s): %s\"\n raise ValidationError(msg % (type(value), value))\n\n try:\n return iterable_type(value)\n except TypeError:\n raise ValidationError(\"Invalid iterable: %s\" % (value))",
"def _check_raising_stopiteration_in_generator_next_call(self, node):\n\n def _looks_like_infinite_iterator(param):\n inferred = utils.safe_infer(param)\n if inferred:\n return inferred.qname() in KNOWN_INFINITE_ITERATORS\n return False\n\n if isinstance(node.func, astroid.Attribute):\n # A next() method, which is now what we want.\n return\n\n inferred = utils.safe_infer(node.func)\n if getattr(inferred, \"name\", \"\") == \"next\":\n frame = node.frame()\n # The next builtin can only have up to two\n # positional arguments and no keyword arguments\n has_sentinel_value = len(node.args) > 1\n if (\n isinstance(frame, astroid.FunctionDef)\n and frame.is_generator()\n and not has_sentinel_value\n and not utils.node_ignores_exception(node, StopIteration)\n and not _looks_like_infinite_iterator(node.args[0])\n ):\n self.add_message(\"stop-iteration-return\", node=node)",
"def isIterable(value):\n try:\n iter(value)\n return True\n except:\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If a listlist_op is given a list as input, no exception should be thrown, and we should return the wrapped function's output. | def test_listlist_op_1():
    @ops.listlist_op
    def f(x):
        return [4, 5, 6]
    result = f([1, 2, 3]) # Passing in a list, as expected
    assert(isinstance(result, list)), f"{result}"
    assert(result == [4, 5, 6]) | [
"def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])",
"def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list",
"def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def list_check(default_return=nan):\n\n def decorate(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n for i in args:\n if not i and isinstance(i, list) and len(i) == 0:\n return default_return\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorate",
"def list_generalizer(f):\n @functools.wraps(f)\n def wrapped(data, *args, **kwargs):\n if type(data) == list:\n return [f(d, *args, **kwargs) for d in data]\n else:\n return f(data, *args, **kwargs)\n\n return wrapped",
"def listify(fn=None, wrapper=list):\n\n def listify_return(fn):\n @functools.wraps(fn)\n def listify_helper(*args, **kw):\n return wrapper(fn(*args, **kw))\n\n return listify_helper\n\n if fn is None:\n return listify_return\n return listify_return(fn)",
"def visit_List(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__list__'), node.elts)\n return node",
"def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])",
"def _valueorlistmethod(method):\n\n def wrappedmethod(self, valueorlist, *args, **kwargs):\n try:\n for item in valueorlist:\n break\n except:\n return method(self, [valueorlist], *args, **kwargs)[0]\n return method(self, valueorlist, *args, **kwargs)\n return wrappedmethod",
"def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list",
"def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]",
"def listify(gen):\n @wraps(gen)\n def patched(*args, **kwargs):\n return list(gen(*args, **kwargs))\n return patched",
"def comma_separated_list(function):\n @functools.wraps(function)\n def wrapper(tokens, *args):\n results = []\n for part in split_on_comma(tokens):\n result = function(remove_whitespace(part), *args)\n if result is None:\n return None\n results.append(result)\n return tuple(results)\n wrapper.single_value = function\n return wrapper",
"def listify(list_or_value):\n if isinstance(list_or_value, list):\n return list_or_value\n else:\n return [list_or_value]",
"def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]",
"def is_listing(op):\n return isinstance(op, (list, tuple))",
"def ListMonad(*elements: List[T]) -> _List[T]: # pylint: disable=invalid-name\n\n return _List(list(elements), None)",
"def as_list(arg):\n if _is_list(arg):\n return arg\n return [arg]",
"def wrap_into_list(x):\r\n if x is None:\r\n return []\r\n elif not isinstance(x, (list, tuple)):\r\n return [x]\r\n else:\r\n return list(x)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If a listlist_op is given something besides a list as input, raise a ValueError. | def test_listlist_op_2():
    @ops.listlist_op
    def f(x):
        return [4, 5, 6]
    with pytest.raises(ValueError):
        f(iter([1, 2, 3])) # Passing in an iterator instead of a list | [
"def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])",
"def is_listing(op):\n return isinstance(op, (list, tuple))",
"def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def _inputIsList(self, input):\n\n inputIsList = True\n if (not isinstance(input, list)):\n inputIsList = False\n raise TypeError(\"The input type is '%s' instead of list.\"\n % type(input))\n\n return inputIsList",
"def _is_list(val):\n\n return isinstance(val, list)",
"def _validate_list_type(self, name, obj, *args):\n if obj is None:\n return\n if isinstance(obj, list):\n for i in obj:\n self._validate_type_not_null(name, i, *args)\n else:\n self._validate_type(name, obj, *args)",
"def test_list(self):\n self.assertEqual(lib.LIST.cast(LIST_VALUE), LIST_VALUE)\n self.assertEqual(lib.LIST.cast(STRING_VALUE), LIST_VALUE)\n self.assertRaises(env.CastException, lib.LIST.cast, TRUE_VALUE)",
"def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)",
"def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list",
"def test_list_error(self):\r\n \r\n try:\r\n the_list = self.conf.get_list('GroupTestValueStruct', 'list_error')\r\n print('the_list = %s\\n' % (the_list))\r\n except Exception, err:\r\n self.assertEquals(err.message, 'Unsupported token (type: @, value : OP) (line=1,col=3).')\r\n return\r\n \r\n self.fail('Should never reach that point')",
"def test_parse_substitution_error_if_not_list():\n with pytest.raises(SyntaxError):\n config.parse_substitution_from_list('foo_str')",
"def __ensure_list_of_list(ll):\n ###############################################################################\n \n # check this is a list\n \n if not isinstance(ll,list):\n raise TypeError('!!! __ensure_list_of_list requires a list or a list of list as argument: ',ll)\n \n if ll == []:\n return [[]]\n \n # case simple list\n if not isinstance(ll[0],list):\n return([ll])\n # case list of list\n else:\n return(ll)",
"def visit_List(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__list__'), node.elts)\n return node",
"def is_list(value):\n return isinstance(value, list)",
"def force_list(value, min=None, max=None):\r\n if not isinstance(value, (list, tuple)):\r\n value = [value]\r\n return is_list(value, min, max)",
"def test_not_a_list(self):\n with self.assertRaises(TypeError) as error:\n correlation('a', 1)\n self.assertEqual(error.exception.args[0], 'argument of the function must be a list of values of type int '\n 'or float')",
"def _builtin_consult_as_list(op1, op2, **kwdargs):\n # TODO make non-recursive\n check_mode((op1, op2), ['*L'], functor='consult', **kwdargs)\n _builtin_consult(op1, **kwdargs)\n if _is_list_nonempty(op2):\n _builtin_consult_as_list(op2.args[0], op2.args[1], **kwdargs)\n return True",
"def IsList(param):\n if type(param) is types.ListType:\n return True\n return False",
"def test_ctx_list_append_negative(self, value, list_indexes, expected):\n ctx = []\n for index in list_indexes:\n ctx.append(cdt_ctx.cdt_ctx_list_index(index))\n\n ops = [list_operations.list_append(self.nested_list_bin, value, None, ctx)]\n\n with pytest.raises(expected):\n self.as_connection.operate(self.test_key, ops)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If a listlist_op returns something besides a list as output, raise a ValueError. | def test_listlist_op_3():
    @ops.listlist_op
    def f(x):
        return iter([4, 5, 6]) # Returning an iterator instead of a list
    with pytest.raises(ValueError):
        result = f([1, 2, 3]) | [
"def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list",
"def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def is_listing(op):\n return isinstance(op, (list, tuple))",
"def _is_list(val):\n\n return isinstance(val, list)",
"def __ensure_list_of_list(ll):\n ###############################################################################\n \n # check this is a list\n \n if not isinstance(ll,list):\n raise TypeError('!!! __ensure_list_of_list requires a list or a list of list as argument: ',ll)\n \n if ll == []:\n return [[]]\n \n # case simple list\n if not isinstance(ll[0],list):\n return([ll])\n # case list of list\n else:\n return(ll)",
"def _inputIsList(self, input):\n\n inputIsList = True\n if (not isinstance(input, list)):\n inputIsList = False\n raise TypeError(\"The input type is '%s' instead of list.\"\n % type(input))\n\n return inputIsList",
"def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list",
"def is_list(value):\n return isinstance(value, list)",
"def test_list(self):\n self.assertEqual(lib.LIST.cast(LIST_VALUE), LIST_VALUE)\n self.assertEqual(lib.LIST.cast(STRING_VALUE), LIST_VALUE)\n self.assertRaises(env.CastException, lib.LIST.cast, TRUE_VALUE)",
"def test_list_error(self):\r\n \r\n try:\r\n the_list = self.conf.get_list('GroupTestValueStruct', 'list_error')\r\n print('the_list = %s\\n' % (the_list))\r\n except Exception, err:\r\n self.assertEquals(err.message, 'Unsupported token (type: @, value : OP) (line=1,col=3).')\r\n return\r\n \r\n self.fail('Should never reach that point')",
"def ensure_list(item: Any) -> list:\n if item:\n return item if isinstance(item, list) else [item]\n return list()",
"def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)",
"def listify(list_or_value):\n if isinstance(list_or_value, list):\n return list_or_value\n else:\n return [list_or_value]",
"def visit_List(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__list__'), node.elts)\n return node",
"def ensure_list(iterable):\n if isinstance(iterable, list):\n return iterable\n else:\n return list(iterable)",
"def aslist(something):\n return something if isinstance(something, list) else [something]",
"def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def list_check(default_return=nan):\n\n def decorate(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n for i in args:\n if not i and isinstance(i, list) and len(i) == 0:\n return default_return\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorate",
"def force_list(value, min=None, max=None):\r\n if not isinstance(value, (list, tuple)):\r\n value = [value]\r\n return is_list(value, min, max)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If a listiter_op is given a list as input, no exception should be thrown, and we should return the wrapped function's output. | def test_listiter_op_1():
    @ops.listiter_op
    def f(x):
        return iter([4, 5, 6])
    result = f([1, 2, 3]) # Passing in a list, as expected
    assert(isinstance(result, collections.abc.Iterator)), f"{result}"
    assert(list(result) == [4, 5, 6]) | [
"def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])",
"def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list",
"def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list",
"def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))",
"def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]",
"def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])",
"def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator",
"def _valueorlistmethod(method):\n\n def wrappedmethod(self, valueorlist, *args, **kwargs):\n try:\n for item in valueorlist:\n break\n except:\n return method(self, [valueorlist], *args, **kwargs)[0]\n return method(self, valueorlist, *args, **kwargs)\n return wrappedmethod",
"def list_generalizer(f):\n @functools.wraps(f)\n def wrapped(data, *args, **kwargs):\n if type(data) == list:\n return [f(d, *args, **kwargs) for d in data]\n else:\n return f(data, *args, **kwargs)\n\n return wrapped",
"def _multilist_iterator(arg, func):\n ret = []\n if isinstance(arg, list):\n for el in arg:\n ret.append(_multilist_iterator(el, func))\n else:\n return func(arg)\n\n return ret",
"def list_check(default_return=nan):\n\n def decorate(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n for i in args:\n if not i and isinstance(i, list) and len(i) == 0:\n return default_return\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorate",
"def listify(fn=None, wrapper=list):\n\n def listify_return(fn):\n @functools.wraps(fn)\n def listify_helper(*args, **kw):\n return wrapper(fn(*args, **kw))\n\n return listify_helper\n\n if fn is None:\n return listify_return\n return listify_return(fn)",
"def _process_value(func, value):\n if isinstance(value, (list, tuple)):\n return [func(item) for item in value]\n return func(value)",
"def listify(gen):\n @wraps(gen)\n def patched(*args, **kwargs):\n return list(gen(*args, **kwargs))\n return patched",
"def intercept(iterable, function):\n\n def intercepting(iterable_):\n for item in iterable_:\n function(item)\n yield item\n\n return intercepting(iterable)",
"def visit_List(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__list__'), node.elts)\n return node",
"def listify(gen: Callable[..., Generator[T, None, None]]) -> Callable[..., List[T]]:\n\n @wraps(gen)\n def list_func(*args, **kwargs) -> List[Any]:\n return list(gen(*args, **kwargs))\n\n return list_func"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If a listiter_op is given something besides a list as input, raise a ValueError. | def test_listiter_op_2():
    @ops.listiter_op
    def f(x):
        return iter([4, 5, 6])
    with pytest.raises(ValueError):
        f(iter([1, 2, 3])) # Passing in an iterator instead of a list | [
"def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list",
"def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])",
"def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator",
"def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))",
"def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])",
"def is_listing(op):\n return isinstance(op, (list, tuple))",
"def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. \"\n f\"Iterable data cannot be empty.\")",
"def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def test_not_a_list(self):\n with self.assertRaises(TypeError) as error:\n correlation('a', 1)\n self.assertEqual(error.exception.args[0], 'argument of the function must be a list of values of type int '\n 'or float')",
"def _validate_iterable(iterable_type: Callable[[Any], T], value: Any) -> T:\n if isinstance(value, str):\n msg = \"Invalid iterable of type(%s): %s\"\n raise ValidationError(msg % (type(value), value))\n\n try:\n return iterable_type(value)\n except TypeError:\n raise ValidationError(\"Invalid iterable: %s\" % (value))",
"def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])",
"def test_empty_list_error(self):\n with self.assertRaises(ValueError):\n function_inclusion_filter_builder([])",
"def test_parse_substitution_error_if_not_list():\n with pytest.raises(SyntaxError):\n config.parse_substitution_from_list('foo_str')",
"def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)",
"def _inputIsList(self, input):\n\n inputIsList = True\n if (not isinstance(input, list)):\n inputIsList = False\n raise TypeError(\"The input type is '%s' instead of list.\"\n % type(input))\n\n return inputIsList",
"def _is_listlike(x):\n return hasattr(x, \"__iter__\") and not isinstance(x, (six.text_type, bytes))",
"def _validate_list(key, value):\n\n for ind, element in enumerate(value):\n if not isinstance(element, (\n basestring, datetime.date, datetime.datetime, numbers.Number)):\n raise ValueError(\n 'All values of a multi-valued field must be numbers, strings, '\n 'date or datetime instances, The %dth value for field %s has'\n ' type %s.' % (ind, key, type(element)))",
"def test_list_error(self):\r\n \r\n try:\r\n the_list = self.conf.get_list('GroupTestValueStruct', 'list_error')\r\n print('the_list = %s\\n' % (the_list))\r\n except Exception, err:\r\n self.assertEquals(err.message, 'Unsupported token (type: @, value : OP) (line=1,col=3).')\r\n return\r\n \r\n self.fail('Should never reach that point')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If an iterlist_op is given an iterator as input, no exception should be thrown, and we should return the wrapped function's output. | def test_iterlist_op_1():
    @ops.iterlist_op
    def f(x):
        return [4, 5, 6]
    result = f(iter([1, 2, 3])) # Passing in an iterator, as expected
    assert(isinstance(result, list)), f"{result}"
    assert(result == [4, 5, 6]) | [
"def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list",
"def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])",
"def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))",
"def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])",
"def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])",
"def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list",
"def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator",
"def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]",
"def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def intercept(iterable, function):\n\n def intercepting(iterable_):\n for item in iterable_:\n function(item)\n yield item\n\n return intercepting(iterable)",
"def toiter(x):\n if iterable(x):\n return x\n else:\n return [x]",
"def eval_infix_iter(iterator):\n\treturn eval_infix_sum(Peekable(iterator))",
"def _multilist_iterator(arg, func):\n ret = []\n if isinstance(arg, list):\n for el in arg:\n ret.append(_multilist_iterator(el, func))\n else:\n return func(arg)\n\n return ret",
"def mymap(func, iter):\r\n\r\n if hasattr(iter, '__iter__'):\r\n for i in iter:\r\n yield func(i)\r\n else:\r\n raise TypeError(f'<{iter}> can not iterable')",
"def iter(space, w_collection_or_callable, w_sentinel=None):\n if w_sentinel is None:\n return space.iter(w_collection_or_callable)\n else:\n return iter_sentinel(space, w_collection_or_callable, w_sentinel)",
"def chained(func):\n def wrapper(*args, **kwargs):\n for xs in func(*args, **kwargs):\n for x in xs:\n yield x\n return wrapper",
"def unwrapping_iterator(wrapped_iterator: Iterator['BinItem[KeyType, PayloadType]']) \\\n -> Iterator[PayloadType]:\n return (item.payload for item in wrapped_iterator)",
"def ireduce(f, it):\n acc = it.next()\n yield acc\n for x in it:\n acc = f(acc, x)\n yield acc",
"def _valueorlistmethod(method):\n\n def wrappedmethod(self, valueorlist, *args, **kwargs):\n try:\n for item in valueorlist:\n break\n except:\n return method(self, [valueorlist], *args, **kwargs)[0]\n return method(self, valueorlist, *args, **kwargs)\n return wrappedmethod"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If a pool of size 3 is used, the first 3 individuals in the input iterator should be collected into a list. | def test_pool():
    pop = iter([ 'a', 'b', 'c', 'd', 'e' ])
    pop = ops.pool(pop, size=3)
    assert(len(pop) == 3)
    assert(pop == [ 'a', 'b', 'c' ]) | [
"def n_wise(x: List[Any], size: Optional[int] = 2) -> Iterable:\n\n iterator = iter(x)\n\n return iter(lambda: tuple(islice(iterator, size)), ())",
"def batch_iterator(iterator, batch_size):\n it = iter(iterator)\n item = list(itertools.islice(it, batch_size))\n while item:\n yield item\n item = list(itertools.islice(it, batch_size))",
"def _split_iterators(iterator, n=None):\n #if n is None:\n # item, iterator = cytoolz.peek(iterator)\n # n = len(item)\n iterators = itertools.tee(iterator, n)\n #iterators = ((sample[i] for sample in iterator) for i, iterator in enumerate(iterators))\n # Above does not work?!\n\n out = list()\n out.append(s[0] for s in iterators[0])\n out.append(s[1] for s in iterators[1])\n out.append(s[2] for s in iterators[2])\n iterators = out\n return iterators",
"def take(n, iterable):\r\n return list(islice(iterable, n))",
"def batch(size, iterable):\r\n return list(xbatch(size, iterable))",
"def take(num, iterable):\n return list(islice(iterable, num))",
"def population(count, length, min, max, pool):\n print('Generating population of size %d...' % (count))\n return [ individual(length, min, max, pool) for x in range(count) ]",
"def take(n, iterable):\r\n return list(itertools.islice(iterable, n))",
"def take(n: int, it: Iterable[T]) -> Iterator[T]:\n yield from (i for _, i in zip(range(n), it))",
"def take(n, iterable):\n return list(islice(iterable, n))",
"def pooling(fct,lst,nb_pool=10):\n p = Pool(nb_pool)\n infos = p.map(fct,lst)\n p.terminate()\n p.join()\n return infos",
"async def get_pools(self) -> List[Pool]:",
"def batch(iterable, size):\n iterator = iter(iterable)\n while True:\n res = []\n for _ in range(size):\n try:\n res.append(next(iterator))\n except StopIteration:\n break\n\n if not res:\n return\n\n yield res",
"def take(n: int, iterable: Iterable[T_]) -> List[T_]:\n return list(islice(iterable, n))",
"def batches(iterator, batch_size: int):\n while True:\n batch = list(islice(iterator, batch_size))\n if not batch:\n break\n yield batch",
"def vytvorit_generator():\n mylist = range(3)\n print 'mylist = ', mylist\n for element in mylist:\n yield element",
"def take(n, iterable):\n return list(itertools.islice(iterable, n))",
"def _elements(self):\n return list(islice(self.generate(), None))",
"def to_list(iterator):\n items = []\n for item in iterator:\n items.append(item)\n return items"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a wave to a vector of prosodic features. offset (in ms) determines where the signal will be sampled. window_len is ignored. | def wav_to_prosodic(path, sr=16000, offset=10):
    sound = parselmouth.Sound(path)
    pitch = sound.to_pitch() #timestep, pitch_floor, pitch_ceiling
    intensity = sound.to_intensity()
    features = []
    max_time = sound.get_total_duration()
    for time in np.arange(0, max_time, 0.001):
        f0 = pitch.get_value_at_time(time)
        f0_nan = 0
        if np.isnan(f0):
            f0 = 0
            f0_nan = 1
        int_db = intensity.get_value(time)
        if np.isnan(int_db):
            int_db = 0
        features.append([f0, f0_nan, int_db])
    array_feats = np.array(features).T
    print("SHAPE OF THE FEATURES:", array_feats.shape)
    assert(not np.any(np.isnan(array_feats)))
    return array_feats, max_time | [
"def _choose_wavelength_slice(self, offset):\n if 'WAVE' not in self.axes_wcs.wcs.ctype:\n raise cu.CubeError(2, \"Spectral dimension not present\")\n if self.data.ndim == 4:\n raise cu.CubeError(4, \"Can only work with 3D cubes\")\n\n axis = -2 if self.axes_wcs.wcs.ctype[0] in ['TIME', 'UTC'] else -1\n arr = None\n length = self.data.shape[axis]\n if isinstance(offset, int) and offset >= 0 and offset < length:\n arr = self.data.take(offset, axis=axis)\n\n if isinstance(offset, u.Quantity):\n delta = self.axes_wcs.wcs.cdelt[-1 - axis] * u.m\n wloffset = offset.to(u.m) / delta\n wloffset = int(wloffset)\n if wloffset >= 0 and wloffset < self.data.shape[axis]:\n arr = self.data.take(wloffset, axis=axis)\n\n return arr",
"def getFFTVector(data, timeOffset, window):\n # Get the desired starting and ending index of the numpy data array\n start, end = getSegment(timeOffset, window) \n # get the length of the selected data sample range \n N = len(data[start:end])\n \n print(\"segment length = \", N)\n \n # calculate the FFT of the selected sample range. But the FFT x axis contains data\n # in the range from 0 to positive values first and at the end the negative values\n # like 0, 1, 2, 3, 4, -4, -3, -2, -1\n yf = fft(data[start:end])\n # rearrange the FFT vector to have it zero-centered, e.g., -4, -3, -2, -1, 0, 1, 2, 3, 4\n new_yf = np.concatenate((yf[int(N/2):int(N)], yf[0:int(N/2)]))\n # return the absolute values of the FFT vector.\n return np.abs(new_yf)",
"def slice_signal(file, window_size, stride, sample_rate):\n wav, sr = librosa.load(file, sr=sample_rate)\n hop = int(window_size * stride)\n slices = []\n for end_idx in range(window_size, len(wav), hop):\n start_idx = end_idx - window_size\n slice_sig = wav[start_idx:end_idx]\n #print(type(slice_sig),' ',slice_sig.shape,'begin:',start_idx,'end_idx:',end_idx)\n slices.append(slice_sig)\n\n if(len(slices)*window_size<len(wav)):\n slice_sig = np.zeros((window_size,))\n temp = wav[len(slices)*window_size:]\n slice_sig[:len(temp)] = temp\n slices.append(slice_sig)\n #print(type(slice_sig), ' ', slice_sig.shape,'begin:',0,'end_idx:',len(temp))\n\n return slices",
"def extract_stats_features(bvp: Dict[str, Dict[str, List[float]]], window_size: int, window_shift: int, sampling_rate: int) -> np.array: \n # window_size: unit -> seconds - the length of signal which is cut to extract statistical feature equals to 60 seconds\n # window_shift: unit -> seconds - the step of the sliding window\n # sampling_rate: unit -> Hz - the number of recorded points per second\n stats_features = []\n for user_id, data in tqdm(bvp.items()):\n for task_id, bvp_signal in data.items():\n len_bvp_signal = len(bvp_signal)\n step = int(window_shift * sampling_rate) # The true step to slide along the time axis of the signal\n first_iter = int(window_size * sampling_rate) # The true index of the signal at a time-point \n for current_iter in range(first_iter, len_bvp_signal, step): # current_iter is \"second_iter\"\n previous_iter = current_iter - first_iter\n signal = bvp_signal[previous_iter:current_iter]\n bvp_stats_features = extract_bvp_features(signal, sampling_rate) # Extract statistical features from extracted BVP features\n stats_features.append(bvp_stats_features)\n stats_features = np.array(stats_features) # Transform to numpy array format\n return stats_features",
"def extract_features(wavfile, feature, sampling_rate=16000):\n\n raw_signal, sr = librosa.core.load(wavfile,\n sampling_rate,\n mono=True,\n dtype='float'\n )\n\n\n if feature == 'MFCC':\n feat_seq = librosa.feature.mfcc(raw_signal,\n sampling_rate,\n n_fft=400,\n hop_length=160,\n n_mfcc=13,\n fmin=75,\n fmax=5999\n )\n # Numerical Stability\n #feat_seq = np.where(feat_seq == 0, np.finfo(float).eps, feat_seq)\n\n\n elif feature == 'FBANK':\n feat_seq = librosa.feature.melspectrogram(raw_signal,\n sampling_rate,\n n_fft=400,\n hop_length=160,\n n_mels=13,\n fmin=75,\n fmax=5999\n )\n\n # Numerical Stability\n feat_seq = np.where(feat_seq == 0, np.finfo(float).eps, feat_seq)\n\n # 20 * log | convert to Me-Scale\n feat_seq = 20*np.log10(feat_seq)\n\n # z-norm: feature normalization\n feat_norm = preprocessing.scale(feat_seq, axis=1)\n\n return feat_norm",
"def extract_window_features(self, signals_array, feature_funcs, \n window_size):\n window_signals = signals_array[-window_size:, :]\n n_features = len(feature_funcs)\n n_signals = signals_array.shape[1]\n \n feats = np.zeros((1, n_features * n_signals))\n for signal_i in range(signals_array.shape[1]):\n for feature_i, feature in enumerate(feature_funcs):\n feats[0, signal_i * n_features + feature_i] = \\\n feature(array=window_signals[:, signal_i: signal_i + 1], \n axis=0)\n \n return feats",
"def slice_signal(wav, window_size, stride):\n hop = int(window_size * stride)\n slices = []\n for end_idx in range(window_size, len(wav), hop):\n start_idx = end_idx - window_size\n slice_sig = wav[start_idx:end_idx]\n slices.append(slice_sig)\n return slices",
"def wave_get_pulses(self):\n return _u2i(_pigpio_command(self.sl, _PI_CMD_WVSP, 0, 0))",
"def polyfit_window(x, window_length=5, deg=1, deriv=0, delta=1, pos=None):\n if not pos:\n pos = int(window_length/2)+1\n num_samples = len(x)\n idx = np.arange(window_length)\n x_out = np.zeros(num_samples)\n\n x_padded = np.concatenate([np.zeros(window_length-1), x])\n\n for frame_start in np.arange(num_samples):\n x_frame = x_padded[idx + frame_start]\n p = np.polyfit(idx*delta, x_frame, deg=deg)\n p = np.polyder(p, m=deriv)\n x_out[frame_start] = np.polyval(p, idx[pos]*delta)\n\n return x_out",
"def get_data(self, wave):\n data = np.array([lfilter(self.b, self.a, wave[i]) for i in range(self.n_channels)])\n self.min_threshold = np.min(data)\n self.max_threshold = np.max(data)\n return data",
"def predict_proba(self, window: np.array):\n \n data = np.transpose(np.array(window))[self.data_channels]\n print('data shape in wrapped:', data.shape)\n proba = self.clf.predict_proba(data)\n return proba[0][1] # proba = [[prob_left, prob_right]]",
"def compute_offset_feature(self, x, offset, padding_mode):\n offset_reshape = offset.view(offset.shape[0], -1, 2, offset.shape[2], offset.shape[3]) # (n, sample_pts, 2, h, w)\n num_pts = offset_reshape.shape[1]\n offset_reshape = offset_reshape.contiguous().view(-1, 2, offset.shape[2],\n offset.shape[3]) # (n*sample_pts, 2, h, w)\n x_repeat = x.unsqueeze(1).repeat(1, num_pts, 1, 1, 1) # (n, sample_pts, C, h, w)\n x_repeat = x_repeat.view(-1, x_repeat.shape[2], x_repeat.shape[3], x_repeat.shape[4]) # (n*sample_pts, C, h, w)\n sampled_feat = self.sample_offset(x_repeat, offset_reshape, padding_mode) # (n*sample_pts, C, h, w)\n sampled_feat = sampled_feat.view(-1, num_pts, sampled_feat.shape[1], sampled_feat.shape[2],\n sampled_feat.shape[3]) # (n, sample_pts, C, h, w)\n return sampled_feat",
"def window_data(X, window_length):\n return X[int(len(X)/2-window_length/2):int(len(X)/2+window_length/2)]",
"def _compute_mean_features(window):\r\n return np.mean(window, axis=0)",
"def _compute_median_features(window):\r\n return np.median(window, axis=0)",
"def poly_features(frames, sample_rate, *, kwargs={}):\n l = []\n for frame in frames:\n l.append(\n np.mean(\n librosa.feature.poly_features(\n y=frame,\n sr=sample_rate,\n **kwargs\n ).T, axis=0\n )\n )\n return np.array(l)",
"def featExtract(data, winLength):\n dataLength = data.shape[0]\n nbWin = dataLength - winLength\n extractData = np.zeros((nbWin, winLength))\n for i in range(nbWin):\n extractData[i, :] = data[i:i+winLength]\n return extractData",
"def smoothed(x, w):\n if len(x) <= w:\n return x\n smooth = []\n for i in range(1, w):\n smooth.append( np.mean(x[0:i]) )\n for i in range(w, len(x)+1):\n smooth.append( np.mean(x[i-w:i]) )\n assert len(x) == len(smooth), \"lengths: {}, {}\".format(len(x), len(smooth))\n return np.array(smooth)",
"def fram_wave(waveform: np.array, hop_length: int = 160, fft_window_size: int = 400, center: bool = True):\n frames = []\n for i in range(0, waveform.shape[0] + 1, hop_length):\n if center:\n half_window = (fft_window_size - 1) // 2 + 1\n start = i - half_window if i > half_window else 0\n end = i + half_window if i < waveform.shape[0] - half_window else waveform.shape[0]\n frame = waveform[start:end]\n if start == 0:\n padd_width = (-i + half_window, 0)\n frame = np.pad(frame, pad_width=padd_width, mode=\"reflect\")\n\n elif end == waveform.shape[0]:\n padd_width = (0, (i - waveform.shape[0] + half_window))\n frame = np.pad(frame, pad_width=padd_width, mode=\"reflect\")\n\n else:\n frame = waveform[i : i + fft_window_size]\n frame_width = frame.shape[0]\n if frame_width < waveform.shape[0]:\n frame = np.lib.pad(\n frame, pad_width=(0, fft_window_size - frame_width), mode=\"constant\", constant_values=0\n )\n frames.append(frame)\n\n frames = np.stack(frames, 0)\n return frames"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print percentage of rows that have been processed. | def _print_stat_rows(title,rows_before,rows_after):
    self.strprint(str(title)+" : Percent of processed rows = %1.2F"\
        %(np.abs(rows_before-rows_after)*100/rows_before)) | [
"def printProgress(self, percentage):\n #print '%s\\r' % ' '*20, # clean up row\n #print '%3d%% ' % percentage, # ending with comma prevents newline from being appended\n sys.stdout.flush()",
"def printProgress(self, percentage):\n print('%s\\r' % ' ' * 20,) # clean up row\n print('%3d%% ' % percentage,) # ending with comma prevents newline from being appended\n sys.stdout.flush()",
"def _print_progress(self):\n if self.current_training_size % 1000 == 0:\n print(self.current_training_size, end='')\n elif self.current_training_size % 100 == 0:\n print('.', end='')",
"def progress(self, percentage):\n\n if self.proc:\n try:\n self.proc.stdin.write((\"%d\\n\" % percentage).encode('ascii'))\n except IOError:\n pass\n else:\n print(\"\\r%s: 0%%\" % self.description, end='')\n sys.stdout.flush()",
"def calc_percentage(self, total_entries):\n self.percentage = self.count/total_entries * 100",
"def ReportProgress(self, numRows):\n if self.inFileSize is not None:\n percent = (self.inFile.tell() / self.inFileSize) * 100\n cx_Logging.Trace(\" %d rows imported (%.0f%% of file).\",\n numRows, percent)\n else:\n cx_Logging.Trace(\" %d rows imported.\", numRows)",
"def _progress(self, num_completed_batches, data_loader):\n return '[{}/{} ({:.0f}%)]'.format(num_completed_batches, len(data_loader),\n 100.0 * num_completed_batches / len(data_loader))",
"def _print_progress(self):\n \n print 'Completed %d of %d' %(self.progress_id, self.total_work)\n self.progress_id += 1",
"def _printProgressBar(self, fractionComplete):\n import sys\n nInc = 50\n count = int(nInc * fractionComplete)\n proBar = \"|\"\n for i in range(nInc):\n if i < count:\n proBar += \"-\"\n else:\n proBar += \" \"\n proBar += \"|\"\n print((proBar, int(fractionComplete * 100), \"%\\r\",))\n sys.stdout.flush()\n\n return",
"def print_progress():\n\tstates = domain_state.values()\n\tdone_count = sum([s['status'] in ['DONE', 'DIDNOTEXIST'] for s in states])\n\ttotal_count = len(states)\n\tpercentage = done_count * 100.0 / total_count\n\tprint \"%.5f%% of domains checked\" % percentage",
"def percentage(self) -> float:\n if self.num_total() == 0:\n raise RuntimeError(\n \"Yield::percentage is undefined when Yield::num_total() == 0\"\n )\n\n return float(self.num_success_) / float(self.num_total())",
"def printprogress(self, t):\n sys.stdout.write('\\r%.2f%% of simulation completed (t=%s of %s)' % (100 * t/self.T, t, self.T))\n sys.stdout.flush()",
"def print_progress_bar(self):\r\n\r\n print_progress_bar(self._completed, self._total, length=50)",
"def progress(self, ind, total):\n arg = np.floor(ind/total*10.)\n if arg > self.progress_index:\n print(\"Done \" + str(arg*10) + \" %\")\n self.progress_index = arg",
"def download_report_hook(count, block_size, total_size):\n percent = int(count * block_size * 100 / total_size)\n print(\"\\r%d%%\" % percent + \" completed\", end=\"\\r\")",
"def _displayProgress(self, frame):\n x, y = self.frame_dimensions\n percent_complete = str(int(self.samples/self.sample_size*100)) + \"%\"\n cv2.putText(frame, percent_complete,\n (25, 30), self.font, 1.2,\n (0,255,0), 2)",
"def __show_progress(self, _cur_file_idx, _file_count):\n if (self.__is_show_proegress == False):\n return\n\n if(_file_count == 0):\n raise StandardError('no file found.')\n\n # show progress for each 5% (20 steps)\n digit = math.modf(math.log10(_file_count))[1]\n if(digit < 3):\n print \"prog: [{0}%] {1}/{2}\".format((100 * _cur_file_idx) /_file_count,\n _cur_file_idx, _file_count)\n else:\n digit = digit - 2\n skipstep10 = math.pow(10, digit)\n if ((_cur_file_idx % skipstep10) == 0):\n print \"prog: [{0}%] {1}/{2}\".format((100 * _cur_file_idx) /_file_count,\n _cur_file_idx, _file_count)",
"def percentage(self):\n return sum(self.chunk_percentage) / self.total_steps",
"def _progressBar(self, percent, printEvery=10):\n floor = int(percent)\n sys.stdout.write('\\r' * (floor + 9))\n sys.stdout.write('[')\n sys.stdout.write('=' * (floor/printEvery))\n sys.stdout.write('>] {:02.2f}%'.format(percent))\n sys.stdout.flush()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove rows with countries other than 'United Kingdom', then remove the Country feature. | def _feature_country_process(self):
    if 'Country' not in self._df_invoice_line.columns:
        return
    list_countries_keep = ['United Kingdom']
    rows_before = self._df_invoice_line.shape[0]
    df_invoice_line_new = pd.DataFrame()
    for country in list_countries_keep :
        df_invoice_line_new = df_invoice_line_new.append(\
            self._df_invoice_line[self._df_invoice_line['Country']==country]\
            , ignore_index=True)
    self.df_invoice_line = df_invoice_line_new
    del(df_invoice_line_new)
    rows_after = self._df_invoice_line.shape[0]
    _print_stat_rows("Countries filtering : ",rows_before, rows_after)
    #-------------------------------------------------------------------------
    # Due to the fact only one country is used, then this feature is dropped
    #-------------------------------------------------------------------------
    list_col_to_keep = [col for col in self._df_invoice_line.columns \
        if col not in 'Country']
    self._df_invoice_line = self._df_invoice_line[list_col_to_keep]
    return | [
"def delete_countries() -> None:\n remove_all_countries()",
"def apply_feature_filter(self):\n self.features = set()\n for language in self.data.values():\n features_in_data = set(language.keys())\n features_to_keep = features_in_data & self.feature_filter\n self.features |= features_to_keep\n features_to_remove = features_in_data - features_to_keep\n for feat in features_to_remove:\n language.pop(feat)\n self.features = sorted(list(self.features))",
"def clean_iso_country(spark, input_data):\n try:\n #read file\n df_iso_country = spark.read.option(\"header\",\"true\").csv(input_data+'wikipedia-iso-country-codes.csv')\n df = (df_iso_country.withColumnRenamed('English short name lower case','country_name') \\\n .withColumnRenamed('Alpha_2', 'country_iso2') \\\n .withColumnRenamed('Alpha_3', 'country_iso3') \\\n .withColumnRenamed('Num_code','country_num'))\n\n df_clean_iso_country = df_iso_country.drop(\"ISO_3166-2\") \\\n .select(F.col(\"Country\").alias(\"country_name\"), \\\n F.col(\"Alpha_2\").alias(\"country_iso2\"), \\\n F.col(\"Alpha_3\").alias(\"country_iso3\"), \\\n F.col(\"Num_code\").alias(\"country_num\") \\\n .cast(\"int\")) \\\n .dropDuplicates()\n print('***** Make df_clean_iso_country processing ')\n df_clean_iso_country.printSchema()\n #df_clean_iso_country.show(2)\n except Exception as e:\n print(\"Unexpected error: %s\" % e)\n else:\n return(df_clean_iso_country)",
"def findCountryCode(self):\n RecordsWithCountry = []\n for state in pycountry.subdivisions:\n #print(state.name)\n for record in self.Records: \n if state.name == record.state:\n #print(state.country, record.state)\n r = RecordCountry(date=record.date,\n country=state.country.alpha_3,\n impressions=record.impressions,\n CTR=record.CTR)\n self.Records.remove(record)\n RecordsWithCountry.append(r)\n for record in self.Records: \n r = RecordCountry(date=record.date,\n country=\"XXX\",\n impressions=record.impressions,\n CTR=record.CTR)\n RecordsWithCountry.append(r)\n self.Records = RecordsWithCountry",
"def clean_countries(event_db):\n event_db[\"country_edb\"] = event_db[\"country_edb\"].apply(_clean_country_str)\n event_db = my_utils.split_strings_at_comma_and_distribute_to_new_rows(event_db, 'country_edb')\n return event_db",
"def reset_counties(self):\r\n self.countries.clear()",
"def remove_region_country(legacy_value: str, region: str, country: str, region_only: bool = False) -> str:\n if legacy_value.strip() == '':\n return legacy_value\n value: str = legacy_value\n if country and country == COUNTRY_CA:\n for key in DB2_PROVINCE_MAPPING:\n if value.find(key) > -1:\n if key.find('QUEBEC') == -1 or region == PROVINCE_QC:\n value = value.replace(key, '')\n elif country and country == COUNTRY_US:\n for key in DB2_STATE_MAPPING:\n if value.find(key) > -1:\n value = value.replace(key, '')\n if not region_only:\n for key_country in DB2_COUNTRY_MAPPING:\n if value.find(key_country) > -1:\n value = value.replace(key_country, '')\n value = value.strip()\n if value.endswith(','):\n value = value[0:(len(value) - 1)]\n if not region_only:\n if country and value == country:\n return ''\n for remove_country in DB2_REMOVE_TRAILING_COUNTRY:\n if value.endswith(remove_country):\n end_pos: int = len(value) - len(remove_country) + 1\n value = value[0:end_pos]\n for remove_country in DB2_REMOVE_STARTING_COUNTRY:\n if value.startswith(remove_country):\n start_pos: int = len(remove_country)\n value = value[start_pos:]\n return value.strip()",
"def mask_foreign_country(column):\n codes = misc_utils.load_country_code()\n # Remove New Zealand from foreign country list\n codes = codes.drop(codes[codes['Alpha-2'] == 'nz'].index)\n # Remove texts in brackets: belgian franc (convertible) -> belgian franc\n codes['Country'] = codes['Country'].replace({r'\\(.*\\)': ''}, regex=True).str.strip()\n regex = list()\n regex.append('|'.join(r'\\s' + codes['Country'] + r'\\b'))\n # Don't use Alpha-2 and Alpha-3 since there are lots of misreplacement\n # regex.append('|'.join(r'\\s' + codes['Alpha-2'] + r'\\b'))\n # regex.append('|'.join(r'\\s' + codes['Alpha-3'] + r'\\b'))\n regex_str = '|'.join(regex)\n column = column.replace(regex_str, ' $FOREIGN_COUNTRY ', regex=True)\n return column",
"def clean_data():\n datapath = Path(os.getcwd()) / \"data\"\n files = [str(file) for file in datapath.glob(\"*.csv\")]\n for file in files:\n if file.endswith(\"confirmed.csv\"):\n Confirmed = pd.read_csv(file)\n elif file.endswith(\"deaths.csv\"):\n Deaths = pd.read_csv(file)\n elif file.endswith(\"recovered.csv\"):\n Recovered = pd.read_csv(file)\n\n dataFrames = [Confirmed, Deaths, Recovered]\n countryList = list(dataFrames[0][\"Country/Region\"]) #list of valid countries\n countryList = list(dict.fromkeys(countryList))\n\n #create country population dictionary and align values with those in countryList\n countriesPop = {}\n countriesPop[\"US\"] = CountryInfo(\"USA\").population()\n countriesPop[\"Czechia\"] = CountryInfo(\"Czech Republic\").population()\n countriesPop[\"Taiwan*\"] = CountryInfo(\"Taiwan\").population()\n countriesPop[\"Korea, South\"] = CountryInfo(\"South Korea\").population()\n countriesPop[\"Eswatini\"] = CountryInfo(\"Swaziland\").population()\n countriesPop[\"Cote d'Ivoire\"] = CountryInfo(\"Ivory Coast\").population()\n\n for country in countryList:\n try:\n countriesPop[country] = CountryInfo(country).population()\n except KeyError:\n pass\n\n #remove unnecessary information from dataframes\n for count in range(len(dataFrames)):\n dataFrames[count] = dataFrames[count].drop(\"Province/State\",axis=1)\n dataFrames[count] = dataFrames[count].drop(\"Lat\",axis=1)\n dataFrames[count] = dataFrames[count].drop(\"Long\",axis=1)\n dataFrames[count] = dataFrames[count].rename(columns={\"Country/Region\": \"Country\"})\n dataFrames[count][\"Country\"] = dataFrames[count][\"Country\"].replace({\"Korea, South\": \"South Korea\"})\n dataFrames[count] = dataFrames[count].groupby(\"Country\").sum()\n\n # create per 100k capita values by dividing country data by population\n ConfirmedPC = dataFrames[0].copy()\n DeathsPC = dataFrames[1].copy()\n RecoveredPC = dataFrames[2].copy()\n countryList.append(\"South Korea\")\n\n for country in countryList:\n try:\n ConfirmedPC.loc[country] = ConfirmedPC.loc[country].divide(countriesPop[country]).multiply(100000) #confirmed cases per 100k inhabitants\n DeathsPC.loc[country] = DeathsPC.loc[country].divide(countriesPop[country]).multiply(100000) #deaths per 100k inhabitants\n RecoveredPC.loc[country] = RecoveredPC.loc[country].divide(countriesPop[country]).multiply(100000) #recovered cases per 100k inhabitants\n except KeyError:\n pass\n\n dataFrames.extend([ConfirmedPC, DeathsPC, RecoveredPC])\n\n return dataFrames, countryList",
"def filter_gtf(gtf_data):\n for entry in gtf_data[:]:\n if entry.type != 'CDS' and entry.type != 'operon':\n gtf_data.remove(entry)",
"def _filter_non_country_code_languge(self, raw_lang_data):\n\t\tmeta_lang_codes = filter(self._filter_region_code, filter(self._filter_iso369_iso_3166, raw_lang_data))\n\t\t# 過濾掉 variant e.g en_US_POSIX\n\t\tmeta_lang_codes = filter(self._filter_variant, meta_lang_codes)\n\t\treturn meta_lang_codes",
"def _drop_features(self):",
"def build_feature_filter(self):\n if self.features == [\"*\"]:\n random_iso = list(self.data.keys())[0]\n self.features = set()\n for lang_features in self.data.values():\n self.features |= set(lang_features.keys())\n self.features = list(self.features)\n if self.exclusions:\n self.features = [f for f in self.features if f not in self.exclusions]\n self.feature_filter = set(self.features)",
"def remove_redundant_regions(self):\r\n self.flanking_region.attributes.id = self._flanking_region.attributes.id\r\n self.flanking_region.attributes.parent = ''\r\n for feature in self.pcr_product:\r\n feature.attributes.id = feature.attributes.parent\r\n feature.attributes.parent = ''\r\n self._flanking_region = None\r\n self.gt_seq_region = []\r\n if self.pcr_product:\r\n snp_parent = self.pcr_product[0].attributes.id\r\n else:\r\n snp_parent = self.flanking_region.attributes.id\r\n for snp in self.snp:\r\n snp.attributes.parent = snp_parent",
"def prune_2015_data(raw_data):\n formatted_data = raw_data.drop(columns=[\n 'Majority/Majorité',\n 'Candidate Occupation/Profession du candidat',\n 'Majority Percentage/Pourcentage de majorité',\n 'Candidate Residence/Résidence du candidat',\n ])\n\n formatted_data.rename(columns={\n u'Electoral District Name/Nom de circonscription': 'distname',\n 'Electoral District Number/Numéro de circonscription': 'distnum',\n u'Province': 'province',\n u'Percentage of Votes Obtained /Pourcentage des votes obtenus': 'voteshare',\n u'Candidate/Candidat': 'candidate',\n u'Votes Obtained/Votes obtenus': 'numvotes',\n }, inplace=True)\n\n # Extract the party from the columns.\n formatted_data['party'] = formatted_data['candidate'].apply(\n lambda candidate: extract_party_from_candidate_field(candidate),\n )\n\n # 2015 data comes with the province but we take it from the district code for\n # congruency with 2019.\n formatted_data['province'] = formatted_data['distnum'].apply(\n lambda x: province_for_district_number(x),\n )\n\n # Re-order the columns.\n formatted_data = formatted_data[[\n 'distnum',\n 'distname',\n 'candidate',\n 'party',\n 'numvotes',\n 'voteshare',\n 'province',\n ]]\n\n return formatted_data",
"def populate_countries(self):\n # For each country in population.\n for name, pop in self.population.iterrows():\n p = pop['Population']\n # Get all relevant time series based on country name.\n c = self.raw_confirmed.loc[self.raw_confirmed['Country/Region'] == name].sum(numeric_only=True)\n d = self.raw_deceased.loc[self.raw_deceased['Country/Region'] == name].sum(numeric_only=True)\n r = self.raw_recovered.loc[self.raw_recovered['Country/Region'] == name].sum(numeric_only=True)\n # Create new country object.\n self.countries.append(country.Country(name, p, c, d, r))",
"def trim_features():\n pass",
"def country_code_update(df):\n from pycountry import countries as ct\n new_df = country_grouping(df)\n # country names in the data set that are not fit ISO standard\n completion = pd.DataFrame(np.array([['Bolivia', 'BO'],\n ['Brunei', 'BN'],\n ['Congo (Brazzaville)', 'CG'],\n ['Congo (Kinshasa)', 'CD'],\n ['Cote d\\'Ivoire', 'CI'],\n ['Holy See', 'VA'],\n ['Iran', 'IR'],\n ['Korea, South', 'KR'],\n ['Moldova', 'MD'],\n ['Russia', 'RU'],\n ['Taiwan*', 'TW'],\n ['Tanzania', 'TZ'],\n ['US', 'US'],\n ['Venezuela', 'VE'],\n ['Vietnam', 'VN'],\n ['Syria', 'SY'],\n ['Laos', 'LA'],\n ['West Bank and Gaza', 'PS'],\n ['Kosovo', 'XK'],\n ['Burma', 'MM']\n ]),\n columns=['c_name', 'c_code']\n )\n country_code_list = []\n for country_name in new_df['Country/Region']:\n try:\n if country_name in completion['c_name'].tolist():\n # print('exception covered: ', country_name)\n country_code = completion['c_code'].loc[completion['c_name'] == country_name].item()\n # identifies the cruise ships in the data set considered as a 'country'\n elif country_name == 'Diamond Princess' or country_name == 'MS Zaandam':\n country_code = 'Cruise Ship'\n else:\n country_code = ct.get(name=country_name).alpha_2\n except KeyError:\n print('no result: ', country_name)\n country_code = 'None'\n pass\n country_code_list.append(country_code)\n # print(country_code_list)\n new_df.insert(0, \"country_code\", country_code_list, True)\n new_df = new_df.drop(columns='Country/Region')\n unknown_index = new_df[new_df['country_code'] == 'Cruise Ship'].index\n new_df.drop(unknown_index, inplace=True) # drop when country_code = 'None', most likely are Cruise ships\n # new_df.set_index(new_df['country_code'])\n return new_df",
"def filter_sample_by_unk(self):\n if self.hparams.remove_unk is False:\n return self\n filter_entries = []\n unk = self.text_featurizer.unk_index\n if unk == -1:\n return self\n for items in self.entries:\n if unk not in self.text_featurizer.encode(items[2]):\n filter_entries.append(items)\n self.entries = filter_entries\n return self"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
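A minimal sketch of the same filtering idea, for illustration only: it assumes a pandas DataFrame with a 'Country' column (the function name below is hypothetical) and uses a boolean mask with .isin plus drop(columns=...) instead of the append loop and column list-comprehension used in the _feature_country_process method above.

import pandas as pd

def keep_uk_only(df):
    # Keep only 'United Kingdom' rows, then drop the now-constant Country column.
    kept = df[df['Country'].isin(['United Kingdom'])].reset_index(drop=True)
    return kept.drop(columns=['Country'])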
Builds RFM scores for each customer and encodes them. When this method is called during the data-model building step, the dataframe holding the new RFM features is dumped into a file. | def data_transform_rfm(self) :
is_built_step = False
if self._encoder_rfm is None:
is_built_step = True
#-------------------------------------------------------------------------
# RFM feature is built
#-------------------------------------------------------------------------
ser_invoice_date = self._df_invoice_line.InvoiceDate
self.df_invoice_line, df_RFM, self.df_RFM_quantiles, self._day_now \
= p5_util.p5_df_rfm_build(self.df_invoice_line, day_now = self._day_now\
, df_RFM_threshold=self.df_RFM_quantiles)
self._df_invoice_line.InvoiceDate = ser_invoice_date
#-------------------------------------------------------------------------
# RFM score is added to dataframe
#-------------------------------------------------------------------------
df_merged = pd.merge(self.df_invoice_line\
, df_RFM[['CustomerID','RFM']], how='left', on=['CustomerID'])
self._df_invoice_line \
= pd.DataFrame(df_merged.values, index = self._df_invoice_line.index\
, columns=df_merged.columns)
#self._df_invoice_line \
#= pd.concat([ self.df_invoice_line,df_RFM[['CustomerID','RFM']] ], axis=1\
#,join='inner')
#-------------------------------------------------------------------------
# RFM encoding
#-------------------------------------------------------------------------
self._encoder_rfm, df_RFM_encoded \
= p5_util.df_rfm_one_hot_encode(df_RFM,'RFM', encoder=self._encoder_rfm)
#-------------------------------------------------------------------------
# Encoded RFM features are renamed
#-------------------------------------------------------------------------
df_customers_rfm, list_col_unchanged \
= p5_util.df_rename_columns(df_RFM_encoded, df_RFM_encoded.columns\
, 'w_rfm_')
self.strprint("df_customers_rfm =" +str(df_customers_rfm.shape))
#-------------------------------------------------------------------------
# dataframe with RFM encoded values per customer is dumped
#-------------------------------------------------------------------------
if is_built_step is True:
p5_util.object_dump(df_customers_rfm, self.df_customers_rfm_fileName)
else :
self._df_customers_rfm = df_customers_rfm.copy()
return | [
"def df_customers_fileRead(self):\n \n #-------------------------------------------------------------------------\n # RFM features are restored\n #-------------------------------------------------------------------------\n df_customers_rfm \\\n = p5_util.object_load(self.df_customers_rfm_fileName)\n self.strprint(\"RFM features : \"+str(df_customers_rfm.shape))\n \n #-------------------------------------------------------------------------\n # Time features are restored\n #-------------------------------------------------------------------------\n df_customers_timeFeature \\\n = p5_util.object_load(self._df_customers_timeFeature_fileName)\n self.strprint(\"Time features : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # NLP features are restored\n #-------------------------------------------------------------------------\n df_customers_nlp = p5_util.object_load(self._df_customers_nlp_fileName)\n self.strprint(\"NLP features : \"+str(df_customers_nlp.shape))\n\n if False:\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n\n self.strprint(\"All features : \"+str(df_customers.shape))\n\n #----------------------------------------------------------------------\n # Dataframe is dumped into a file\n #----------------------------------------------------------------------\n p5_util.object_dump(df_customers, self._df_customers_fileName)\n if False:\n #----------------------------------------------------------------------\n # Dataframe is copied as an attribute\n #----------------------------------------------------------------------\n self._df_customers = df_customers.copy()\n \n return",
"def df_customers_features_build(self):\n\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n \n self.strprint(\"All features : \"+str(df_customers.shape))\n self._df_customers = df_customers.copy()\n return",
"def collect_data(self):\n categories = []\n list_of_feature_lists = []\n feature_sets = set()\n with open(self.csv_path, \"r\") as f:\n reader = csv.DictReader(f)\n # collecting some stats for FDR calculation:\n self.PSM_count = 0\n self.decoy_count = 0\n\n if self[\"dump_svm_matrix\"]:\n self.init_svm_matrix_dump()\n additional_matrix_info = []\n\n for i, row in enumerate(\n sorted(\n reader,\n reverse=self[\"bigger_scores_better\"],\n key=lambda d: float(d[self.col_for_sorting]),\n )\n ):\n\n features = self.row_to_features(row)\n\n if tuple(features) in feature_sets:\n continue\n feature_sets.add(tuple(features))\n\n category, psm_FDR = self.get_psm_category(row)\n\n list_of_feature_lists.append(features)\n categories.append(category)\n\n if self[\"dump_svm_matrix\"]:\n label = -1 if row_is_decoy(row) else 1\n sequence = \"{0}.{1}#{2}.{3}\".format(\n row[\"Sequence Pre AA\"].strip(),\n row[\"Sequence\"].strip(),\n row[\"Modifications\"].strip(),\n row[\"Sequence Post AA\"].strip(),\n )\n additional_matrix_info.append(\n {\n \"psm_id\": row[\"Spectrum Title\"].strip(),\n \"label\": label,\n \"scannr\": row[\"Spectrum Title\"].strip().split(\".\")[-2],\n \"peptide\": sequence,\n \"proteins\": self.parse_protein_ids(row[\"Protein ID\"]),\n }\n )\n\n if i % 1000 == 0:\n score_val = float(row[self.col_for_sorting])\n msg = (\n \"Generating feature matrix from input csv \"\n \"(line ~{0}) with score {1} and FDR \"\n \"{2}\".format(i, score_val, psm_FDR)\n )\n print(msg, end=\"\\r\")\n\n # All data points are collected in one big matrix, to make standardization possible\n print(\"\\nConverting feature matrix to NumPy array...\")\n X_raw = np.array(list_of_feature_lists, dtype=float)\n\n print(\"Replacing empty/NaN values with the mean of each column...\")\n self.nan_replacer = Imputer()\n self.nan_replacer.fit(X_raw)\n X_raw = self.nan_replacer.transform(X_raw)\n # Standardize input matrix to ease machine learning! Scaled data has zero mean and unit variance\n print(\"Standardizing input matrix...\")\n self.scaler = SCALER.fit(X_raw)\n self.X = self.scaler.transform(X_raw)\n self.categories = np.array(categories)\n print()\n\n if self[\"dump_svm_matrix\"]:\n print(\"Dumping SVM matrix to\", self[\"dump_svm_matrix\"])\n\n for i, matrix_row in enumerate(self.X):\n matrix_row_info = additional_matrix_info[i]\n self.dump_svm_matrix_row(\n row=list(matrix_row),\n psm_id=matrix_row_info[\"psm_id\"],\n label=matrix_row_info[\"label\"],\n scannr=matrix_row_info[\"scannr\"],\n peptide=matrix_row_info[\"peptide\"],\n proteins=matrix_row_info[\"proteins\"],\n )\n\n print(\"Dumped SVM matrix to\", self[\"dump_svm_matrix\"])\n return",
"def create_features(overwrite=False):\n \n data_dict = pd.read_pickle('complete_dataset.pickle') # upload dictionary of tickers\n ticker_dict = data_dict['Raw_Data']\n\n # initialize dataframe\n first_key = list(ticker_dict.keys())[0] # find the first ticker\n df1 = ticker_dict[first_key].copy() # df for first ticker\n first_df_cols = df1.columns.tolist()\n df2 = aggregate_from_daily_ml(df1, first_key) # aggregate to monthly level\n j=0\n for key, value in ticker_dict.items(): # for each ticker, aggregate then concat to master df\n if key==first_key: continue\n if first_df_cols != value.columns.tolist(): print('bad columns for {}!'.format(key))\n df3 = aggregate_from_daily_ml(value, key)\n \n df2 = pd.concat([df2, df3])\n if j%(round(len(ticker_dict)/10))==0: print('Fraction done: {}'.format(round(j/len(ticker_dict),5)))\n j+=1\n df2 = df2.sort_index(level=[0,1])\n \n df2.columns = [col[0] + '_' + str(col[1]) if str(col[1])!='NA' else col[0] for col in df2.columns.tolist()]\n\n df3 = create_target(df2, threshold=0.0)\n df3.columns = [col[0] + '_' + str(col[1]) if str(col[1])!='NA' else col[0] for col in df3.columns.tolist()]\n\n\n if overwrite:\n print('Saving to data.pkl')\n df3.to_pickle('data.pkl')\n else:\n print('File not being saved. To save, use overwrite=True')\n\n return df3",
"def __prepare_feature_matrix__(self):\n #fname = r'./RecSys/feature_matrix/feature_matrix.csv'\n #if os.path.isfile(fname) :\n # self.feature_matrix = pd.read_csv(fname)\n # fname = r'./RecSys/feature_matrix/feature_matrix_pivot.csv'\n # self.feature_matrix_pivot = pd.read_csv(fname)\n #else:\n object_um = feature_matrix_cl.FeatureMatrixClass(self.data, self.active_users)\n self.feature_matrix, self.pivot = object_um.get_feature_matrix()\n self.active_users.to_csv('./RecSys/out/test/au_list.csv',index=False)",
"def build(self):\n list_of_mafs = []\n maf_generator = self.get_dataframe()\n\n for maf_as_dict in maf_generator:\n list_of_mafs.extend(maf_as_dict)\n\n reporting_path = os.path.join(app.config.get('REPORTING_ROOT_PATH'), app.config.get('REPORTING_PATH'), 'global')\n combined_maf = None\n try:\n combined_maf = pandas.DataFrame(list_of_mafs)\n except Exception as e:\n logger.error(f'Problem creating dataframe from list of dicts: {str(e)}')\n try:\n combined_maf.to_csv(\n os.path.join(reporting_path, f'{self.method}_combined_maf.tsv'),\n sep=\"\\t\",\n encoding='utf-8',\n index='false'\n )\n except Exception as e:\n # bad practice here catching base exception, but the pandas documentation did not reveal what errors or\n # exceptions to expect\n logger.error(f'Problem writing the combined maf file to csv:{str(e)}')\n abort(500)",
"def features_customers(df_customers):\n for i in PREMIER_VALS:\n k = 'premier_' + str(i)\n df_customers[k] = np.where(df_customers['premier'] == i, 1, 0)\n\n df_customers['age'] = datetime.now().date().year - df_customers['yearOfBirth']\n df_customers['male'] = np.where(df_customers['gender'] == 'M', 1, 0)\n df_customers['female'] = np.where(df_customers['gender'] == 'F', 1, 0)\n df_customers['days_in_asos'] = (datetime.now().date() - df_customers['dateCreated']).dt.days\n\n logger.info(\"Features from the customers table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers",
"def __init__(self, sc, dataset_path):\n\n logger.info(\"Starting up the Recommendation Engine: \")\n\n self.sc = sc\n\n\t#Load cusomer data for later use\n\t\n logger.info(\"Loading Customer data...\")\n customer_file_path = os.path.join(dataset_path, 'tpo_customer.csv')\n customer_raw_RDD = self.sc.textFile(customer_file_path)\n customer_raw_data_header = customer_raw_RDD.take(1)[0]\n self.customer_RDD = customer_raw_RDD.filter(lambda line: line!=customer_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: ((tokens[0]),(tokens[1]),(tokens[2]),(tokens[3]),(tokens[4]),(tokens[5]),(tokens[6]),(tokens[7]))).cache()\n\tlogger.info(\"Loading Customer data success...\")\n\t#CUSTOMCUSTOMER_NAME,CUSTOMER_ADDRESS1,CUSTOMER_ADDRESS2,CUSTOMER_CITY,CUSTOMER_STATE,CUSTOMER_COUNTRY,CUSTOMER_ZIPCODE,CREATED_BY,CREATION_DATE,LAST_UPDATED_BY,LAST_UPDATE_DATE\n \n\n\n\t\n\t#Load turbine data for later use\t\n logger.info(\"Loading Turbine data...\")\n turbine_file_path = os.path.join(dataset_path, 'test_tpo_unit_config.csv')\n turbine_raw_RDD = self.sc.textFile(turbine_file_path)\n turbine_raw_data_header = turbine_raw_RDD.take(1)[0]\n self.turbine_RDD = turbine_raw_RDD.filter(lambda line: line!=turbine_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: ((tokens[0]),(tokens[1]),(tokens[2]),(tokens[3]),(tokens[5]),(tokens[34]),(tokens[51]),(tokens[35]))).cache()\n\tlogger.info(\"Loading Turbine data success...\")\n \n\t\n\t\n\t\n\t#Load site data for later use\t\n logger.info(\"Loading Site data...\")\n site_file_path = os.path.join(dataset_path, 'tpo_site.csv')\n site_raw_RDD = self.sc.textFile(site_file_path)\n site_raw_data_header = site_raw_RDD.take(1)[0]\n self.site_RDD = site_raw_RDD.filter(lambda line: line!=site_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: ((tokens[0]),(tokens[1]),(tokens[2]),(tokens[3]),(tokens[4]),(tokens[5]),(tokens[6]),(tokens[7]),(tokens[16]))).cache()\n\tlogger.info(\"Loading Site data success...\")\n\t\n\n\n\n\t# Load ratings data for later use\n logger.info(\"Loading Ratings data...\")\n ratings_file_path = os.path.join(dataset_path, 'ratings.csv')\n ratings_raw_RDD = self.sc.textFile(ratings_file_path)\n ratings_raw_data_header = ratings_raw_RDD.take(1)[0]\n self.ratings_RDD = ratings_raw_RDD.filter(lambda line: line!=ratings_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: (int(tokens[0]),int(tokens[1]),float(tokens[2]))).cache()\n # Load movies data for later use\n logger.info(\"Loading Movies data...\")\n movies_file_path = os.path.join(dataset_path, 'movies.csv')\n movies_raw_RDD = self.sc.textFile(movies_file_path)\n movies_raw_data_header = movies_raw_RDD.take(1)[0]\n self.movies_RDD = movies_raw_RDD.filter(lambda line: line!=movies_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: (int(tokens[0]),tokens[1],tokens[2])).cache()\n self.movies_titles_RDD = self.movies_RDD.map(lambda x: (int(x[0]),x[1])).cache()\n # Pre-calculate movies ratings counts\n self.__count_and_average_ratings()\n\n # Train the model\n self.rank = 8\n self.seed = 5L\n self.iterations = 10\n self.regularization_parameter = 0.1\n self.__train_model()",
"def generate_customers(file_len):\n print(\"Generating Customers\")\n customer = {\n 'customer_id':[],\n 'name': [],\n 'last_name':[],\n 'address':[],\n 'phone_number':[],\n 'email_address':[],\n 'status':[],\n 'credit_limit':[],\n }\n header = list(customer.keys())\n f_name = 'customers_' + str(file_len) + '.csv'\n file_name = Path(FILE_PATH) / f_name\n with open(file_name, 'w') as f:\n csv_file = csv.DictWriter(f, header, lineterminator='\\n')\n csv_file.writeheader()\n for idx in range(file_len):\n customer['customer_id'] = f'C{randint(0,999999):06}'\n customer['name'] = names.get_first_name()\n customer['last_name'] = names.get_last_name()\n\n try:\n address = RW.get_random_word(hasDictionaryDef=\"true\",includePartOfSpeech=\"noun\").capitalize()\n except:\n address = choice(['Rarotonga', 'Tuvalu', 'Samoa'])\n customer['address'] = address\n customer['phone_number'] = f\"{randint(0,999):3}-{randint(0,999):3}-{randint(0,9999):3}\"\n try:\n randurl = RW.get_random_word()\n except:\n randurl = 'gmail'\n customer['email_address'] = customer['name'].lower() + '.' + customer['last_name'].lower() + '@' + randurl + choice(['.com', '.net', '.io', '.org'])\n customer['status'] = choice([True, False])\n customer['credit_limit'] = round(uniform(0.0, 100000.00),2)\n\n CUSTOMER_IDS.append(customer['customer_id'])\n csv_file.writerow(customer)\n # print(customer)",
"def make_submission_file(w, unused_features, filename=\"prediction.csv\"):\n\n # load test datasets\n print_banner(\"7. Read test dataset from higgs-data/test.csv\") \n test_y, test_x, ind = load_csv_data('higgs-data/test.csv')\n\n # Construct Matrix Output with values of one\n y_pred = np.ones(len(test_y))\n\n # Split test dataset based\n print_banner(\"8. Split the test dataset into 8 subsets\") \n test_sets_x, _, indices = create_subsets(test_x, test_y)\n\n # Remove features of test datasets based on PRI_JET_NUM and DER_MASS_MMC\n print_banner(\"9. Remove features in each test subset based on PRI_JET_NUM and DER_MASS_MMC\")\n test_sets_x = remove_features(test_sets_x, unused_features) \n\n # Iterate through the test subsets with their models accordingly\n print_banner(\"10. Predict each test subset using their corresponding model\") \n for x, w, index in zip(test_sets_x, w, indices):\n\n # Perform z-score standardization and expand matrix features with logarithmic & polynomial & cross_term & square root basis function\n stand_x = generate_features(x, 2, True, with_log=True, with_sqrt=True, cross_terms=True)\n\n # Get the prediction\n y_pred[index] = predict_labels(w, stand_x)\n\n print_banner(\" Predicting subset: DONE\") \n \n # Creating submission file\n print_banner(\"11. Making final submission file with csv format\") \n create_csv_submission(ind, y_pred, filename)",
"def make_features(user_master:SparkDataFrame):\n df = user_master.select([f'feature{i}' for i in range(1,7) ] + [\"user_id\"] )\n cols = df.columns\n\n categoricalColumns = [f'feature{i}' for i in range(1,7)]\n\n stages = []\n for categoricalCol in categoricalColumns:\n stringIndexer = StringIndexer(inputCol = categoricalCol, outputCol = categoricalCol + 'Index')\n encoder = OneHotEncoder(inputCols=[stringIndexer.getOutputCol()], outputCols=[categoricalCol + \"classVec\"])\n stages += [stringIndexer, encoder]\n\n #label_stringIdx = StringIndexer(inputCol = 'item_id', outputCol = 'label')\n #stages += [label_stringIdx]\n\n\n assemblerInputs = [c + \"classVec\" for c in categoricalColumns] \n assembler = VectorAssembler(inputCols=assemblerInputs, outputCol=\"features\")\n stages += [assembler]\n\n \n pipeline = Pipeline(stages = stages)\n pipelineModel = pipeline.fit(df)\n df = pipelineModel.transform(df)\n selectedCols = ['features'] + cols\n df = df.select(selectedCols)\n #df.printSchema()\n\n return df",
"def run(self, dataset_path):\n features = self._generate_features(self._feature_extractors)\n features.to_csv(dataset_path)",
"def train(self) -> None:\n model = {}\n train_metrics = {}\n valid_metrics = {}\n filled_dataset: pd.DataFrame = None\n importances = {}\n for (\n label,\n labeled,\n [train_set, valid_set],\n ) in self.get_total_train_val_set_per_risk():\n\n model[label] = Pipeline(\n [\n (\"FeatureSelection\", FeatureSelectionAndGeneration(feats_num=500)),\n (\"Classification\", Classifier(label)),\n ]\n )\n model[label].fit(train_set[self.feat_names], train_set[label])\n train_preds = model[label].predict(train_set[self.feat_names])\n valid_preds = model[label].predict(valid_set[self.feat_names])\n train_metrics[label] = self.compute_metrics(train_set[label], train_preds)\n valid_metrics[label] = self.compute_metrics(valid_set[label], valid_preds)\n model[label].fit(labeled[self.feat_names], labeled[label])\n importances[label] = (\n model[label].named_steps[\"Classification\"].feature_importances_\n )\n if filled_dataset is None:\n filled_dataset = self.dataset[self.id_columns + self.lab_names].copy()\n filled_dataset.loc[~self.train_mask, label] = model[label].predict(\n self.dataset.loc[~self.train_mask, self.feat_names]\n )\n self.model = model\n self.save_model()\n with open(VALIDATION_METRICS_PATH, \"wb\") as out:\n pickle.dump(valid_metrics, out)\n with open(TRAINING_METRICS_PATH, \"wb\") as out:\n pickle.dump(train_metrics, out)\n with open(FEATURES_IMPORTANCES_PATH, \"wb\") as out:\n pickle.dump(importances, out)\n self.filled_dataset = filled_dataset\n self.filled_dataset.to_csv(FILLED_DATASET_PATH, index=False)\n prediction_mask = self.filled_dataset[self.id_columns + self.lab_names]\n prediction_mask[self.lab_names] = pd.isnull(self.dataset[self.lab_names])\n prediction_mask.to_csv(PREDICTION_MASK_PATH, index=False)\n import data.model.metrics\n\n importlib.reload(data.model.metrics)\n import data.model.predictions\n\n importlib.reload(data.model.predictions)",
"def generateMatrix(self):\n if self.tokenWeights and self.extraFeatures:\n nFeatures = self.wordId + self.wordId2 + len(self.EXTRA_WEIGHTS_LABELS)\n logging.info('Exporting TOKEN WEIGHTS AND EXTRA FEATURES %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n # iterate through 1st sentence\n for wId, val in doc['s1'].iteritems():\n mtrx[docId, wId] = val\n # then iterate thru 2nd sentence, store on 2ND PARTITION\n for wId, val in doc['s2'].iteritems():\n mtrx[docId, self.wordId + wId] = val\n # finally extra features values stored at the end of the vector\n for label, val in doc['extraFeatures'].iteritems():\n mtrx[docId, self.wordId + self.wordId2 + self.EXTRA_WEIGHTS_LABELS.index(label)] = val\n\n elif self.tokenWeights and not self.extraFeatures:\n nFeatures = self.wordId + self.wordId2\n logging.info('Exporting TOKEN WEIGHTS %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n # iterate through 1st sentence\n for wId, val in doc['s1'].iteritems():\n mtrx[docId, wId] = val\n # then iterate thru 2nd sentence, store on 2ND PARTITION\n for wId, val in doc['s2'].iteritems():\n mtrx[docId, self.wordId + wId] = val\n else:\n nFeatures = len(self.EXTRA_WEIGHTS_LABELS)\n logging.info('Exporting EXTRA FEATURES %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n for label, val in doc['extraFeatures'].iteritems():\n mtrx[docId, self.EXTRA_WEIGHTS_LABELS.index(label)] = val\n logging.info('Matrix generated')\n logging.info(mtrx.shape)\n return mtrx",
"def _make_user_rec_sys_data(item_rec_sys_data):\n\n # Extracting customers with at least 2 reviews.\n review_counts = item_rec_sys_data.groupby(\"customer_id\").count()[[\"review_score\"]]\n all_ids = list(review_counts[review_counts[\"review_score\"] > 1].index)\n\n # Random Sample 100 ids.\n random_sample_ids = random.choices(all_ids, k=100)\n user_rec_sys_data = item_rec_sys_data[item_rec_sys_data[\"customer_id\"].isin(random_sample_ids)]\n\n # Resetting Index.\n user_rec_sys_data.reset_index(inplace=True)\n user_rec_sys_data = user_rec_sys_data.drop([\"customer_id\", \"index\"], axis=1)\n user_rec_sys_data.reset_index(inplace=True)\n user_rec_sys_data = user_rec_sys_data.rename(columns={\"index\": \"customer_id\"})\n\n # Saving file.\n file_path = Path.cwd() / \"datasets/user_rec_sys_data.csv\"\n user_rec_sys_data.to_csv(file_path, index=False)\n\n return user_rec_sys_data",
"def generate(self):\n\n generated_models = []\n\n for rate_set in self.rate_sets:\n\n resolution = self.resolution\n cfo_mechanism = ModelAnalysis('RFO', rate_set, resolution)\n generated_models.append(cfo_mechanism.frame)\n\n all_models = pd.concat(generated_models, axis=0)\n all_models.reset_index(inplace=True, drop=True)\n all_models.to_csv(self.output)\n\n return all_models.melt(id_vars=['delta', 'gamma', 'beta', 'alpha']).copy()",
"def process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n self.check_hdf5_files(self.train_database)\n\n if self.valid_database:\n self.valid_database = self.check_hdf5_files(\n self.valid_database)\n\n if self.test_database:\n self.test_database = self.check_hdf5_files(\n self.test_database)\n\n # create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n if self.mapfly:\n self.get_raw_feature_name()\n else:\n self.get_mapped_feature_name()\n\n # get the pairing\n self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets or self.clip_features:\n if self.mapfly:\n self.compute_norm()\n else:\n self.get_norm()\n\n logger.info('\\n')\n logger.info(\" Data Set Info:\")\n logger.info(\n f' Augmentation : {self.use_rotation} rotations')\n logger.info(\n f' Training set : {self.ntrain} conformations')\n logger.info(\n f' Validation set : {self.nvalid} conformations')\n logger.info(\n f' Test set : {self.ntest} conformations')\n logger.info(f' Number of channels : {self.input_shape[0]}')\n logger.info(f' Grid Size : {self.data_shape[1]}, '\n f'{self.data_shape[2]}, {self.data_shape[3]}')\n sys.stdout.flush()",
"def create_feature_file(self, source_file, feature_file):\n\n feature_data = dict()\n\n # read results and create feature data\n print 'Reading - ', source_file\n\n with open(source_file, 'r') as f:\n lines = f.read().splitlines()\n for line in lines:\n split_array = re.split('\\\\s+', line.strip())\n\n query_id = split_array[0]\n doc_no = split_array[2]\n score = split_array[4]\n\n label = self.qrel_data[query_id].get(doc_no, None)\n\n if label is not None:\n if query_id in feature_data:\n feature_data[query_id].update({doc_no: score})\n else:\n feature_data[query_id] = {doc_no: score}\n\n # write feature data into a json file\n write_json_file(feature_data, feature_file)",
"def create_features_matrix(self, train_data_ids, test_data_ids):\n train_matrix_list = self.get_feature_matrix_list(train_data_ids)\n test_matrix_list = self.get_feature_matrix_list(test_data_ids)\n\n list_to_file(train_matrix_list, TRAIN_DATA_FILE)\n list_to_file(test_matrix_list, TEST_DATA_FILE)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
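A minimal RFM sketch under stated assumptions, not the p5_util API used by data_transform_rfm above: it assumes an invoice-line DataFrame with CustomerID, InvoiceDate (datetime), InvoiceNo and a monetary 'amount' column, plus a reference timestamp 'now' (all names are illustrative). Quantile cuts give 1-4 scores per dimension and the concatenated code is one-hot encoded with a w_rfm_ root, mirroring the idea of the method above.

import pandas as pd

def rfm_one_hot(df, now):
    # Aggregate invoice lines per customer: days since last purchase,
    # number of distinct invoices, and total spend.
    agg = df.groupby('CustomerID').agg(
        recency=('InvoiceDate', lambda s: (now - s.max()).days),
        frequency=('InvoiceNo', 'nunique'),
        monetary=('amount', 'sum'))
    # Quantile-based scores in 1..4; rank() breaks ties for frequency.
    r = pd.qcut(agg['recency'], 4, labels=[4, 3, 2, 1]).astype(int)
    f = pd.qcut(agg['frequency'].rank(method='first'), 4, labels=[1, 2, 3, 4]).astype(int)
    m = pd.qcut(agg['monetary'], 4, labels=[1, 2, 3, 4]).astype(int)
    agg['RFM'] = r.astype(str) + f.astype(str) + m.astype(str)
    # One-hot encode the concatenated RFM code, one w_rfm_* column per code.
    return pd.get_dummies(agg['RFM'], prefix='w_rfm')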
Creates new features from the Description feature using NLTK, an NLP package. NLP features are gathered into a dataframe and a PCA reduction is applied to it. Features from the dataframe are renamed with the root name w_nlp. When this method is called during the data-model building step, the dataframe holding the new NLP features is dumped into a file. | def data_transform_nlp(self):
df_invoice_line = None
is_build_step = False
if self._vectorizer_nlp is None:
is_build_step = True
list_no_words=['SET','PACK']
df_invoice_line, csr_matrix_weights, self._vectorizer_nlp \
= p5_util.nlp_process(self.df_invoice_line\
, 'Description' , vectorizer= self._vectorizer_nlp\
, list_no_words=list_no_words, is_verbose= self.is_verbose)
if df_invoice_line is None:
self.strprint("***ERROR : NLP process interrupted!")
return
#-------------------------------------------------------------------------
# NLP weights are accumulated (summed) per customer
#-------------------------------------------------------------------------
if csr_matrix_weights is None:
csr_matrix_weights \
= p5_util.object_load('./data/matrix_weights_NLP.dump')
else:
pass
self.strprint("df_invoice_line : "+str(df_invoice_line.shape))
self.dbg_df = df_invoice_line.copy()
root_name = 'w_nlp_'
self._df_w_nlp = p5_util.df_nlp_sum_per_customer(df_invoice_line\
, csr_matrix_weights, root_name)
del(csr_matrix_weights)
#-------------------------------------------------------------------------
# Dimension reduction thanks to PCA
#-------------------------------------------------------------------------
self.strprint("self._df_w_nlp : "+str(self._df_w_nlp.shape))
root_name_pca = 'nlp_pca_'
n_dim = self._nlp_pca_ndim
df_customers_pca_nlp, self._pca_nlp \
= p5_util.df_pca_reduce(self._df_w_nlp, n_dim, root_name_pca\
, p_is_scale=False, pca=self._pca_nlp)
self.strprint("df_customers_pca_nlp : " +str(df_customers_pca_nlp.shape))
#-------------------------------------------------------------------------
# Backup of NLP features per customer
#-------------------------------------------------------------------------
if is_build_step is True:
p5_util.object_dump(df_customers_pca_nlp\
, self._df_customers_nlp_fileName)
else:
self._df_customers_pca_nlp = df_customers_pca_nlp.copy()
return | [
"def feature_description_nlp(self):\n \n #-------------------------------------------------------------------------\n # Returned dataframe is aggregated with weights from self.vectorizer\n #-------------------------------------------------------------------------\n list_no_words=['SET','PACK']\n self.df_invoice_line, vectorizer, matrix_weights \\\n = p5_util.nlp_process(self.df_invoice_line,'Description'\\\n , vectorizer=self.vectorizer, list_no_words=list_no_words)\n\n #-------------------------------------------------------------------------\n # Each vectorized column 'x' is renamed w_nlp_i\n #-------------------------------------------------------------------------\n dict_matching_name = dict()\n for col in self.df_invoice_line.columns:\n if str(col).isdigit() is True:\n new_col_name = \"w_nlp_\"+str(col)\n dict_matching_name[col] = new_col_name\n \n self.df_invoice_line.rename(columns=dict_matching_name,inplace=True)\n #-------------------------------------------------------------------------\n # Description is droped from columns\n #-------------------------------------------------------------------------\n del(self.df_invoice_line['Description'])",
"def featureprepare(self,df):\n try:\n df = self.featureselection(df)\n emp_len_dict= pickleload(self.dict_employ_len) # Load emp len\n df['emp_length'] = df['emp_length'].map(emp_len_dict)\n df['Long_emp_length'] = df['emp_length'].apply(lambda x: 'Yes' if x == 10 else 'No') # creating new feature\n df[\"emp_title\"].fillna('Missing', inplace=True)\n\n # Handling missing numerical value\n dict_Mean_var = pickleload(self.dict_Mean_var)\n for col, mean_val in dict_Mean_var.items():\n df[col].fillna(mean_val, inplace=True)\n\n # Handling rare values\n Freqlabels = pickleload(self.Freqlabels)\n for variable, frequent_labels in Freqlabels.items():\n df[variable] = np.where(df[variable].isin(frequent_labels), df[variable], 'Rare')\n\n # Encoding Categorical features\n x = pickleload(self.labelEncoder)\n for features, labels in x.items():\n df.loc[:, features] = labels.transform(df.loc[:, features])\n return df\n except Exception as e:\n self._Logger.error(\"Error in feature preparation: {}\".format(e))",
"def generate_features(\n df: pd.DataFrame, spacy_model: str, language: str\n) -> pd.DataFrame:\n logging.info(\"Loading Spacy model...\")\n nlp = spacy.load(spacy_model)\n\n # Makes all tokens lowercase\n logging.info(\"Lowercase\")\n df[\"token_lower\"] = df[\"token\"].str.lower()\n\n logging.info(\"Lemma, pos\")\n spacy_pipe = nlp.pipe(df[\"token_lower\"].values, disable=[\"ner\", \"parser\"])\n features_gen = ((doc[0].lemma_, doc[0].pos_) for doc in spacy_pipe)\n df[\"lemma\"], df[\"pos\"] = zip(*features_gen)\n\n # Prepare stemmers\n logging.info(\"Loading Snowball Stemmer...\")\n snow = SnowballStemmer(language=language)\n\n logging.info(\"Snowball stemmer\")\n df[\"snowballStemmer\"] = df.apply(lambda row: snow.stem(row[\"token_lower\"]), axis=1)\n\n logging.info(\"Loading Porter Stemmer...\")\n port = PorterStemmer()\n\n logging.info(\"Porter stemmer\")\n df[\"porterStemmer\"] = df.apply(lambda row: port.stem(row[\"token_lower\"]), axis=1)\n\n # Adds columns with a binary if the word contains a possible negation prefix or suffix\n logging.info(\"Prefix\")\n df[\"possible_prefix\"] = df.apply(\n lambda row: possible_negation_prefix(row[\"token_lower\"]), axis=1\n )\n\n logging.info(\"Suffix\")\n df[\"possible_suffix\"] = df.apply(\n lambda row: possible_negation_suffix(row[\"token_lower\"]), axis=1\n )\n\n # Adds new columns for the previous and next lemma and pos-tag\n logging.info(\"Add prev/next shifts\")\n df[\"prev_Lemma\"] = df[\"lemma\"].shift(periods=1)\n df[\"next_Lemma\"] = df[\"lemma\"].shift(periods=-1)\n df[\"prev_pos\"] = df[\"pos\"].shift(periods=1)\n df[\"next_pos\"] = df[\"pos\"].shift(periods=-1)\n return df",
"def add_features(df):\n\n # create a set of stopwords\n StopWords = set(stopwords.words('english'))\n \n # lowering and removing punctuation\n df['processed_text'] = df['message'].apply(lambda x: re.sub(r'[^\\w\\s]', '', x.lower()))\n \n # apply lemmatization\n df['processed_text'] = df['processed_text'].apply(\n lambda x: ' '.join([WordNetLemmatizer().lemmatize(token) for token in x.split()]))\n \n # get the length of the message\n df['length'] = df['processed_text'].apply(lambda x: len(x))\n \n # get the number of words in each message\n df['num_words'] = df['processed_text'].apply(lambda x: len(x.split()))\n \n # get the number of non stopwords in each message\n df['non_stopwords'] = df['processed_text'].apply(\n lambda x: len([t for t in x.split() if t not in StopWords]))\n \n # get the average word length\n df['avg_word_len'] = df['processed_text'].apply(\n lambda x: np.mean([len(t) for t in x.split() if t not in StopWords]) \\\n if len([len(t) for t in x.split() if t not in StopWords]) > 0 else 0)\n \n # update stop words (didn't want to remove negation)\n StopWords = StopWords.difference(\n [\"aren't\", 'nor', 'not', 'no', \"isn't\", \"couldn't\", \"hasn't\", \n \"hadn't\", \"haven't\", \"didn't\", \"doesn't\", \"wouldn't\", \"can't\"])\n \n # remove stop words from processed text message\n df['processed_text'] = df['processed_text'].apply(\n lambda x: ' '.join([token for token in x.split() if token not in StopWords]))\n \n # filter the words with length > 2\n df['processed_text'] = df['processed_text'].apply(\n lambda x: ' '.join([token for token in x.split() if len(token) > 2]))\n \n return df",
"def main_new_dataset():\n\tnewData = pd.read_csv('../xsense_data/global_dataset_abs_speed_diff_yaw.txt', sep=';')\n\n\tlistFeatures = ['Acc_X', 'Acc_Y', 'Speed_X', 'Speed_Y', 'Diff_Yaw']\n\tdata = newData.ix[:, listFeatures]\n\n\tworder = WordData(data)\n\twords = worder.create_words(worder.dataset)\n\tprint words\n\tprint worder.discretization_model\n\n\tcolWords = pd.Series(words, name='Word')\n\twordDataset = pd.concat([newData,colWords], axis=1)\n\twordDataset.to_csv('../xsense_data/word_global_dataset_abs_speed_diff_yaw.txt',sep=';')",
"def extract_features (document_collection, language_of_documents = \"en\", preprocessing_only = False):\n documents_without_stop_words = stop_words(document_collection, language=language_of_documents)\n lemmatized_documents = lemmatizer (documents_without_stop_words, language=language_of_documents)\n stemmed_documents = stemming (lemmatized_documents, language=language_of_documents)\n if preprocessing_only:\n return stemmed_documents\n documents_df = pd.DataFrame (stemmed_documents)\n documents_bag_of_words = bag_of_words(stemmed_documents)\n documents_tfidf = tfidf(stemmed_documents)\n \n result_df = pd.concat([documents_bag_of_words, documents_tfidf, documents_df], keys=[\"bag of words\", \"tfidf\", \"preprocessed documents\"], join = \"outer\")\n return result_df",
"def df_lda_preprocessing(df, col_name, remove_stopwords=True, add_features=False):\n df['text'] = df[col_name] # Create a copy of the input col_name: text\n \n # df_clean_sting(df, 'text') # Clean the text from col_name # TEST FJERN RENGØRING\n\n # Test other way of handling strings\n df_simple_clean_string(df, 'text')\n\n if add_features:\n df_make_features_from_string(df, 'text') # Add features\n\n # This is a hack soly for the scope of this project to concat ThreadSubject\n # When the message is initiated by the Member\n if col_name == 'SignalMessageBodyClean':\n df_aka = df.copy(deep=True)\n # df_aka['text_1'] = df_aka['ThreadSubject']\n # df_clean_sting(df_aka, 'ThreadTopic')\n df_simple_clean_string(df_aka, 'ThreadTopic')\n\n df['text'] = (df['text'] +' '+df_aka['ThreadTopic']).where(df['IsFirstMessageInthread']==1,df['text'])\n\n df_get_tokens(df, 'text') # Returns col: tokenized_text\n\n # df_stem_words(df, 'tokenized_text') # Returns col: stemmed_text\n\n df_bigrams(df, 'tokenized_text') # Returns bigrams\n df_trigrams(df, 'tokenized_text') # Returns trigrams\n\n df['ngrams'] = df['tokenized_text'] + df['bigrams'] + df['trigrams']\n\n if remove_stopwords:\n df_remove_stopwords(df, 'ngrams') # returns stopwords_removed",
"def generate_features(self, df, type=\"train\"):\n # Extract features.\n if type == \"train\":\n algorithmic_features = self.vectorizer.fit_transform(df[\"text\"])\n else:\n algorithmic_features = self.vectorizer.transform(df[\"text\"])\n\n def count_words_negated(text, words_to_check):\n negation_words = [\"not\", \"don't\", \"didn't\", \"didnt\", \"wasnt\", \"wasn't\"]\n negation_words_regex = \"|\".join(negation_words)\n words_to_check_regex = \"|\".join(words_to_check)\n text_sentences = re.split(\"[?.!]\", text) #simplifies checking words are in same sent\n my_regex = r\"\\b(%s)\\b.*\\b(%s)\\b|\\b(%s)\\b.*\\b(%s)\\b\"%(negation_words_regex, words_to_check_regex, \\\n words_to_check, negation_words_regex)\n out = len(re.findall(my_regex, text))\n return(out)\n\n # Define some functions that can transform the text into features.\n good_words = [\"good\", \"great\", \"better\", \"best\", \"efficient\", \"sweet\",\n \"delicious\", \"like\", \"love\", \"thanks\", \"perfect\"]\n bad_words = [\"bad\", \"worse\"]\n\n transform_functions = [\n (\"length\", lambda x: len(x)),\n (\"exclams\", lambda x: x.count(\"!\")),\n (\"question_marks\", lambda x: x.count(\"?\")),\n (\"sentences\", lambda x: x.count(\".\")),\n # Add one as a smooth.\n (\"words_per_sentence\", lambda x: x.count(\" \") / (x.count(\".\") + 1)),\n (\"letters_per_word\", lambda x: len(x) / (x.count(\" \") + 1)),\n (\"commas\", lambda x: x.count(\",\")),\n (\"negated_good_words\", lambda x: count_words_negated(x, good_words)),\n (\"negated_bad_words\", lambda x: count_words_negated(x, bad_words))\n ]\n hand_chosen_features = DataFrame()\n\n for col in [\"text\", \"summary\"]:\n for name, func in transform_functions:\n hand_chosen_features[\"{0}_{1}\".format(col, name)] = df[col].apply(func)\n\n hand_chosen_features['helpful_yes'] = df.helpfulness.apply(lambda x: x.split(\"/\")[0]).astype('int')\n hand_chosen_features['helpful_total'] = df.helpfulness.apply(lambda x: x.split(\"/\")[1]).astype('int')\n features = hstack([algorithmic_features, hand_chosen_features])\n if type == \"train\":\n # Select 2000 \"best\" columns based on chi squared.\n selector = SelectKBest(chi2, k=2000)\n selector.fit(features, df[\"score\"])\n self.collist = selector.get_support().nonzero()\n\n # Grab chi squared selected column subset.\n features = features.tocsc()[:, self.collist[0]].todense()\n\n return features",
"def create_new_features(self):\n train = self.train\n \n train['is_context'] = train['context_type'].isin(CONTEXT_TYPE_TEST)\n train['is_context_flow'] = train['listen_type'] * train['is_context']\n \n train['is_listened_context'] = train['is_listened'] * train['is_context']\n train['is_listened_flow'] = train['is_listened'] * train['listen_type']\n train['is_listened_context_flow'] = train['is_listened'] * train['is_context_flow']\n \n for feature in self.categorize_features:\n gby_feat = train.groupby(feature)\n new_features(train, gby_feat, feature, feature in self.listen_type_features, self.context_features, self.flow_features, self.fillna)\n \n # Variable combinations\n for feat1 in self.combo_features1:\n for feat2 in self.combo_features2:\n gby_feat = train.groupby([feat1, feat2])\n name = feat1 + '_' + feat2\n new_features(train, gby_feat, name, feat1 in self.listen_type_features, self.context_features, self.flow_features, self.fillna)",
"def other_features_(tweet, cleaned_tweet):\n #print(\"WARNING>>>>>>>>>>>>>>>>> VADERSENTIMENT DISABLED\")\n sentiment = nlp.sentiment_analyzer.polarity_scores(tweet)\n\n words = cleaned_tweet #Get text only\n\n syllables = textstat.syllable_count(words) #count syllables in words\n num_chars = sum(len(w) for w in words) #num chars in words\n num_chars_total = len(tweet)\n num_terms = len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n\n\n twitter_objs = count_twitter_objs(tweet) #Count #, @, and http://\n features = [FKRA, FRE, syllables, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['compound'],\n twitter_objs[2], twitter_objs[1],]\n #features = pandas.DataFrame(features)\n return features",
"def apply_features(self, documents, labeled=...):\n ...",
"def _reorganize_data(self) -> None:\n\n metadata = self.metadata\n\n self.features = []\n self.labels = []\n\n for i in range(len(metadata)):\n try:\n if isinstance(metadata[i][\"hpt_res\"], str):\n hpt = ast.literal_eval(metadata[i][\"hpt_res\"])\n else:\n hpt = metadata[i][\"hpt_res\"]\n\n if isinstance(metadata[i][\"features\"], str):\n feature = ast.literal_eval(metadata[i][\"features\"])\n else:\n feature = metadata[i][\"features\"]\n\n self.features.append(feature)\n self.labels.append(hpt[metadata[i][\"best_model\"]][1])\n except Exception as e:\n logging.exception(e)\n self.labels = (np.array(self.labels) > self.threshold).astype(int)\n self.features = pd.DataFrame(self.features, copy=False)\n self.features.fillna(0, inplace=True)\n self.features_mean = np.average(self.features.values, axis=0)\n\n self.features_std = np.std(self.features.values, axis=0)\n\n self.features_std[self.features_std == 0] = 1.0\n\n return",
"def extract_features(patient, terms=[], label=None, pat_nr=None):\n\n # print \"label in features\",label\n\n terms = \" \".join(terms)\n\n n_sentences = len(patient)\n n_sentences_with_DIS = 0\n n_features = n_feature_functions\n X = sp.lil_matrix((1, n_features), dtype=float)\n\n\n for i, sentence in enumerate(patient):\n has_dis = re.search(r\"<DIS>\", sentence, re.IGNORECASE) is not None\n\n\n if has_dis:\n\n other_dis=False\n catch_dis_index = None\n n_sentences_with_DIS+=1\n\n for j, f in enumerate(FEATURE_FUNCTIONS):\n # print f.__name__\n if f.__name__ not in [\"num_of_sentences\",\"catch_DIS\"]:\n present = f(sentence, i, terms)\n X[0, j] += present + i if present else present\n\n other_dis = other_dis or present\n # uncomment 2 lines below to see distribution over features during training\n # distribution[f.__name__][0]+= present\n # distribution[f.__name__][1]+=label if (present and label!=None) else 0\n elif f.__name__ == \"num_of_sentences\":\n X[0, j] += f(sentence, i, terms)\n elif f.__name__ == \"catch_DIS\":\n catch_dis_index=j\n if not other_dis:\n # uncomment 2 lines below to see distribution over features during training\n # distribution[\"catch dis\"][0]+=1\n # distribution[\"catch dis\"][1]+=label if label!=None else 0\n X[0,catch_dis_index]+=i+1\n\n\n # else:\n # if num_of_sentences in FEATURE_FUNCTIONS:\n # for j, f in enumerate(FEATURE_FUNCTIONS):\n # if f.__name__ == \"num_of_sentences\":\n # X[0, j] += f(sentence, i, terms)\n # break\n # else:\n # print \"NUM OF SENTENCES NOT PART OF FEATFUNCS\"\n\n\n for j, f in enumerate(FEATURE_FUNCTIONS):\n if f.__name__ != \"num_of_sentences\" and n_sentences_with_DIS > 0:\n X[0, j] = X[0, j] / float(n_sentences_with_DIS)\n\n # # PRINTING\n # print_feats(X)\n\n return X",
"def create_features(self):\n\n parameters_path = self.hyperparameters[\"parameters_dir\"]\n\n cycle_types = self.datapath.diagnostic_summary.cycle_type.unique()\n X = pd.DataFrame()\n for quantity in self.hyperparameters[\"quantities\"]:\n for cycle_type in cycle_types:\n summary_diag_cycle_type = featurizer_helpers.get_fractional_quantity_remaining_nx(\n self.datapath, quantity, cycle_type,\n parameters_path=parameters_path\n )\n\n summary_diag_cycle_type.loc[:, \"cycle_type\"] = cycle_type\n summary_diag_cycle_type.loc[:, \"metric\"] = quantity\n X = X.append(summary_diag_cycle_type)\n\n X_condensed = self.get_threshold_targets(X)\n self.features = X_condensed",
"def clfFeature(feature, mode):\r\n \r\n feature_path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\features\\\\' + feature + '.txt'\r\n classlist = ['negative', 'positive']\r\n features = pd.DataFrame()\r\n\r\n for label in classlist:\r\n path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\data\\\\' + mode + '\\\\' + label + '\\\\'\r\n allFiles = glob.glob(path + \"*.txt\")\r\n for review in allFiles:\r\n title = review.strip('.txt').split('\\\\')[-1]\r\n file = open(review, 'r', encoding='utf8').read().lower()\r\n wordlist = []\r\n featreader = csv.reader(open(feature_path, 'r'), delimiter= '\\n')\r\n for word in featreader:\r\n if word[0] in file:\r\n wordlist.append(word[0])\r\n df = pd.DataFrame({'File': [title], feature.capitalize(): [', '.join(wordlist)]}).set_index('File')\r\n features = features.append(df)\r\n \r\n return features",
"def make_features(DF, wd, rules):\n \n # Make dictionary of all rules - lhs is/are the antecedent(s) and rhs is the consequence\n rule_dict = []\n for l in open(rules, 'r').readlines():\n if 'lhs' in l and 'rhs' in l:\n pass\n else:\n line = l.replace('}', '').replace('{', '').strip().split('\"')\n lhs = line[1].strip().split(\",\")\n rhs = line[5]\n r_kmers = lhs + [rhs]\n rule_dict += [r_kmers]\n print(\"Number of rules being converted to features: %i\" % (len(rule_dict)))\n\n\n # \n df = pd.read_csv(DF, sep='\\t', index_col = 0)\n row = pd.Series(df.index.values)\n\n for r in rule_dict:\n feat_name = ':'.join(r)\n df[feat_name] = df.apply(lambda row: rule_test(df, row, r), axis=1)\n\n df2 = pd.concat([df['Class'], df.filter(like=':')], axis = 1)\n\n save_name1 = DF + \"_plusARs\"\n save_name2 = DF + \"_onlyARs\"\n \n df.to_csv(save_name1, sep=\"\\t\", float_format='i')\n df2.to_csv(save_name2, sep=\"\\t\", float_format='i')",
"def construct_df_topics(self, n_words=20):\n\n self.check_model()\n topic_keywords = []\n keywords = array(self.vectorizer.get_feature_names())\n\n for topic_weights in self.model.components_:\n top_keyword_locs = (-topic_weights).argsort()[:n_words]\n topic_keywords.append(keywords.take(top_keyword_locs))\n\n self.df_topic_keywords = pd.DataFrame(topic_keywords)\n self.df_topic_keywords.columns = ['Word ' + str(i) for i in range(self.df_topic_keywords.shape[1])]\n self.df_topic_keywords.index = ['Topic ' + str(i) for i in range(self.df_topic_keywords.shape[0])]",
"def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()",
"def create_language_model(df, word_col = 'words', output_filepath='./config/my_lang.txt.gz'):\n word_count = dict(Counter(\" \".join(df[word_col]).split(\" \"))) \n word_count_df = pd.DataFrame.from_dict(word_count,orient='index').reset_index()\n word_count_df.columns= ['words', 'n_appearances']\n \n #only keep actual words\n word_count_df['wordlength'] = word_count_df['words'].str.len()\n word_count_df = word_count_df[(word_count_df['wordlength'] >=3) | (word_count_df['words'].isin(stopwords_list))]\n word_count_df = word_count_df.sort_values('n_appearances',ascending=False).reset_index(drop=True)\n word_count_df['words'] = word_count_df['words'].str.lower()\n word_count_df['words'].to_csv(output_filepath,index=None, header=False,compression='gzip',encoding='utf-8')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build dataframe df_customers from transformed data. Transformed data are derived from the NLP, Time and RFM features. See data_transform(). These data are stored as dataframe attributes. | def df_customers_features_build(self):
df_customers_rfm = self._df_customers_rfm.copy()
df_customers_timeFeature = self._df_customers_timeFeature.copy()
df_customers_nlp = self._df_customers_pca_nlp.copy()
#-------------------------------------------------------------------------
    # Dataframes are aggregated; note that indexes are customerID.
#-------------------------------------------------------------------------
df_customers = pd.DataFrame()
df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)
df_customers = pd.concat([df_customers,df_customers_timeFeature]\
, join='inner', axis=1)
df_customers = pd.concat([df_customers,df_customers_nlp]\
, join='inner', axis=1)
self.strprint("All features : "+str(df_customers.shape))
self._df_customers = df_customers.copy()
return | [
"def features_customers(df_customers):\n for i in PREMIER_VALS:\n k = 'premier_' + str(i)\n df_customers[k] = np.where(df_customers['premier'] == i, 1, 0)\n\n df_customers['age'] = datetime.now().date().year - df_customers['yearOfBirth']\n df_customers['male'] = np.where(df_customers['gender'] == 'M', 1, 0)\n df_customers['female'] = np.where(df_customers['gender'] == 'F', 1, 0)\n df_customers['days_in_asos'] = (datetime.now().date() - df_customers['dateCreated']).dt.days\n\n logger.info(\"Features from the customers table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers",
"def df_customers_fileRead(self):\n \n #-------------------------------------------------------------------------\n # RFM features are restored\n #-------------------------------------------------------------------------\n df_customers_rfm \\\n = p5_util.object_load(self.df_customers_rfm_fileName)\n self.strprint(\"RFM features : \"+str(df_customers_rfm.shape))\n \n #-------------------------------------------------------------------------\n # Time features are restored\n #-------------------------------------------------------------------------\n df_customers_timeFeature \\\n = p5_util.object_load(self._df_customers_timeFeature_fileName)\n self.strprint(\"Time features : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # NLP features are restored\n #-------------------------------------------------------------------------\n df_customers_nlp = p5_util.object_load(self._df_customers_nlp_fileName)\n self.strprint(\"NLP features : \"+str(df_customers_nlp.shape))\n\n if False:\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n\n self.strprint(\"All features : \"+str(df_customers.shape))\n\n #----------------------------------------------------------------------\n # Dataframe is dumped into a file\n #----------------------------------------------------------------------\n p5_util.object_dump(df_customers, self._df_customers_fileName)\n if False:\n #----------------------------------------------------------------------\n # Dataframe is copied as an attribute\n #----------------------------------------------------------------------\n self._df_customers = df_customers.copy()\n \n return",
"def test_get_transformer_customers():\n\n root_path = Path(__file__).absolute().parents[1]\n master_dss_file = root_path / 'examples' / 'opendss' / 'master.dss'\n\n simulator = opendss.OpenDSSSimulator(master_dss_file)\n xfmr_customers_df = dss_util.get_transformer_customers(\n simulator.dss_instance\n )\n\n assert isinstance(xfmr_customers_df, pd.DataFrame)",
"def create_features_customer(profile, transcript_training):\n\n # create avg/min/max amount features. Need to calculate amount features from transcript\n # because transcript_training only contains transactions for offer received and viewed.\n # such transactions do not have amount associated\n\n query = \"\"\"\n SELECT a.person, min(amount) as min_amount, max(amount) as max_amount, avg(amount) as avg_amount\n FROM transcript a\n JOIN transcript_quantile b\n ON a.person = b.person \n WHERE a.time <= b.received_time\n GROUP BY a.person\n \"\"\"\n\n profile_amount = u.read_dataframe_from_sql(query).set_index('person')\n\n # create avg/min/max amount duration_view\n profile_duration = create_features_using_groupby(transcript_training\\\n \t, 'profile', 'duration_view')\n\n # create view rate (average of label)\n profile_view_rate = create_features_using_groupby(transcript_training, 'profile', 'label'\\\n \t, minimum=False, maximum=False)\n profile_view_rate.columns=['view_rate_profile']\n\n # create trx rate (count of transactions per person/(max received time - min received time))\n profile_trx_rate = (transcript_training.groupby('person').size()*100\\\n /(transcript_training.groupby('person')['received_time'].max() \\\n - transcript_training.groupby('person')['received_time'].min())).reset_index()\n profile_trx_rate.columns = ['person', 'avg_trx_cnt']\n # set trx rate = 1 if max received time == min received time\n profile_trx_rate.loc[profile_trx_rate['avg_trx_cnt']==np.inf, 'avg_trx_cnt'] = 1\n profile_trx_rate = profile_trx_rate.set_index('person')\n\n profile_feat = profile_amount.join(profile_duration)\\\n .join(profile_view_rate).join(profile_trx_rate)\n\n assert pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True).shape[0] == profile.shape[0]\\\n , \"rows do not match with original data (profile)\"\n\n profile = pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True)\n\n return profile",
"def get_features_and_target(self, trades_features: pd.DataFrame, trades_target: pd.DataFrame) -> pd.DataFrame:\n \n sf_groups = trades_features.drop_duplicates(subset=['sf_account_id', 'trade_date', 'sku']).groupby('sf_account_id')\n\n # calculate features\n feature_dfs = []\n if 'product_name' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.value_counts().unstack().notnull()]\n if 'product_category' in self.feature_categories:\n feature_dfs += [sf_groups.product_category.value_counts().unstack().notnull()]\n if 'reporting_channel' in self.feature_categories:\n feature_dfs += [sf_groups.sub_reporting_channel.value_counts().unstack().notnull()]\n if 'recency' in self.feature_categories:\n feature_dfs += [(trades_features.trade_date_dt.max()-sf_groups.trade_date_dt.max()).dt.days.to_frame().rename(columns={'trade_date_dt':'recency'})]\n if 'frequency' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.count().to_frame().rename(columns={'product_name':'frequency'})]\n if 'total_spend' in self.feature_categories:\n feature_dfs += [sf_groups.cost_float.sum().to_frame().rename(columns={'cost_float':'total_spend'})]\n\n # concat features\n customer_df = pd.concat(feature_dfs, axis=1, sort=False) # outer join on index\n\n # add target variable\n for target_variable in self.target_variables:\n if (trades_target.product_name == target_variable).any():\n customer_df['target_'+target_variable] = trades_target.groupby(['sf_account_id', 'product_name']).trade_date.any().unstack()[target_variable]\n else:\n customer_df['target_'+target_variable] = False\n\n # remove customers with no purchases before cut off\n customer_df = customer_df[customer_df[customer_df.columns[customer_df.columns != 'target']].any(axis=1)]\n\n # replace nans with False\n customer_df.fillna(False, inplace=True)\n\n return customer_df",
"def read_customer(path):\n lines = sc.textFile(path, use_unicode=False)\n customers = lines.map(lambda line: line.split('\\t'))\n print customers.takeOrdered(1)\n schema_list = [(\"id\", IntegerType), (\"guid\", IntegerType), (\"gender\", StringType), (\"country\", StringType),\n (\"created_on\", DateType), (\"modified_on\", DateType), (\"yob\", IntegerType), (\"premier\", IntegerType)]\n schema_str = \"cust_id guid gender country created_on modified_on yob premier\"\n # fields = [StructField(field_name, col_type(), True) for field_name, col_type in schema_list]\n fields = [StructField(field_name, StringType(), True) for field_name in schema_str.split()]\n schema = StructType(fields)\n\n udfstring_to_int = udf(lambda x: int(x), IntegerType())\n udf_tenure = udf(lambda x: parse_created_on(x), IntegerType())\n\n customers_df = sqlContext.createDataFrame(customers, schema)\n\n customers_df = customers_df.withColumn(\"yob\", udfstring_to_int(\"yob\"))\n customers_df = customers_df.withColumn(\"tenure\", udf_tenure(\"created_on\"))\n\n # customers_df = customers_df.withColumn(\"premier\", customers_df.select(customers_df[\"premier\"].cast(IntegerType()).alias(\"premier\")))\n print customers_df.dtypes\n print customers_df.columns\n print customers_df.describe().collect()\n counts = (customers_df\n .groupBy('country')\n .count()\n .sort('count', ascending=False))\n\n for count in counts.take(10):\n print count\n\n # counts.sort('country', ascending=False)\n # .take(10))\n #\n # for count in counts:\n # print count\n print customers_df.freqItems(cols=[\"country\"])\n customers_df.show()\n customers_df.printSchema()\n return customers_df",
"def _create_train_df(self, features, target, label_encode=True):\n \n train_feature_df = self._load_data(features)\n train_target_df = self._load_data(target)\n train_df = self._merge_df(train_feature_df, train_target_df)\n train_df = self._clean_data(train_df)\n \n if label_encode:\n self._labelEncode(train_df, self.cat_cols)\n return train_df",
"def data_transform_rfm(self) :\n \n is_built_step = False\n if self._encoder_rfm is None:\n is_built_step = True \n \n #-------------------------------------------------------------------------\n # RFM feature is built\n #-------------------------------------------------------------------------\n ser_invoice_date = self._df_invoice_line.InvoiceDate\n \n self.df_invoice_line, df_RFM, self.df_RFM_quantiles, self._day_now \\\n = p5_util.p5_df_rfm_build(self.df_invoice_line, day_now = self._day_now\\\n , df_RFM_threshold=self.df_RFM_quantiles)\n \n self._df_invoice_line.InvoiceDate = ser_invoice_date\n \n #-------------------------------------------------------------------------\n # RFM score is added to dataframe\n #-------------------------------------------------------------------------\n df_merged = pd.merge(self.df_invoice_line\\\n , df_RFM[['CustomerID','RFM']], how='left', on=['CustomerID'])\n\n self._df_invoice_line \\\n = pd.DataFrame(df_merged.values, index = self._df_invoice_line.index\\\n , columns=df_merged.columns)\n \n\n #self._df_invoice_line \\\n #= pd.concat([ self.df_invoice_line,df_RFM[['CustomerID','RFM']] ], axis=1\\\n #,join='inner')\n \n \n #-------------------------------------------------------------------------\n # RFM encoding\n #-------------------------------------------------------------------------\n self._encoder_rfm, df_RFM_encoded \\\n = p5_util.df_rfm_one_hot_encode(df_RFM,'RFM', encoder=self._encoder_rfm)\n\n #-------------------------------------------------------------------------\n # Encoded RFM features are renamed\n #-------------------------------------------------------------------------\n df_customers_rfm, list_col_unchanged \\\n = p5_util.df_rename_columns(df_RFM_encoded, df_RFM_encoded.columns\\\n , 'w_rfm_')\n \n self.strprint(\"df_customers_rfm =\" +str(df_customers_rfm.shape))\n\n #-------------------------------------------------------------------------\n # dataframe with RFM encoded values per customer is dumped\n #-------------------------------------------------------------------------\n if is_built_step is True:\n p5_util.object_dump(df_customers_rfm, self.df_customers_rfm_fileName)\n else :\n self._df_customers_rfm = df_customers_rfm.copy()\n return",
"def get_customer_stats(self):\n try:\n names, waitings, totals, statuses, destinations, passwords, types, positions = zip(*[(p.name, p.get_waiting_time(),\n p.total_time(), status_to_str(p.status), p.get_position(), p.password, p.fleet_type, p.init_position)\n for p in self.customer_agents.values()])\n except ValueError:\n names, waitings, totals, statuses, destinations, passwords, types, positions = [], [], [], [], [], [], [], []\n\n df = pd.DataFrame.from_dict({\"name\": names, \"waiting_time\": waitings, \"total_time\": totals, \"status\": statuses, \"destination\": destinations, \"password\": passwords, \"fleet_type\": types, \"position\": positions})\n return df",
"def load_customers(dir):\n customSchema = StructType([ \\\n StructField(\"customerId2\", IntegerType(), True), \\\n StructField(\"churnlabel\", IntegerType(), True), \\\n StructField(\"gender\", StringType(), True), \\\n StructField(\"shippingCountry\", StringType(), True), \\\n StructField(\"dateCreated\", StringType(), True), \\\n StructField(\"yearOfBirth\", IntegerType(), True), \\\n StructField(\"premier\", IntegerType(), True)])\n\n df = sqlContext.read.format('com.databricks.spark.csv') \\\n .options(header='false', delimiter='\\t', nullValue='\\\\N') \\\n .load(get_dir_customers(dir) + '/*', schema=customSchema)\n\n return df",
"def active_customers():\n try:\n active_customers_query= \"SELECT \\\n customer.customerId, \\\n customer.languageId, \\\n customer.loginCounter, \\\n subscription.subscriptionTypeId, \\\n product.productType, \\\n product.code, \\\n product.months, \\\n product.isRenew \\\n FROM subscription \\\n INNER JOIN customer \\\n ON subscription.customerId = customer.customerId \\\n INNER JOIN product \\\n ON product.productId = subscription.subscriptionTypeId \\\n WHERE subscription.endDate >= CURDATE()\"\n\n active_customers_dataframe = _db_conn(customer_query=active_customers_query)\n\n print(\"Number of records in active_customers_dataframe is {}\".format(active_customers_dataframe.shape[0]))\n \n except Exception as e:\n print(\"trace: {}\".format(e))\n\n return active_customers_dataframe",
"def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]",
"def features_orders(df_customers, df_receipts):\n df_customers.sort_values(by=['customerId2'], ascending=[True], inplace=True)\n # total amount of all the orders of a cusrtomer\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')['price'].sum(), 'sum_orders')\n # the min amount paid in one receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)[\n 'price'].sum().groupby('customerId2').min()['price'], 'min_order')\n # the mean amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').mean()['price'], 'mean_order')\n # the max amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').max()['price'], 'max_order')\n # the number of orders\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').count()['price'], 'count_orders')\n\n # the total amount of items bought by a user\n df_customers = _add_column(df_customers,\n df_receipts.groupby('customerId2')['itemQty'].sum(), 'sum_itemQty')\n # the min amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').min()['itemQty'], 'min_itemQty')\n # the mean amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').mean()['itemQty'], 'mean_itemQty')\n # the max amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').max()['itemQty'], 'max_itemQty')\n # from which dividion type a user buys\n for i in DIVISIONID_VALS:\n k = 'divisionId_' + str(i)\n df_receipts[k] = np.where(df_receipts['divisionId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n # which source type a user uses to pay\n for i in SOURCEID_VALS:\n k = 'sourceId_' + str(i)\n df_receipts[k] = np.where(df_receipts['sourceId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n\n logger.info(\"Features from the returns table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers",
"def clean_customer_df(customer_df: pd.DataFrame) -> pd.DataFrame:\n \n # remove customers with more than 20 purchases\n if 'frequency' in customer_df.columns:\n customer_df = customer_df[customer_df.frequency < 20]\n \n return customer_df",
"def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True",
"def transform_train_data(df):\n return df.rdd.map(\n lambda x: (\n Vectors.dense([x.amount, x.split, x.maintain4, x.maintain12]),\n x.intime\n )\n ).toDF([\"features\", \"label\"])",
"def new_df(companies_filtered):\n name = []\n city = []\n latitude = []\n longitude = []\n zip_code = []\n for i in companies_filtered:\n name.append(i['name'])\n try: \n if i['offices'][0]['city'] == '':\n city.append(np.nan)\n else:\n city.append(i['offices'][0]['city'])\n latitude.append(i['offices'][0]['latitude'])\n longitude.append(i['offices'][0]['longitude'])\n except:\n city.append(np.nan)\n latitude.append(np.nan)\n longitude.append(np.nan)\n zip_code.append(np.nan)\n dict_ = {'company' : name, 'city' : city, 'latitude' : latitude, 'longitude': longitude}\n companies_df = pd.DataFrame.from_dict(dict_, orient='columns')\n \n return companies_df",
"def prepare_data(self):\n self.tokenizer = BertTokenizerFast.from_pretrained(\n self.tokenizer_name,\n cache_dir=self.cache_dir,\n tokenize_chinese_chars=False,\n strip_accents=False,\n )\n df = make_livedoor_corpus_dataset(self.data_dir)\n self.df_org = df\n if self.num_samples > 0:\n df = df.iloc[: self.num_samples]\n self.df_use = df",
"def make_data(dataFname, enc, features=None):\n\n origData = pandas.read_csv(dataFname)\n ids = origData['id']\n\n # remove unused columns\n if 'Unnamed: 0' in origData.columns: del origData['Unnamed: 0']\n del origData['id']\n\n # remove \"data leakage\" columns\n for f in prohobitedFeatures:\n del origData[f]\n\n # separate into X & y values\n xData = origData[[col for col in origData.columns if not col=='loss']]\n set_vars_as_type(xData, discreteVars, object)\n yVec = origData.loss if 'loss' in origData.columns else None\n\n # try f528 - f274\n xData['f528f274'] = xData['f528'] - xData['f274']\n\n # encode the categorical features f776 and f777\n if enc is None:\n enc = OneHotEncoder(n_values=[2, 2])\n enc.fit(xData[['f776', 'f777']])\n\n xData[['f776_isZero', 'f776_isOne', 'f777_isZero', 'f777_isOne']] = pandas.DataFrame(enc.transform(xData[['f776', 'f777']]).toarray())\n del xData['f776']\n del xData['f777']\n\n print_missing_values_info(origData)\n\n # feature selection\n if features:\n filteredXData = xData[features]\n else: # use ALL features\n filteredXData = xData\n\n return filteredXData, yVec, ids, enc"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build dataframe df_customers from transformed data. Transformed data are loaded from dumped files produced by the NLP, Time and RFM features. See data_transform(). | def df_customers_fileRead(self):
#-------------------------------------------------------------------------
# RFM features are restored
#-------------------------------------------------------------------------
df_customers_rfm \
= p5_util.object_load(self.df_customers_rfm_fileName)
self.strprint("RFM features : "+str(df_customers_rfm.shape))
#-------------------------------------------------------------------------
# Time features are restored
#-------------------------------------------------------------------------
df_customers_timeFeature \
= p5_util.object_load(self._df_customers_timeFeature_fileName)
self.strprint("Time features : "+str(df_customers_timeFeature.shape))
#-------------------------------------------------------------------------
# NLP features are restored
#-------------------------------------------------------------------------
df_customers_nlp = p5_util.object_load(self._df_customers_nlp_fileName)
self.strprint("NLP features : "+str(df_customers_nlp.shape))
if False:
df_customers_rfm = self._df_customers_rfm.copy()
df_customers_timeFeature = self._df_customers_timeFeature.copy()
df_customers_nlp = self._df_customers_pca_nlp.copy()
#-------------------------------------------------------------------------
        # Dataframes are aggregated; note that indexes are customerID.
#-------------------------------------------------------------------------
df_customers = pd.DataFrame()
df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)
df_customers = pd.concat([df_customers,df_customers_timeFeature]\
, join='inner', axis=1)
df_customers = pd.concat([df_customers,df_customers_nlp]\
, join='inner', axis=1)
self.strprint("All features : "+str(df_customers.shape))
#----------------------------------------------------------------------
# Dataframe is dumped into a file
#----------------------------------------------------------------------
p5_util.object_dump(df_customers, self._df_customers_fileName)
if False:
#----------------------------------------------------------------------
# Dataframe is copied as an attribute
#----------------------------------------------------------------------
self._df_customers = df_customers.copy()
return | [
"def df_customers_features_build(self):\n\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n \n self.strprint(\"All features : \"+str(df_customers.shape))\n self._df_customers = df_customers.copy()\n return",
"def features_customers(df_customers):\n for i in PREMIER_VALS:\n k = 'premier_' + str(i)\n df_customers[k] = np.where(df_customers['premier'] == i, 1, 0)\n\n df_customers['age'] = datetime.now().date().year - df_customers['yearOfBirth']\n df_customers['male'] = np.where(df_customers['gender'] == 'M', 1, 0)\n df_customers['female'] = np.where(df_customers['gender'] == 'F', 1, 0)\n df_customers['days_in_asos'] = (datetime.now().date() - df_customers['dateCreated']).dt.days\n\n logger.info(\"Features from the customers table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers",
"def load_customers(dir):\n customSchema = StructType([ \\\n StructField(\"customerId2\", IntegerType(), True), \\\n StructField(\"churnlabel\", IntegerType(), True), \\\n StructField(\"gender\", StringType(), True), \\\n StructField(\"shippingCountry\", StringType(), True), \\\n StructField(\"dateCreated\", StringType(), True), \\\n StructField(\"yearOfBirth\", IntegerType(), True), \\\n StructField(\"premier\", IntegerType(), True)])\n\n df = sqlContext.read.format('com.databricks.spark.csv') \\\n .options(header='false', delimiter='\\t', nullValue='\\\\N') \\\n .load(get_dir_customers(dir) + '/*', schema=customSchema)\n\n return df",
"def test_get_transformer_customers():\n\n root_path = Path(__file__).absolute().parents[1]\n master_dss_file = root_path / 'examples' / 'opendss' / 'master.dss'\n\n simulator = opendss.OpenDSSSimulator(master_dss_file)\n xfmr_customers_df = dss_util.get_transformer_customers(\n simulator.dss_instance\n )\n\n assert isinstance(xfmr_customers_df, pd.DataFrame)",
"def read_customer(path):\n lines = sc.textFile(path, use_unicode=False)\n customers = lines.map(lambda line: line.split('\\t'))\n print customers.takeOrdered(1)\n schema_list = [(\"id\", IntegerType), (\"guid\", IntegerType), (\"gender\", StringType), (\"country\", StringType),\n (\"created_on\", DateType), (\"modified_on\", DateType), (\"yob\", IntegerType), (\"premier\", IntegerType)]\n schema_str = \"cust_id guid gender country created_on modified_on yob premier\"\n # fields = [StructField(field_name, col_type(), True) for field_name, col_type in schema_list]\n fields = [StructField(field_name, StringType(), True) for field_name in schema_str.split()]\n schema = StructType(fields)\n\n udfstring_to_int = udf(lambda x: int(x), IntegerType())\n udf_tenure = udf(lambda x: parse_created_on(x), IntegerType())\n\n customers_df = sqlContext.createDataFrame(customers, schema)\n\n customers_df = customers_df.withColumn(\"yob\", udfstring_to_int(\"yob\"))\n customers_df = customers_df.withColumn(\"tenure\", udf_tenure(\"created_on\"))\n\n # customers_df = customers_df.withColumn(\"premier\", customers_df.select(customers_df[\"premier\"].cast(IntegerType()).alias(\"premier\")))\n print customers_df.dtypes\n print customers_df.columns\n print customers_df.describe().collect()\n counts = (customers_df\n .groupBy('country')\n .count()\n .sort('count', ascending=False))\n\n for count in counts.take(10):\n print count\n\n # counts.sort('country', ascending=False)\n # .take(10))\n #\n # for count in counts:\n # print count\n print customers_df.freqItems(cols=[\"country\"])\n customers_df.show()\n customers_df.printSchema()\n return customers_df",
"def _create_train_df(self, features, target, label_encode=True):\n \n train_feature_df = self._load_data(features)\n train_target_df = self._load_data(target)\n train_df = self._merge_df(train_feature_df, train_target_df)\n train_df = self._clean_data(train_df)\n \n if label_encode:\n self._labelEncode(train_df, self.cat_cols)\n return train_df",
"def create_features_customer(profile, transcript_training):\n\n # create avg/min/max amount features. Need to calculate amount features from transcript\n # because transcript_training only contains transactions for offer received and viewed.\n # such transactions do not have amount associated\n\n query = \"\"\"\n SELECT a.person, min(amount) as min_amount, max(amount) as max_amount, avg(amount) as avg_amount\n FROM transcript a\n JOIN transcript_quantile b\n ON a.person = b.person \n WHERE a.time <= b.received_time\n GROUP BY a.person\n \"\"\"\n\n profile_amount = u.read_dataframe_from_sql(query).set_index('person')\n\n # create avg/min/max amount duration_view\n profile_duration = create_features_using_groupby(transcript_training\\\n \t, 'profile', 'duration_view')\n\n # create view rate (average of label)\n profile_view_rate = create_features_using_groupby(transcript_training, 'profile', 'label'\\\n \t, minimum=False, maximum=False)\n profile_view_rate.columns=['view_rate_profile']\n\n # create trx rate (count of transactions per person/(max received time - min received time))\n profile_trx_rate = (transcript_training.groupby('person').size()*100\\\n /(transcript_training.groupby('person')['received_time'].max() \\\n - transcript_training.groupby('person')['received_time'].min())).reset_index()\n profile_trx_rate.columns = ['person', 'avg_trx_cnt']\n # set trx rate = 1 if max received time == min received time\n profile_trx_rate.loc[profile_trx_rate['avg_trx_cnt']==np.inf, 'avg_trx_cnt'] = 1\n profile_trx_rate = profile_trx_rate.set_index('person')\n\n profile_feat = profile_amount.join(profile_duration)\\\n .join(profile_view_rate).join(profile_trx_rate)\n\n assert pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True).shape[0] == profile.shape[0]\\\n , \"rows do not match with original data (profile)\"\n\n profile = pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True)\n\n return profile",
"def data_transform_rfm(self) :\n \n is_built_step = False\n if self._encoder_rfm is None:\n is_built_step = True \n \n #-------------------------------------------------------------------------\n # RFM feature is built\n #-------------------------------------------------------------------------\n ser_invoice_date = self._df_invoice_line.InvoiceDate\n \n self.df_invoice_line, df_RFM, self.df_RFM_quantiles, self._day_now \\\n = p5_util.p5_df_rfm_build(self.df_invoice_line, day_now = self._day_now\\\n , df_RFM_threshold=self.df_RFM_quantiles)\n \n self._df_invoice_line.InvoiceDate = ser_invoice_date\n \n #-------------------------------------------------------------------------\n # RFM score is added to dataframe\n #-------------------------------------------------------------------------\n df_merged = pd.merge(self.df_invoice_line\\\n , df_RFM[['CustomerID','RFM']], how='left', on=['CustomerID'])\n\n self._df_invoice_line \\\n = pd.DataFrame(df_merged.values, index = self._df_invoice_line.index\\\n , columns=df_merged.columns)\n \n\n #self._df_invoice_line \\\n #= pd.concat([ self.df_invoice_line,df_RFM[['CustomerID','RFM']] ], axis=1\\\n #,join='inner')\n \n \n #-------------------------------------------------------------------------\n # RFM encoding\n #-------------------------------------------------------------------------\n self._encoder_rfm, df_RFM_encoded \\\n = p5_util.df_rfm_one_hot_encode(df_RFM,'RFM', encoder=self._encoder_rfm)\n\n #-------------------------------------------------------------------------\n # Encoded RFM features are renamed\n #-------------------------------------------------------------------------\n df_customers_rfm, list_col_unchanged \\\n = p5_util.df_rename_columns(df_RFM_encoded, df_RFM_encoded.columns\\\n , 'w_rfm_')\n \n self.strprint(\"df_customers_rfm =\" +str(df_customers_rfm.shape))\n\n #-------------------------------------------------------------------------\n # dataframe with RFM encoded values per customer is dumped\n #-------------------------------------------------------------------------\n if is_built_step is True:\n p5_util.object_dump(df_customers_rfm, self.df_customers_rfm_fileName)\n else :\n self._df_customers_rfm = df_customers_rfm.copy()\n return",
"def prepare_data(self):\n self.tokenizer = BertTokenizerFast.from_pretrained(\n self.tokenizer_name,\n cache_dir=self.cache_dir,\n tokenize_chinese_chars=False,\n strip_accents=False,\n )\n df = make_livedoor_corpus_dataset(self.data_dir)\n self.df_org = df\n if self.num_samples > 0:\n df = df.iloc[: self.num_samples]\n self.df_use = df",
"def make_data(dataFname, enc, features=None):\n\n origData = pandas.read_csv(dataFname)\n ids = origData['id']\n\n # remove unused columns\n if 'Unnamed: 0' in origData.columns: del origData['Unnamed: 0']\n del origData['id']\n\n # remove \"data leakage\" columns\n for f in prohobitedFeatures:\n del origData[f]\n\n # separate into X & y values\n xData = origData[[col for col in origData.columns if not col=='loss']]\n set_vars_as_type(xData, discreteVars, object)\n yVec = origData.loss if 'loss' in origData.columns else None\n\n # try f528 - f274\n xData['f528f274'] = xData['f528'] - xData['f274']\n\n # encode the categorical features f776 and f777\n if enc is None:\n enc = OneHotEncoder(n_values=[2, 2])\n enc.fit(xData[['f776', 'f777']])\n\n xData[['f776_isZero', 'f776_isOne', 'f777_isZero', 'f777_isOne']] = pandas.DataFrame(enc.transform(xData[['f776', 'f777']]).toarray())\n del xData['f776']\n del xData['f777']\n\n print_missing_values_info(origData)\n\n # feature selection\n if features:\n filteredXData = xData[features]\n else: # use ALL features\n filteredXData = xData\n\n return filteredXData, yVec, ids, enc",
"def process_customers(self, customers_file):\n\t\tmin = max = None\n\t\tcustomers = {}\n\t\ttry:\n\t\t\tfor user_id, date_str in self.read_csv_file(customers_file):\n\t\t\t\tdate = self.convert_date(date_str)\n\t\t\t\tmin, max = self.min_max_date(min, max, date)\n\t\t\t\tcustomers[user_id] = date\n\t\texcept ValueError:\n\t\t\traise Exception('Customers file has unexpected format.')\n\n\t\tself.customers = customers\n\t\tself.min = min\n\t\tself.max = max",
"def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]",
"def get_features_and_target(self, trades_features: pd.DataFrame, trades_target: pd.DataFrame) -> pd.DataFrame:\n \n sf_groups = trades_features.drop_duplicates(subset=['sf_account_id', 'trade_date', 'sku']).groupby('sf_account_id')\n\n # calculate features\n feature_dfs = []\n if 'product_name' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.value_counts().unstack().notnull()]\n if 'product_category' in self.feature_categories:\n feature_dfs += [sf_groups.product_category.value_counts().unstack().notnull()]\n if 'reporting_channel' in self.feature_categories:\n feature_dfs += [sf_groups.sub_reporting_channel.value_counts().unstack().notnull()]\n if 'recency' in self.feature_categories:\n feature_dfs += [(trades_features.trade_date_dt.max()-sf_groups.trade_date_dt.max()).dt.days.to_frame().rename(columns={'trade_date_dt':'recency'})]\n if 'frequency' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.count().to_frame().rename(columns={'product_name':'frequency'})]\n if 'total_spend' in self.feature_categories:\n feature_dfs += [sf_groups.cost_float.sum().to_frame().rename(columns={'cost_float':'total_spend'})]\n\n # concat features\n customer_df = pd.concat(feature_dfs, axis=1, sort=False) # outer join on index\n\n # add target variable\n for target_variable in self.target_variables:\n if (trades_target.product_name == target_variable).any():\n customer_df['target_'+target_variable] = trades_target.groupby(['sf_account_id', 'product_name']).trade_date.any().unstack()[target_variable]\n else:\n customer_df['target_'+target_variable] = False\n\n # remove customers with no purchases before cut off\n customer_df = customer_df[customer_df[customer_df.columns[customer_df.columns != 'target']].any(axis=1)]\n\n # replace nans with False\n customer_df.fillna(False, inplace=True)\n\n return customer_df",
"def create_features(overwrite=False):\n \n data_dict = pd.read_pickle('complete_dataset.pickle') # upload dictionary of tickers\n ticker_dict = data_dict['Raw_Data']\n\n # initialize dataframe\n first_key = list(ticker_dict.keys())[0] # find the first ticker\n df1 = ticker_dict[first_key].copy() # df for first ticker\n first_df_cols = df1.columns.tolist()\n df2 = aggregate_from_daily_ml(df1, first_key) # aggregate to monthly level\n j=0\n for key, value in ticker_dict.items(): # for each ticker, aggregate then concat to master df\n if key==first_key: continue\n if first_df_cols != value.columns.tolist(): print('bad columns for {}!'.format(key))\n df3 = aggregate_from_daily_ml(value, key)\n \n df2 = pd.concat([df2, df3])\n if j%(round(len(ticker_dict)/10))==0: print('Fraction done: {}'.format(round(j/len(ticker_dict),5)))\n j+=1\n df2 = df2.sort_index(level=[0,1])\n \n df2.columns = [col[0] + '_' + str(col[1]) if str(col[1])!='NA' else col[0] for col in df2.columns.tolist()]\n\n df3 = create_target(df2, threshold=0.0)\n df3.columns = [col[0] + '_' + str(col[1]) if str(col[1])!='NA' else col[0] for col in df3.columns.tolist()]\n\n\n if overwrite:\n print('Saving to data.pkl')\n df3.to_pickle('data.pkl')\n else:\n print('File not being saved. To save, use overwrite=True')\n\n return df3",
"def get_mall_data(): \n filename = 'mall_customers.csv'\n \n if os.path.isfile(filename):\n return pd.read_csv(filename, index_col=0)\n else: \n df = pd.read_sql(\"\"\"select * from customers\"\"\", get_connection('mall_customers'))\n df.to_csv(filename)\n return df",
"def transform_train_data(df):\n return df.rdd.map(\n lambda x: (\n Vectors.dense([x.amount, x.split, x.maintain4, x.maintain12]),\n x.intime\n )\n ).toDF([\"features\", \"label\"])",
"def clean_customer_df(customer_df: pd.DataFrame) -> pd.DataFrame:\n \n # remove customers with more than 20 purchases\n if 'frequency' in customer_df.columns:\n customer_df = customer_df[customer_df.frequency < 20]\n \n return customer_df",
"def make_features(user_master:SparkDataFrame):\n df = user_master.select([f'feature{i}' for i in range(1,7) ] + [\"user_id\"] )\n cols = df.columns\n\n categoricalColumns = [f'feature{i}' for i in range(1,7)]\n\n stages = []\n for categoricalCol in categoricalColumns:\n stringIndexer = StringIndexer(inputCol = categoricalCol, outputCol = categoricalCol + 'Index')\n encoder = OneHotEncoder(inputCols=[stringIndexer.getOutputCol()], outputCols=[categoricalCol + \"classVec\"])\n stages += [stringIndexer, encoder]\n\n #label_stringIdx = StringIndexer(inputCol = 'item_id', outputCol = 'label')\n #stages += [label_stringIdx]\n\n\n assemblerInputs = [c + \"classVec\" for c in categoricalColumns] \n assembler = VectorAssembler(inputCols=assemblerInputs, outputCol=\"features\")\n stages += [assembler]\n\n \n pipeline = Pipeline(stages = stages)\n pipelineModel = pipeline.fit(df)\n df = pipelineModel.transform(df)\n selectedCols = ['features'] + cols\n df = df.select(selectedCols)\n #df.printSchema()\n\n return df",
"def active_customers():\n try:\n active_customers_query= \"SELECT \\\n customer.customerId, \\\n customer.languageId, \\\n customer.loginCounter, \\\n subscription.subscriptionTypeId, \\\n product.productType, \\\n product.code, \\\n product.months, \\\n product.isRenew \\\n FROM subscription \\\n INNER JOIN customer \\\n ON subscription.customerId = customer.customerId \\\n INNER JOIN product \\\n ON product.productId = subscription.subscriptionTypeId \\\n WHERE subscription.endDate >= CURDATE()\"\n\n active_customers_dataframe = _db_conn(customer_query=active_customers_query)\n\n print(\"Number of records in active_customers_dataframe is {}\".format(active_customers_dataframe.shape[0]))\n \n except Exception as e:\n print(\"trace: {}\".format(e))\n\n return active_customers_dataframe"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a new customer identifier from the existing dataset. | def createCustomerID(self):
customerID = self._df_invoice_original.CustomerID.max()
customerID += 1
return int(customerID) | [
"def test_companies_company_id_data_customers_customer_id_get(self):\n pass",
"def create_customer(cls, api, **data):\n return api.create_customer(**data)",
"def create_customer(data):\n mandatory_params = ['customer_name', 'mobile_number']\n result = api_utils.check_required_params(mandatory_params, data)\n if result:\n return result\n mobile_number = db_helper.mobile_number_unique(data['mobile_number'])\n if not mobile_number:\n return api_utils.error(\"There already is a customer with \\\n mobile number {} found\".format(data['mobile_number']), 404)\n\n new_customer = db_helper.add_new_customer(data['customer_name'],\n mobile_number)\n return jsonify({'new_customer': new_customer})",
"def create_customer(self, **kwargs):\n response = self.request('customers.json', body=kwargs, method='POST')\n return response",
"def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True",
"def create_or_update_customer(entity):\n\ttry:\n\t\torganisation = entity.get('organisation').replace(\"'\",\"\")\n\t\torganisation = \"%s(C)\"%organisation if is_supplier_or_customer_group(organisation) else organisation\n\t\tname = frappe.db.get_value('Customer', organisation)\n\t\tif not name:\n\t\t\tcustomer = frappe.new_doc(\"Customer\")\n\t\t\tcustomer.customer_name = organisation\n\t\telse:\n\t\t\tcustomer = frappe.get_doc(\"Customer\", name)\n\n\t\tcustomer.entity_id = entity.get('entity_id')\n\t\tcustomer.customer_type = 'Company'\n\t\tif entity.get('group'):\n\t\t\tif entity.get('group').strip() == 'General':\n\t\t\t\tcustomer.customer_group = 'All Customer Groups'\n\t\t\telif frappe.db.get_value('Customer Group', entity.get('group').strip()):\n\t\t\t\tcustomer.customer_group = entity.get('group').strip() or 'All Customer Groups'\n\t\t\telif frappe.db.get_value('Customer', entity.get('group').strip()):\n\t\t\t\tcustomer.customer_group = 'All Customer Groups'\n\t\t\telse:\n\t\t\t\tcustomer.customer_group = create_customer_group(entity.get('group').strip())\n\t\tcustomer.territory = 'Australia'\n\t\tcustomer.customer_status = 'Existing'\n\t\tcustomer.modified_date = entity.get('updated_at')\n\t\tcustomer.save(ignore_permissions=True)\n\t\tif \"(C)\" in customer.customer_name:\n\t\t\tfrappe.db.set_value(\"Cusomer\", customer.name, \"customer_name\", organisation.replace(\"(C)\", \"\"))\n\n\t\tcreate_or_update_contact(customer, entity)\n\t\tget_addresses(entity.get('entity_id'))\n\n\t\t# return status\n\t\treturn {\n\t\t\tentity.get(\"entity_id\"): {\n\t\t\t\t\"operation\": \"Customer Created\" if not name else \"Customer Updated\",\n\t\t\t\t\"name\": customer.name,\n\t\t\t\t\"modified_date\": entity.get(\"updated_at\")\n\t\t\t}\n\t\t}\n\texcept Exception, e:\n\t\tdocname = entity.get('entity_id')\n\t\tresponse = entity\n\t\tlog_sync_error(\"Customer\", docname, response, e, \"create_new_customer\")",
"def new_customer(self):\n print(\"========================================================\")\n print(\"==================== New Customers =====================\")\n print(\"========================================================\")\n self.add_customer()",
"def generate_fake_customer_data(records_to_create):\n num_existing_customers = len(Customer.query.all())\n if num_existing_customers == 0:\n first_name_list = ['TC{} First Name'.format(x) for x in\\\n range(0,records_to_create)]\n last_name_list = ['TC{} Last Name'.format(x) for x in\\\n range(0,records_to_create)]\n else:\n first_name_list = ['TC{} First Name'.format(x) for x in\\\n range(num_existing_customers ,num_existing_customers + records_to_create)]\n last_name_list = ['TC{} Last Name'.format(x) for x in\\\n range(num_existing_customers ,num_existing_customers + records_to_create )]\n\n upload_all_customers(first_name_list, last_name_list)\n print(first_name_list)",
"def add_customers(current_customers, new_customer_list):\n for new in new_customer_list:\n new_id = _get_next_cust_id()\n current_customers[new_id] = new\n customer_cases[new_id] = {}",
"def create_customer(self):\n try:\n db.create_all()\n except OperationalError as e:\n logging.error(getattr(e, 'message', repr(e)))\n sys.exit(1)\n cust=Customer(self.cust_id,self.name,self.email,self.phone)\n logging.info('New Customer Created Id:{} name:{} email:{} phone:{} '.format(self.cust_id,self.name,self.email,self.phone))\n db.session.add(cust)\n db.session.commit()",
"def create(self, *args, **kwargs):\n\n if not args and not kwargs:\n raise Exception('attributes for Customer are missing')\n\n initial_attributes = args[0] if args else kwargs\n attributes = dict((k, v) for k, v in initial_attributes.items())\n attributes.update({'service': self.SERVICE})\n _, _, customer = self.http_client.post(\"/customers\", body=attributes)\n return customer",
"def customer_id(self, customer_id: str):\n self._customer_id = customer_id",
"def test_add_customer_fail_id(self):\n add_customer('225555', customers[0][1], customers[0][2], customers[0][3],\n customers[0][4], customers[0][5], customers[0][6], customers[0][7])\n cursor = self.conn.execute('SELECT * FROM Customer;')\n results = cursor.fetchall()\n self.assertEqual(results, [])",
"def create(data):\n\n company_nr = ec.db.company.read_nr(data['company_id'])\n\n return mt.db.write_return(\n s_create,\n [\n _create_id(),\n company_nr,\n data['lastname'].strip(),\n data['firstname'].strip()\n ])[0]",
"def _create_customers(self, count):\n customers = []\n for _ in range(count):\n test_customer = CustomerFactory()\n resp = self.app.post(\n \"/customers\",\n json=test_customer.serialize(),\n content_type=\"application/json\",\n headers = self.headers\n )\n self.assertEqual(\n resp.status_code,\n status.HTTP_201_CREATED,\n \"Could not create test customer\",\n )\n new_customer = resp.get_json()\n test_customer.id = new_customer[\"id\"]\n customers.append(test_customer)\n return customers",
"def test_add_customer(self):\n\n c_1 = Customer.get(Customer.customer_id == 37431)\n\n self.assertEqual(c_1.customer_id, 37431)\n self.assertEqual(c_1.first_name, 'Bill')\n self.assertEqual(c_1.last_name, 'Gates')\n self.assertEqual(c_1.home_address, '500 5th Ave, Seattle, WA')\n self.assertEqual(c_1.phone_number, '206-709-3100')\n self.assertEqual(c_1.email_address, 'bgates@microsoft.com')\n self.assertEqual(c_1.credit_limit, 50000)\n self.assertEqual(c_1.active_status, False)",
"def test_process_customer_new_account_id(self):\n org_id = \"88888\"\n account_id = \"88888888\"\n # create customer initially without an account_id\n customer = self._create_customer_data(account=None, org_id=org_id)\n user_data = self._create_user_data()\n request_context = self._create_request_context(customer, user_data, create_customer=True, create_user=False)\n mock_request = request_context[\"request\"]\n mock_request.META[\"QUERY_STRING\"] = \"\"\n mock_request.path = \"/api/v1/tags/aws/\"\n middleware = IdentityHeaderMiddleware()\n middleware.process_request(mock_request)\n self.assertTrue(hasattr(mock_request, \"user\"))\n customer = Customer.objects.get(org_id=org_id)\n self.assertIsNone(customer.account_id)\n\n # send another request with the new account_id and ensure it is updated\n customer = self._create_customer_data(account=account_id, org_id=org_id)\n request_context = self._create_request_context(customer, user_data, create_customer=False, create_user=False)\n mock_request = request_context[\"request\"]\n mock_request.META[\"QUERY_STRING\"] = \"\"\n mock_request.path = \"/api/v1/tags/aws/\"\n middleware = IdentityHeaderMiddleware()\n middleware.process_request(mock_request)\n customer = Customer.objects.get(org_id=org_id)\n self.assertEqual(customer.account_id, account_id)",
"def add_customer(customer_id, name, lastname, home_address,\n phone_number, email_address, status, credit_limit):\n try:\n with database.transaction():\n customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit,\n )\n customer.save()\n except Exception as unknown_error:\n print(unknown_error)",
"def cust_id(self, cust_id):\n self._cust_id = cust_id"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Drop from the df_invoice_line dataframe the features in the list given as parameter. All elements of the list are checked to be among the dataframe columns. | def list_feature_drop(self):
list_to_drop = list()
list_not_in_df = list()
#-------------------------------------------------------------------------
# Columns are checked to be into df_invoice_line dataframe
#-------------------------------------------------------------------------
for col in self._list_feature_to_drop:
if col in self.df_invoice_line.columns:
list_to_drop.append(col)
else:
list_not_in_df.append(col)
if 0 == len(list_to_drop):
            self.strprint("\n*** ERROR : no element in the list belongs to the dataframe!")
else:
if len(self._list_feature_to_drop) != len(list_to_drop):
                self.strprint("\n*** WARNING : the following features do not belong to \
dataframe : {}".format(list_not_in_df))
else:
pass
list_col_keep \
= [col for col in self.df_invoice_line.columns \
if col not in list_to_drop]
self.df_invoice_line = self.df_invoice_line[list_col_keep]
return | [
"def drop_dfcol(self, drop_list):\n self.data = self.df\n for lbl in drop_list:\n self.data = self.data.drop(lbl, axis=1)\n self.n_features = np.shape(self.data)[1]",
"def drop(self,df, column_list):\n df.drop(columns = column_list, inplace = True)\n return df",
"def _drop_features(self, X, drop_features):\n self.drop_features = drop_features\n if len(self.drop_features) != 0:\n cfp = ComprehensiveFCParameters()\n df2 = []\n for df in self.drop_features:\n if df in X.columns:\n df2.append(df) # exact match\n else:\n if df in cfp.keys() or df in ['fft_coefficient_hann']:\n df = '*__{:s}__*'.format(df) # feature calculator\n # wildcard match\n df2 += [col for col in X.columns if fnmatch(col, df)] \n X = X.drop(columns=df2)\n return X",
"def get_features(self, df):\n return df.drop(df.columns[self.target_col], axis=1)",
"def _drop_parts(self, parts):\n if parts is not None:\n attributes = []\n for part in parts:\n attributes.extend([col for col in list(self.data_frame) if col.startswith(\"part_0\" + str(part))])\n self.data_frame = self.data_frame.drop(attributes, axis=1)",
"def del_unwanted_cols_fact(data):\r\n # del data['do_plu']\r\n del data['dorder_receiveon']\r\n # del data['dorder_receiveon_time']\r\n return data",
"def preprocessData(df, removeCols):\n\tdf1=df.drop(removeCols, axis=1)\n\t\t\n\treturn df1",
"def drop_cols(features, cols_to_drop):\n print('Dropping specific column(s)...', end=\" \")\n features = features.drop(cols_to_drop, axis=1)\n print('Finished.')\n return features",
"def _drop_features(self):",
"def remove_columns(df, args):\n rm = args.remove.split(',')\n cols = [f\"X['{col}']\" for col in df.columns if col not in rm]\n cols = ', '.join(cols) \n return df >> select(eval(cols))",
"def skim_df(df, agent, task):\r\n x = df.drop(agent, axis=1)\r\n x = x.drop(task, axis=0)\r\n return x",
"def columns_to_drop(filepath, skiprows):\n candidates = ['unit', 'units', 'total', 'totals', 'id']\n df = pd.read_csv(filepath, skiprows=skiprows)\n drop = set()\n \n # find columns according to a list of names we should drop\n for item in df.columns:\n if item.upper() in [x.upper() for x in candidates]:\n drop.add(item)\n \n # find columns with only one unique value\n unique = df.nunique().to_dict()\n for column, n in unique.items():\n if n == 1:\n drop.add(column)\n \n # find columns with int values that are not a time period\n for column in df.columns:\n if df[column].dtype.name == 'int64':\n if not df[column].nunique() in [12, 24, 48, 96, 24*60/5, 24*60]:\n drop.add(column)\n \n return list(drop)",
"def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]",
"def get_all_numerical_features_list(input_df):\n features_list = [x for x in input_df.columns.values if x != 'email_address' and x != 'poi']\n features_list.insert(0,'poi') \n return features_list",
"def _feature_country_process(self):\n if 'Country' not in self._df_invoice_line.columns:\n return\n\n list_countries_keep = ['United Kingdom']\n rows_before = self._df_invoice_line.shape[0]\n \n df_invoice_line_new = pd.DataFrame()\n for country in list_countries_keep : \n df_invoice_line_new = df_invoice_line_new.append(\\\n self._df_invoice_line[self._df_invoice_line['Country']==country]\\\n , ignore_index=True)\n\n self.df_invoice_line = df_invoice_line_new\n del(df_invoice_line_new)\n \n rows_after = self._df_invoice_line.shape[0] \n _print_stat_rows(\"Countries filtering : \",rows_before, rows_after)\n\n \n #-------------------------------------------------------------------------\n # Due to the fact only one country is used, then this feature is dropped\n #-------------------------------------------------------------------------\n list_col_to_keep = [col for col in self._df_invoice_line.columns \\\n if col not in 'Country']\n \n self._df_invoice_line = self._df_invoice_line[list_col_to_keep] \n\n return",
"def get_features_from_df(df):\n features_df = df.drop(['vae','bandgap','spectrum','bandgap_pred'], axis=1, errors='ignore')\n return features_df",
"def leave_columns(column_list):\n column_list = intersect_columns(column_list)\n global loaded_dataset\n loaded_dataset = loaded_dataset[column_list]",
"def _drop_attributes(self, attributes):\n if attributes is not None:\n self.data_frame = self.data_frame.drop(attributes, axis=1)",
"def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process df_invoice_line.Description with the NLTK package. | def feature_description_nlp(self):
#-------------------------------------------------------------------------
# Returned dataframe is aggregated with weights from self.vectorizer
#-------------------------------------------------------------------------
list_no_words=['SET','PACK']
self.df_invoice_line, vectorizer, matrix_weights \
= p5_util.nlp_process(self.df_invoice_line,'Description'\
, vectorizer=self.vectorizer, list_no_words=list_no_words)
#-------------------------------------------------------------------------
# Each vectorized column 'x' is renamed w_nlp_i
#-------------------------------------------------------------------------
dict_matching_name = dict()
for col in self.df_invoice_line.columns:
if str(col).isdigit() is True:
new_col_name = "w_nlp_"+str(col)
dict_matching_name[col] = new_col_name
self.df_invoice_line.rename(columns=dict_matching_name,inplace=True)
#-------------------------------------------------------------------------
# Description is dropped from columns
#-------------------------------------------------------------------------
del(self.df_invoice_line['Description']) | [
"def process_text(self, text, language):",
"def data_transform_nlp(self):\n df_invoice_line = None\n \n is_build_step = False\n\n if self._vectorizer_nlp is None:\n is_build_step = True\n \n list_no_words=['SET','PACK']\n\n df_invoice_line, csr_matrix_weights, self._vectorizer_nlp \\\n = p5_util.nlp_process(self.df_invoice_line\\\n , 'Description' , vectorizer= self._vectorizer_nlp\\\n , list_no_words=list_no_words, is_verbose= self.is_verbose)\n \n if df_invoice_line is None:\n self.strprint(\"***ERROR : NLP process interrupted!\")\n return\n \n \n #-------------------------------------------------------------------------\n # NLP weights are cumulated (sumerized) per customer\n #-------------------------------------------------------------------------\n if csr_matrix_weights is None:\n csr_matrix_weights \\\n = p5_util.object_load('./data/matrix_weights_NLP.dump')\n else:\n pass\n \n self.strprint(\"df_invoice_line : \"+str(df_invoice_line.shape))\n \n self.dbg_df = df_invoice_line.copy()\n \n root_name = 'w_nlp_'\n self._df_w_nlp = p5_util.df_nlp_sum_per_customer(df_invoice_line\\\n , csr_matrix_weights, root_name)\n\n del(csr_matrix_weights)\n \n #-------------------------------------------------------------------------\n # Dimension reduction thanks to PCA\n #------------------------------------------------------------------------- \n self.strprint(\"self._df_w_nlp : \"+str(self._df_w_nlp.shape))\n\n root_name_pca = 'nlp_pca_'\n n_dim = self._nlp_pca_ndim\n \n df_customers_pca_nlp, self._pca_nlp \\\n = p5_util.df_pca_reduce(self._df_w_nlp, n_dim, root_name_pca\\\n , p_is_scale=False, pca=self._pca_nlp)\n \n self.strprint(\"df_customers_pca_nlp : \" +str(df_customers_pca_nlp.shape))\n\n #-------------------------------------------------------------------------\n # Backup of NLP features per customer\n #-------------------------------------------------------------------------\n if is_build_step is True:\n p5_util.object_dump(df_customers_pca_nlp\\\n , self._df_customers_nlp_fileName)\n else:\n self._df_customers_pca_nlp = df_customers_pca_nlp.copy()\n \n return",
"def remove_info(text, journal_id, label, doc_type='inkomst'):\r\n sections = text.split('NEWPAR')\r\n cleaned_text = ''\r\n diagnose_detected = False\r\n for section in sections:\r\n if section:\r\n section_header =list(filter(None, section.split(' ')))[0]\r\n #print(section_header)\r\n if 'diagnose' in section_header.lower() or 'DIAGNOSE' in section or 'Diagnose :' in section or 'Problemstilling :' in section:\r\n diagnose_detected = True\r\n else:\r\n cleaned_text += section + ' '\r\n if not diagnose_detected :\r\n print('No DIAGNOSE in: ', journal_id)\r\n return cleaned_text",
"def create_NER(self, dataframe):\n\n dataframe['entities'] = dataframe['line']\n entity_dict = {}\n entity_type = {}\n\n for i, val in enumerate(dataframe['entities']):\n e1 = re.findall('<e1>(.*?)</e1>', val)\n e2 = re.findall('<e2>(.*?)</e2>', val)\n entity_dict[i+1] = (str(e1[0]), str(e2[0]))\n doc = nlp(e1[0])\n for ent in doc.ents:\n if ent.label_:\n entity_type[i] = ent.label_\n else:\n entity_type[i] = ('NOT RECOGNIZED')\n \n doc = nlp(e2[0])\n for ent in doc.ents:\n if ent.label_:\n entity_type[i] = entity_type[i] + ent.label_\n else:\n entity_type[i] = entity_type[i] + ('NOT RECOGNIZED')\n\n entity_dataframe = self.create_dataframe(entity_dict, ['e1', 'e2'])\n entity_type_df = self.create_dataframe(entity_type, ['e1', 'e2'])\n\n dataframe = dataframe.drop(columns=['entities'])\n dataframe['e1'] = entity_dataframe['e1']\n dataframe['e2'] = entity_dataframe['e2']\n dataframe['e1_type'] = entity_type_df['e1']\n dataframe['e2_type'] = entity_type_df['e2']\n\n return dataframe",
"def prepare_text_data(descriptions):\n text_data = []\n for line in descriptions:\n tokens = prepare_text_for_lda(line)\n text_data.append(tokens)\n return text_data",
"def summary_line_and_description():",
"def get_sale_order_line_multiline_description_sale(self, product):\n\t\tres = super(SaleOrderLine, self).get_sale_order_line_multiline_description_sale(product)\n\t\tif product.description_sale:\n\t\t\tres = product.description_sale + self._get_sale_order_line_multiline_description_variants()\n\t\treturn res",
"def prepare_text(document):\n text_processing = textacy.preprocess_text(\n nlp(document).text.replace('-',' ').replace('\\n',''),\n fix_unicode=True,\n lowercase=True,\n transliterate=False,\n no_urls=False,\n no_emails=False,\n no_phone_numbers=False,\n no_numbers=True,\n no_currency_symbols=True,\n no_punct=True,\n no_contractions=True,\n no_accents=True\n )\n prepared_text = nlp(text_processing)\n print ('cleaning text...')\n return (prepared_text)",
"def nlp(self, text):\n # Runs the NLP model on the input.\n doc = self.nlp_model(text)\n\n to = []\n when = []\n body = []\n\n # Group the labels into variables.\n for token in doc:\n if token.dep_ == \"TO\":\n to.append(token.text)\n elif token.dep_ == \"WHEN\":\n when.append(token.text)\n elif token.dep_ == \"BODY\":\n body.append(token.text)\n log.debug(\"%s %s\", token.text, token.dep_)\n\n # Get the time entity from the NLP model.\n time = datetime.now()\n if len(when) == 0:\n time = time + timedelta(seconds=5)\n else:\n time = tc.parse_time(when)\n\n _body = \" \".join(body)\n\n return (to, time, _body)",
"def reduce_description_nadj(row):\n # Use global Tagger because its much faster\n prod_des = row['product_description']\n tags = nltk.tag._pos_tag(nltk.word_tokenize(prod_des), None, TAGGER)\n cleaned_string = \"\"\n for i in xrange(len(tags)):\n if is_noun_or_adjective(tags[i][1]):\n cleaned_string += (tags[i][0] + \" \")\n return downcase_str(cleaned_string)",
"def make_description(description):\n\n docbook_desc = \"<para>\\n\"\n first_line = 1\n\n for line in StringIO(description):\n if first_line:\n refpurpose = line\n first_line = 0\n if len(line.strip()) == 0:\n docbook_desc = docbook_desc + \"</para>\\n<para>\\n\"\n else:\n docbook_desc = docbook_desc + line\n\n docbook_desc = docbook_desc + \"\\n</para>\"\n return refpurpose, docbook_desc",
"def _get_invoiceable_lines(self, final=False):\n down_payment_line_ids = []\n invoiceable_line_ids = []\n pending_section = None\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n original_term_line = self._context.get(\"term_line\", False)\n\n if original_term_line:\n order_line = [self._context.get('so_line')]\n for item in self._context.get('so_line').fal_invoice_milestone_line_date_ids.filtered(lambda a: not a.is_final):\n for invoice in item.invoice_id.invoice_line_ids:\n order_line.append(invoice.sale_line_ids)\n\n for line in order_line:\n if line.display_type == 'line_section':\n # Only invoice the section if one of its lines is invoiceable\n pending_section = line\n continue\n if line.display_type != 'line_note' and float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if line.qty_to_invoice > 0 or (line.qty_to_invoice < 0 and final) or line.display_type == 'line_note':\n if line.is_downpayment:\n # Keep down payment lines separately, to put them together\n # at the end of the invoice, in a specific dedicated section.\n down_payment_line_ids.append(line.id)\n continue\n if pending_section:\n invoiceable_line_ids.append(pending_section.id)\n pending_section = None\n invoiceable_line_ids.append(line.id)\n\n return self.env['sale.order.line'].browse(invoiceable_line_ids + down_payment_line_ids)\n else:\n return super(SaleOrder, self)._get_invoiceable_lines(final)",
"def _parse_tsv_defline(self, defline):\n fields = defline.split(\"\\t\")\n assert tuple(fields[:3]) == (\"Sample\", \"Chain\", \"Replicate\"), \",\".join(fields[:3])\n for b in fields[3:]:\n if b == \"Description\":\n break\n i1 = b.find(\"_\")\n i2 = b.find(\":\", i1)\n assert i1 >= 0 and i2 > i1, fields\n name = b[:i1]\n start = int(b[i1+1:i2])\n end = int(b[i2+1:])\n self.barcode_definitions.append(BarcodeDefinition(start, end, name))",
"def extract_text(self, data):",
"def extract_data(file_ner,file_pos,separator=\" \"):\n\n # read NER and POS from the two files\n words_tags=read_conll_file(file_ner)\n words_pos=read_conll_file(file_pos)\n \n ## some checks, e.g., that both files have same length, same tokens\n assert(len(words_tags)==len(words_pos))\n \n for (words,tags),(_,pos) in zip(words_tags,words_pos):\n for word,pos,tag in zip(words,pos,tags):\n # first letter is capitalized\n cap=\"+\" if word[0].isupper() else \"-\"\n hyphen = '+' if '-' in word else '-'\n l = str(len(word))\n #vowels = \"\".join(sorted([w for w in word.lower() if w in ['a','e','i','o','u','y']]))\n #################################\n ###### YOUR FEATURES HERE ####### \n #################################\n # 0=separator\n \n ## todo: output the cap feature and more \n ## make sure the format you output here is what the nerfeats.py script expects as fields!\n print separator.join([word.lower(),pos,cap, l, hyphen, tag])\n # sentence separator\n print \"\"",
"def add_text_layer(pdf, image, height, dpi):\r\n p1 = re.compile(r'bbox((\\s+\\d+){4})')\r\n p2 = re.compile(r'baseline((\\s+[\\d\\.\\-]+){2})')\r\n hocrfile = os.path.splitext(image)[0] + \".hocr\"\r\n hocr = etree.parse(hocrfile, html.XHTMLParser())\r\n for line in hocr.xpath('//*[@class=\"ocr_line\"]'):\r\n linebox = p1.search(line.attrib['title']).group(1).split()\r\n try:\r\n baseline = p2.search(line.attrib['title']).group(1).split()\r\n except AttributeError:\r\n baseline = [0, 0]\r\n linebox = [float(i) for i in linebox]\r\n baseline = [float(i) for i in baseline]\r\n xpath_elements = './/*[@class=\"ocrx_word\"]'\r\n if (not (line.xpath('boolean(' + xpath_elements + ')'))):\r\n # if there are no words elements present,\r\n # we switch to lines as elements\r\n xpath_elements = '.'\r\n for word in line.xpath(xpath_elements):\r\n rawtext = word.text_content().strip()\r\n if rawtext == '':\r\n continue\r\n font_width = pdf.stringWidth(rawtext, 'invisible', 8)\r\n if font_width <= 0:\r\n continue\r\n box = p1.search(word.attrib['title']).group(1).split()\r\n box = [float(i) for i in box]\r\n b = polyval(baseline,\r\n (box[0] + box[2]) / 2 - linebox[0]) + linebox[3]\r\n text = pdf.beginText()\r\n text.setTextRenderMode(3) # double invisible\r\n text.setFont('invisible', 8)\r\n text.setTextOrigin(box[0] * 72 / dpi, height - b * 72 / dpi)\r\n box_width = (box[2] - box[0]) * 72 / dpi\r\n text.setHorizScale(100.0 * box_width / font_width)\r\n text.textLine(rawtext)\r\n pdf.drawText(text)",
"def label_paragraphs(root_el, fastcase_data):\n # case metadata\n citations = [alphanum_lower(\" \".join((c[\"Volume\"], c[\"Reporter\"], c[\"Page\"]) + ((c[\"Suffix\"],) if \"Suffix\" in c else ()))) for c in fastcase_data['Citations']]\n name_clean = alphanum_lower(fastcase_data['PartyHeader']) if fastcase_data['PartyHeader'] else None\n court_clean = alphanum_lower(fastcase_data['CourtName'] or fastcase_data['CourtAbbreviation'])\n docket_numbers_clean = [alphanum_lower(d) for d in fastcase_data['DocketNumbers']]\n\n # via https://github.com/harvard-lil/CaselawAccessProjectSchemas/blob/master/casebodyxml/v1/casebodyxml.xsd\n states = {k:i for i, k in enumerate([None, \"citation\", \"parties\", \"docketnumber\", \"court\", \"otherdate\", \"decisiondate\", \"history\", \"syllabus\", \"attorneys\", \"judges\", \"disposition\", \"_opinionstart\", \"_preauthor\", \"author\", \"opinion\"])}\n reverse_states = {v:k for k, v in states.items()}\n\n state = 0\n header_els = []\n opinions = [[]]\n header_complete = False\n extra_els = []\n blank_els = []\n authors = []\n opinion_starts = []\n paragraph_id = 1\n\n def shift_to_opinion(i):\n \"\"\"Move i elements from the end of header to the start of opinion.\"\"\"\n if not i:\n return\n nonlocal header_els\n opinions[0][0:0] = header_els[-i:]\n header_els = header_els[:-i]\n\n def add_el(el, state, target_list=header_els):\n nonlocal blank_els, paragraph_id\n if state:\n if not reverse_states[state].startswith('_'):\n el.attrib['class'] = reverse_states[state]\n if state == states['_opinionstart']:\n opinion_starts.append((len(target_list), el))\n elif state == states['author']:\n authors.append((len(target_list), el))\n blank_els = []\n else:\n blank_els.append(el)\n el.attrib['id'] = f'p-{paragraph_id}'\n paragraph_id += 1\n target_list.append(el)\n\n def append_to_previous(line):\n PyQuery(header_els[-1]).append(PyQuery(line))\n\n for el_pq in PyQuery(root_el)('root').children().items():\n\n if extra_els:\n extra_els.append(el_pq)\n el_pq = extra_els.pop(0)\n\n el = el_pq[0]\n\n # mark the end of the labeled front matter (which may or may not align with actual end)\n if el.tag == 'header-end':\n header_complete = True\n if state == states[\"author\"]:\n state = states[\"opinion\"]\n continue\n\n # skip\n if el.text == \"COPYRIGHT MATERIAL OMITTED\":\n continue\n\n # add linebreak after element for indentation\n if not (el.tail and el.tail.startswith('\\n')):\n el.tail = '\\n' + (el.tail or '')\n\n line = inner_html(el)\n line_text = strip_tags(line)\n line_text_lower = line_text.lower()\n line_alphanum_chars = alphanum_lower(line_text)\n\n # if we've had 5 regular paragraphs in a row, assume we missed the start of the opinion\n if state < states[\"opinion\"] and len(blank_els) >= 5:\n shift_to_opinion(len(blank_els))\n state = states[\"opinion\"]\n\n # we have now reached the opinion and no longer have to process header lines\n if state >= states[\"opinion\"]:\n # check short lines for the start of a concurrence or dissent\n m = new_opinion_re.match(line_text)\n if m:\n el.attrib['class'] = 'author'\n el.attrib['opinion-type'] = opinion_type_lookup[m[1].lower()]\n opinions.append([])\n\n add_el(el, 0, opinions[-1])\n continue\n\n # citation\n if state <= states[\"citation\"]:\n if any(c in line_alphanum_chars for c in citations) or all(citation_like_re.match(s) for s in line.split('<br>')):\n state = states[\"citation\"]\n continue # don't include citation lines in output\n\n # parties\n if state < states[\"parties\"]:\n # special case -- if 
the case doesn't have a name, like NE2d/939/939ne2d586.xml,\n # assume that whatever comes after the last citation is the name\n if name_clean is None or line_alphanum_chars == name_clean:\n state = states[\"parties\"]\n add_el(el, state)\n elif header_els and name_clean == alphanum_lower(inner_html(header_els[-1]) + line):\n # handle edge case where name is split across two paragraphs\n append_to_previous(line)\n elif line_alphanum_chars.startswith(name_clean) or similar_strings(line_text, fastcase_data['PartyHeader']):\n # special cases -- NW2d/881/881 N.W.2d 813-4_Replace.xml, NW2d/792/792NW2d203.xml\n state = states[\"parties\"]\n add_el(el, state)\n else:\n # if we haven't found a valid name yet, paragraphs are just regular paragraphs\n add_el(el, 0)\n continue\n\n # docket numbers or court\n if state < states[\"court\"]:\n # detect 'Supreme Judicial Court of Massachusetts.' and 'United States Bankruptcy Appellate Panel of the Ninth Circuit.' as a court, but not\n # 'Court of Appeals Case No. 04A03-1707-IF-1724' or 'Consol. Court No. 16-00054'\n # line may be 'Court of Appeals of Virginia, Chesapeake.' if court is 'Court of Appeals of Virginia'\n # line may be 'North Carolina Court of Appeals.' if court is 'Court of Appeals of North Carolina'\n # if 'court' in line.lower() or 'panel' in line.lower()) and ('No.' not in line or 'Division No.' in line):\n if any(line_alphanum_chars.startswith(s) for s in docket_numbers_clean):\n state = states[\"docketnumber\"]\n elif line_alphanum_chars.startswith(court_clean) or (\n (line_text.endswith('Court of Appeals.') or any(line_text_lower.startswith(s) for s in ('court of appeal', 'supreme court')))\n ):\n state = states[\"court\"]\n else:\n state = states[\"docketnumber\"]\n add_el(el, state)\n continue\n\n # accidental start of opinion included in head matter\n # NW2d/737/737NW2d768_3New.xml -- \"On order of the Court ...\"\n if state >= states[\"decisiondate\"]:\n if line_text.startswith(\"On order of the Court\"):\n state = states[\"opinion\"]\n add_el(el, 0, opinions[-1])\n continue\n\n # dates\n # 'DATED at Olympia, Washington, this 31st day of October, 2018.'\n # '01-04-2017'\n if state <= states[\"decisiondate\"]:\n # long line isn't decision date -- SCt/134/134sct985_2.xml\n if len(line_text) < 80 and (date_re.search(line_text) or line_text_lower.startswith('dated at') or re.match(r'\\d{1,2}-\\d{2}-\\d{4}$', line_text)):\n if any(line_text.startswith(s) for s in ('Released', 'Submitted', 'Dissenting')) and 'Decided' not in line_text:\n # handle case like\n # 'Submitted June 5, 2007, at Lansing.'\n # 'Decided June 12, 2007, at 9:05 a.m.'\n # 'Released for Publication October 11, 2007\n # 'Dissenting Opinion of Chief Justice Maynard June 27, 2008.'\n # avoid\n # 'Submitted March 2, 2010.<br>Decided April 2, 2010.'\n state = states[\"otherdate\"]\n else:\n state = states[\"decisiondate\"]\n add_el(el, state)\n continue\n\n if state < states[\"judges\"]:\n # strip off judges lines appended to current line, and add as an extra_el\n # \"for Respondent.<strong>Justice BEATTY.</strong></p>\" SE2d/708/708se2d750.xml\n # \"... 
West Virginia Insurance Federation.<strong>DAVIS, Justice:</strong></p>\" SE2d/719/719se2d830.xml\n # \"for appellees.<strong>Present: HUMPHREYS, McCLANAHAN and BEALES, JJ.</strong><strong>BEALES, Judge.</strong>\" SE2d/708/708se2d429.xml\n while True:\n m = re.search('(.+)(<strong>([^<]+)</strong>)$', line)\n if m and is_judges_or_author(m[3]):\n extra_els.insert(0, PyQuery('<p>'+m[2]+'</p>'))\n line = m[1]\n el_pq.html(line)\n line_text = strip_tags(line)\n line_alphanum_chars = alphanum_lower(line_text)\n continue\n break\n\n # history\n # 'Appeal by defendant from judgment entered 8 December 2004 by Judge Robert H. Hobgood in Alamance County Superior Court. Heard in the Court of Appeals 2 November 2005.'\n if line_text_lower.startswith('appeal') or any(s in line_text for s in ('Superior Court', 'District Court', 'Circuit Court')):\n state = states[\"history\"]\n add_el(el, state)\n continue\n\n # syllabus\n if 'Syllabus by the Court' in line_text or (state == states[\"syllabus\"] and re.match(r'\\d+\\.|[a-z\\[]', line_text)):\n if re.match(r'[a-z\\[]', line_text):\n # handle case where syllabus is split midsentence\n append_to_previous(line)\n else:\n state = states[\"syllabus\"]\n add_el(el, state)\n continue\n\n # attorneys\n # 'Garrett D. Blanchfield, Jr., Reinhardt Wendorf & Blanchfield, St. Paul, MN, for Appellants.'\n if any(line_text.startswith(s) for s in (\"An amicus\", \"For the\", \"On behalf of\")) or any(s in line_text for s in (' for ', 'amici curiae', 'pro se')):\n state = states[\"attorneys\"]\n add_el(el, state)\n continue\n\n # titles that mark the start of an opinion, like \"OPINION\"\n if line_alphanum_chars in opinion_start_lines or any(line_alphanum_chars.startswith(s) for s in opinion_start_line_prefixes):\n state = states[\"_opinionstart\"]\n if line_text != \"OPINION\":\n add_el(el, state)\n continue\n\n # Handle paragraph that is definitely followed by author, like \"The opinion of the court was delivered by\", A3d/148/148 A.3d 441_Replace.xml\n if line_text == \"The opinion of the court was delivered by\":\n state = states[\"_preauthor\"]\n add_el(el, 0)\n continue\n if state == states[\"_preauthor\"]:\n add_el(el, states[\"author\"])\n state = states[\"opinion\"]\n continue\n\n # author\n # note, in theory fastcase_data[\"Author\"] could be useful for identifying author paragraph, but it's often not set,\n # and when it is it can also appear in the judges line and other places ...\n judges_or_author = is_judges_or_author(line_text)\n if judges_or_author == \"judges\":\n state = states[\"judges\"]\n add_el(el, state)\n continue\n elif judges_or_author == \"author\":\n add_el(el, states[\"author\"])\n state = states[\"opinion\"] if header_complete else states[\"author\"]\n continue\n\n # weird special case where there's an order provided before the start of the opinion\n # E.g. NW2d/740/740NW2d659_1.xml, 'ORDER ENTERED JUNE 8, 2007' and subsequent unlabeled lines\n if line_text.startswith(\"ORDER ENTERED\") or state == states[\"disposition\"]:\n state = states[\"disposition\"]\n add_el(el, state)\n continue\n\n # regular paragraph\n add_el(el, 0)\n continue\n\n # fixups\n labels = [el.attrib.get('class') for el in header_els]\n # rewrite special case like NE2d/944/944ne2d1119.xml:\n # [['parties', '...'],\n # ['docketnumber', 'Feb. 15'],\n # ['docketnumber', '2011.'],\n # ['court', 'Court of Appeals of New York.']]\n # to\n # [['parties', '...'],\n # ['court', 'Court of Appeals of New York.'],\n # ['decisiondate', 'Feb. 
15, 2011.']]\n if labels == [None, 'docketnumber', 'docketnumber', 'court']:\n docket_combined = header_els[1].text + \", \" + header_els[2].text\n if date_re.match(docket_combined):\n header_els[1].attrib['class'] = 'decisiondate'\n header_els[1].text = docket_combined\n header_els = [header_els[0], header_els[3], header_els[1]]\n\n # change all author labels but the last to judges; we likely misdetected one earlier\n for i, el in authors[:-1]:\n el.attrib['class'] = \"judges\"\n\n # if we didn't find an author and the last line is unlabeled, assume that's the author with a typo --\n # e.g. NW2d/753/753NW2d552_1.xml , missing comma\n if header_els and not authors and not opinion_starts and state >= states[\"judges\"] and header_els[-1].attrib.get('class') is None:\n header_els[-1].attrib['class'] = \"author\"\n authors = [(len(header_els)-1, header_els[-1])]\n\n # move author, and any paragraphs after it, to beginning of first opinion\n move_index = opinion_starts[0][0] + 1 if opinion_starts else authors[-1][0] if authors else None\n if move_index is not None:\n shift_to_opinion(len(header_els)-move_index)\n\n return header_els, opinions",
"def _extract_textlevel(self, entry, features) -> bool:\n\n numbers = ''.join([i for i in entry['text'] if i.isdigit() or i == \" \"]).strip()\n # If one column just parse\n if len(self.info.col) == 1:\n #if self.info.row == \"Bilanzsumme\":\n # self.content[\"Bilanzsumme\"][0] = \" \".join(numbers)\n #else:\n self.content[self.structure[\"type\"][self.info.lidx]][0][self.info.row] = \" \".join(numbers)\n return True\n\n # First try to solve the problem with reocr the bbox\n if self.info.snippet and self.info.separator:\n if self._extract_reocrlevel(entry, numbers):\n return True\n if numbers == \"\" and self.info.lidx == self.info.nrow-1:\n return False\n numbers = numbers.split(\" \")\n\n # Check if line is date\n if features.counter_alphabetical < 2 and features.counter_special_chars > 3 and features.counter_numbers > 10:\n return False\n count_years = len(self.info.col) - 1\n count_numbers = 0\n number = \"\"\n for grpidx, numbergrp in enumerate(reversed(numbers)):\n # Check and clean artifacts\n count_numbers += len(numbergrp)\n if len(numbergrp) > 3 and grpidx > 0:\n if numbergrp[3:] == list(reversed(numbers))[grpidx - 1][:len(numbergrp[3:])]:\n numbergrp = numbergrp[:3]\n if len(numbergrp) == 3 and grpidx != len(numbers) and count_numbers < (\n features.counter_numbers / 2):\n number = (numbergrp + \" \" + number).strip()\n continue\n else:\n count_numbers = 0\n self.content[self.structure[\"type\"][self.info.lidx]][count_years][self.info.row] = (\n numbergrp + \" \" + number).strip()\n number = \"\"\n count_years -= 1\n if count_years == 0:\n self.content[self.structure[\"type\"][self.info.lidx]][count_years][self.info.row] = \" \".join(\n numbers[:len(numbers) - grpidx - 1])\n return True\n return True",
"def debian_multiline_description(description):\n return \"\\n \".join(line for line in description.split(\"\\n\") if line.strip() != \"\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
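Side note on the feature_description_nlp document above: it relies on a project-specific helper, p5_util.nlp_process, which is not included in this dump. A minimal, self-contained sketch of the same pattern — vectorize the Description column, name the resulting weight columns w_nlp_<i>, and drop the raw text — could look as follows, assuming scikit-learn's TfidfVectorizer as a stand-in and a hypothetical helper name vectorize_description.

# Sketch only: TfidfVectorizer stands in for p5_util.nlp_process, which is not shown here.
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer

def vectorize_description(df, stop_words=('set', 'pack')):
    """Replace the 'Description' column by TF-IDF weight columns named w_nlp_<i>."""
    vectorizer = TfidfVectorizer(stop_words=list(stop_words))
    weights = vectorizer.fit_transform(df['Description'].fillna(''))
    df_nlp = pd.DataFrame(
        weights.toarray(),
        index=df.index,
        columns=['w_nlp_{}'.format(i) for i in range(weights.shape[1])],
    )
    # Raw text is dropped and the weight columns are appended, mirroring the row above.
    return pd.concat([df.drop(columns=['Description']), df_nlp], axis=1)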
Standardize quantitative features. The standardizer is stored as an object attribute. It will be copied into the P5_SegmentClassifier object. | def feature_scale(self):
#-------------------------------------------------------------------------
# List of quantitative features to be standardized
#-------------------------------------------------------------------------
list_quant_feature = ['Quantity','UnitPrice']
self._list_quant_feature = list_quant_feature.copy()
#-------------------------------------------------------------------------
# Standardization is applied over quantitative features in list.
#-------------------------------------------------------------------------
X_std = self.std_scale.transform(self.df_invoice_line[self.list_quant_feature])
df_quant_std = pd.DataFrame(X_std, index=self.df_invoice_line.index)
#-------------------------------------------------------------------------
# Columns from standardized dataframe are renamed
#-------------------------------------------------------------------------
df_quant_std.rename(columns={0:'STD_Quantity',1:'STD_UnitPrice'}\
,inplace=True)
#-------------------------------------------------------------------------
# Standardized values dataframe is aggregated to df_invoice_line
#-------------------------------------------------------------------------
list_col_drop = ['Quantity','UnitPrice']
list_col_keep = \
[col for col in self.df_invoice_line.columns if col not in list_col_drop ]
self.df_invoice_line = self.df_invoice_line[list_col_keep]
self.df_invoice_line \
= pd.concat([self.df_invoice_line,df_quant_std], axis=1)
return | [
"def standardiser(self):\n # Select only numeric features first\n\n #self.X = self.data.loc[:, self.data.columns != self.target].values\n numeric_columns = []\n for col in self.X.columns:\n if self.X[col].dtype!='object':\n numeric_columns.append(col)\n scaler = preprocessing.StandardScaler().fit(self.X[numeric_columns]) \n # Now we can standardise\n self.X[numeric_columns] = scaler.transform(self.X[numeric_columns])",
"def test_scale_features_standardize(self):\n data = array([[0.564, 20.661], [-18.512, 41.168], [-0.009, 20.440]])\n cdata = CData(data)\n\n # correct answer computed in Mathematica\n # TODO: can we compute the right answer in Python?\n answer = array([[0.60355, -0.568043], [-1.1543, 1.15465], [0.550748, -0.586608]])\n\n # perform standardization feature scaling and check answer\n cdata.scale_features('standardize')\n self.assertTrue(allclose(cdata.data, answer))",
"def _standardize(self):\n deviation = np.std(self.series)\n self.series = (self.series - np.mean(self.series)) / (deviation if deviation != 0 else 1)",
"def standardize(features: {}) -> {}:\n\n # Extracting single set of features to compute standardisation values\n V, A, C, BB, D, E = [], [], [], [], [], []\n for id, featuresList in features.items():\n V.append(featuresList[\"volume\"])\n A.append(featuresList[\"area\"])\n C.append(featuresList[\"compactness\"])\n BB.append(featuresList[\"bbox_volume\"])\n D.append(featuresList[\"diameter\"])\n E.append(featuresList[\"eccentricity\"])\n\n sdVals = save_standardization_vals(V, A, C, BB, D, E)\n\n # Standardizing non-histogram features\n for id, featuresList in features.items():\n features[id][\"volume\"] = (featuresList[\"volume\"] - sdVals[\"V_mean\"]) / sdVals[\"V_std\"]\n features[id][\"area\"] = (featuresList[\"area\"] - sdVals[\"A_mean\"]) / sdVals[\"A_std\"]\n features[id][\"compactness\"] = (featuresList[\"compactness\"] - sdVals[\"C_mean\"]) / sdVals[\"C_std\"]\n features[id][\"bbox_volume\"] = (featuresList[\"bbox_volume\"] - sdVals[\"BB_mean\"]) / sdVals[\"BB_std\"]\n features[id][\"diameter\"] = (featuresList[\"diameter\"] - sdVals[\"D_mean\"]) / sdVals[\"D_std\"]\n features[id][\"eccentricity\"] = (featuresList[\"eccentricity\"] - sdVals[\"E_mean\"]) / sdVals[\"E_std\"]\n\n # Saving standardized features and standardization values in cache\n np.save(s.SAVED_DATA + \"features.npy\", features)\n np.save(s.SAVED_DATA + \"standardization_values.npy\", sdVals)\n\n return features",
"def standardize(X_train_input, X_test_input):\r\n from sklearn.preprocessing import StandardScaler\r\n sc = StandardScaler()\r\n sc.fit(X_train_input)\r\n\r\n X_train_std = sc.transform(X_train_input)\r\n X_test_std = sc.transform(X_test_input)\r\n \r\n return X_train_std, X_test_std",
"def standardize(self, matrix: ndarray) -> ndarray:\n # Standardize the matrix\n mean = matrix.mean(axis=0)\n std = matrix.std(axis=0)\n\n def standardizer(mat):\n # Ignore the first column of constant term\n mat = (mat - mean[np.newaxis, :])/std[np.newaxis, :]\n return mat\n self.standardizer = standardizer\n return standardizer(matrix)",
"def standardize(self, matrix: ndarray) -> ndarray:\n # Standardize the matrix\n mean = matrix[:, 1:].mean(axis=0)\n std = matrix[:, 1:].std(axis=0)\n\n def standardizer(mat: ndarray) -> ndarray:\n # Ignore the first column of constant term\n mat[:, 1:] = (mat[:, 1:] - mean[np.newaxis, :])/std[np.newaxis, :]\n return mat\n\n self.standardizer = standardizer\n return standardizer(matrix)",
"def _feature_scaling(self):",
"def standard_data(self, x_train, x_test):\n preprocessor = prep.StandardScaler().fit(x_train)\n x_train = preprocessor.transform(x_train)\n x_test = preprocessor.transform(x_test)\n return x_train, x_test",
"def do_preprocess_on_segment_raw(seg_raw_df):\n sigma = 2\n median_kernel_size = 5\n print \"=======================start preprocessing segment raw dataframe=================\"\n print \"parameters: \" + \"gaussian filter sigma: %.2f, median kernel size: %.2f\" % (sigma, median_kernel_size)\n pp_df = seg_raw_df.copy(deep=True)\n df_mean = pp_df[s_info.raw_value_names].mean()\n df_std = pp_df[s_info.raw_value_names].std()\n pp_df[s_info.raw_value_names] = pp_df.groupby(s_info.segment_col)[s_info.raw_value_names].transform(sp_signal.medfilt, median_kernel_size)\n pp_df[s_info.raw_value_names] = (pp_df[s_info.raw_value_names] - df_mean)/df_std\n pp_df[s_info.raw_value_names] = pp_df.groupby(s_info.segment_col)[s_info.raw_value_names].transform(gaussian_filter1d, sigma=sigma, axis=0, order=0, mode='reflect')\n return pp_df",
"def dataset_handling_with_standardisation(init_data):\n #\n ##Maximum number of points = 72 , keep around 80 values for even number\n max_len = 80\n ##Fluxes, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'fluxes_0', u'fluxes_1', u'fluxes_2', u'fluxes_3', u'fluxes_4', u'fluxes_5']].values\n zp_array_flux = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux.append(n_data)\n zp_array_flux = np.array(zp_array_flux)\n print(zp_array_flux.shape)\n\n ##Fluxerrors, Standardisation is done over 1 type of feature\n data = init_data.loc[:,\n [u'fluxerrs_0', u'fluxerrs_1', u'fluxerrs_2', u'fluxerrs_3', u'fluxerrs_4', u'fluxerrs_5']].values\n zp_array_flux_error = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux_error.append(n_data)\n zp_array_flux_error = np.array(zp_array_flux_error)\n print(zp_array_flux_error.shape)\n\n ##Time, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'mjds_0', u'mjds_1', u'mjds_2', u'mjds_3', u'mjds_4', u'mjds_5']].values\n zp_array_mjds = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_mjds.append(n_data)\n zp_array_mjds = np.array(zp_array_mjds)\n print(zp_array_mjds.shape)\n\n ##Concatenating everything\n zp_data = np.c_[zp_array_flux, zp_array_flux_error, zp_array_mjds]\n\n ##Adding redshift info// Gal pos info might be necessary to remove\n zp_data = np.c_[\n zp_data, init_data.loc[:, [u'gal_b', u'gal_l', u'hostgal_photoz', u'hostgal_photoz_err', u'hostgal_specz', u'mwebv']].values]\n print(zp_data.shape)\n\n ##Load labels and convert to integer\n labels = init_data.loc[:, [u'target']].values\n labels = labels.flatten()\n labels_name = np.array([6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95, 99])\n [np.place(labels, labels == labels_name[i], [i]) for i in range(len(labels_name))]\n\n return [zp_data, labels]",
"def quantize(self, df):\n if len(self.dict_scalers) == 0:\n raise Exception(\"[ERROR] quantize method called prior to\"\n \"normalization transform method \")\n\n quant_df = pd.DataFrame()\n if 'OneForAll' in self.dict_scalers:\n # quantization is applied on all features\n min_fp = float(np.amin(df))\n max_fp = float(np.amax(df))\n scale = (max_fp - min_fp) / (127 - (-127))\n zero_point = 127 - (max_fp / scale)\n quant_df = df / scale + zero_point\n else:\n # quantization is applied independently for each feature/column\n lbl_list = df.columns.values\n for lbl in lbl_list:\n min_fp = float(np.amin(df[lbl]))\n max_fp = float(np.amax(df[lbl]))\n scale = (max_fp - min_fp) / (127 - (-127))\n zero_point = 127 - (max_fp / scale)\n quant_df[lbl] = df[lbl] / scale + zero_point\n return quant_df.astype(np.int8)",
"def standardize_vcf(self):\n idx = 1\n for std_rec in self.standardize_records():\n # Apply size filter (but keep breakends (SVLEN=-1))\n if 0 < std_rec.info['SVLEN'] < self.min_size:\n continue\n\n # Exclude insertions of unknown SVLEN\n if std_rec.info['SVTYPE'] == 'INS' and std_rec.info['SVLEN'] == -1:\n continue\n\n # Exclude sites with no called samples unless requested otherwise\n if not any_called(std_rec) and not self.include_reference_sites:\n continue\n\n # Filter unstranded breakpoints\n if std_rec.info['STRANDS'] not in '++ +- -+ --'.split():\n continue\n\n # Assign new variant IDs\n if self.prefix is not None:\n std_rec.id = '{0}_{1}'.format(self.prefix, idx)\n idx += 1\n\n yield std_rec",
"def standardize_X_data(X_train, X_test):\n\n # Fit StandardScaler to X_train\n scaler = StandardScaler()\n scaler.fit(X_train)\n\n # Use the fit StandardScaler model to standardize both X_train and X_test\n X_train_std = scaler.transform(X_train)\n X_test_std = scaler.transform(X_test)\n\n return X_train_std, X_test_std",
"def _standardize_features(feature_table, standardization_dict=None):\n\n num_real_values_by_feature = numpy.sum(\n numpy.invert(numpy.isnan(feature_table.as_matrix())), axis=0)\n if numpy.any(num_real_values_by_feature < 2):\n raise ValueError('Each column of feature_table must have >= 2 real '\n 'values (not NaN).')\n\n if standardization_dict is None:\n feature_means = numpy.nanmean(feature_table.as_matrix(), axis=0)\n feature_standard_deviations = numpy.nanstd(\n feature_table.as_matrix(), axis=0, ddof=1)\n\n standardization_dict = {FEATURE_NAMES_KEY: list(feature_table),\n ORIGINAL_MEANS_KEY: feature_means,\n ORIGINAL_STDEVIATIONS_KEY: feature_standard_deviations}\n\n else:\n standardization_dict = _reorder_standardization_dict(\n standardization_dict, list(feature_table))\n\n feature_names = list(feature_table)\n num_features = len(feature_names)\n standardized_feature_table = None\n\n for j in range(num_features):\n these_standardized_values = (\n (feature_table[feature_names[j]].values -\n standardization_dict[ORIGINAL_MEANS_KEY][j]) /\n standardization_dict[ORIGINAL_STDEVIATIONS_KEY][j])\n\n nan_indices = numpy.where(numpy.isnan(these_standardized_values))[0]\n these_standardized_values[nan_indices] = 0.\n\n if standardized_feature_table is None:\n standardized_feature_table = pandas.DataFrame.from_dict(\n {feature_names[j]: these_standardized_values})\n else:\n standardized_feature_table = standardized_feature_table.assign(\n **{feature_names[j]: these_standardized_values})\n\n return standardized_feature_table, standardization_dict",
"def _standardize(self, x):\r\n\t\tkurts = kurtosis(x) # calculate Fisher kurtosis\r\n\t\tk_x = np.abs(kurts)**(1./4) # the quantity for standardization (k_x in [1])\r\n\t\tx_hat = x / k_x # the standardized data\r\n\t\treturn x_hat",
"def standardize(X):\n\n X_out = (X - np.mean(X))/np.std(X)\n\n return X_out",
"def standardization(self):\n if self._standardization is None:\n raise AttributeError('Labels have not been standardized.')\n return self._standardization",
"def scaled(cls, data, cutoff=0.5):\n data = cls._numpify(data)\n data_scaled = data / np.mean(data)\n return SubselectionAlgorithm.cutoff(data_scaled, cutoff=cutoff)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
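A note on the feature_scale document above: std_scale is assumed to be a scaler fitted once on the training invoice lines and kept as an object attribute, so the exact same transformation can be reused (and copied) at prediction time. A rough sketch of that pattern, using scikit-learn's StandardScaler and the hypothetical helper names fit_scaler / apply_scaler:

# Sketch only: StandardScaler plays the role of the stored std_scale attribute.
import pandas as pd
from sklearn.preprocessing import StandardScaler

def fit_scaler(df_train, quant_features=('Quantity', 'UnitPrice')):
    """Fit the scaler once; the result is what would be stored on the object."""
    return StandardScaler().fit(df_train[list(quant_features)])

def apply_scaler(df, std_scale, quant_features=('Quantity', 'UnitPrice')):
    """Replace quantitative columns by standardized STD_* columns."""
    X_std = std_scale.transform(df[list(quant_features)])
    df_std = pd.DataFrame(
        X_std,
        index=df.index,
        columns=['STD_{}'.format(name) for name in quant_features],
    )
    # Original quantitative columns are dropped, standardized ones are appended.
    return pd.concat([df.drop(columns=list(quant_features)), df_std], axis=1)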
Returns the market segment ID of a customer based on the customer invoice lines given as a parameter. Feature transformations are applied to the data in the invoice lines. Once done, a machine learning algorithm is invoked to predict the customer's market segment. | def get_customer_marketSegment(self, df_invoice_line_customer):
#-------------------------------------------------------------------------
# Building data model
#-------------------------------------------------------------------------
self.data_transform(df_invoice_line_customer)
#-------------------------------------------------------------------------
# Customer features are built thanks to transformers.
#-------------------------------------------------------------------------
self.df_customers_features_build()
#-------------------------------------------------------------------------
# Customer market segment is predicted
#-------------------------------------------------------------------------
X_test = self._df_customers.values
y_pred = self._classifier_model.predict(X_test)
segmentID = y_pred[0]
return segmentID | [
"def predict_segment(self, df_invoice_line=None):\n if df_invoice_line is not None:\n self.data_transform(df_invoice_line) \n self.df_customers_features_build() \n else:\n pass\n X_test = self._df_customers.values\n y_pred = self._classifier_model.predict(X_test)\n return y_pred[0]",
"def order_process(self, customerID, list_stockCode, list_quantity\\\n , orderDate=None):\n\n segmentID = -1\n\n #-------------------------------------------------------------------------\n # A new customer is created and inserted into data-set.\n #-------------------------------------------------------------------------\n if customerID is None:\n customerID = int(self.createCustomerID())\n else:\n pass\n \n #-------------------------------------------------------------------------\n # A new dataframe with new invoice lines are created.\n #-------------------------------------------------------------------------\n df_invoice_line = self.create_customer_df_invoice_line(customerID\\\n , list_stockCode, list_quantity, orderDate)\n \n #-------------------------------------------------------------------------\n # Original dataframe is updated with customer invoices lines.\n #-------------------------------------------------------------------------\n print(\"order_process : shape before concat= \"+str(self._df_invoice_original.shape))\n self._df_invoice_original \\\n = pd.concat([self._df_invoice_original, df_invoice_line], axis=0)\n print(\"order_process : shape after concat= \"+str(self._df_invoice_original.shape))\n \n #-------------------------------------------------------------------------\n # All invoices lines (including new one) related to customer is retrieved \n # from original dataframe.\n #-------------------------------------------------------------------------\n df_invoice_line_customer \\\n = self.get_customer_history_df_invoice_line(customerID)\n\n #-------------------------------------------------------------------------\n # When calling get_customer_marketSegment(), df_invoice_line_customer is\n # concatened to the original dataframe.\n #------------------------------------------------------------------------- \n segmentID = self.get_customer_marketSegment(df_invoice_line_customer)\n \n return segmentID, customerID",
"def create_customer_df_invoice_line(self, customerID, list_stockCode\\\n , list_quantity, invoiceDate):\n \n dict_invoice = dict()\n\n dict_invoice['Quantity'] = list_quantity\n dict_invoice['StockCode'] = list_stockCode\n\n #------------------------------------------------------------------------\n # Build invoiceDate from local current time\n #------------------------------------------------------------------------\n if invoiceDate is None:\n time_struct = time.localtime()\n invoiceDate = str(time_struct.tm_year)+'-'+str(time_struct.tm_mon)\\\n +'-'+str(time_struct.tm_mday)\n invoiceDate +=' '\n invoiceDate +=str(time_struct.tm_hour)+':'+str(time_struct.tm_min)\\\n +':'+str(time_struct.tm_sec)\n invoiceDate = pd.Timestamp(invoiceDate)\n else:\n pass\n\n\n #------------------------------------------------------------------------\n # Lists initialization\n #------------------------------------------------------------------------\n list_customerID = list()\n list_invoiceNo = list()\n list_invoiceDate = list()\n list_invoice_line_index = list()\n \n #------------------------------------------------------------------------\n # Increase Invoice number\n #------------------------------------------------------------------------\n invoiceNo = max(self._df_invoice_original.InvoiceNo)\n invoiceNo += 1\n\n #------------------------------------------------------------------------\n # Get latest invoice line index value\n #------------------------------------------------------------------------\n invoice_line_index = max(self._df_invoice_original.index)\n\n #------------------------------------------------------------------------\n # Build lists for CustomerID, InvoiceNo, InvoiceDate\n # A list of incremented indexes is built for new rows.\n #------------------------------------------------------------------------\n for quantity in list_quantity:\n list_customerID.append(customerID)\n list_invoiceNo.append(invoiceNo)\n list_invoiceDate.append(invoiceDate)\n invoice_line_index += 1\n list_invoice_line_index.append(invoice_line_index) \n\n \n dict_invoice['CustomerID'] = list_customerID\n dict_invoice['InvoiceNo'] = list_invoiceNo\n dict_invoice['InvoiceDate'] = list_invoiceDate\n\n #------------------------------------------------------------------------\n # Get description list from list of stock codes.\n #------------------------------------------------------------------------\n list_description = self.getDescriptionList(list_stockCode)\n \n dict_invoice['Description'] = list_description\n\n #------------------------------------------------------------------------\n # Get unit price list from list of stock codes.\n #------------------------------------------------------------------------\n list_unitPrice = self.getUnitPriceList(list_stockCode)\n \n dict_invoice['UnitPrice'] = list_unitPrice\n\n #------------------------------------------------------------------------\n # Dataframe with new invoices lines is created.\n #------------------------------------------------------------------------\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys()\\\n , index=list_invoice_line_index)\n \n return df_invoice_line",
"def get_customer_history_df_invoice_line(self, customerID):\n df_invoice_line \\\n = self._df_invoice_original[self._df_invoice_original.CustomerID \\\n == customerID]\n return df_invoice_line",
"def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice",
"def get_from_transaction_id_and_invoice(self, cr, uid, st_line, context=None):\n st_obj = self.pool.get('account.bank.statement.line')\n res = {}\n invoice_obj = self.pool.get('account.invoice')\n invoice_id = invoice_obj.search(\n cr, uid,\n [('transaction_id', '=', st_line['transaction_id'])],\n context=context)\n if len(invoice_id) > 1:\n raise ErrorTooManyPartner(\n _('Line named \"%s\" (Ref:%s) was matched by more than '\n 'one partner.') % (st_line['name'], st_line['ref']))\n elif len(invoice_id) == 1:\n invoice = invoice_obj.browse(cr, uid, invoice_id[0],\n context=context)\n res['partner_id'] = invoice.partner_id.id\n # we want the move to have the same ref than the found\n # invoice's move, thus it will be easier to link them for the\n # accountants\n if invoice.move_id:\n res['ref'] = invoice.move_id.ref\n st_vals = st_obj.get_values_for_line(\n cr, uid,\n profile_id=st_line['profile_id'],\n master_account_id=st_line['master_account_id'],\n partner_id=res.get('partner_id', False),\n line_type=st_line['type'],\n amount=st_line['amount'] if st_line['amount'] else 0.0,\n context=context)\n res.update(st_vals)\n return res",
"def get_customer_segments(self):\n self.data = pd.merge(self.data, self.cs.fetch()[['client', 'segments']], on='client', how='left')",
"def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n },\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data",
"def invoice_id(self):\n return self._invoice_id",
"def inv_line_new_characteristic_hashcode(self, invoice_line):\n return \"%s-%s-%s\"%(\n invoice_line['account_id'],\n invoice_line.get('analytic_account_id',\"False\"),\n invoice_line.get('date_maturity',\"False\"))",
"def _get_account_analytic_invoice(self, cursor, user, picking, move_line):\n if move_line.purchase_line_id:\n return move_line.purchase_line_id.order_id.account_analytic_id.id\n return super(stock_picking, self)._get_account_analytic_invoice(cursor, user, picking, move_line)",
"def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True",
"def single_customer(customer_name, invoice_file):\n customer_furniture = partial(add_furniture, invoice_file=invoice_file, customer_name=customer_name)\n\n FIELD_NAME = ['customer_name', 'item_code', 'item_description', 'item_monthly_price']\n\n def add_rentals(rental_items):\n ''' \n Add rentals info file for individual customer \n :param rental_items customer rental item information\n '''\n nonlocal FIELD_NAME\n\n with open(rental_items, 'r') as rental_file:\n reader = csv.DictReader(rental_file, fieldnames=('item_code', 'item_description', 'item_monthly_price'))\n for item in reader:\n customer_furniture(**item)\n\n return add_rentals",
"def generate_invoice(self):\n if self.state == 'done' and self.sale_picking_line_ids:\n invoice = self.env['account.invoice'].create({\n 'partner_id': self.sale_order_id.partner_invoice_id.id,\n 'partner_shipping_id': self.sale_order_id.partner_shipping_id.id,\n 'sale_picking_id': self.id,\n })\n invoice._onchange_partner_id()\n invoice.write({\n 'payment_term_id': self.sale_order_id.payment_term_id.id if self.sale_order_id else False,\n 'origin': self.name,\n 'currency_id': self.sale_order_id.currency_id.id if self.sale_order_id else False,\n })\n for product_id, lines in groupby(self.sale_picking_line_ids, lambda l: l.product_id):\n lines = list(lines)\n # AMH#-- product uom (traitment) ...\n qty = sum([l.nbr_carton for l in lines])\n result = self.to_create_invoice_line(invoice, product_id)\n if len(result) == 1:\n result = result[0]\n invoice_line = self.env['account.invoice.line'].create({\n 'invoice_id': invoice.id,\n 'product_id': product_id.id,\n 'quantity': qty,\n 'name': result['name'],\n 'uom_id': result['uom_id'],\n 'account_id': result['account_id'],\n 'price_unit': result['price_unit'],\n 'invoice_line_tax_ids': [(6, 0, result['taxes'])],\n })\n invoice_line._onchange_product_id()\n invoice_line.write({\n 'name': result['name'],\n 'uom_id': result['uom_id'],\n 'price_unit': result['price_unit'],\n 'invoice_line_tax_ids': [(6, 0, result['taxes'])],\n })\n self.account_invoice_id = invoice",
"def get_customer_segments(self, date):\n date = current_date_to_day().isoformat() if date is None else date\n self.products = pd.merge(self.products,\n self.cs.fetch(start_date=convert_dt_to_day_str(date))[['client', 'segments']],\n on='client', how='left')",
"def single_customer(customer_name, invoice_file):\n def rental_function(rental_items):\n with open(rental_items) as file:\n new_item = partial(add_furniture, invoice_file, customer_name)\n reader = csv.reader(file)\n for row in reader:\n identifier = row[0]\n item = row[1]\n price = row[2]\n new_item(identifier, item, price)\n\n return rental_function",
"def compute_counterpart_lines(self):\n for item in self:\n move_debit_lines = []\n move_credit_lines = []\n\n # list of all the move lines of the payment's move\n line_list = []\n for entry in item.journal_entry_ids:\n for line in entry.line_ids:\n if line.account_id.treasury_planning:\n line_list.append(line)\n\n # for each line above collect all the reconciled counterpart lines\n for line in line_list:\n if line.credit > 0 and line.debit == 0:\n for match in line.matched_debit_ids:\n move_debit_lines.append(match.debit_move_id.id)\n\n if line.credit == 0 and line.debit > 0:\n for match in line.matched_credit_ids:\n move_credit_lines.append(match.credit_move_id.id)\n\n if move_credit_lines:\n counterpart_move_ids = move_credit_lines\n else:\n counterpart_move_ids = move_debit_lines\n\n # bank move share is transformed to dictionary\n bank_move_dict = (ast.literal_eval(item.cf_share) if\n item.cf_share else {})\n\n # the share of each counterpart line is \"merged or added\"\n # in a weighted manner to the bank line share\n for cpt in counterpart_move_ids:\n dest_move_line = self.env['account.move.line'].browse(cpt)\n weight = round(dest_move_line.balance / item.amount, 2)\n # counterpart share is transformed into dictionary\n move_line_dict = ast.literal_eval(dest_move_line.cf_share)\n\n # each key is finally added to the bank line share\n for key, value in move_line_dict.iteritems():\n draft_dictionary = dictop.sum_dictionary(\n bank_move_dict.get(key, {}), 1,\n move_line_dict.get(key, {}), weight)\n bank_move_dict[key] = dictop.check_dict_total(\n draft_dictionary, 1)\n\n # the dictionary is transformed into string and assigned\n item.cf_share = json.dumps(bank_move_dict)",
"def single_customer(customer_name, invoice_file):\n def customer_rental(rental_items):\n \"\"\"\n Loop through rental_items file and append each row to curried invoice_file with same\n customer_name\n \"\"\"\n customer = partial(add_furniture, invoice_file=invoice_file, customer_name=customer_name)\n with open(rental_items, \"r\") as rental_csv:\n for row in csv.reader(rental_csv):\n customer(item_code=row[0], item_description=row[1], item_monthly_price=row[2])\n return customer_rental",
"def _get_invoiceable_lines(self, final=False):\n down_payment_line_ids = []\n invoiceable_line_ids = []\n pending_section = None\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n original_term_line = self._context.get(\"term_line\", False)\n\n if original_term_line:\n order_line = [self._context.get('so_line')]\n for item in self._context.get('so_line').fal_invoice_milestone_line_date_ids.filtered(lambda a: not a.is_final):\n for invoice in item.invoice_id.invoice_line_ids:\n order_line.append(invoice.sale_line_ids)\n\n for line in order_line:\n if line.display_type == 'line_section':\n # Only invoice the section if one of its lines is invoiceable\n pending_section = line\n continue\n if line.display_type != 'line_note' and float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if line.qty_to_invoice > 0 or (line.qty_to_invoice < 0 and final) or line.display_type == 'line_note':\n if line.is_downpayment:\n # Keep down payment lines separately, to put them together\n # at the end of the invoice, in a specific dedicated section.\n down_payment_line_ids.append(line.id)\n continue\n if pending_section:\n invoiceable_line_ids.append(pending_section.id)\n pending_section = None\n invoiceable_line_ids.append(line.id)\n\n return self.env['sale.order.line'].browse(invoiceable_line_ids + down_payment_line_ids)\n else:\n return super(SaleOrder, self)._get_invoiceable_lines(final)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function creates an invoice by compounding invoice lines from the data given as parameters. Once done, it computes the market segment the customer belongs to. If customerID is None, a new customer identifier is created before the order is processed. | def order_process(self, customerID, list_stockCode, list_quantity\
, orderDate=None):
segmentID = -1
#-------------------------------------------------------------------------
    # If no identifier is provided, a new customer is created and inserted into the data-set.
#-------------------------------------------------------------------------
if customerID is None:
customerID = int(self.createCustomerID())
else:
pass
#-------------------------------------------------------------------------
    # A new dataframe with the new invoice lines is created.
#-------------------------------------------------------------------------
df_invoice_line = self.create_customer_df_invoice_line(customerID\
, list_stockCode, list_quantity, orderDate)
#-------------------------------------------------------------------------
    # The original dataframe is updated with the customer's invoice lines.
#-------------------------------------------------------------------------
print("order_process : shape before concat= "+str(self._df_invoice_original.shape))
self._df_invoice_original \
= pd.concat([self._df_invoice_original, df_invoice_line], axis=0)
print("order_process : shape after concat= "+str(self._df_invoice_original.shape))
#-------------------------------------------------------------------------
    # All invoice lines (including the new ones) related to the customer are
    # retrieved from the original dataframe.
#-------------------------------------------------------------------------
df_invoice_line_customer \
= self.get_customer_history_df_invoice_line(customerID)
#-------------------------------------------------------------------------
    # When calling get_customer_marketSegment(), df_invoice_line_customer is
    # concatenated to the original dataframe.
#-------------------------------------------------------------------------
segmentID = self.get_customer_marketSegment(df_invoice_line_customer)
return segmentID, customerID | [
"def create_customer_df_invoice_line(self, customerID, list_stockCode\\\n , list_quantity, invoiceDate):\n \n dict_invoice = dict()\n\n dict_invoice['Quantity'] = list_quantity\n dict_invoice['StockCode'] = list_stockCode\n\n #------------------------------------------------------------------------\n # Build invoiceDate from local current time\n #------------------------------------------------------------------------\n if invoiceDate is None:\n time_struct = time.localtime()\n invoiceDate = str(time_struct.tm_year)+'-'+str(time_struct.tm_mon)\\\n +'-'+str(time_struct.tm_mday)\n invoiceDate +=' '\n invoiceDate +=str(time_struct.tm_hour)+':'+str(time_struct.tm_min)\\\n +':'+str(time_struct.tm_sec)\n invoiceDate = pd.Timestamp(invoiceDate)\n else:\n pass\n\n\n #------------------------------------------------------------------------\n # Lists initialization\n #------------------------------------------------------------------------\n list_customerID = list()\n list_invoiceNo = list()\n list_invoiceDate = list()\n list_invoice_line_index = list()\n \n #------------------------------------------------------------------------\n # Increase Invoice number\n #------------------------------------------------------------------------\n invoiceNo = max(self._df_invoice_original.InvoiceNo)\n invoiceNo += 1\n\n #------------------------------------------------------------------------\n # Get latest invoice line index value\n #------------------------------------------------------------------------\n invoice_line_index = max(self._df_invoice_original.index)\n\n #------------------------------------------------------------------------\n # Build lists for CustomerID, InvoiceNo, InvoiceDate\n # A list of incremented indexes is built for new rows.\n #------------------------------------------------------------------------\n for quantity in list_quantity:\n list_customerID.append(customerID)\n list_invoiceNo.append(invoiceNo)\n list_invoiceDate.append(invoiceDate)\n invoice_line_index += 1\n list_invoice_line_index.append(invoice_line_index) \n\n \n dict_invoice['CustomerID'] = list_customerID\n dict_invoice['InvoiceNo'] = list_invoiceNo\n dict_invoice['InvoiceDate'] = list_invoiceDate\n\n #------------------------------------------------------------------------\n # Get description list from list of stock codes.\n #------------------------------------------------------------------------\n list_description = self.getDescriptionList(list_stockCode)\n \n dict_invoice['Description'] = list_description\n\n #------------------------------------------------------------------------\n # Get unit price list from list of stock codes.\n #------------------------------------------------------------------------\n list_unitPrice = self.getUnitPriceList(list_stockCode)\n \n dict_invoice['UnitPrice'] = list_unitPrice\n\n #------------------------------------------------------------------------\n # Dataframe with new invoices lines is created.\n #------------------------------------------------------------------------\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys()\\\n , index=list_invoice_line_index)\n \n return df_invoice_line",
"def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice",
"def get_customer_marketSegment(self, df_invoice_line_customer):\n #-------------------------------------------------------------------------\n # Building data model \n #-------------------------------------------------------------------------\n self.data_transform(df_invoice_line_customer)\n\n #-------------------------------------------------------------------------\n # Customer features are built thanks to transformers.\n #-------------------------------------------------------------------------\n self.df_customers_features_build()\n \n #-------------------------------------------------------------------------\n # Customer market segment is predicted\n #-------------------------------------------------------------------------\n X_test = self._df_customers.values\n y_pred = self._classifier_model.predict(X_test)\n segmentID = y_pred[0]\n \n return segmentID",
"def generate_invoice(self):\n if self.state == 'done' and self.sale_picking_line_ids:\n invoice = self.env['account.invoice'].create({\n 'partner_id': self.sale_order_id.partner_invoice_id.id,\n 'partner_shipping_id': self.sale_order_id.partner_shipping_id.id,\n 'sale_picking_id': self.id,\n })\n invoice._onchange_partner_id()\n invoice.write({\n 'payment_term_id': self.sale_order_id.payment_term_id.id if self.sale_order_id else False,\n 'origin': self.name,\n 'currency_id': self.sale_order_id.currency_id.id if self.sale_order_id else False,\n })\n for product_id, lines in groupby(self.sale_picking_line_ids, lambda l: l.product_id):\n lines = list(lines)\n # AMH#-- product uom (traitment) ...\n qty = sum([l.nbr_carton for l in lines])\n result = self.to_create_invoice_line(invoice, product_id)\n if len(result) == 1:\n result = result[0]\n invoice_line = self.env['account.invoice.line'].create({\n 'invoice_id': invoice.id,\n 'product_id': product_id.id,\n 'quantity': qty,\n 'name': result['name'],\n 'uom_id': result['uom_id'],\n 'account_id': result['account_id'],\n 'price_unit': result['price_unit'],\n 'invoice_line_tax_ids': [(6, 0, result['taxes'])],\n })\n invoice_line._onchange_product_id()\n invoice_line.write({\n 'name': result['name'],\n 'uom_id': result['uom_id'],\n 'price_unit': result['price_unit'],\n 'invoice_line_tax_ids': [(6, 0, result['taxes'])],\n })\n self.account_invoice_id = invoice",
"def action_create_invoice(self):\r\n inv_obj = self.env['account.invoice']\r\n inv_line_obj = self.env['account.invoice.line']\r\n # account_id = self.income_acc_id\r\n inv_val = {\r\n 'type': 'out_invoice',\r\n # 'transaction_ids': self.ids,\r\n 'state': 'draft',\r\n 'partner_id': self.customer_name.id or False,\r\n 'date_invoice': fields.Date.context_today(self),\r\n 'origin': self.booking_no,\r\n 'freight_booking': self.id,\r\n 'account_id': self.customer_name.property_account_receivable_id.id or False,\r\n 'company_id': self.company_id.id,\r\n 'user_id': self.sales_person.id,\r\n }\r\n\r\n invoice = inv_obj.create(inv_val)\r\n for line in self.cost_profit_ids:\r\n sale_unit_price_converted = line.list_price * line.profit_currency_rate\r\n account_id = False\r\n if line.product_id.property_account_income_id:\r\n account_id = line.product_id.property_account_income_id\r\n elif line.product_id.categ_id.property_account_income_categ_id:\r\n account_id = line.product_id.categ_id.property_account_income_categ_id\r\n if sale_unit_price_converted > 0:\r\n inv_line = inv_line_obj.create({\r\n 'invoice_id': invoice.id or False,\r\n 'account_id': account_id.id or False,\r\n 'name': line.product_id.name or '',\r\n 'product_id': line.product_id.id or False,\r\n 'quantity': line.profit_qty or 0.0,\r\n 'uom_id': line.uom_id.id or False,\r\n 'price_unit': sale_unit_price_converted or 0.0\r\n })\r\n line.write({'invoice_id': invoice.id or False,\r\n 'inv_line_id': inv_line.id or False})\r\n\r\n self.shipment_booking_status = '10'",
"def invoice_line_create(self, invoice_id, qty):\n invoice_lines = self.env['account.invoice.line']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n for line in self:\n if not float_is_zero(qty, precision_digits=precision):\n vals = line._prepare_invoice_line(qty=qty)\n vals.update({'invoice_id': invoice_id, 'purchase_line_id': line.id})\n invoice_lines |= self.env['account.invoice.line'].create(vals)\n return invoice_lines",
"def gen_invoice():\n invoice_id = gen_id()\n account_id = gen_id()\n invoice_date = datetime(2012, random.choice(range(1,13)), random.choice(range(1,29)))\n\n invoice_item_amounts = [gen_invoice_item(account_id, invoice_id, invoice_date)]\n invoice_amount = sum([i['total_amount'] for i in invoice_item_amounts])\n\n # Create invoice\n invoice = Invoice(invoice_id=invoice_id,\n account_id=account_id,\n invoice_date=invoice_date,\n invoice_amount=invoice_amount)\n invoice.save()\n\n # Create payment\n payment_id = None\n if roll_dice(1):\n payment_results = gen_payment(account_id=account_id, \n invoice_id=invoice_id, \n invoice_date=invoice_date, \n amount=invoice_amount)\n payment_id = payment_results[0]\n payment_date = payment_results[1]\n\n # Create refund\n if roll_dice(0) and payment_id is not None:\n refund = gen_refund(account_id=account_id, \n invoice_id=invoice_id, \n invoice_date=invoice_date, \n amount=invoice_amount, \n payment_id=payment_id,\n payment_date=payment_date)\n\n # Create term extension\n if roll_dice(0) and payment is not None:\n ext = gen_term_extension(account_id=account_id, \n invoice_id=invoice_id, \n invoice_date=invoice_date,\n service_start=invoice_items[0].service_start,\n service_end=invoice_items[0].service_end)",
"def create_invoice(self, payment_req_bash_var, amount_msat: int) -> None:\n pass",
"def _prepare_invoice(self, cr, uid, order, lines, context=None):\n invoice_vals = super(sale_order, self)._prepare_invoice(cr, uid, order,\n lines, context=context)\n invoice_vals.update({'partner_bank_id': order.partner_bank_id.id})\n return invoice_vals",
"def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n },\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data",
"def create_invoice_line_items(self):\n order_line_items = OrderLineItem.objects(order=self.order, status='new')\n for order_line_item in order_line_items:\n InvoiceLineItem(invoice=self,\n order_line_item=order_line_item,\n price=order_line_item.price,\n quantity=order_line_item.quantity,\n notes=order_line_item.notes\n ).save()\n order_line_item.update(status='invoiced')",
"def invoice_line_create(self, invoice_id, qty, position, sequence):\n self.ensure_one()\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoice_line_id = False\n if not float_is_zero(qty, precision_digits=precision):\n vals = self._prepare_invoice_line(invoice_id, qty, position)\n vals.update({'invoice_id': invoice_id, 'sequence': sequence, 'sale_line_ids': [(6, 0, [self.procurement_id.sale_line_id.id])]})\n invoice_line_id = self.env['account.invoice.line'].create(vals)\n \n return invoice_line_id",
"def _prepare_invoice(self):\n\t\tself.ensure_one()\n\t\tcompany_id = self.company_id.id\n\t\tjournal_id = (self.env['account.invoice'].with_context(company_id=company_id or self.env.user.company_id.id)\n\t\t\t.default_get(['journal_id'])['journal_id'])\n\t\tif not journal_id:\n\t\t\traise UserError(_('Please define an accounting sales journal for this company.'))\n\t\tinvoice_vals = {\n\t\t\t'name': self.client_order_ref or '',\n\t\t\t'origin': self.name,\n\t\t\t'type': 'out_invoice',\n\t\t\t'account_id': self.partner_invoice_id.property_account_receivable_id.id,\n\t\t\t'partner_id': self.partner_invoice_id.id,\n\t\t\t'partner_shipping_id': self.partner_shipping_id.id,\n\t\t\t'journal_id': journal_id,\n\t\t\t'currency_id': self.pricelist_id.currency_id.id,\n\t\t\t'comment': self.note,\n\t\t\t'payment_term_id': self.payment_term_id.id,\n\t\t\t'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n\t\t\t'company_id': company_id,\n\t\t\t'user_id': self.user_id and self.user_id.id,\n\t\t\t'team_id': self.team_id.id,\n\t\t\t'transaction_ids': [(6, 0, self.transaction_ids.ids)],\n\t\t\t'bl_number': self.bl_no,\n\t\t\t'container_no': self.client_order_ref,\n\t\t\t'job_number':self.id,\n\n\t\t}\n\t\treturn invoice_vals",
"def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True",
"def create_invoices(self, cr, uid, ids, context=None):\n group_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'hmtk_ndk_sale_customization', 'group_full_invoicing')[1]\n group = self.pool.get('res.groups').browse(cr, uid, group_id, context=context)\n picking_obj = self.pool.get('stock.picking')\n full_invoice_uids = [x.id for x in group.users]\n sale_obj = self.pool.get('sale.order')\n act_window = self.pool.get('ir.actions.act_window')\n wizard = self.browse(cr, uid, ids[0], context)\n sale_ids = context.get('active_ids', [])\n picking_id = picking_obj.search(cr, uid, [('sale_id', '=', sale_ids[0])], context=context)\n if wizard.advance_payment_method == 'all':\n if picking_id:\n picking = picking_obj.browse(cr, uid, picking_id, context=context)[0]\n if uid in full_invoice_uids or picking.state == 'done':\n # create the final invoices of the active sales orders\n return super(sale_advance_payment_inv,self).create_invoices(cr, uid, ids, context=context)\n else:\n raise osv.except_osv(_('Warning!'), _('You do not have permissions for full invoicing. You can create partial invoice. Select option other than Invoice the whole sales order.'))\n elif uid in full_invoice_uids:\n return super(sale_advance_payment_inv,self).create_invoices(cr, uid, ids, context=context)\n else:\n raise osv.except_osv(_('Warning!'), _('Allow full invoicing for this user under user groups to perform this action.')) \n else:\n return super(sale_advance_payment_inv,self).create_invoices(cr, uid, ids, context=context)",
"def compute_counterpart_lines(self):\n for item in self:\n move_debit_lines = []\n move_credit_lines = []\n\n # list of all the move lines of the payment's move\n line_list = []\n for entry in item.journal_entry_ids:\n for line in entry.line_ids:\n if line.account_id.treasury_planning:\n line_list.append(line)\n\n # for each line above collect all the reconciled counterpart lines\n for line in line_list:\n if line.credit > 0 and line.debit == 0:\n for match in line.matched_debit_ids:\n move_debit_lines.append(match.debit_move_id.id)\n\n if line.credit == 0 and line.debit > 0:\n for match in line.matched_credit_ids:\n move_credit_lines.append(match.credit_move_id.id)\n\n if move_credit_lines:\n counterpart_move_ids = move_credit_lines\n else:\n counterpart_move_ids = move_debit_lines\n\n # bank move share is transformed to dictionary\n bank_move_dict = (ast.literal_eval(item.cf_share) if\n item.cf_share else {})\n\n # the share of each counterpart line is \"merged or added\"\n # in a weighted manner to the bank line share\n for cpt in counterpart_move_ids:\n dest_move_line = self.env['account.move.line'].browse(cpt)\n weight = round(dest_move_line.balance / item.amount, 2)\n # counterpart share is transformed into dictionary\n move_line_dict = ast.literal_eval(dest_move_line.cf_share)\n\n # each key is finally added to the bank line share\n for key, value in move_line_dict.iteritems():\n draft_dictionary = dictop.sum_dictionary(\n bank_move_dict.get(key, {}), 1,\n move_line_dict.get(key, {}), weight)\n bank_move_dict[key] = dictop.check_dict_total(\n draft_dictionary, 1)\n\n # the dictionary is transformed into string and assigned\n item.cf_share = json.dumps(bank_move_dict)",
"def action_create_invoice(self):\n for service in self:\n if service.amount <= 0.0:\n msg = _(\n \"You can not create service invoice without amount!!\"\n \"Please add Service amount first !!\"\n )\n raise ValidationError(msg)\n\n deposit_inv_ids = self.env[\"account.move\"].search(\n [\n (\"vehicle_service_id\", \"=\", service.id),\n (\"move_type\", \"=\", \"out_invoice\"),\n (\"state\", \"in\", [\"draft\", \"open\", \"in_payment\"]),\n ]\n )\n if deposit_inv_ids:\n msg = _(\n \"Deposit invoice is already Pending\\n\"\n \"Please proceed that deposit invoice first\"\n )\n raise UserError(msg)\n\n if not service.purchaser_id:\n msg = _(\n \"Please configure Driver from vehicle or in \" \"a service order!!\"\n )\n raise UserError(msg)\n\n inv_ser_line = [\n (\n 0,\n 0,\n {\n \"name\": ustr(\n service.service_type_id and service.service_type_id.name\n )\n + \" - Service Cost\",\n \"price_unit\": service.amount,\n \"account_id\": service.vehicle_id\n and service.vehicle_id.income_acc_id\n and service.vehicle_id.income_acc_id.id\n or False,\n },\n )\n ]\n for line in service.parts_ids:\n inv_line_values = {\n \"product_id\": line.product_id and line.product_id.id or False,\n \"name\": line.product_id and line.product_id.name or \"\",\n \"price_unit\": line.price_unit or 0.00,\n \"quantity\": line.qty,\n \"account_id\": service.vehicle_id\n and service.vehicle_id.income_acc_id\n and service.vehicle_id.income_acc_id.id\n or False,\n }\n inv_ser_line.append((0, 0, inv_line_values))\n inv_values = {\n \"partner_id\": service.purchaser_id and service.purchaser_id.id or False,\n \"move_type\": \"out_invoice\",\n \"invoice_date\": service.date_open,\n \"invoice_date_due\": service.date_complete,\n \"invoice_line_ids\": inv_ser_line,\n \"vehicle_service_id\": service.id,\n \"is_invoice_receive\": True,\n }\n self.env[\"account.move\"].create(inv_values)",
"def get_customer_history_df_invoice_line(self, customerID):\n df_invoice_line \\\n = self._df_invoice_original[self._df_invoice_original.CustomerID \\\n == customerID]\n return df_invoice_line",
"def generate_new_visit(self):\n if self.consecutive:\n customer_id = np.random.choice(\n self.customerIds, 1\n ) # choose a customer at random\n insured = self.Customers[self.Customers[\"customer_id\"] == customer_id[0]][\n \"insurance\"\n ].values[\n 0\n ] # does the customer have insurance?\n experiment_id = self.Customers[\n self.Customers[\"customer_id\"] == customer_id[0]\n ][\"experiment_id\"].values[\n 0\n ] # does the customer have insurance?\n\n event_list = (\n self.billing_choose_dates()\n ) # generate dates associated with this invoice\n cpt_code = random.sample(self.CPTCodes, 1)[0]\n date_of_service = str(event_list.values[0][0])\n created_on = str(event_list.values[1][0])\n date_of_eob = str(event_list.values[2][0])\n date_of_provider_adjustment = str(event_list.values[3][0])\n date_of_patient_payment = str(event_list.values[4][0])\n # generate a new invoice\n (invoice_id, charge_amount) = self.generate_new_invoice(\n created_on, date_of_service, customer_id, cpt_code\n )\n # generate subsequent EOB (i.e. copay, EOB adjustment, EOB payment)\n remaining_amount = self.generate_eob(\n date_of_service,\n date_of_eob,\n insured,\n invoice_id,\n cpt_code,\n charge_amount,\n )\n # generate provider adjustments\n remaining_amount = self.generate_provider_adjustment(\n date_of_provider_adjustment, invoice_id, cpt_code, remaining_amount\n )\n # generate a possible payment from the patient\n remaining_amount = self.generate_patient_payment(\n date_of_patient_payment,\n invoice_id,\n cpt_code,\n remaining_amount,\n experiment_id,\n )\n # record the remaining amounts in a separate table.\n self.record_remaining_amount(\n date_of_patient_payment, invoice_id, cpt_code, remaining_amount\n )\n return True\n else:\n print(\"Error generating new invoice- customerIds aren't consecutive\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
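A minimal usage sketch for the order_process document above. Everything in it is assumed for illustration only: the owning object (called `model` here), the stock codes, and the presence of a loaded invoice dataframe plus a trained classifier are not part of the dataset entry.

# Hypothetical call to order_process; names and values are illustrative assumptions.
list_stockCode = ["85123A", "71053"]   # assumed stock codes present in the data
list_quantity = [2, 1]                 # one quantity per stock code

# customerID=None asks the method to create a fresh customer identifier first.
segmentID, customerID = model.order_process(None, list_stockCode, list_quantity)
print("Customer {} predicted in segment {}".format(customerID, segmentID))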
Return the segment identifier a customer is predicted to belong to. | def predict_segment(self, df_invoice_line=None):
if df_invoice_line is not None:
self.data_transform(df_invoice_line)
self.df_customers_features_build()
else:
pass
X_test = self._df_customers.values
y_pred = self._classifier_model.predict(X_test)
return y_pred[0] | [
"def get_customer_marketSegment(self, df_invoice_line_customer):\n #-------------------------------------------------------------------------\n # Building data model \n #-------------------------------------------------------------------------\n self.data_transform(df_invoice_line_customer)\n\n #-------------------------------------------------------------------------\n # Customer features are built thanks to transformers.\n #-------------------------------------------------------------------------\n self.df_customers_features_build()\n \n #-------------------------------------------------------------------------\n # Customer market segment is predicted\n #-------------------------------------------------------------------------\n X_test = self._df_customers.values\n y_pred = self._classifier_model.predict(X_test)\n segmentID = y_pred[0]\n \n return segmentID",
"def segment_id(self):\n return self._segment_id + 1",
"def getSeg_id(self):\n return self.seg_id",
"def get_segmentation_id_by_segment_id(context, segment_id):\n session = context.session\n return session.query(db_models.AristaProvisionedNets.segmentation_id). \\\n filter_by(id=segment_id).first()",
"def segment(self):\n return self._pci_address['segment']",
"def segment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"segment_name\")",
"def get_segmentation_id(context, project_id, network_id):\n session = context.session\n return session.query(db_models.AristaProvisionedNets.segmentation_id). \\\n filter_by(project_id=project_id,\n network_id=network_id).first()",
"def get_segment_name(self, offset):\n self.ret = idc.get_segm_name(offset)\n return self.ret",
"def segment_number(self):\n if hasattr(self, '_m_segment_number'):\n return self._m_segment_number if hasattr(self, '_m_segment_number') else None\n\n self._m_segment_number = self.segment_number_raw.value\n return self._m_segment_number if hasattr(self, '_m_segment_number') else None",
"def segment_counter(self):\n return self._data_writer.get_segment_counter()",
"def isSegment(self, seg):\n # Find the intersection of segment with train\n if len(set(map(lambda x: x.stn_code, self.stn_list)).intersection(set(seg)))>=2:\n # Find the min intersection point\n mini = 100\n for stn in seg:\n for i in range(len(self.stn_list)):\n if self.stn_list[i].stn_code==stn and mini > i:\n mini = i\n\n maxi = -1\n for stn in seg:\n for i in range(len(self.stn_list)):\n if self.stn_list[i].stn_code==stn and maxi < i:\n maxi = i\n\n return [mini, maxi]\n else:\n return [-1,-1] # Segment intersection not found",
"def get_segm_num(*args):\n return _ida_segment.get_segm_num(*args)",
"def get_customer_segments(self):\n self.data = pd.merge(self.data, self.cs.fetch()[['client', 'segments']], on='client', how='left')",
"def segment_func1(self):\n # computing neighboors graph\n A = self.normal_graph()\n\n # SpectralClustering segmentation\n sc = SpectralClustering(3, affinity='precomputed', n_init=10, assign_labels='discretize')\n labels = sc.fit_predict(A)\n\n return labels",
"def getSentenceId(self):\n return( int(self.id.split('.')[1]) )",
"def segment(self, segment_id):\n # Get the list of all the segments and pull out the first one,\n # if possible\n match = self.segments(segment_id)\n # We should never get an IndexError, since segments will instead\n # throw an KeyError\n return match[0]",
"def segment_prefix(self) -> Optional[str]:\n return pulumi.get(self, \"segment_prefix\")",
"def segment_names(self):\n return self.project['segment_names']",
"def get_segm_name(*args):\n return _ida_segment.get_segm_name(*args)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
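As a hedged sketch of how predict_segment might be exercised: `model` is assumed to be an instance of the class holding these methods, already fitted, and the customer identifier is assumed to exist in its invoice data.

# Hypothetical usage of predict_segment; customerID and `model` are assumptions.
df_history = model.get_customer_history_df_invoice_line(customerID=12345)
segmentID = model.predict_segment(df_history)   # builds customer features, then predicts
print("Predicted segment:", segmentID)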
Returns a list of stock codes from a list of item descriptions. | def getStockCodeList(self, list_description=None):
list_stockCode = list()
df = self._df_invoice_original
if list_description is None:
list_stockCode = list(df.StockCode.unique())
else :
for description in list_description:
stockCode = df[df.Description==description].StockCode.unique()[0]
list_stockCode.append(stockCode)
return list_stockCode | [
"def getDescriptionList(self, list_stockCode=None):\n df = self._df_invoice_original\n\n list_description = list()\n if list_stockCode is None :\n list_description = list(df.Description.unique())\n else:\n for stockCode in list_stockCode:\n description = df[df.StockCode==stockCode].Description.unique()[0]\n list_description.append(description)\n \n return list_description",
"def codes(self):\n return [card.code for card in self.cards]",
"def find_item_codes(transaction):\n t = transaction\n item_codes = []\n if t['transaction_type'] in ('SALE', 'REFUND'):\n # Search using line item IDs and order_id\n for oli in (t['order_line_items'] or []):\n li_id = oli['line_item_id']\n item_codes.append(\n get_item_code_for_order(t['order_id'], order_line_item_id=li_id)\n )\n else:\n # Search for ITEM reference\n for ref in (transaction['references'] or []):\n if ref['reference_type'] == 'ITEM_ID':\n item_codes.append(\n get_item_code_for_item_id(ref['reference_id'])\n )\n\n return item_codes",
"def parse_quantities(quantities):\n codes = []; names = []\n\n for q in quantities:\n c, n = parse_quantity(q)\n codes.append(c)\n names.append(n)\n\n return codes, names",
"def discount_codes(self):\n return [DiscountCode(x) for x in self._dict.get('discount_codes', [])]",
"def get_price_list(self, item_list):\n price_list = []\n for item in item_list:\n price_list.append(Inventory.stock[item].price)\n return price_list",
"def codelists():\n return CodelistSet()",
"def get_order_lists(self, n_items, n_quantities):\n arr_stock_code = self._df_invoice_original.StockCode.unique()\n arr_stock_code = np.random.choice(arr_stock_code, n_items)\n list_stockCode = list(arr_stock_code)\n list_quantities = np.ones(arr_stock_code.shape[0])\n list_quantities *=n_quantities\n\n return list_stockCode, list_quantities",
"def license_codes(self) -> Sequence[str]:\n return pulumi.get(self, \"license_codes\")",
"def getStockAndMarketCodeList(date, cur):\n\tsql = \"\"\"select stockCode, marketCode from stockPriceHistDaily\n\t\t\twhere date = '%s' \"\"\" % (date)\n\tcur.execute(sql)\n\tcodelist = cur.fetchall()\n\treturn codelist",
"def get_pcode_list(self) -> List[str]:\n return self.pcodes",
"def split_stock_list1(stock_list):\n list_by_stock = []\n appl = []\n msft = []\n # Loop through stock list\n for stock_item in stock_list:\n # Identify company\n if stock_item[SYMBOL] == \"APPL\":\n appl.append(stock_item)\n elif stock_item[SYMBOL] == \"MSFT\":\n msft.append(stock_item)\n\n # Append appl and msft to return list\n list_by_stock.append(appl)\n list_by_stock.append(msft)\n\n return list_by_stock",
"def currency_symbols(self) -> typing.List[str]:\n return [currency[0] for currency in self.currency_list]",
"def all_currency_codes():\n return [(a, CURRENCIES[a].name) for a in CURRENCIES]",
"def currency_codes():\n return list(settings.CURRENCIES)",
"def create_not_included_list(codes):\n string = '\\\\begin{itemize}\\n'\n for code in codes:\n title = get_course_title_only(code)\n string += '\\\\item{' + title + '}\\n'\n string += '\\\\end{itemize}\\n'\n return string",
"def get_code_mapping( id ):\n returnVal = []\n theCodes = _theRegistry.get_code( id )\n codes = theCodes.get_codes()\n descs = theCodes.get_descriptions()\n for (code, desc) in map(None, codes, descs):\n returnVal.append( { 'code' : code, 'description' : desc } )\n return returnVal",
"def intcodes_from_list(self, intcode_list):\n intcodes = defaultdict(int) # return 0 by default\n for addr, code in enumerate(intcode_list):\n intcodes[addr] = int(code)\n return intcodes",
"def load_codes(workbook) -> List[str]:\n\n codes = []\n wb = load_workbook(workbook)\n for ws in wb:\n for row in ws.iter_rows(min_row=7, min_col=1, max_col=1, values_only=True):\n code = row[0]\n if code is None:\n continue\n else:\n code = code.replace(\" \", \"\")\n codes.append(code)\n return codes"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
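A small illustrative call for getStockCodeList. The description string below is invented for the example and may not exist in the real dataframe; `model` is the same assumed object as before.

# With no argument, all unique stock codes are returned.
all_codes = model.getStockCodeList()

# With descriptions, the first matching stock code per description is returned.
codes = model.getStockCodeList(["WHITE HANGING HEART T-LIGHT HOLDER"])  # assumed description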
Returns a list of item unit prices from a list of stock codes. | def getUnitPriceList(self, list_stockCode):
df = self._df_invoice_original
list_unitPrice = list()
for stockCode in list_stockCode:
unitPrice = df[df.StockCode==stockCode].UnitPrice.unique()[0]
list_unitPrice.append(unitPrice)
return list_unitPrice | [
"def get_price_list(self, item_list):\n price_list = []\n for item in item_list:\n price_list.append(Inventory.stock[item].price)\n return price_list",
"def getStockCodeList(self, list_description=None):\n list_stockCode = list()\n df = self._df_invoice_original\n \n if list_description is None:\n list_stockCode = list(df.StockCode.unique())\n else :\n for description in list_description:\n stockCode = df[df.Description==description].StockCode.unique()[0]\n list_stockCode.append(stockCode)\n return list_stockCode",
"def getDescriptionList(self, list_stockCode=None):\n df = self._df_invoice_original\n\n list_description = list()\n if list_stockCode is None :\n list_description = list(df.Description.unique())\n else:\n for stockCode in list_stockCode:\n description = df[df.StockCode==stockCode].Description.unique()[0]\n list_description.append(description)\n \n return list_description",
"def dishlist_prices(n: list) -> list:\r\n return [dish.price for dish in n]",
"def list_price(self):\n price = self._safe_get_element_text('ItemAttributes.ListPrice.Amount')\n currency = self._safe_get_element_text('ItemAttributes.ListPrice.CurrencyCode')\n if price:\n return price / 100, currency\n else:\n return None, None",
"def fetch_stocks(code):\n return list(stocks_table.find({\"code\": code}))",
"def get_units(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[3])\n return result",
"def add_gst (list_of_prices):\n\n add_gst=[]\n for item in list_of_prices:\n list_with_gst = round(item*1.15,2)\n add_gst+=[list_with_gst]\n return add_gst",
"def bid_prices(self):\n return [float(bid[0]) for bid in self.__bid]",
"def get_data(self,ticker_list):\n return self.broker.get_price_list(ticker_list,self._time0,self._time1)",
"def get_prices(self):\n price_range = self.soup.html.body.findAll('div', {'class': 'search-result__price'})\n prices = np.zeros(12)\n k = 0\n for i in price_range:\n # convert string into integer\n prices[k] = int(re.sub('[^\\d\\.]', '', i.text[1:]))\n k += 1\n # remove first and last entries (ads)\n prices = prices[1:-1]\n\n print(\"Prices extracted ✓\")\n return prices",
"def currency_symbols(self) -> typing.List[str]:\n return [currency[0] for currency in self.currency_list]",
"def GetTickers():\n # Constants used\n b3Industries = ['Energia-Petroleo-Gas',\n 'Industria-Financeira',\n 'Saude-Farmaceutica',\n 'Telecomunicacoes-Tecnologia',\n 'Industria-Alimenticia',\n 'Industria-Manufatureira',\n 'Servicos-diversos',\n 'Varejo',\n 'Construcao-Equipamentos',\n 'Bens-de-consumo',\n 'Industrias-em-geral']\n\n yahooFinanceUrl = 'https://br.financas.yahoo.com/industries/'\n\n # List to store the dropdown menu values\n stockInfo = []\n\n http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',\n ca_certs=certifi.where())\n\n for industry in b3Industries:\n page = http.request('GET', yahooFinanceUrl+industry)\n page_html = BeautifulSoup(page.data, 'lxml')\n for search in page_html.select(r\"tbody a.Fw\\(b\\)\"):\n if search['data-symbol'] != search['title']:\n stockInfo.append({'value': search['data-symbol'],\n 'label': (\n search['data-symbol'] +\n ' | ' + search['title'])\n })\n\n return stockInfo",
"def extract_numeric(data_list):\n try:\n data_list = [float(d) for d in data_list]\n except:\n pass\n numeric_part = []\n unit = []\n for data in data_list:\n data = str(data)\n data = data.replace(\",\", \"\")\n numeric_part.append(re.findall(r'([-]?([0-9]*[.])?[0-9]+)', data))\n this_unit = 1\n for unit_key in unit_dict.keys():\n if unit_key in data:\n this_unit = unit_dict[unit_key]\n break\n unit.append(this_unit)\n numeric_part = [x for x in numeric_part if len(x) > 0]\n if len(numeric_part) != len(data_list):\n print(f\"Warning: extract_numeric() found different number of numeric part({len(numeric_part)}) and data list({len(data_list)})\")\n numeric_part = [float(x[0][0])*unit[i] for i,x in enumerate(numeric_part)]\n return numeric_part",
"def getCurrencies():",
"def price_per_period(coin_data):\r\n #print(type(coin_data)\r\n #Initialize list_price\r\n list_price = []\r\n #Extract closing price for each period and add to list_price\r\n for idx in range(len(coin_data)):\r\n price = float(coin_data[idx]['close'])\r\n list_price.append(price)\r\n #Reduce list_price to the last 42 closing prices for later analysis\r\n list_price = list_price[-42:]\r\n return list_price",
"def update_item_prices(self, code, discount):\n if code in Inventory.__stock.getkeys():\n discount_amount = Inventory.__stock[code].price * (discount / 100)\n Inventory.__stock[code].price -= discount_amount\n else:\n for items in Inventory.__stock:\n if items.category == 'code':\n items.price -= items.price * (discount / 100)",
"def calculate_prices(self, good=None):\n\n stock = self.calculate_init_stock(good)\n buy = self.buying_price()\n\n if stock == 0:\n sell = 0\n buy = buy + (buy * 0.5)\n\n elif stock < 500:\n # mild bug: stock, without selling price\n sell = self.selling_price()\n elif stock >= 500:\n # higher production, lower prices\n sell = self.selling_price() / 2\n buy = buy - (buy * 0.5)\n\n return [buy, sell, stock]",
"def get_order_lists(self, n_items, n_quantities):\n arr_stock_code = self._df_invoice_original.StockCode.unique()\n arr_stock_code = np.random.choice(arr_stock_code, n_items)\n list_stockCode = list(arr_stock_code)\n list_quantities = np.ones(arr_stock_code.shape[0])\n list_quantities *=n_quantities\n\n return list_stockCode, list_quantities"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
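For completeness, a sketch pairing getUnitPriceList with getStockCodeList; the `model` object and the slice size are assumptions, not part of the dataset entry.

# Take a few stock codes and look up one unit price per code.
some_codes = model.getStockCodeList()[:3]        # assumed to return at least 3 codes
unit_prices = model.getUnitPriceList(some_codes)
for code, price in zip(some_codes, unit_prices):
    print(code, "->", price)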
Returns a list of item descriptions from a list of stock codes. | def getDescriptionList(self, list_stockCode=None):
df = self._df_invoice_original
list_description = list()
if list_stockCode is None :
list_description = list(df.Description.unique())
else:
for stockCode in list_stockCode:
description = df[df.StockCode==stockCode].Description.unique()[0]
list_description.append(description)
return list_description | [
"def getStockCodeList(self, list_description=None):\n list_stockCode = list()\n df = self._df_invoice_original\n \n if list_description is None:\n list_stockCode = list(df.StockCode.unique())\n else :\n for description in list_description:\n stockCode = df[df.Description==description].StockCode.unique()[0]\n list_stockCode.append(stockCode)\n return list_stockCode",
"def listLowItems(conn):\n curs = conn.cursor(MySQLdb.cursors.DictCursor)\n curs.execute(\n '''select description from inventory where status = \"low\"''')\n tupleList = curs.fetchall()\n list = [dictionary['description'] for dictionary in tupleList]\n return '\\n, '.join(list)",
"def itemsDeCodigo(self, COD_TABELA):\n params = {\n \"COD_TABELA\": COD_TABELA,\n \"LMIN\": 0,\n \"LMAX\": 99999\n }\n fields = [\"ITEM_TABELA\", \"DESCRICAO\"]\n try:\n items = self.api.performGETRequest(self.path, params, fields, cached=self.cacheTime).content\n # Primeiro item de uma de ITEMS de uma TABELA é sempre a descrição do conteúdo\n return items[1:]\n except AttributeError:\n raise AttributeError(\"Nenhum item encontreado para este código.\")",
"def create_not_included_list(codes):\n string = '\\\\begin{itemize}\\n'\n for code in codes:\n title = get_course_title_only(code)\n string += '\\\\item{' + title + '}\\n'\n string += '\\\\end{itemize}\\n'\n return string",
"def module_description_list(head):\n r = []\n if head:\n item = head\n while item:\n item = item.contents\n r.append((item.name, item.shortname, item.longname, item.help))\n item = item.next\n libvlc_module_description_list_release(head)\n return r",
"def module_description_list(head):\r\n r = []\r\n if head:\r\n item = head\r\n while item:\r\n item = item.contents\r\n r.append((item.name, item.shortname, item.longname, item.help))\r\n item = item.next\r\n libvlc_module_description_list_release(head)\r\n return r",
"def split_stock_list1(stock_list):\n list_by_stock = []\n appl = []\n msft = []\n # Loop through stock list\n for stock_item in stock_list:\n # Identify company\n if stock_item[SYMBOL] == \"APPL\":\n appl.append(stock_item)\n elif stock_item[SYMBOL] == \"MSFT\":\n msft.append(stock_item)\n\n # Append appl and msft to return list\n list_by_stock.append(appl)\n list_by_stock.append(msft)\n\n return list_by_stock",
"def fetch_stocks(code):\n return list(stocks_table.find({\"code\": code}))",
"def get_price_list(self, item_list):\n price_list = []\n for item in item_list:\n price_list.append(Inventory.stock[item].price)\n return price_list",
"def get_order_lists(self, n_items, n_quantities):\n arr_stock_code = self._df_invoice_original.StockCode.unique()\n arr_stock_code = np.random.choice(arr_stock_code, n_items)\n list_stockCode = list(arr_stock_code)\n list_quantities = np.ones(arr_stock_code.shape[0])\n list_quantities *=n_quantities\n\n return list_stockCode, list_quantities",
"def getUnitPriceList(self, list_stockCode):\n df = self._df_invoice_original\n\n list_unitPrice = list()\n \n for stockCode in list_stockCode:\n unitPrice = df[df.StockCode==stockCode].UnitPrice.unique()[0]\n list_unitPrice.append(unitPrice)\n return list_unitPrice",
"def find_corresponding_stocks_infosfera_quandl() -> List[Dict[str, str]]:\n corresponding_stocks = []\n stock_names_to_codes_infosfera = _read_stock_names_to_codes()\n quandl_possible_codes = _read_quandl_possible_stock_codes()\n for company_name, company_code in stock_names_to_codes_infosfera.items():\n if company_code in quandl_possible_codes:\n corresponding_stocks.append({\n 'company_name': company_name.upper(),\n 'company_code': company_code\n })\n\n return corresponding_stocks",
"def get_format_code_example_list():\n folderpath = os.path.realpath(os.path.dirname(__file__))\n filepath = os.path.join(folderpath, YT_VID_AUD_FORMAT_CODES_FILENAME)\n format_code_example_list = open(filepath).read()\n return format_code_example_list",
"def map_item_name(self, list_item_id):\n\n return [self.packages.get(k) for k in list_item_id]",
"def test_item_description():\n list_item = ['donut']\n list_description = ['This is a donut']\n list_property = ['openable']\n item_m.assembling_items(list_item, list_description, list_property)\n assert item_m.item_description('donut') == 'This is a donut'\n assert item_m.item_description('donut') != 'ewfwefwefe'",
"def parse_quantities(quantities):\n codes = []; names = []\n\n for q in quantities:\n c, n = parse_quantity(q)\n codes.append(c)\n names.append(n)\n\n return codes, names",
"def GetTickers():\n # Constants used\n b3Industries = ['Energia-Petroleo-Gas',\n 'Industria-Financeira',\n 'Saude-Farmaceutica',\n 'Telecomunicacoes-Tecnologia',\n 'Industria-Alimenticia',\n 'Industria-Manufatureira',\n 'Servicos-diversos',\n 'Varejo',\n 'Construcao-Equipamentos',\n 'Bens-de-consumo',\n 'Industrias-em-geral']\n\n yahooFinanceUrl = 'https://br.financas.yahoo.com/industries/'\n\n # List to store the dropdown menu values\n stockInfo = []\n\n http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',\n ca_certs=certifi.where())\n\n for industry in b3Industries:\n page = http.request('GET', yahooFinanceUrl+industry)\n page_html = BeautifulSoup(page.data, 'lxml')\n for search in page_html.select(r\"tbody a.Fw\\(b\\)\"):\n if search['data-symbol'] != search['title']:\n stockInfo.append({'value': search['data-symbol'],\n 'label': (\n search['data-symbol'] +\n ' | ' + search['title'])\n })\n\n return stockInfo",
"def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks",
"def itemnames():\n g = ['KIS_NA_39', 'VII_57', 'MX_48', 'MX_56', 'KIS_NA_42', 'VII_54',\n 'MX_S_48', 'MX_S_52', 'MX_52', 'KIS_NA_45', 'KIS_NA_51', 'MIP_45',\n 'MIP_49', 'MIP_52', 'MIP_plus_48', 'MIP_plus_51', 'MX_42', 'MX_45',\n 'MIP_G_42', 'KIS_42', 'KIS_NA_48']\n return(g)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
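A brief sketch of getDescriptionList round-tripping with getStockCodeList; as before, `model` is an assumed instance of the class holding these methods.

descriptions = model.getDescriptionList()            # all unique descriptions
codes = model.getStockCodeList(descriptions[:2])     # back to their stock codes
print(model.getDescriptionList(codes))               # and forward to descriptions again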
Creates a new dataframe with invoice lines built from the given parameters. Once done, the new dataframe is aggregated with the original one. | def create_customer_df_invoice_line(self, customerID, list_stockCode\
, list_quantity, invoiceDate):
dict_invoice = dict()
dict_invoice['Quantity'] = list_quantity
dict_invoice['StockCode'] = list_stockCode
#------------------------------------------------------------------------
# Build invoiceDate from local current time
#------------------------------------------------------------------------
if invoiceDate is None:
time_struct = time.localtime()
invoiceDate = str(time_struct.tm_year)+'-'+str(time_struct.tm_mon)\
+'-'+str(time_struct.tm_mday)
invoiceDate +=' '
invoiceDate +=str(time_struct.tm_hour)+':'+str(time_struct.tm_min)\
+':'+str(time_struct.tm_sec)
invoiceDate = pd.Timestamp(invoiceDate)
else:
pass
#------------------------------------------------------------------------
# Lists initialization
#------------------------------------------------------------------------
list_customerID = list()
list_invoiceNo = list()
list_invoiceDate = list()
list_invoice_line_index = list()
#------------------------------------------------------------------------
# Increase Invoice number
#------------------------------------------------------------------------
invoiceNo = max(self._df_invoice_original.InvoiceNo)
invoiceNo += 1
#------------------------------------------------------------------------
# Get latest invoice line index value
#------------------------------------------------------------------------
invoice_line_index = max(self._df_invoice_original.index)
#------------------------------------------------------------------------
# Build lists for CustomerID, InvoiceNo, InvoiceDate
# A list of incremented indexes is built for new rows.
#------------------------------------------------------------------------
for quantity in list_quantity:
list_customerID.append(customerID)
list_invoiceNo.append(invoiceNo)
list_invoiceDate.append(invoiceDate)
invoice_line_index += 1
list_invoice_line_index.append(invoice_line_index)
dict_invoice['CustomerID'] = list_customerID
dict_invoice['InvoiceNo'] = list_invoiceNo
dict_invoice['InvoiceDate'] = list_invoiceDate
#------------------------------------------------------------------------
# Get description list from list of stock codes.
#------------------------------------------------------------------------
list_description = self.getDescriptionList(list_stockCode)
dict_invoice['Description'] = list_description
#------------------------------------------------------------------------
# Get unit price list from list of stock codes.
#------------------------------------------------------------------------
list_unitPrice = self.getUnitPriceList(list_stockCode)
dict_invoice['UnitPrice'] = list_unitPrice
#------------------------------------------------------------------------
    # Dataframe with the new invoice lines is created.
#------------------------------------------------------------------------
df_invoice_line \
= pd.DataFrame(dict_invoice, columns=dict_invoice.keys()\
, index=list_invoice_line_index)
return df_invoice_line | [
"def merge_purchase_invoice(self):\r\n active_id = self.env['purchase.order'].browse(self.env['purchase.order']._context.get('active_ids'))\r\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')]) \r\n active_id_count = 0\r\n active_count = 0\r\n exist_vendor = []; invoice = [];exist_vendors = [];ctx = ();invoice_id = []\r\n for rec in active_id : \r\n po_reference = self.env['account.invoice'].search([('origin', 'like', rec.name)])\r\n active_count = len(active_id)\r\n if rec.picking_count >= 1 and rec.picking_count != rec.invoice_count:\r\n len_name = [] \r\n for inv in po_reference: \r\n len_name = inv.origin.split(\":\") \r\n if rec.name in len_name:\r\n if po_reference.state == 'draft':\r\n for record in po_reference.invoice_line_ids:\r\n print (record.line_id)\r\n for res in rec.order_line:\r\n if res.id == record.line_id: \r\n record.write({'quantity':res.qty_received})\r\n res.write({'qty_invoiced':record.quantity})\r\n \r\n else:\r\n \r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received \r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':datetime.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n else:\r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n 
else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':date.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n invoices = []\r\n invoice_counts = 0\r\n for record in invoice:\r\n invoice_id = self.env['account.invoice'].create(record)\r\n invoices.append(invoice_id.id)\r\n invoice_counts = len(invoices)\r\n if active_id_count == 1:\r\n if invoice_counts == 1:\r\n form_view = self.env.ref('purchase.view_invoice_supplier_purchase_form').id\r\n tree_view = self.env.ref('account.invoice_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'res_id':invoices[0],\r\n 'views_id':False,\r\n 'views':[(form_view , 'form'), (tree_view , 'tree')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n } \r\n else: \r\n form_view = self.env.ref('account.invoice_supplier_form').id\r\n tree_view = self.env.ref('account.invoice_supplier_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'views_id':True,\r\n 'views':[(tree_view , 'tree'), (form_view , 'form')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n }",
"def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n },\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data",
"def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice",
"def invoices(self, invoices):\n\n\n self._invoices = invoices",
"def create_invoice_line_items(self):\n order_line_items = OrderLineItem.objects(order=self.order, status='new')\n for order_line_item in order_line_items:\n InvoiceLineItem(invoice=self,\n order_line_item=order_line_item,\n price=order_line_item.price,\n quantity=order_line_item.quantity,\n notes=order_line_item.notes\n ).save()\n order_line_item.update(status='invoiced')",
"def invoice_items(self,org_id=None,query={}):\n if org_id is None:\n org_id = self.org_id\n query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n # Given a 'query_end_date' to find the invoice containing the\n # line items for that date we need to find the invoice which \n # has 'endDate' equal to the end of the month of the `query_end_date`\n query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))\n target_invoices = []\n invoices = self.invoices(org_id)\n if self.verbose:\n print('Searching invoices org_id={}'.format(org_id))\n print('query={} query_end_date={}'.format(query,query_end_date))\n print('Result keys: {}'.format( invoices['content'].keys() ))\n print('Total result count: {}'.format( invoices['content']['totalCount'] ))\n for invoice in invoices['content']['results']:\n #pprint.pprint(invoice)\n end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if self.verbose: \n print('invoice({})[\\'endDate\\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))\n if end_date == query_first_next_month:\n target_invoices.append(invoice)\n \n if self.verbose: \n print('Target invoices: {}'.format(target_invoices))\n \n\n target_line_items = []\n for invoice in target_invoices:\n invoice_details = self.invoices(org_id,invoice['id']) \n print('invoice_details: {}'.format(invoice_details))\n for item in invoice_details['content']['lineItems']:\n end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if end_date == query_end_date:\n target_line_items.append(item)\n if self.verbose:\n print('target_line_items: {}'.format(target_line_items)) \n return target_line_items",
"def _get_fee_lines(self):\n lines = []\n for item in self:\n for line in item.fee_structure.fee_type_ids:\n name = line.fee_type.product_id.description_sale\n if not name:\n name = line.fee_type.product_id.name\n # Todo here to implement logic for students last invoiced fees\n # search last invoice for the lin and take the paid_upto\n fee_type_last_paid=self.env['account.invoice.line'].search([('invoice_id.student_id','=',item.student_id.id),\n ('product_id','=',line.fee_type.product_id.id)],order='invoice_id asc', limit=1)\n if fee_type_last_paid.paid_upto:\n #Here to set feetype started from\n fee_type_paid_upto = fee_type_last_paid.paid_upto\n ##fee_type_paid_upto= the date of fee_type started\n else:\n fee_type_paid_upto='2018-12-31'\n\n duration_in_month = 0\n qty=0\n #todo calculate number of fee_type unit to invooice\n if line.fee_type.payment_type==0:\n if len(fee_type_last_paid)==0:\n qty=1\n\n else:\n duration_in_month=len(self.env['education.academic.month'].search([('start_date','>',fee_type_paid_upto),\n ('end_date','<=',item.till_month.end_date)]))\n\n qty=duration_in_month//line.fee_type.payment_type\n\n # if (duration_in_month%line.fee_type.payment_type)>0 :\n # qty=qty+1\n last_paid_month=self.env['education.academic.month'].search([('end_date','=',datetime.datetime.strptime(fee_type_paid_upto,'%Y-%m-%d'))])\n new_paid_upto=self.env['education.academic.month'].search([('id','=',last_paid_month.id+qty*line.fee_type.payment_type)]).end_date\n\n if qty>0:\n # and rate applicable for students\n fee_line = {\n 'price_unit': line.fee_amount,\n 'quantity': qty,\n 'product_id': line.fee_type.product_id,\n 'name': name,\n 'paid_upto': new_paid_upto,\n 'account_id': item.journal_id.default_debit_account_id\n }\n lines.append((0, 0, fee_line))\n item.invoice_line_ids = lines\n if not item.date_invoice:\n item.date_invoice=datetime.date.today()\n item.date_due=datetime.date.today()+ datetime.timedelta(days=15)",
"def order_process(self, customerID, list_stockCode, list_quantity\\\n , orderDate=None):\n\n segmentID = -1\n\n #-------------------------------------------------------------------------\n # A new customer is created and inserted into data-set.\n #-------------------------------------------------------------------------\n if customerID is None:\n customerID = int(self.createCustomerID())\n else:\n pass\n \n #-------------------------------------------------------------------------\n # A new dataframe with new invoice lines are created.\n #-------------------------------------------------------------------------\n df_invoice_line = self.create_customer_df_invoice_line(customerID\\\n , list_stockCode, list_quantity, orderDate)\n \n #-------------------------------------------------------------------------\n # Original dataframe is updated with customer invoices lines.\n #-------------------------------------------------------------------------\n print(\"order_process : shape before concat= \"+str(self._df_invoice_original.shape))\n self._df_invoice_original \\\n = pd.concat([self._df_invoice_original, df_invoice_line], axis=0)\n print(\"order_process : shape after concat= \"+str(self._df_invoice_original.shape))\n \n #-------------------------------------------------------------------------\n # All invoices lines (including new one) related to customer is retrieved \n # from original dataframe.\n #-------------------------------------------------------------------------\n df_invoice_line_customer \\\n = self.get_customer_history_df_invoice_line(customerID)\n\n #-------------------------------------------------------------------------\n # When calling get_customer_marketSegment(), df_invoice_line_customer is\n # concatened to the original dataframe.\n #------------------------------------------------------------------------- \n segmentID = self.get_customer_marketSegment(df_invoice_line_customer)\n \n return segmentID, customerID",
"def prepare_report(user, from_date, to_date,\n show_which=\"worked\", # \"worked\", \"invoiced\", or \"paid\"\n client_ids=[], project_ids=[]):\n\n if show_which == \"worked\":\n sessions = Session.objects.filter(\n project__client__user=user,\n date__gte=from_date,\n date__lte=to_date\n )\n elif show_which == \"invoiced\":\n sessions = Session.objects.filter(\n project__client__user=user,\n invoice__invoice_date__gte=from_date,\n invoice__invoice_date__lte=to_date\n )\n elif show_which == \"paid\":\n sessions = Session.objects.filter(\n project__client__user=user,\n invoice__paid_date__gte=from_date,\n invoice__paid_date__lte=to_date\n )\n else:\n raise ValueError(\"Invalid value for the 'show_which' argument \"\n \"supplied\")\n\n if client_ids != []:\n sessions = sessions.filter(\n project__client__in=client_ids\n )\n if project_ids != []:\n sessions = sessions.filter(\n project__in=project_ids\n )\n\n # Starting Python 3.6, the dict maintains order as inserted\n # When running this on a different computer with older Python,\n # the sessions_per_date was all jumbled-up.\n # https://stackoverflow.com/questions/1867861/dictionaries-how-to-keep-keys-values-in-same-order-as-declared\n date_range = pd.date_range(from_date, to_date).date\n sessions_per_date = {today: sessions.filter(date=today)\n for today in date_range}\n\n total_earned = sum([sesh.get_money_earned() for sesh in sessions])\n\n context = {\n 'sessions': sessions, # obsolete if sessions_per_date will work\n 'from': from_date,\n 'to': to_date,\n 'date_range': date_range,\n 'sessions_per_date': sessions_per_date,\n 'total_earned': total_earned,\n }\n\n if client_ids != []:\n context['clients'] = Client.objects.filter(pk__in=client_ids)\n\n if project_ids != []:\n context['projects'] = Project.objects.filter(pk__in=project_ids)\n\n return context",
"def make_invoices(self):\n cents = Decimal('.01')\n\n logging.debug('Marking current invoices as deleted for policy ' + self.policy.policy_number)\n for invoice in self.policy.invoices:\n invoice.deleted = 1\n\n required_invoices = self.billing_schedules[self.policy.billing_schedule]\n bill_amount = Decimal(self.policy.annual_premium) / Decimal(self.billing_schedules.get(self.policy.billing_schedule))\n logging.debug('Creating invoices...')\n invoices = []\n\n for i in range(required_invoices):\n months_after_eff_date = i * (12 / required_invoices)\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n bill_amount)\n logging.debug('Created invoice, due: ' + str(invoice.amount_due))\n invoices.append(invoice)\n\n for invoice in invoices:\n db.session.add(invoice)\n logging.debug('Begin db commit for invoices on ' + self.policy.policy_number)\n db.session.commit()\n logging.debug('End db commit')",
"def action_create_invoice(self):\r\n inv_obj = self.env['account.invoice']\r\n inv_line_obj = self.env['account.invoice.line']\r\n # account_id = self.income_acc_id\r\n inv_val = {\r\n 'type': 'out_invoice',\r\n # 'transaction_ids': self.ids,\r\n 'state': 'draft',\r\n 'partner_id': self.customer_name.id or False,\r\n 'date_invoice': fields.Date.context_today(self),\r\n 'origin': self.booking_no,\r\n 'freight_booking': self.id,\r\n 'account_id': self.customer_name.property_account_receivable_id.id or False,\r\n 'company_id': self.company_id.id,\r\n 'user_id': self.sales_person.id,\r\n }\r\n\r\n invoice = inv_obj.create(inv_val)\r\n for line in self.cost_profit_ids:\r\n sale_unit_price_converted = line.list_price * line.profit_currency_rate\r\n account_id = False\r\n if line.product_id.property_account_income_id:\r\n account_id = line.product_id.property_account_income_id\r\n elif line.product_id.categ_id.property_account_income_categ_id:\r\n account_id = line.product_id.categ_id.property_account_income_categ_id\r\n if sale_unit_price_converted > 0:\r\n inv_line = inv_line_obj.create({\r\n 'invoice_id': invoice.id or False,\r\n 'account_id': account_id.id or False,\r\n 'name': line.product_id.name or '',\r\n 'product_id': line.product_id.id or False,\r\n 'quantity': line.profit_qty or 0.0,\r\n 'uom_id': line.uom_id.id or False,\r\n 'price_unit': sale_unit_price_converted or 0.0\r\n })\r\n line.write({'invoice_id': invoice.id or False,\r\n 'inv_line_id': inv_line.id or False})\r\n\r\n self.shipment_booking_status = '10'",
"def gen_invoice():\n invoice_id = gen_id()\n account_id = gen_id()\n invoice_date = datetime(2012, random.choice(range(1,13)), random.choice(range(1,29)))\n\n invoice_item_amounts = [gen_invoice_item(account_id, invoice_id, invoice_date)]\n invoice_amount = sum([i['total_amount'] for i in invoice_item_amounts])\n\n # Create invoice\n invoice = Invoice(invoice_id=invoice_id,\n account_id=account_id,\n invoice_date=invoice_date,\n invoice_amount=invoice_amount)\n invoice.save()\n\n # Create payment\n payment_id = None\n if roll_dice(1):\n payment_results = gen_payment(account_id=account_id, \n invoice_id=invoice_id, \n invoice_date=invoice_date, \n amount=invoice_amount)\n payment_id = payment_results[0]\n payment_date = payment_results[1]\n\n # Create refund\n if roll_dice(0) and payment_id is not None:\n refund = gen_refund(account_id=account_id, \n invoice_id=invoice_id, \n invoice_date=invoice_date, \n amount=invoice_amount, \n payment_id=payment_id,\n payment_date=payment_date)\n\n # Create term extension\n if roll_dice(0) and payment is not None:\n ext = gen_term_extension(account_id=account_id, \n invoice_id=invoice_id, \n invoice_date=invoice_date,\n service_start=invoice_items[0].service_start,\n service_end=invoice_items[0].service_end)",
"def _prepare_invoice_line(self, qty):\n values = super(SaleOrderLine, self)._prepare_invoice_line(qty)\n values.update({\n 'account_analytic_id': self.analytic_account_id.id,\n 'analytic_tag_ids': [[6, 0, self.analytic_tag_ids.ids]],\n })\n return values",
"def get_customer_history_df_invoice_line(self, customerID):\n df_invoice_line \\\n = self._df_invoice_original[self._df_invoice_original.CustomerID \\\n == customerID]\n return df_invoice_line",
"def _prepare_invoice(self, cr, uid, order, lines, context=None):\n invoice_vals = super(sale_order, self)._prepare_invoice(cr, uid, order,\n lines, context=context)\n invoice_vals.update({'partner_bank_id': order.partner_bank_id.id})\n return invoice_vals",
"def merge_invoice(self, cr, uid, invoices, context=None):\n order_ids = []\n pick_ids = []\n if len(invoices) <= 1:\n return False\n parent = self.pool.get('account.invoice').browse(cr, uid, context['active_id'])\n for inv in invoices:\n if parent.partner_id != inv.partner_id:\n raise osv.except_osv(_(\"Partners don't match!\"), _(\"Can not merge invoice(s) on different partners or states !.\"))\n\n if inv.state != 'draft':\n raise osv.except_osv(_(\"Invalid action !\"), _(\"You can merge only invoices in draft state.\"))\n\n # Merge invoices that are in draft state\n inv_line_obj = self.pool.get('account.invoice.line')\n name = parent.name\n comment = parent.comment\n origin = parent.origin\n for inv in invoices:\n if inv.id == parent.id:\n continue\n\n # check if a line with the same product already exist. if so add quantity. else hang up invoice line to first invoice head.\n if inv.name:\n # Find if the same name already exist, if yes, skip to add.\n name_list = name.replace(' ', '').split(',')\n if inv.name not in name_list:\n name += ', %s' % inv.name\n if inv.comment:\n comment = comment and comment + ', %s' % inv.comment or inv.comment\n if inv.origin:\n origin += ', %s' % inv.origin\n line_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', inv.id)])\n for inv_lin in inv_line_obj.browse(cr, uid, line_ids):\n mrg_pdt_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', parent.id), ('product_id', '=', inv_lin.product_id.id),\n ('uos_id', '=', inv_lin.uos_id.id), ('price_unit', '=', inv_lin.price_unit) # kittiu: extra condition, unit price must also be the same.\n ])\n if len(mrg_pdt_ids) == 1 and inv.type == parent.type: # product found --> add quantity\n inv_line_obj.write(cr, uid, mrg_pdt_ids, {'quantity': inv_line_obj._can_merge_quantity(cr, uid, mrg_pdt_ids[0], inv_lin.id)})\n inv_line_obj.unlink(cr, uid, inv_lin.id)\n elif inv.type == parent.type:\n inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id})\n else:\n inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id, 'quantity': -inv_lin.quantity})\n\n if inv.sale_order_ids:\n order_ids += [order.id for order in inv.sale_order_ids]\n if inv.picking_ids:\n pick_ids += [picking.id for picking in inv.picking_ids]\n\n self.write(cr, uid, parent.id, {'origin': origin, 'name': name, 'comment': comment})\n\n #Remove By DRB\n #cr.execute('update sale_order_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id))\n #cr.execute('update picking_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id))\n\n self.unlink(cr, uid, [inv.id])\n #Distinct List\n order_ids = list(set(order_ids))\n pick_ids = list(set(pick_ids))\n\n self.write(cr, uid, parent.id, {'sale_order_ids': [(6, 0, order_ids)], 'picking_ids': [(6, 0, pick_ids)]})\n self.button_reset_taxes(cr, uid, [parent.id])\n return parent.id",
"def invoices(self):\r\n return Invoices(self)",
"def generate_report(df, start_date, end_date):\n # Remove any transactions that had to do with collecting or returning security\n security_df = df[(df[CATEGORY] == 'Security') | (df[CATEGORY] == 'Security-Income')]\n df = df[(df[CATEGORY] != 'Security')]\n\n # Exclude the data for everything except our quarter\n period_data = df.loc[start_date:end_date] # Note: NOT using extended quarter range\n rental_income = period_data[period_data[CATEGORY] == 'Rent']\n utilities = period_data[(period_data[CATEGORY] == 'Utilities')]\n other_income = period_data[(period_data['Transaction Type'] == 'credit') & (period_data[CATEGORY] != 'Rent')]\n expenses = period_data[(period_data['Transaction Type'] == 'debit')]\n unpaid_util_overages = float(0)\n\n # print(rental_income)\n # print(other_income)\n # print(expenses)\n \n html_config.initialize()\n print(html_config.HTML_OPEN)\n\n print('<H1>Income and Expense Report for %s-%s:' % (start_date, end_date), '</H1><p>')\n\n # List all unit specific rents and expenses for the quarter\n for UNIT in sorted(rental_income['Unit'].unique()):\n # Show rental income info\n temp_df = rental_income[rental_income['Unit'] == UNIT]\n print('<br><H2>Total rent for Unit ', UNIT, ': ${:,.2f}'.format(temp_df['Amount'].sum()), '</H2>')\n print(temp_df[['Description', 'Amount']].to_html())\n \n if not SKIP_UTIL_ANALYSIS:\n # Show utilities payments and calculate any overage due\n temp_df = utilities[(utilities['Unit'] == UNIT) & (utilities['Transaction Type'] == 'debit')]\n print('<br><H2>Utilities Expenses for Unit', UNIT, ': ${:,.2f}'.format(temp_df['Amount'].sum()), '</h2>')\n overage = temp_df.assign(Overage=lambda x: x.Amount - limit_df.loc[UNIT].Amount)\n # Disable warning when setting negative overage values to zero\n pd.set_option('mode.chained_assignment', None)\n overage.Overage[overage.Overage < 0] = 0\n pd.set_option('mode.chained_assignment', 'warn')\n print(overage[['Description', 'Amount', 'Overage']].to_html())\n # Show if utilties costs exceeded allotted amount\n if (overage['Overage'].sum() > 0):\n print('<h3>Utilities overage for unit', UNIT, ': ${:,.2f}'.format(overage['Overage'].sum()), '</h3>')\n unpaid_util_overages += overage['Overage'].sum()\n # Show any untilities that were collected \n overage_collected = utilities[(utilities['Unit'] == UNIT) & (utilities['Transaction Type'] == 'credit')]\n if not overage_collected.empty:\n print('<br><H2>Util overages collected for Unit ', UNIT, ': ${:,.2f}'.format(overage_collected['Amount'].sum()), '</H2>')\n print(overage_collected[['Description', 'Amount', CATEGORY]].to_html())\n print('<h3>Net Utils overage for unit', UNIT, 'for period: ${:,.2f}'.format(overage['Overage'].sum() - overage_collected['Amount'].sum()), '</h3>')\n \n\n \n # Generate unit specific Utility usage reports\n if GEN_TENANT_UTIL_REPORTS and OUTPUT_DIRECTORY:\n TENANT_FILE = '%s/122-Spring-St-%s-%s-Unit-%s-utils.html' % (OUTPUT_DIRECTORY, start_date, end_date, UNIT)\n TENANT_REPORTS.append(TENANT_FILE)\n sys.stdout = open(TENANT_FILE, 'w')\n print(html_config.HTML_OPEN)\n\n print('<H1>Unit', UNIT, '</H1>')\n print('<br><H2>Utilities Expenses for: %s-%s' % (start_date, end_date))\n print('<br>Utilites included in rent: ${:,.2f}'.format(limit_df.loc[UNIT].Amount))\n print('</H2>')\n print(overage[['Description', 'Amount', 'Overage']].to_html())\n # Show if any utilties overage may be due\n if (overage['Overage'].sum() > 0):\n print('<h3>Utilities overage for unit', UNIT, ': ${:,.2f}'.format(overage['Overage'].sum()), '</h3>')\n if not 
overage_collected.empty:\n print('<br><H2>Util overages collected for Unit ', UNIT, ': ${:,.2f}'.format(overage_collected['Amount'].sum()), '</H2>')\n print(overage_collected[['Description', 'Amount', CATEGORY]].to_html())\n print('<h3>Net Utils overage for unit', UNIT, 'for period: ${:,.2f}'.format(overage['Overage'].sum() - overage_collected['Amount'].sum()), '</h3>')\n\n print(html_config.HTML_CLOSE)\n\n # Restore stdout to the main report file\n sys.stdout = open(REPORT_FILE, 'a')\n \n # Show other unit specific transactions\n if SKIP_UTIL_ANALYSIS:\n unit_exp = expenses[(expenses['Unit'] == UNIT)]\n unit_income = other_income[other_income['Unit'] == UNIT]\n else:\n unit_exp = expenses[(expenses['Unit'] == UNIT) & (expenses[CATEGORY] != 'Utilities')]\n unit_income = other_income[(other_income['Unit'] == UNIT) & (other_income[CATEGORY] != 'Utilities')]\n \n if not unit_exp.empty:\n print('<br><H2>Other Unit specific expenses for: ', UNIT, ': ${:,.2f}'.format(unit_exp['Amount'].sum()), '</h2>')\n print(unit_exp[['Description', 'Amount', 'Unit', CATEGORY]].to_html())\n print('<p>')\n \n # Show any other unit specific credit\n other_income = other_income[other_income['Unit'] == UNIT]\n if not other_income.empty:\n print('<br><H2>Expense offsets for Unit ', UNIT, ': ${:,.2f}'.format(other_income['Amount'].sum()), '</H2>')\n print(other_income[['Description', 'Amount', CATEGORY]].to_html())\n \n # Add a line between units\n print('<hr>')\n \n # List the shared income and expenses for the quarter\n temp_df = other_income[other_income['Unit'].isnull()]\n if not temp_df.empty:\n print ('<br><H2>Non unit specific income: ${:,.2f}'.format(temp_df['Amount'].sum()), '</h2>')\n print(temp_df[['Description', 'Amount', CATEGORY]].to_html())\n gen_expenses = expenses[expenses['Unit'].isnull()]\n if not gen_expenses.empty:\n print ('<br><H1>Non unit specific expenses</h1>')\n # Get the list of expense categories and generate summary for each\n for category in sorted(gen_expenses[CATEGORY].unique()):\n temp_df = gen_expenses[(gen_expenses[CATEGORY] == category)]\n print ('<br><H2>'+ category +': ${:,.2f}'.format(temp_df['Amount'].sum()), '</h2>')\n print(temp_df[['Description', 'Amount', CATEGORY]].to_html())\n \n # If there were any security transactions in the period give a security report\n if not security_df.loc[start_date:end_date].empty:\n temp_df = security_df.loc[start_date:end_date] \n print('<hr><H2>Security related transactions:</H2>')\n print(temp_df[['Description', 'Amount', 'Transaction Type', 'Unit']].to_html())\n for UNIT in sorted(rental_income['Unit'].unique()):\n unit_df = security_df[security_df['Unit'] == UNIT]\n collected = unit_df[(unit_df['Transaction Type'] == 'credit')]['Amount'].sum()\n returned = unit_df[(unit_df['Transaction Type'] == 'debit')]['Amount'].sum()\n print('<center><H4>Current Liability on Unit '+str(UNIT)+': ${:,.2f}'.format(collected-returned), '</H4></center>')\n \n # # Summarize the periods income and expenses -- old way to be discarded...\n # print('<br><H3>Total income for period: ${:,.2f}'.format(rental_income['Amount'].sum()), '</H3>')\n # print('<H3>Total expense for period: ${:,.2f}'.format(expenses['Amount'].sum() - other_income['Amount'].sum()), '</H3>')\n # print('<H3>Profit for period: ${:,.2f}'.format(rental_income['Amount'].sum() + other_income['Amount'].sum() -expenses['Amount'].sum()), '</H3>')\n # Summarize the periods income and expenses\n print('<br><H3>Total income for period: ${:,.2f}'.format(rental_income['Amount'].sum()))\n 
print('<br><H3>Total expense for period: ${:,.2f}'.format(expenses['Amount'].sum() - other_income['Amount'].sum()), '</H3>')\n print('<H3>Profit for period: ${:,.2f}'.format(rental_income['Amount'].sum() + other_income['Amount'].sum() -expenses['Amount'].sum()), '</H3>')\n# print('</H3>')\n \n print(html_config.HTML_CLOSE)\n sys.stdout.flush()",
"def get_rows(contract_address: str, file_name: str, receipts_filename: str) -> pd.DataFrame():\n receipts_df = pd.read_csv(receipts_filename)\n receipts_df = receipts_df.sort_values(['block_number', 'transaction_index'], ignore_index=True)\n df = pd.read_csv(file_name)\n df = df.sort_values(['block_number', 'transaction_index'], ignore_index=True)\n\n df = df.loc[receipts_df['status'] == 1] \n df = df.loc[df[\"to_address\"] == contract_address.lower()]\n df = df.reset_index()\n df = df.drop(columns='index')\n return df"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a dataframe with all invoice lines for the customerID given as parameter. | def get_customer_history_df_invoice_line(self, customerID):
df_invoice_line \
= self._df_invoice_original[self._df_invoice_original.CustomerID \
== customerID]
return df_invoice_line | [
"def create_customer_df_invoice_line(self, customerID, list_stockCode\\\n , list_quantity, invoiceDate):\n \n dict_invoice = dict()\n\n dict_invoice['Quantity'] = list_quantity\n dict_invoice['StockCode'] = list_stockCode\n\n #------------------------------------------------------------------------\n # Build invoiceDate from local current time\n #------------------------------------------------------------------------\n if invoiceDate is None:\n time_struct = time.localtime()\n invoiceDate = str(time_struct.tm_year)+'-'+str(time_struct.tm_mon)\\\n +'-'+str(time_struct.tm_mday)\n invoiceDate +=' '\n invoiceDate +=str(time_struct.tm_hour)+':'+str(time_struct.tm_min)\\\n +':'+str(time_struct.tm_sec)\n invoiceDate = pd.Timestamp(invoiceDate)\n else:\n pass\n\n\n #------------------------------------------------------------------------\n # Lists initialization\n #------------------------------------------------------------------------\n list_customerID = list()\n list_invoiceNo = list()\n list_invoiceDate = list()\n list_invoice_line_index = list()\n \n #------------------------------------------------------------------------\n # Increase Invoice number\n #------------------------------------------------------------------------\n invoiceNo = max(self._df_invoice_original.InvoiceNo)\n invoiceNo += 1\n\n #------------------------------------------------------------------------\n # Get latest invoice line index value\n #------------------------------------------------------------------------\n invoice_line_index = max(self._df_invoice_original.index)\n\n #------------------------------------------------------------------------\n # Build lists for CustomerID, InvoiceNo, InvoiceDate\n # A list of incremented indexes is built for new rows.\n #------------------------------------------------------------------------\n for quantity in list_quantity:\n list_customerID.append(customerID)\n list_invoiceNo.append(invoiceNo)\n list_invoiceDate.append(invoiceDate)\n invoice_line_index += 1\n list_invoice_line_index.append(invoice_line_index) \n\n \n dict_invoice['CustomerID'] = list_customerID\n dict_invoice['InvoiceNo'] = list_invoiceNo\n dict_invoice['InvoiceDate'] = list_invoiceDate\n\n #------------------------------------------------------------------------\n # Get description list from list of stock codes.\n #------------------------------------------------------------------------\n list_description = self.getDescriptionList(list_stockCode)\n \n dict_invoice['Description'] = list_description\n\n #------------------------------------------------------------------------\n # Get unit price list from list of stock codes.\n #------------------------------------------------------------------------\n list_unitPrice = self.getUnitPriceList(list_stockCode)\n \n dict_invoice['UnitPrice'] = list_unitPrice\n\n #------------------------------------------------------------------------\n # Dataframe with new invoices lines is created.\n #------------------------------------------------------------------------\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys()\\\n , index=list_invoice_line_index)\n \n return df_invoice_line",
"def test_get_line_customers():\n\n root_path = Path(__file__).absolute().parents[1]\n master_dss_file = root_path / 'examples' / 'opendss' / 'master.dss'\n\n simulator = opendss.OpenDSSSimulator(master_dss_file)\n line_customers_df = dss_util.get_line_customers(\n simulator.dss_instance\n )\n\n assert isinstance(line_customers_df, pd.DataFrame)",
"def order_process(self, customerID, list_stockCode, list_quantity\\\n , orderDate=None):\n\n segmentID = -1\n\n #-------------------------------------------------------------------------\n # A new customer is created and inserted into data-set.\n #-------------------------------------------------------------------------\n if customerID is None:\n customerID = int(self.createCustomerID())\n else:\n pass\n \n #-------------------------------------------------------------------------\n # A new dataframe with new invoice lines are created.\n #-------------------------------------------------------------------------\n df_invoice_line = self.create_customer_df_invoice_line(customerID\\\n , list_stockCode, list_quantity, orderDate)\n \n #-------------------------------------------------------------------------\n # Original dataframe is updated with customer invoices lines.\n #-------------------------------------------------------------------------\n print(\"order_process : shape before concat= \"+str(self._df_invoice_original.shape))\n self._df_invoice_original \\\n = pd.concat([self._df_invoice_original, df_invoice_line], axis=0)\n print(\"order_process : shape after concat= \"+str(self._df_invoice_original.shape))\n \n #-------------------------------------------------------------------------\n # All invoices lines (including new one) related to customer is retrieved \n # from original dataframe.\n #-------------------------------------------------------------------------\n df_invoice_line_customer \\\n = self.get_customer_history_df_invoice_line(customerID)\n\n #-------------------------------------------------------------------------\n # When calling get_customer_marketSegment(), df_invoice_line_customer is\n # concatened to the original dataframe.\n #------------------------------------------------------------------------- \n segmentID = self.get_customer_marketSegment(df_invoice_line_customer)\n \n return segmentID, customerID",
"def get_invoice_list(self):\n Pass",
"def get_rows(contract_address: str, file_name: str, receipts_filename: str) -> pd.DataFrame():\n receipts_df = pd.read_csv(receipts_filename)\n receipts_df = receipts_df.sort_values(['block_number', 'transaction_index'], ignore_index=True)\n df = pd.read_csv(file_name)\n df = df.sort_values(['block_number', 'transaction_index'], ignore_index=True)\n\n df = df.loc[receipts_df['status'] == 1] \n df = df.loc[df[\"to_address\"] == contract_address.lower()]\n df = df.reset_index()\n df = df.drop(columns='index')\n return df",
"def invoices(self,org_id=None,invoice_id=''):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))",
"def return_customer_orders(customer_id):\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\n \"\"\"\n SELECT id_order, id_customer, id_product, quantity, total_price,\n payment_status, send_status, order_date, location\n FROM Orders\n Where id_customer=?\n \"\"\",\n (customer_id,))\n return cursor.fetchall()",
"def get_all_customer_ids():\n table = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_all_customer_ids_from_table(table)",
"def invoices(self):\r\n return Invoices(self)",
"def invoices(self):\r\n return inv.Invoices(self)",
"def invoices(self):\n return Invoices(self)",
"def invoice_items(self,org_id=None,query={}):\n if org_id is None:\n org_id = self.org_id\n query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n # Given a 'query_end_date' to find the invoice containing the\n # line items for that date we need to find the invoice which \n # has 'endDate' equal to the end of the month of the `query_end_date`\n query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))\n target_invoices = []\n invoices = self.invoices(org_id)\n if self.verbose:\n print('Searching invoices org_id={}'.format(org_id))\n print('query={} query_end_date={}'.format(query,query_end_date))\n print('Result keys: {}'.format( invoices['content'].keys() ))\n print('Total result count: {}'.format( invoices['content']['totalCount'] ))\n for invoice in invoices['content']['results']:\n #pprint.pprint(invoice)\n end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if self.verbose: \n print('invoice({})[\\'endDate\\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))\n if end_date == query_first_next_month:\n target_invoices.append(invoice)\n \n if self.verbose: \n print('Target invoices: {}'.format(target_invoices))\n \n\n target_line_items = []\n for invoice in target_invoices:\n invoice_details = self.invoices(org_id,invoice['id']) \n print('invoice_details: {}'.format(invoice_details))\n for item in invoice_details['content']['lineItems']:\n end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if end_date == query_end_date:\n target_line_items.append(item)\n if self.verbose:\n print('target_line_items: {}'.format(target_line_items)) \n return target_line_items",
"def invoice(self, invoice_number):\r\n return inv.Invoice(self, invoice_number)",
"def do_invoice_list(cc, args):\n\n # Invoice details as Unicode response\n invoice = cc.reports.list_invoice(all_tenants=args.all_tenants)\n utils.process_dict_and_display_invoice_list(invoice)",
"def invoices(self):\r\n return inv.AccountInvoices(self)",
"def LineItems(order_id):\n global QUERY \n\n output = getLineItems(QUERY, [], order_id)\n\n return output",
"def invoices_in_route(self):\n from invoicing.models import Invoice\n invoices = Invoice.objects.filter(\n route=self.number, print_date__range=(date.today() - timedelta(6), date.today()),\n canceled=False).count()\n return invoices",
"def construct_invoice_query(customer_ref: Ref) -> str:\n customer_ref_value_column = PseudoColumn(\"CustomerRef\")\n create_time_column = PseudoColumn(\"MetaData.CreateTime\")\n\n return str(\n Query.from_(\"Invoice\")\n .select(\"*\")\n .where(customer_ref_value_column == customer_ref.value)\n .where(create_time_column >= INVOICE_FILTER_START_DATE)\n ).replace(\"\\\"Invoice\\\"\", \"Invoice\")",
"def customer_history(self, CustomerID, all=False, no_blank_columns=True):\n \n customer_history = []\n for entry in self._data:\n if entry[self._clmn['CustomerKey']] == CustomerID.upper():\n if all is False:\n if entry[self._clmn['Status']] != 0:\n customer_history.append(entry)\n else:\n customer_history.append(entry)\n return self._return_as_dict(customer_history, no_blank_columns)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
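The record above isolates a customer's purchase history with a plain boolean mask on CustomerID. As a quick reference, here is a minimal, self-contained sketch of the same pattern; the toy frame and its CustomerID / InvoiceNo / Quantity values are illustrative assumptions, not the project's real data.

import pandas as pd

# Toy invoice-line data; column names mirror the record above (assumption).
df_invoice = pd.DataFrame({
    "CustomerID": [12345, 12345, 67890],
    "InvoiceNo": [536365, 536366, 536367],
    "Quantity": [6, 2, 4],
})

# Same boolean-mask filter as get_customer_history_df_invoice_line
customer_history = df_invoice[df_invoice.CustomerID == 12345]
print(customer_history)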
Returns a list of customers that were excluded from the data sample used for building the model. By default, 10 customer identifiers are returned. If customerCount is None or <= 0, the list of all excluded customers is returned. | def get_listCustomer_out_sample(self, customerCount=10):
if customerCount is None :
listCustomer= list(self._df_invoice_line_out_sample.CustomerID.unique())
else:
if customerCount <= 0 :
listCustomer \
= list(self._df_invoice_line_out_sample.CustomerID.unique())
else:
listCustomer \
= list(self._df_invoice_line_out_sample.CustomerID.unique()[:customerCount])
return listCustomer | [
"def remove_exiting_customers(self):\n self.customers=[c for c in self.customers if c.is_active()]",
"def get_all_customer_ids():\n table = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_all_customer_ids_from_table(table)",
"def get_all_customers():\n data = user_obj.get_all_customers()\n return data",
"def _add_customers_negative_data():\n return [\n (\"235\", \"Name\", \"Lastname\", \"Address\", \"phone\", \"email\", \"active\", 0),\n (\"678\", \"Name\", \"Lastname\", \"Address\", \"phone\", \"email\", \"iinactive\", 10),\n ]",
"def get_customer_names(self):\n sql = select([self.weights_external.c.customer_name]).\\\n distinct().\\\n order_by(self.weights_external.c.customer_name)\n return [r[0] for r in self.engine.execute(sql)]",
"def get_all_customers_not_purchasing_next_quarter(self) -> list:\n\n #Check if predictions can be made\n if self._load_model:\n y_labels = self._predict_labels()\n return y_labels[y_labels == 0].index.tolist()\n\n else:\n raise NoTrainedModelError('There is no trained model to make predictions with, please call initialize_purchase_predictor() first or set load_existing_model to True.')",
"def served_customers(self):\n print(f\"{self.name.title()} has served {self.number_served} customers.\")",
"def get_customer_list(self):\n return self._customer_repo.get_customer_list()",
"def test_list_active_customer_when_there_are_none(self):\n LOGGER.info(\"*** TEST ***: list_active_customers when none\")\n # Given\n basic_operations.delete_customer(\"FY2020-001\")\n\n # When\n self.assertEqual(0, basic_operations.list_active_customers())",
"def return_all_customer_info():\n all_customer_records = Customer.select()\n\n\n\n for persion in all_customer_records:\n print(f\"Customer id: {person.customer_id}\\nFirst Name: {person.first_name}\\nLast Name: {person.last_name}\\n\"\n f\"Home Address: {person.home_address}\\nPhone Number: {person.phone_number}\\n\"\n f\"Email Address: {person.email_address}\\nStatus: {person.status}\\nCredit Limit: ${person.credit_limit}\\n\")\n\n\n if __name__ == \"__main__\":\n cc.main()\n\n search_customer(\"W3434fd\")\n list_active_customers()",
"def get_page_customers(self):\n return self.driver.find_elements(*CustomerGroupsPage.CUSTOMER_GROUP)",
"def list_active_customers():\n LOGGER.info('Getting active customers')\n return cm.Customer.select().where(cm.Customer.customer_status).count()",
"def active_customer_sales_calls():\n customers_to_call = Customer.select().where(Customer.active_status)\n customer_count = list_active_customers()\n logging.info(\"Getting %s records for sales call query\", customer_count)\n return [f\"{i.first_name} {i.last_name} {i.phone_number}\" for i in customers_to_call]",
"def get_excluded_observations(self):\n\n return copy.deepcopy(self._excluded_observations)",
"def get_filtered_customers(**kwargs):\n \n response = []\n try:\n engine = create_db_engine()\n db_conn = engine.connect()\n with db_conn as connection:\n rows = connection.execute(\"select * from customers\")\n res = generate_response(rows)\n \n\n return res\n \n #response = generate_api_response(rows)\n db_conn.close() \n except Exception as e:\n print(\"get customers data engne exce---*******************\"+str(e))\n raise ValueError\n\n return response",
"def get_customer_count(self):\n return self._df_invoice_original.CustomerID.unique().shape[0]",
"def list(self, **params):\n\n _, _, customers = self.http_client.get(\"/customers\", params=params)\n return customers",
"def get_unselected_benefits(cls, excluded_benefits):\n benefits = cls.query.filter(cls.id.notin_(excluded_benefits))\n return [benefit.serialize() for benefit in benefits]",
"def excluded(cls):\n return []"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
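get_listCustomer_out_sample above combines unique() with an optional slice. The standalone sketch below reproduces the same None / non-positive handling; the hold-out frame and the function name are hypothetical stand-ins for the class attribute used in the record.

import pandas as pd

def list_out_of_sample_customers(df_out_sample, customer_count=10):
    # Unique customer IDs from the hold-out frame; None or a count <= 0
    # returns them all, mirroring the record above.
    ids = list(df_out_sample.CustomerID.unique())
    if customer_count is None or customer_count <= 0:
        return ids
    return ids[:customer_count]

df_out_sample = pd.DataFrame({"CustomerID": [101, 101, 102, 103, 104]})
print(list_out_of_sample_customers(df_out_sample, 2))     # [101, 102]
print(list_out_of_sample_customers(df_out_sample, None))  # [101, 102, 103, 104]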
Returns the number of invoices in the original dataset. | def get_invoice_count(self):
return self._df_invoice_original.InvoiceNo.unique().shape[0] | [
"def _compute_no_of_invoices(self):\n for record in self:\n record.invoice_count = len(record.invoice_ids)",
"def get_invl_count(self):\n return self._df_invoice_original.index.unique().shape[0]",
"def invoices(self):\n from invoice.models.inv import Invoice\n\n return Invoice.objects.filter(customer=self).count()",
"def _compute_count_invoice(self):\n for serv in self:\n serv.invoice_count = self.env[\"account.move\"].search_count(\n [\n (\"move_type\", \"=\", \"out_invoice\"),\n (\"vehicle_service_id\", \"=\", serv.id),\n ]\n )",
"def find_number_of_pages_in_invoice(filename, data):\n print('Checking page count', filename)\n return len(data['analyzeResult']['readResults'])",
"def invoices(self):\r\n return inv.Invoices(self)",
"def get_customer_count(self):\n return self._df_invoice_original.CustomerID.unique().shape[0]",
"def expired_invoices_count(self):\n return self.get_expired_invoices().count()",
"def getNumOfInvoice(self,id,start,finish):\n self.calls += 1\n invoice = self.getResponse(self.buildParams(id,start,finish))\n if not self.isNumeric(invoice):\n middle = self.diveDates(start,finish)\n plusMiddle = middle + timedelta(days = 1)\n middle = self.removeHours(middle)\n plusMiddle = self.removeHours(plusMiddle)\n invoice = self.getNumOfInvoice(id,start,middle)+\\\n self.getNumOfInvoice(id,plusMiddle,finish)\n return invoice",
"def invoices(self):\r\n return inv.AccountInvoices(self)",
"def countAndGetCallInvoice(self,id,start,finish):\n self.calls = 0\n return self.getNumOfInvoice(id,start,finish)",
"def invoices_in_route(self):\n from invoicing.models import Invoice\n invoices = Invoice.objects.filter(\n route=self.number, print_date__range=(date.today() - timedelta(6), date.today()),\n canceled=False).count()\n return invoices",
"def get_invitations_count(self) -> Dict:\n pass",
"def test_total_invoices(self):\n sale = SaleFactory(total_value=1000)\n InvoiceFactory(sale=sale, total_value=50)\n InvoiceFactory(sale=sale, total_value=500)\n self.assertEqual(sale.total_invoices, 550)",
"def invoices(self):\r\n return Invoices(self)",
"def _compute_return_invoice(self):\n for serv in self:\n serv.return_inv_count = self.env[\"account.move\"].search_count(\n [(\"move_type\", \"=\", \"out_refund\"), (\"vehicle_service_id\", \"=\", serv.id)]\n )",
"def invoices(self, invoices):\n\n\n self._invoices = invoices",
"def test_total_invoices_in_cero(self):\n sale = SaleFactory(total_value=100)\n self.assertEqual(sale.total_invoices, 0)",
"def invoices(self):\n return Invoices(self)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the number of customers in the original dataset. | def get_customer_count(self):
return self._df_invoice_original.CustomerID.unique().shape[0] | [
"def get_total_customers(self, report_df):\n\n return len(report_df.loc[:, 'customer_id'].unique())",
"def bus_total_customers(self) -> int:\n return self.dss_obj.BUSI(4, 0)",
"def test_customer_count(self):\n\n # test that enpoint returns the correct count of products\n rv = self.app.get('/customer/count',\n headers=self.headers,\n content_type='application/json')\n data = json.loads(rv.get_data(as_text=True))\n\n assert rv.status_code == 200\n assert data[\"count\"] == \"19\"",
"def increment_number_served(self, customers):\n self.number_served += customers",
"def get_total_trans(all_customers_data, trans_column):\n return all_customers_data.select(trans_column).distinct().count()",
"def list_active_customers():\n try:\n active_customer_count = 0\n for _ in Customer.select().where(Customer.status == 'Active'):\n active_customer_count += 1\n logger.info(\n f\"Successfully counted active customers {active_customer_count}\"\n )\n return active_customer_count\n except Exception as unknown_error:\n logger.error(f\"Error. Failed to count customers. {unknown_error}\")\n print(\n f'Error. Not able to count number of active customers.'\n ' {unknown_error}'\n )",
"def get_invoice_count(self):\n return self._df_invoice_original.InvoiceNo.unique().shape[0]",
"def served_customers(self):\n print(f\"{self.name.title()} has served {self.number_served} customers.\")",
"def _get_number_of_ccds(repository, dataset):\n lines = [l\n for l in open(os.path.join(repository,\n dataset + '.fits')).readlines()\n if(l.strip())]\n return(len(lines))",
"def list_active_customers():\n db_customers = Customers.select()\n LOGGER.debug(\"Calculating number of active customers\")\n # Technically used this in Lesson 03, but it is a comprehension. Another method added below.\n number_active = sum([int(x.status) for x in db_customers])\n LOGGER.info(\"There are %d active customers\", number_active)\n\n return number_active",
"def bus_interruptions_total_customers(self) -> float:\n return self.dss_obj.BUSF(9, 0)",
"def cluster_obs_count(self):\n return(self.merged_data.groupby(\n 'labels').count().transpose().iloc[0, :])",
"def bottom_twenty_customers(data_frame):\n data_frame.loc[:, 'total_payment'] = (data_frame['unit_rental_price']\n * data_frame['quantity_rented']\n * data_frame['rental_period_months'])\n data_set = data_frame.groupby(['customer_id']).agg({'total_payment': 'sum'})\n data_set = data_set.nsmallest(20, 'total_payment')\n return data_set",
"def test_companies_company_id_data_customers_customer_id_get(self):\n pass",
"def count_elements_in_dataset(dataset):\n return dataset.count()",
"def set_number_served(self, customers):\r\n self.number_served = customers",
"def customers_served(self):\n print(f\"{self.restaurant_name} \"\n f\"has served {self.number_served} customers.\")",
"def getUsersProductsN(data):\n return (len(np.unique(data[:, 0])), len(np.unique(data[:, 1])))",
"def get_invl_count(self):\n return self._df_invoice_original.index.unique().shape[0]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the number of invoice lines (number of rows) in the original dataset. | def get_invl_count(self):
return self._df_invoice_original.index.unique().shape[0] | [
"def get_invoice_count(self):\n return self._df_invoice_original.InvoiceNo.unique().shape[0]",
"def getNumRows(self) -> int:\n ...",
"def _compute_no_of_invoices(self):\n for record in self:\n record.invoice_count = len(record.invoice_ids)",
"def get_num_lines(new_crystal_text):\n header_text = new_crystal_text[0]\n for i, line in enumerate(new_crystal_text[1:]):\n if line==header_text:\n num_lines = i+1\n return num_lines",
"def getNoOfRows(self):\n return _patchExtractor.patchExtractor_getNoOfRows(self)",
"def _count_nrows(self):\n with open(self.filename) as fobj:\n if self.offset > 0:\n fobj.seek(self.offset)\n\n if self.delim is not None:\n # for ascii this can be slow\n nrows = 0\n for line in fobj:\n nrows += 1\n else:\n # For binary, try to figure out the number of rows based on\n # the number of bytes\n\n rowsize=self.dtype.itemsize\n # go to end\n fobj.seek(0,2)\n datasize = fobj.tell() - self.offset\n nrows = datasize//rowsize\n\n return nrows",
"def get_table_nb_lines(self, table):\n sql = \"SELECT COUNT(*) FROM \" + table + \";\"\n cur = self._connection.cursor()\n cur.execute(sql)\n res = cur.fetchall()\n cur.close()\n return res[0][0]",
"def find_number_of_pages_in_invoice(filename, data):\n print('Checking page count', filename)\n return len(data['analyzeResult']['readResults'])",
"def line_count(self):\r\n count = 0\r\n with open(self.read_file, 'r') as f:\r\n csv_reader = csv.DictReader(f)\r\n for line in csv_reader:\r\n count += 1\r\n return count",
"def n_lines(self):\n try: \n return self._n_lines\n except AttributeError:\n self._n_lines = len(self.lines())\n return self._n_lines",
"def len(self, table):\n return self.get_table_nb_lines(table)",
"def __get_lines(self):\n return self._target.getPropertyValue('NumberOfLines')",
"def get_customer_count(self):\n return self._df_invoice_original.CustomerID.unique().shape[0]",
"def get_total_line_counts(self):\n return get_total_line_counts(self.files.all())",
"def nrows(self):\n \n return self.ccdRows + self.overRows",
"def num_lines(self, snapshot: Bug, filepath: str) -> int:\n return len(self._line_offsets(snapshot, filepath))",
"def count_lines(self):\n return count_lines(self.filename)",
"def getNumRows(self):\n return glp_get_num_rows(self.lp)",
"def nrow(records):\n return len(records)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
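The three count getters above (get_invoice_count, get_customer_count, get_invl_count) all reduce to unique().shape[0] on one column or on the index. For reference, pandas' nunique() yields the same counts more directly; the small frame below is a hypothetical stand-in for _df_invoice_original.

import pandas as pd

df = pd.DataFrame(
    {"InvoiceNo": [536365, 536365, 536366], "CustomerID": [12345, 12345, 67890]},
    index=[0, 1, 2],
)

invoice_count = df.InvoiceNo.nunique()   # same value as unique().shape[0]
customer_count = df.CustomerID.nunique()
invl_count = df.index.nunique()          # distinct invoice-line rows
print(invoice_count, customer_count, invl_count)  # 2 2 3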
Returns a JSON structure built from the dataframe content given as parameter. | def json_df_builder(self, df, marketID, RFM=None):
#-------------------------------------------------------------------------
# Extract from dataframe content to be returned
#-------------------------------------------------------------------------
str_customerID = str(df.CustomerID.unique()[0])
invoice_count = len(df.InvoiceNo.unique())
item_count = df.Quantity.sum()
invl_count = df.shape[0]
ser_incomes = df.UnitPrice * df.Quantity
incomes = ser_incomes.sum()
str_incomes = "{0:1.2F}".format(incomes)
mean_unit_price = incomes/item_count
str_mean_unit_price = "{0:1.2F}".format(mean_unit_price)
serInvoiceDate = df.InvoiceDate
str_old_date = serInvoiceDate.map(str).min()
str_new_date = serInvoiceDate.map(str).max()
#-------------------------------------------------------------------------
# Build JSON structure form content
#-------------------------------------------------------------------------
json_result = '{\n'
json_result += '\t "_results":[\n'
json_result += "{\n"
json_result += "\t\t"+" \"customerID\":"+str_customerID+"\n"
json_result += "\t\t"+",\"marketID\":"+str(marketID)+"\n"
json_result += "\t\t"+",\"invoice_count\":"+str(invoice_count)+"\n"
json_result += "\t\t"+",\"item_count\":"+str(item_count)+"\n"
json_result += "\t\t"+",\"invl_count\":"+str(invl_count)+"\n"
json_result += "\t\t"+",\"mean_unit_price\":"+str_mean_unit_price+"\n"
json_result += "\t\t"+",\"incomes\":"+str_incomes+"\n"
json_result += "\t\t"+",\"old_date\":"+str_old_date+"\n"
json_result += "\t\t"+",\"new_date\":"+str_new_date+"\n"
if RFM is not None:
json_result += "\t\t"+",\"RFM\":"+RFM+"\n"
else:
pass
json_result += "}\n"
json_result += '\n\t]\n}'
return json_result | [
"def toJSON(self, df) :\n ret = json.dumps(df, indent=4)\n return ret",
"def get_json(df):\n\t import json\n\t import datetime\n\t def convert_timestamp(item_date_object):\n\t if isinstance(item_date_object, (datetime.date, datetime.datetime)):\n\t return item_date_object.strftime(\"%Y-%m-%d\")\n\t \n\t dict_ = df.to_dict(orient='records')\n\n\t return json.dumps(dict_, default=convert_timestamp)",
"def precipitation():\r\n return jsonify(df.to_dict('dict'))",
"def df_to_serving_json(self, df):\n\n df.drop(columns=['time','place'], inplace=True)\n js = '{\"instances\": ['\n for h,values in df.iterrows():\n js += '{\"X\": ['+','.join(list(map(str,values)))+']},'\n js += ']}'\n\n return js",
"def df_json(df_desc):\n op_json = json.loads(df_desc.to_json())\n for col_name in op_json:\n op_json[col_name]['column_name'] = col_name\n return op_json",
"def df_to_json(df):\n json_data = json.loads(df.to_json(orient='records'))\n return json_data",
"def converting(df):\n\n # Write in file and let user know the name of the file\n json = df.to_json(orient = \"records\")\n with open(\"json.js\", \"w\") as file:\n file.write(json)",
"def serialize_dataframe(\n self, dataframe: pd.DataFrame, orient: str, date_format: str\n ) -> dict:\n\n df_json = dataframe.to_json(orient=orient, date_format=date_format)\n return {\"orient\": orient, \"date_format\": date_format, \"data_json\": df_json}",
"def sql_query_reponse(df):\n df_query_pandas = df.toPandas()\n df_query_json = df_query_pandas.to_json(orient='records')\n df_query_json = json.loads(df_query_json)\n return df_query_pandas, df_query_json",
"def make_org_report_json(df_raw):\n\n report = make_org_report(df_raw)\n json_report = json.dumps(report)\n return json_report",
"def export_df_to_json(df):\n\n df.to_json(r'exports/data.json')",
"def serialize(self, df, indent=None):\n j = json.loads(pd.DataFrame.to_json(df, orient=\"values\"))\n j.insert(0, list(df.columns))\n return json.dumps(j, indent=indent)",
"def get_data(dataframe,index=None):\n dflen = len(dataframe)\n if index==None or index <0 or index >= dflen:\n index = randint(0,dflen)\n return dataframe.iloc[index].to_json()",
"def export_dataframe_json(dataframe, filename=\"dataframe.json\", orient='split'):\n global result\n dataframe.to_json(filename, orient=orient)\n with open(filename, \"rb\") as binary_file:\n file_bin = binary_file.read()\n assert file_bin is not None\n result = file_bin\n resultMetadata.put(\"file.extension\", \".json\")\n resultMetadata.put(\"file.name\", filename)\n resultMetadata.put(\"content.type\", \"application/json\")",
"def _data_frame(content):\n response = loads(content)\n key = [x for x in response.keys() if x in c.response_data][0]\n frame = DataFrame(response[key])\n final_frame = _convert(frame)\n return final_frame",
"def receive_json_ids(dataframe, jsondata, just_headers = False):\n\n dict_data = ast.literal_eval(jsondata)\n jsondict = {1: 'Kingdom', 2: 'Phylum', 3: 'Class', 4: 'Order', 5: 'Family', 6: 'Genus', 7: 'Species', 8 : 'Strain'}\n # this checks how long the jsondata is and from this it selects the correct Letter out of the jsondict #\n suffix = jsondict[len(dict_data)]\n\n # This selects the data which has the same name as the recieved jsondata\n fw_subset = dataframe[(dataframe[\"fw_\"+ suffix] == dict_data[-1])] \n rv_subset = dataframe[(dataframe[\"rv_\"+suffix] == dict_data[-1])]\n\n # This is only used so that the columns can be easily renamed in something more generic so the append will merge the correct columns\n columns_rename = pd.DataFrame(columns=[\"bitscore\", \"identity\", \"length\"])\n\n # Get the specified data\n fw_sideDf = fw_subset[[\"fw_bit\", \"fw_id\", \"fw_coverage_length\"]]\n rv_sideDf = rv_subset[[\"rv_bit\", \"rv_id\", \"rv_coverage_length\"]]\n\n # Get headers\n fw_headers = fw_subset.index.values.tolist()\n rv_headers = rv_subset.index.values.tolist()\n\n if just_headers:\n return fw_headers, rv_headers\n \n # Rename the columns\n fw_sideDf.columns = columns_rename.columns\n rv_sideDf.columns = columns_rename.columns\n # Combine the two dataframes in one since they have the same column names it will merge completly\n sideDf = fw_sideDf.append(rv_sideDf)\n # Count and group the different entries also convert them into a json\n count_id = sideDf.round(0).groupby(['identity']).size().to_json(orient='table')\n count_bit = sideDf.round(0).groupby(['bitscore']).size().to_json(orient='table')\n count_length = sideDf.round(0).groupby(['length']).size().to_json(orient='table')\n fw_seqs = fw_subset[\"fw_seq\"].tolist()\n rv_seqs = rv_subset[\"rv_seq\"].tolist()\n\n # Get taxonomy id's\n tax_ids = set([*fw_subset.fw_accession.tolist(), *rv_subset.rv_accession.tolist()])\n tax_len = len(tax_ids)\n if tax_len == 0:\n tax_id = \"None\"\n elif tax_len == 1:\n tax_id = list(tax_ids)[0]\n else:\n tax_id = \"More\"\n\n response = {\n \"count_id\":count_id,\n \"count_bit\": count_bit,\n \"count_length\": count_length,\n \"node_name\": dict_data[-1],\n \"tax_id\": str(tax_id),\n \"fw_headers\": fw_headers,\n \"rv_headers\": rv_headers,\n \"fw_seqs\": fw_seqs,\n \"rv_seqs\": rv_seqs\n }\n return jsonify(response)",
"def obtenergeo_json_df(dataframe, geodataframe, columna, primer_ageb):\n primer_ageb_str= str(primer_ageb)\n\n # Filtrar el dataframe y unir con el geodataframe\n dataframe = dataframe.loc[:, dataframe.columns.isin([columna, primer_ageb_str])]\n geodataframe = geodataframe.merge(dataframe, on=columna)\n\n # Renombrar la columna por si no se llama id\n geodataframe['id'] = geodataframe[columna]\n\n # Convertir el geodataframe en geojson\n with open('./data/production_data/ageb_geometry/ageb_distancias.json') as geofile:\n jdata = json.load(geofile)\n\n # Revisar el geojson\n jdata = checar_geojson(jdata)\n\n return geodataframe, jdata",
"def tobs():\r\n return jsonify(df_temperature.to_dict('dict'))",
"def get_dataframe():\n jsonified_df = redis_instance.hget(\n tasks.REDIS_HASH_NAME, tasks.REDIS_KEYS[\"DATASET\"]\n ).decode(\"utf-8\")\n df = pd.DataFrame(json.loads(jsonified_df))\n return df"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
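json_df_builder above assembles its JSON by string concatenation, which leaves string fields such as the dates unquoted in the output. The sketch below computes the same aggregates but delegates serialization to json.dumps; it is an alternative written under the assumption that the dataframe carries the same CustomerID, InvoiceNo, Quantity, UnitPrice and InvoiceDate columns, not the project's actual implementation.

import json
import pandas as pd

def build_customer_summary_json(df, market_id, rfm=None):
    # Aggregate the same fields as json_df_builder, then let json.dumps
    # handle quoting and escaping.
    incomes = float((df.UnitPrice * df.Quantity).sum())
    item_count = int(df.Quantity.sum())
    result = {
        "customerID": int(df.CustomerID.unique()[0]),
        "marketID": market_id,
        "invoice_count": int(df.InvoiceNo.nunique()),
        "item_count": item_count,
        "invl_count": int(df.shape[0]),
        "mean_unit_price": round(incomes / item_count, 2),
        "incomes": round(incomes, 2),
        "old_date": str(df.InvoiceDate.min()),
        "new_date": str(df.InvoiceDate.max()),
    }
    if rfm is not None:
        result["RFM"] = rfm
    return json.dumps({"_results": [result]}, indent=2)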
This function is used for the validation process. It returns a list of stockCode items and a matching list of quantities, one per item. | def get_order_lists(self, n_items, n_quantities):
arr_stock_code = self._df_invoice_original.StockCode.unique()
arr_stock_code = np.random.choice(arr_stock_code, n_items)
list_stockCode = list(arr_stock_code)
list_quantities = np.ones(arr_stock_code.shape[0])
list_quantities *=n_quantities
return list_stockCode, list_quantities | [
"def get_items(self):\n \n items = []\n\n params = self.request.query_params\n\n if 'items[]' in params:\n items = params.getlist('items[]', [])\n elif 'item' in params:\n items = [params.get('item', None)]\n\n if type(items) not in [list, tuple]:\n item = [items]\n\n valid_ids = []\n\n for item in items:\n try:\n valid_ids.append(int(item))\n except (ValueError):\n pass\n\n # List of StockItems which match provided values\n valid_items = StockItem.objects.filter(pk__in=valid_ids)\n\n return valid_items",
"def get_items(self):\n\n items = []\n\n params = self.request.query_params\n\n if 'items[]' in params:\n items = params.getlist('items[]', [])\n elif 'item' in params:\n items = [params.get('item', None)]\n\n if type(items) not in [list, tuple]:\n items = [items]\n\n valid_ids = []\n\n for item in items:\n try:\n valid_ids.append(int(item))\n except (ValueError):\n pass\n\n # List of StockItems which match provided values\n valid_items = StockItem.objects.filter(pk__in=valid_ids)\n\n return valid_items",
"def validate_order_items(self, value):\n\n for item in value:\n try:\n inventory_obj = item['bakery_item'].bakery_item_inventory\n\n if inventory_obj.quantity - item['quantity'] < 0:\n raise ValidationError(\"stock for bakery_item {} is not available with requested quantity\".format(\n item['bakery_item'].id))\n except ObjectDoesNotExist as e:\n raise serializers.ValidationError(\"bakery_item_inventory does not exist error is :::{}\".format(e))\n return value",
"def parse_quantities(quantities):\n codes = []; names = []\n\n for q in quantities:\n c, n = parse_quantity(q)\n codes.append(c)\n names.append(n)\n\n return codes, names",
"def verify_order_item_availability(self, data):\n\n if isinstance(data, str):\n data = json.loads(data)\n\n confirmed_order_stock_items = []\n\n for i in data['order_stock_items']:\n subqry = self.db.query(InventoryItem.site_id,\n func.max(InventoryItem.version).label('maxversion')) \\\n .filter(InventoryItem.product_id == i['product_id']) \\\n .group_by(InventoryItem.site_id).subquery('t2')\n\n inventory_items = self.db.query(InventoryItem) \\\n .filter(InventoryItem.product_id == i['product_id']) \\\n .join(subqry,\n (InventoryItem.site_id == subqry.c.site_id) &\n (InventoryItem.version == subqry.c.maxversion)) \\\n .all()\n\n inventory_items = Enumerable(inventory_items)\n inventory_count = inventory_items.sum(lambda x: x.available_stock)\n\n has_stock = inventory_count >= i['units']\n confirmed_order_stock_items.append({'product_id': i['product_id'],\n 'has_stock': has_stock})\n\n rejected = all(item['has_stock'] for item in confirmed_order_stock_items)\n\n if not rejected:\n payload = {'order_id': data['order_id'],\n 'order_stock_items': confirmed_order_stock_items}\n self.dispatch('rejected_order_stock', payload)\n else:\n self.dispatch('confirmed_order_stock', {'order_id': data['order_id']})",
"def test_find_stock_items(self):\n pass",
"def quantities_available(quantities):\n available = []\n for q in quantities:\n available.append(quantity_available(q))\n return available",
"def split_stock_list1(stock_list):\n list_by_stock = []\n appl = []\n msft = []\n # Loop through stock list\n for stock_item in stock_list:\n # Identify company\n if stock_item[SYMBOL] == \"APPL\":\n appl.append(stock_item)\n elif stock_item[SYMBOL] == \"MSFT\":\n msft.append(stock_item)\n\n # Append appl and msft to return list\n list_by_stock.append(appl)\n list_by_stock.append(msft)\n\n return list_by_stock",
"def getUnitPriceList(self, list_stockCode):\n df = self._df_invoice_original\n\n list_unitPrice = list()\n \n for stockCode in list_stockCode:\n unitPrice = df[df.StockCode==stockCode].UnitPrice.unique()[0]\n list_unitPrice.append(unitPrice)\n return list_unitPrice",
"def stocks(self):\n return self.quantity",
"def get_items(self):\n return [item for item in self.items if item.quantity > 0]",
"def get_item_variants(self, item_id, item_name, start):\n\n item_url = f\"https://www.supremenewyork.com/shop/{item_id}.json\"\n\n item_variants = rq.get(item_url, headers=self.headers, proxies=self.proxy).json()\n\n for stylename in item_variants[\"styles\"]:\n for itemsize in stylename[\"sizes\"]:\n item = [item_name, stylename[\"name\"], itemsize['name'], item_variants[\"description\"], 'https:' + stylename[\"image_url\"], item_url.split('.json')[0]]\n if itemsize[\"stock_level\"] != 0:\n # Checks if it already exists in our instock\n if self.checker(item):\n pass\n else:\n # Add to instock dict\n self.instock.append(item)\n \n # Send a notification to the discord webhook with the in-stock product\n if start == 0:\n print('Sending new Notification')\n self.discord_webhook(item)\n logging.info(msg='Successfully sent Discord notification')\n\n else:\n if self.checker(item):\n self.instock.remove(item)",
"def get_price_list(self, item_list):\n price_list = []\n for item in item_list:\n price_list.append(Inventory.stock[item].price)\n return price_list",
"def test_several_items(self):\n actual = a1.stock_price_summary([0.01, 0.03, -0.02, -0.14, 0, 0, 0.10, -0.01])\n expected = (0.14, -0.17)\n self.assertEqual(actual, expected)",
"def validate(self, attrs):\n exception_body = []\n for orderline in attrs.get('orderlines', []):\n product = orderline['product']\n\n # If orderline has less units than available, all good.\n if orderline['units'] <= product.units:\n continue\n\n # else error is accumulated\n if product.units > 0:\n exception_body.append({product.name: 'Only {0} units available.'.format(str(product.units))})\n else:\n exception_body.append({product.name: 'Out of stock'})\n\n # If any orderline has problem, reject order.\n if exception_body:\n raise exceptions.PermissionDenied({'errors': exception_body})\n\n return attrs",
"def test_CalculateStockItemOrders(self):\n symbol = \"XXXX\"\n\n # Create ActiveStockItem\n activeStockItem = ActiveStockItem(symbol=symbol)\n quantity = 2\n buyStepSize = 1\n activeStockItem.SellStepSize = 2\n activeStockItem.SellStepType = SellDeltaType.FIXED\n activeStockItem.StartPrice = 20.55\n activeStockItem.QuantityMultiplier = 1\n activeStockItem.MaxActiveBuy = 2\n priceCoordinates:List[PriceCoordinate] = []\n priceCoordinates.append(PriceCoordinate(startPrice=0,quantity=quantity, \n buyDeltaType=BuyDeltaType.FIXED, fixedBuyDelta=buyStepSize))\n activeStockItem.PriceCoordinates = priceCoordinates\n\n # Create PortfolioPosition\n portfolioPosition = PortfolioPosition(symbol=symbol)\n portfolioPosition.Quantity = 9\n \n expectedLimitOrders:List[OrderInfo] = [\n OrderInfo(Settings.NewOrderId, symbol, 22.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 21.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 20.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 19.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 18.55, 1, True, True),\n OrderInfo(Settings.NewOrderId, symbol, 16.55, 1, True, False),\n OrderInfo(Settings.NewOrderId, symbol, 15.55, 2, False, False)\n ]\n\n possibleLimitOrders:List[OrderInfo] = self.manageOrdersHelpers.GeneratePossibleLimitOrders(activeStockItem, portfolioPosition.Quantity)\n\n self.assertSequenceEqual(expectedLimitOrders, possibleLimitOrders)\n\n placeOrders, cancelOrders = self.moneyMaker.CalculateStockItemOrders(activeStockItem, [], portfolioPosition)\n\n print(placeOrders)\n\n print(cancelOrders)\n\n for activeStockItem in ActiveStockItems:\n print(activeStockItem.Symbol)",
"def _get_stock_item_ids(cls, *skus):\n return linnapi.inventory.get_stock_item_ids_by_sku(*skus)",
"def getStockCodeList(self, list_description=None):\n list_stockCode = list()\n df = self._df_invoice_original\n \n if list_description is None:\n list_stockCode = list(df.StockCode.unique())\n else :\n for description in list_description:\n stockCode = df[df.Description==description].StockCode.unique()[0]\n list_stockCode.append(stockCode)\n return list_stockCode",
"def stocks(self):\n return self.quantity - self.reserved"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
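The `get_order_lists` document above draws random stock codes and pairs each with a constant quantity. Below is a minimal standalone sketch of the same idea, assuming a pandas DataFrame with a `StockCode` column; the `make_random_order` name and the toy table are hypothetical, not part of the original class. Note that `np.random.choice` samples with replacement unless `replace=False` is passed.

```python
import numpy as np
import pandas as pd

def make_random_order(df_invoice: pd.DataFrame, n_items: int, n_quantities: float):
    """Draw n_items stock codes at random and pair each with a constant quantity."""
    codes = df_invoice["StockCode"].unique()
    # replace=False avoids drawing the same stock code twice in one order
    chosen = np.random.choice(codes, size=n_items, replace=False)
    quantities = [float(n_quantities)] * n_items
    return list(chosen), quantities

# Usage with a toy invoice table
df = pd.DataFrame({"StockCode": ["A1", "B2", "C3", "D4"],
                   "UnitPrice": [1.0, 2.5, 0.8, 3.2]})
stock_codes, quantities = make_random_order(df, n_items=2, n_quantities=3)
print(stock_codes, quantities)
```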
Sentence generator for an entire corpus directory. | def sentences_for_dir(path='./',separate=True,gzipped=True):
for filename in cowfiles(path):
for metadata, data in sentence_generator(filename,separate,gzipped):
yield metadata, data | [
"def gen_sentences_from_dir(path, ext=\"txt\"):\n for doc in gen_documents_from_dir(path, ext):\n for s in gen_sentences_from_string(doc):\n yield s",
"def load_data_sentences(dirname):\n sentence_list = []\n for fname in os.listdir(dirname):\n with open(os.path.join(dirname, fname)) as file:\n #sentence_list.append(gensim.models.word2vec.LineSentence(file))\n sentence_list.append(file)\n return sentence_list",
"def process_docs(directory, vocab):\n for filename in listdir(directory):\n if not filename.endswith('.txt'):\n continue\n path = directory + '/' + filename\n add_doc_to_vocab(path, vocab)",
"def corpusMake(n):\r\n # for each files in the directory, if they are from the texts folder, then dump it in the list\r\n # this function also auto sorts the list (hence why i had to make it text01, text02)\r\n files = [f for f in listdir('tests-spanish') if isfile(join('tests-spanish', f))]\r\n\r\n # then extract each files and we dump them in a list\r\n books = []\r\n for i in files:\r\n if n == 10:\r\n i = 'texts-10/' + i\r\n elif n == 5:\r\n i = 'texts-5/' + i\r\n else:\r\n i = 'tests-spanish/' + i\r\n with open(i,'r', encoding='utf8', errors='ignore') as f:\r\n doc = f.readlines()\r\n books.append(doc)\r\n doc = ''\r\n \r\n f = open('corpusdata.txt', 'a')\r\n #wipe everything before adding it to the list\r\n #or just create a new file everytime\r\n f.seek(0)\r\n f.truncate()\r\n # then we need to put everything into a file and turn it into a corpus\r\n for i in range(len(books)):\r\n book = str(books[i][0])\r\n f.write(str(book))\r\n f.write('\\n')\r\n f.close()\r\n \r\n # a function to load the data\r",
"def _get_gigaword_sentences():\n articles = list()\n with open(config['gigaword_sen_folder'], 'r') as f:\n for line in f:\n art = DucArticle()\n art.sentence = line.strip('\\n')\n articles.append(art)\n return articles",
"def iter_documents(top_directory):\n for root, dirs, files in os.walk(top_directory):\n for file in filter(lambda file: file.endswith('.txt'), files):\n document = open(os.path.join(root, file)).read() # read the entire document, as one big string\n yield utils.tokenize(document, lower=True) # or whatever tokenization suits you",
"def import_spontaneous_speech_corpus(corpus_name, directory, **kwargs):\n\n dialect = kwargs.pop('dialect', 'textgrid')\n stop_check = kwargs.pop('stop_check', None)\n call_back = kwargs.pop('call_back', None)\n speaker_source = kwargs.pop('speaker_source', None)\n delimiter = kwargs.pop('delimiter', None)\n\n corpus = SpontaneousSpeechCorpus(corpus_name,directory)\n\n words = []\n phones = []\n textgrids = []\n wavs = []\n if call_back is not None:\n call_back('Finding files...')\n call_back(0,1)\n cur = 0\n for root, subdirs, files in os.walk(directory):\n if stop_check is not None and stop_check():\n return\n for f in files:\n if dialect == 'textgrid' and f.lower().endswith('.textgrid'):\n textgrids.append(os.path.join(root,f))\n elif dialect == 'buckeye' and f.endswith('.words'):\n words.append(os.path.join(root,f))\n elif dialect == 'buckeye' and f.endswith('.phones'):\n phones.append(os.path.join(root,f))\n elif dialect == 'timit' and f.endswith('.wrd'):\n words.append(os.path.join(root,f))\n elif dialect == 'timit' and f.endswith('.phn'):\n phones.append(os.path.join(root,f))\n elif f.endswith('.wav'):\n wavs.append(os.path.join(root,f))\n if dialect == 'textgrid':\n word_tier_name = kwargs.pop('word_tier_name', None)\n phone_tier_name = kwargs.pop('phone_tier_name', None)\n dialogs = align_textgrid_info(textgrids, wavs, speaker_source, stop_check, call_back)\n else:\n dialogs = align_dialog_info(words, phones, wavs, speaker_source, stop_check, call_back)\n if call_back is not None:\n call_back('Processing discourses...')\n call_back(0,len(dialogs))\n cur = 0\n\n for d, v in dialogs.items():\n if stop_check is not None and stop_check():\n return\n if call_back is not None:\n cur += 1\n call_back(cur)\n discourse_info = {'name':d}\n if dialect == 'textgrid':\n if 'textgrid' not in v:\n continue\n data = textgrids_to_data(v['textgrid'], word_tier_name,\n phone_tier_name,\n v['speaker'], delimiter)\n else:\n if 'words' not in v:\n continue\n if 'phones' not in v:\n continue\n data = files_to_data(v['words'], v['phones'], dialect)\n discourse_info['speaker'] = Speaker(v['speaker'])\n\n if 'wav' in v:\n discourse_info['wav_path'] = v['wav']\n corpus.add_discourse(data, discourse_info,delimiter=delimiter)\n return corpus",
"def process_docs(directory, vocab):\n for file_name in listdir(directory):\n file_path = directory + '/' + file_name\n add_doc_to_vocab(file_path, vocab)",
"def _create_corpus(ocr_detection_dir):\n corpus = []\n for filename in os.listdir(ocr_detection_dir):\n filename = os.path.join(ocr_detection_dir, filename)\n with open(filename, 'r', encoding='utf-8') as f:\n data = json.load(f)\n corpus.append(data['text'].replace('\\n', ' '))\n if len(corpus) % 1000 == 0:\n tf.logging.info('On %i', len(corpus))\n tf.logging.info(\"total: %i\", len(corpus))\n return corpus",
"def make_sentences(self):\n\n if self.document == None:\n return\n\n sent = sent_tokenize(self.document) # contains raw sentences\n\n\n # Create parameters for NER and Dependency Parsing a\n # and pass it to the sentence objcet\n\n # set config file\n config = CP.RawConfigParser()\n config = config\n config.read('config.py')\n\n # Server for dependency parsing\n\n server = ServerProxy(JsonRpc20(),TransportTcpIp(addr=(\"127.0.0.1\", 8080), timeout=200.0))\n\n # Parameters for Named entitye recognition\n\n # get the classifier and tagger location from config file\n tagger = config.get('NER','tagger') # gets the path of the stanford tagger\n classifier = config.get('NER','classifier') # gets the path of the stanford classifier\n st = StanfordNERTagger(classifier,tagger)\n for i in range(len(sent)):\n s = Sentence(sent[i],i,server, st, 'test')\n self.sentences.append(s)",
"def generate_sentence(self, start_text, sentence_count=1, up_to_count = None, on_word_generated=None):\n if up_to_count:\n sentence_count = random.randint(1, up_to_count)\n text = start_text\n sentence = ''\n reached_end = False\n for _ in range(sentence_count):\n while (not reached_end):\n word = self.generate_word(text)\n # Sometimes it generates token <|endoftext|> and then starts doing a newscast or something.\n if 'endoftext' in word:\n reached_end = True\n break\n if on_word_generated:\n on_word_generated(word) \n text += word\n sentence += word\n if '.' in word:\n break\n return sentence",
"def build_corpus(self):\n print(\"Inside the build_corpus >>>>>\")\n documentsCount = 0\n documents = self.documents\n\t\t\n with open(self.documents_path) as file:\n for documents in file.readlines():\n documents = documents.rstrip('}\\n ').strip('0\\t').strip('1\\t').split(' ')\n documentsCount = documentsCount +1\n self.documents.append(documents)\n\t\t\t\n self.number_of_documents = documentsCount",
"def process_corpus(self):\n sentences = []\n sentence = []\n with open(str(self.file), encoding=self.encoding) as f:\n\n line = f.readline()\n\n while line:\n\n if line.startswith(\"#\"):\n line = f.readline()\n continue\n\n if line.strip().replace(\"\", \"\") == \"\":\n if len(sentence) > 0:\n self.infer_space_after(sentence)\n if self.tagging_scheme is not None:\n self.convert_tag_scheme(\n sentence, target_scheme=\"iobes\"\n )\n\n sentences.append(sentence)\n sentence = []\n\n else:\n fields = re.split(r\"\\s+\", line)\n token = fields[0] # text column\n token_tags = {\n v: fields[k]\n for k, v in self.columns.items()\n if v != \"text\"\n }\n sentence.append({\"name\": token, \"tags\": token_tags})\n\n line = f.readline()\n\n return sentences",
"def _doc2vec_doc_stream(paths, n, tokenizer=word_tokenize, sentences=True):\n i = 0\n p = Progress()\n for path in paths:\n with open(path, 'r') as f:\n for line in f:\n i += 1\n p.print_progress(i/n)\n\n # We do minimal pre-processing here so the model can learn\n # punctuation\n line = line.lower()\n\n if sentences:\n for sent in sent_tokenize(line):\n tokens = tokenizer(sent)\n yield LabeledSentence(tokens, ['SENT_{}'.format(i)])\n else:\n tokens = tokenizer(line)\n yield LabeledSentence(tokens, ['SENT_{}'.format(i)])",
"def make_sentences(tree):\n \n tokens = tree.find(\"{http://www.dspin.de/data/textcorpus}TextCorpus/{http://www.dspin.de/data/textcorpus}tokens\")\n lemmas = tree.find(\"{http://www.dspin.de/data/textcorpus}TextCorpus/{http://www.dspin.de/data/textcorpus}lemmas\")\n POStags = tree.find(\"{http://www.dspin.de/data/textcorpus}TextCorpus/{http://www.dspin.de/data/textcorpus}POStags\")\n sentences = tree.find(\"{http://www.dspin.de/data/textcorpus}TextCorpus/{http://www.dspin.de/data/textcorpus}sentences\")\n \n end_ids = []\n \n for sentence in sentences:\n token_ids = sentence.attrib[\"tokenIDs\"].split()\n end_word_id = token_ids[len(token_ids)-1]\n end_ids.append(end_word_id)\n\n \n s = []\n \n for i, token in enumerate(tokens):\n try:\n token_id = token.attrib[\"ID\"]\n \n word = token.text\n lemma = lemmas[i].text\n pos = POStags[i].text\n index = i\n \n s.append((word, lemma, pos, index))\n \n if token_id in end_ids:\n yield s\n s = []\n except:\n pass\n print \"token skipped...\"",
"def peoples_speech(\n corpus_dir: Pathlike,\n output_dir: Pathlike,\n):\n prepare_peoples_speech(\n corpus_dir,\n output_dir=output_dir,\n )",
"def generate_corpus():\n data = load_data()\n questions = [s.split(' ', 1)[1].lower() for s in data]\n return questions",
"def load_sentences(path, lower, zeros=True):\n sentences = []\n sentence = []\n for line in codecs.open(path, 'r', 'utf8'):\n line = zero_digits(line.rstrip()) if zeros else line.rstrip()\n if not line:\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n sentence = []\n else:\n word = line.split()\n assert len(word) >= 2\n sentence.append(word)\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n return sentences",
"def gen_documents_from_dir(path, min_length=0, ext=\"txt\"):\n files = [os.path.join(path, f) for f in os.listdir(path) if f.endswith(ext)]\n for fi,f in enumerate(files):\n logger.debug(\"Loading file %d / %d...\" % (fi+1, len(files)))\n fstr = open(f).read()\n if not min_length or len(fstr) > min_length:\n logger.debug(\"Yielding file %s\" % f)\n yield strip_string(open(f).read())"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
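The corpus generator above delegates to `cowfiles` and `sentence_generator`, which are defined elsewhere in that codebase and not shown here. A plain-text sketch of the same lazy streaming pattern, assuming one sentence per line in `.txt` files; the `iter_corpus_sentences` helper is illustrative, not the COW implementation.

```python
import os

def iter_corpus_sentences(path="./", ext=".txt"):
    """Yield (filename, sentence) pairs for every text file under path."""
    for root, _dirs, files in os.walk(path):
        for name in files:
            if not name.endswith(ext):
                continue
            full = os.path.join(root, name)
            with open(full, encoding="utf-8") as handle:
                for line in handle:
                    sentence = line.strip()
                    if sentence:
                        yield full, sentence

# Usage: stream sentences lazily without loading the whole corpus into memory
# for fname, sent in iter_corpus_sentences("./corpus"):
#     process(sent)
```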
Build each tree in the 'forest' of trees. After each iteration, evaluate the tree and reweight the input sample so that misclassified events are weighted up and correctly classified events are weighted down. | def build(self):
# weights to apply to training samples, updated on each
# iteration of the boosting algo, normalised to 1
sigWeights = np.ones(self.nSig, dtype=float)
bkgWeights = np.ones(self.nBkg, dtype=float)
reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))
sigWeights *= reweight
bkgWeights *= reweight
# Weight of each tree, strong classifers have higher weight
self.treeWeights = np.zeros(self.ntrees, dtype=float)
        for i in range(self.ntrees):
# build new tree
newTree = Tree()
newTree.load(self.sigData,self.bkgData,weights=(sigWeights,bkgWeights))
newTree.build()
self.dTrees.append(newTree)
# evaluate trees
# keep track of each event
err = 0.0
sigWrong = np.zeros(self.nSig)
bkgWrong = np.zeros(self.nBkg)
for j in range(self.nSig):
if newTree.classify(np.array((self.sigData[j,])))<0:
                    sigWrong[j] = 1
                    err += sigWeights[j]
for j in range(self.nBkg):
if newTree.classify(np.array((self.bkgData[j,])))>0:
                    bkgWrong[j] = 1
                    err += bkgWeights[j]
alpha = self.beta*math.log((1.0-err)/err)
            print(err, alpha)
corFactor = math.exp(-alpha)
wrongFactor = math.exp(alpha)
if (err<1e-20 or err >= 0.5):
print "SOEMTHING WRONG!!"
self.treeWeights[i] = alpha
# reweight training samples
            for j in range(self.nSig):
                if sigWrong[j]:
                    sigWeights[j] *= wrongFactor
                else:
                    sigWeights[j] *= corFactor
            for j in range(self.nBkg):
                if bkgWrong[j]:
                    bkgWeights[j] *= wrongFactor
                else:
                    bkgWeights[j] *= corFactor
# normalise weights
reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))
sigWeights *= reweight
bkgWeights *= reweight | [
"def grow_forest(forest, X, y, seeds, labels=None):\n # Convert data\n X, = check_arrays(X, dtype=DTYPE, sparse_format=\"dense\")\n # Make a list container for grown trees\n n_trees = forest.n_estimators\n trees = []\n # For each tree in the forest\n for i in range(n_trees):\n # Make a np.random.RandomState instance from the tree's planting seed\n random_state = check_random_state(seeds[i])\n # generate a random seed for a branching seed\n seed = random_state.randint(MAX_INT)\n # Make a decision tree object\n tree = forest._make_estimator(append=False)\n # Init the tree's RandomState instance with generated seed\n # this will randomize what features the tree will use\n tree.set_params(random_state=check_random_state(seed))\n # If we are bootstraping\n if forest.bootstrap:\n # If we are given labels\n if labels is not None:\n # Then need to bootstrap via labels\n # We can do this by using StratifiedShuffleSplit\n # to gain a random sample from each lable\n sss = cross_validation.StratifiedShuffleSplit(labels, \n n_iter=1, \n test_size=np.unique(labels).size, \n random_state=check_random_state(seed))\n # Then we'll bootstrap our X and y for the lable samples chosen\n for train, test in sss:\n X_lbs = X[test]\n y_lbs = y[test]\n break\n \n # Then get the number of samples\n n_samples = X_lbs.shape[0]\n # To generate a uniform sample weight\n curr_sample_weight = np.ones((n_samples,), dtype=np.float64)\n # Then randomly choses n_samples from all samples with replacement \n indices = random_state.randint(0, n_samples, n_samples)\n # Use this method of bincount to make a randome benning histogram\n # that will sum up to n_samples\n sample_counts = bincount(indices, minlength=n_samples)\n # Apply these randomized counts to the old uniform weights\n curr_sample_weight *= sample_counts\n # Fit the tree using these new sample weights\n tree.fit(X_lbs, y_lbs, sample_weight=curr_sample_weight, check_input=False)\n # Then set the indices of the tree only to the samples that had non-zero weights\n tree.indices_ = sample_counts > 0.\n else:\n # Then get the number of samples\n n_samples = X.shape[0]\n # To generate a uniform sample weight\n curr_sample_weight = np.ones((n_samples,), dtype=np.float64)\n # Then randomly choses n_samples from all samples with replacement \n indices = random_state.randint(0, n_samples, n_samples)\n # Use this method of bincount to make a randome benning histogram\n # that will sum up to n_samples\n sample_counts = bincount(indices, minlength=n_samples)\n # Apply these randomized counts to the old uniform weights\n curr_sample_weight *= sample_counts\n # Fit the tree using these new sample weights\n tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)\n # Then set the indices of the tree only to the samples that had non-zero weights\n tree.indices_ = sample_counts > 0.\n # If we aren't bootstraping\n else:\n # This just fit the data with no random weights\n tree.fit(X, y, check_input=False)\n # Add the grown tree to the container \n trees.append(tree)\n # return all of the trained trees\n return trees",
"def populate_forest_func(forest, root_func, tree_funcs):\n data_arr, out_arr, start_index, end_index = root_func.args\n\n # -- SETUP BLOCK\n setup_block = root_func.append_basic_block(\"setup\")\n builder = ir.IRBuilder(setup_block)\n loop_iter = builder.alloca(INT, 1, \"loop-idx\")\n builder.store(start_index, loop_iter)\n condition_block = root_func.append_basic_block(\"loop-condition\")\n builder.branch(condition_block)\n # -- END SETUP BLOCK\n\n # -- CONDITION BLOCK\n builder = ir.IRBuilder(condition_block)\n comp = builder.icmp_signed(\"<\", builder.load(loop_iter), end_index)\n core_block = root_func.append_basic_block(\"loop-core\")\n term_block = root_func.append_basic_block(\"term\")\n builder.cbranch(comp, core_block, term_block)\n # -- END CONDITION BLOCK\n\n # -- CORE LOOP BLOCK\n builder = ir.IRBuilder(core_block)\n # build args arr, convert categoricals vars from float to int\n args = []\n loop_iter_reg = builder.load(loop_iter)\n\n n_args = ir.Constant(INT, forest.n_args)\n iter_mul_nargs = builder.mul(loop_iter_reg, n_args)\n idx = (builder.add(iter_mul_nargs, iconst(i)) for i in range(forest.n_args))\n raw_ptrs = [builder.gep(root_func.args[0], (c,)) for c in idx]\n for is_cat, ptr in zip(forest.categorical_bitmap, raw_ptrs):\n el = builder.load(ptr)\n if is_cat:\n args.append(builder.fptosi(el, INT_CAT))\n else:\n args.append(el)\n # iterate over each tree, sum up results\n res = builder.call(tree_funcs[0], args)\n for func in tree_funcs[1:]:\n # could be inlined, but optimizer does for us\n tree_res = builder.call(func, args)\n res = builder.fadd(tree_res, res)\n ptr = builder.gep(out_arr, (loop_iter_reg,))\n builder.store(res, ptr)\n tmpp1 = builder.add(loop_iter_reg, iconst(1))\n builder.store(tmpp1, loop_iter)\n builder.branch(condition_block)\n # -- END CORE LOOP BLOCK\n\n # -- TERMINAL BLOCK\n ir.IRBuilder(term_block).ret_void()\n # -- END TERMINAL BLOCK",
"def build_forest(training_set, thresh_cands, T):\n forest = []\n for _ in range(T):\n forest.append(build_tree(training_set, thresh_cands))\n return forest",
"def populate_synthetic_tree(self):\r\n logging.debug('populating synthetic tree...')\r\n a_data = self.realData\r\n ndata = a_data.shape[1]\r\n for i in range(ndata):\r\n ptx = a_data[0, i]\r\n pty = a_data[1, i]\r\n leaf = self.root.find_subnode(ptx, pty)\r\n leaf.n_count += 1\r\n\r\n # traverse the tree and update leaf counts\r\n stack = deque()\r\n stack.append(self.root)\r\n while len(stack) > 0:\r\n cur_node = stack.popleft()\r\n if cur_node.n_isLeaf is True: # leaf\r\n cur_node.n_count += self.differ.getNoise(1, 0.5 * self.param.Eps)\r\n else:\r\n stack.append(cur_node.nw)\r\n stack.append(cur_node.ne)\r\n stack.append(cur_node.sw)\r\n stack.append(cur_node.se)",
"def test_synthetic_tree_prep(self):\n np.random.seed(6)\n\n # load the `Reactions` (built from 3 reaction templates)\n path_to_rxns = './data/ref/rxns_hb.json.gz'\n r_ref = ReactionSet()\n r_ref.load(path_to_rxns)\n rxns = r_ref.rxns\n\n # load the reference building blocks (100 here)\n path_to_building_blocks = './data/building_blocks_matched.csv.gz'\n building_blocks = pd.read_csv(path_to_building_blocks, compression='gzip')['SMILES'].tolist()\n\n num_trials = 25\n num_finish = 0\n num_error = 0\n num_unfinish = 0\n\n trees = []\n for _ in tqdm(range(num_trials)):\n tree, action = synthetic_tree_generator(building_blocks,\n rxns,\n max_step=5)\n if action == 3:\n trees.append(tree)\n num_finish += 1\n elif action == -1:\n num_error += 1\n else:\n num_unfinish += 1\n\n synthetic_tree_set = SyntheticTreeSet(sts=trees)\n synthetic_tree_set.save('./data/st_data.json.gz')\n\n # check that the number of finished trees generated is == 3, and that\n # the number of unfinished trees generated is == 0\n self.assertEqual(num_finish, 3)\n self.assertEqual(num_unfinish, 0)\n\n # check here that the synthetic trees were correctly saved by\n # comparing to a provided reference file in 'SynNet/tests/data/ref/'\n sts_ref = SyntheticTreeSet()\n sts_ref.load('./data/ref/st_data.json.gz')\n for st_idx, st in enumerate(sts_ref.sts):\n st = st.__dict__\n ref_st = sts_ref.sts[st_idx].__dict__\n self.assertTrue(st == ref_st)",
"def build_tree(self, w):\n w_abs = np.abs(w)\n if sum(w_abs) != 1.:\n w_abs = w_abs / sum(w_abs)\n self.w = w_abs\n self.tree = np.zeros(w.shape)\n self._build_node(w_abs, 1)\n self.w_apx = extract_distribution(self.tree)\n\n n_levels = np.ceil(np.log2(len(w)))\n self.lfsr = []\n for n in range(int(n_levels)):\n seed = np.random.randint(1, int(2**(self.lfsr_nbits-n)-1))\n self.lfsr.append(LFSR(self.lfsr_nbits-n, seed))",
"def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))",
"def __init__(self, dims, treeCount, incAdd = 1, testDims = 3, dimCount = 4, rotCount = 32):\n # Support structures...\n self.cats = dict() # Dictionary from cat to internal indexing number.\n self.treeCount = treeCount\n self.incAdd = incAdd\n \n # Setup the classification forest...\n self.classify = DF()\n self.classify.setInc(True)\n self.classify.setGoal(Classification(None, 1))\n self.classify.setGen(LinearClassifyGen(0, 1, testDims, dimCount, rotCount))\n \n self.classifyData = MatrixGrow()\n self.classifyTrain = self.treeCount\n \n # Setup the density estimation forest...\n self.density = DF()\n self.density.setInc(True)\n self.density.setGoal(DensityGaussian(dims))\n self.density.setGen(LinearMedianGen(0, testDims, dimCount, rotCount))\n self.density.getPruner().setMinTrain(48)\n \n self.densityData = MatrixGrow()\n self.densityTrain = self.treeCount",
"def train(self):\r\n stack_nodes = [self.root]\r\n # !!! Write code to train decision tree. If the node is pure, set the majority_class attribute.\r\n # Use .pop(0) to pop the top of the stack\r\n for data in stack_nodes:\r\n self.split_attr, self.split_value =compute_best_split()\r\n if self.split_attr <= self.split_value:\r\n self.root =get_data_for_left()\r\n else\r\n self.root =get_data_for_right()\r\n\r\n stack_nodes.pop(0)\r\n pass",
"def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")",
"def __init__(self, train, test, maxDepth, minSize, numTree, attriBag, relativeRatio):\n\t\tself.forest = []\n\t\tself.train, self.test = deepcopy(train), deepcopy(test)\n\t\tself.actualTrain = [row[-1] for row in self.train]\n\t\tself.actualTest = [row[-1] for row in self.test]\n\t\tfor tree in range(numTree):\n\t\t\ttrainSet, testSet = train_validation_split(self.train, 0.8)\n\t\t\tself.forest.append(Decision_Tree(trainSet, testSet, maxDepth, minSize, attriBag, relativeRatio))",
"def build_tree(data, impurity, chi_value):\r\n if len(np.unique(data[:, -1])) == 1:\r\n return np.unique(data[:, -1])[0], data.shape[0]\r\n else:\r\n best_feature = 0\r\n best_threshold = 0\r\n best_gain = 0\r\n for i in range(np.subtract(data.shape[1], 1)):\r\n for j in range(np.subtract(data.shape[0], 1)):\r\n threshold = np.mean(np.array(data[j][i], data[j + 1][i]))\r\n child_a = data[np.where(data[:, i] <= threshold)]\r\n weight_a = np.divide(child_a.shape[0], data.shape[0])\r\n child_b = data[np.where(data[:, i] > threshold)]\r\n weight_b = np.divide(child_b.shape[0], data.shape[0])\r\n gain = impurity(data) \\\r\n - (np.multiply(weight_a, impurity(child_a))\r\n + np.multiply(weight_b, impurity(child_b)))\r\n if gain > best_gain:\r\n best_gain = gain\r\n best_threshold = threshold\r\n best_feature = i\r\n majority = 0.0 if np.divide(data[np.where(data[:, -1] == 0.0)].shape[0], data.shape[0]) > 0.5 else 1.0\r\n root = DecisionNode(best_feature, best_threshold, data, data.shape[0], majority)\r\n if chi_value == 1 or root.chi_square < chi_table[chi_value]:\r\n child_a = build_tree(data[np.where(data[:, best_feature] <= best_threshold)], impurity, chi_value)\r\n child_b = build_tree(data[np.where(data[:, best_feature] > best_threshold)], impurity, chi_value)\r\n root.add_child(child_a)\r\n root.add_child(child_b)\r\n else:\r\n class_values = np.unique(data[:, -1])\r\n best_class = 0\r\n best_count = 0\r\n for i in range(len(class_values)):\r\n temp = np.where(data[:, -1] == class_values[i])[0]\r\n if temp.shape[0] > best_count:\r\n best_count = temp.shape[0]\r\n best_class = class_values[i]\r\n\r\n return best_class, data.shape[0]\r\n\r\n return root",
"def get_featured_tree(self):\n\n for t in self.tree.get_terminals():\n t.sample_series = self.feature_table[t.name]\n self.feature_tree = self.recursion_tree(self.tree.root)\n for clade in self.feature_tree.find_clades(order='level'):\n clade.depth = 1+len(self.feature_tree.get_path(clade))\n \n #i = 0\n #for clade in self.feature_tree.find_clades(order='level'):\n # clade.ID_num = i \n #clade.abu = np.mean(clade.sample_series.values)\n #clade.domain_otu = clade.sample_series.idxmax()",
"def generateAndTrain(generationParameters, trainingParameters,\n resultsDir): \n \n \n generatedObjectTree = simpleRandomBifurcations(generationParameters[\"numberOfLevels\"],\n generationParameters[\"maxNumberOfNodes\"],\n bifurcationFunctionArguments=generationParameters[\"bifurcationsParameter\"],\n gaussian=generationParameters[\"gaussian\"])\n \n \n # ['BranchSizes', 'RootName', 'LevelSize', 'NumberOfNodes', 'NumberOfLevels']\n # print treeStats.allStats(generatedObjectTree)[\"NumberOfLevels\"]\n \n tuples = generateNTuples(generatedObjectTree,\n generationParameters[\"numberOfTuples\"],\n verbose=False,\n withChildren=generationParameters[\"withChildren\"],\n withNoise=generationParameters[\"withNoise\"])\n \n \n # trainedTree = obtainTree(tuples)\n allTrees, homeless, parentToDescendantStats, bestParentToDescendantStats = obtainTreeNoise2(tuples,\n trainingParameters[\"threshold\"],\n verbose=False)\n \n purity = purityFromGroundTruth(generatedObjectTree,\n allTrees,\n verbose=False)\n \n return generatedObjectTree, allTrees, purity",
"def _grow_random_tree(self):\n # selected the bootstrap sample and random subspace\n bootstrap_sample = self.df.sample(n=self.bootstrap_size, replace=False)\n random_subspace = random.sample(self.feature_space, self.subspace_size)\n\n # build tree based on the bootstrap sample and random subspace\n # top-level branch depth is set at 1\n logging.info('current training with subspace: {}'.format(json.dumps(random_subspace, indent=4)))\n return self._fork_tree(data=bootstrap_sample, subspace=random_subspace, current_branch_depth=1)",
"def subtree_reconfigure_forest(\r\n self,\r\n num_trees=8,\r\n num_restarts=10,\r\n restart_fraction=0.5,\r\n subtree_maxiter=100,\r\n subtree_size=10,\r\n subtree_search=('random', 'bfs'),\r\n subtree_select=('random',),\r\n subtree_weight_what=('flops', 'size'),\r\n subtree_weight_pwr=(2,),\r\n parallel='auto',\r\n parallel_maxiter_steps=4,\r\n minimize='flops',\r\n progbar=False,\r\n inplace=False,\r\n ):\r\n tree = self if inplace else self.copy()\r\n\r\n # candidate trees\r\n num_keep = max(1, int(num_trees * restart_fraction))\r\n\r\n # how to rank the trees\r\n score = get_score_fn(minimize)\r\n\r\n # set up the initial 'forest' and parallel machinery\r\n pool = parse_parallel_arg(parallel)\r\n if pool is not None:\r\n is_worker = maybe_leave_pool(pool)\r\n # store the trees as futures for the entire process\r\n forest = [pool.scatter(tree)]\r\n maxiter = subtree_maxiter // parallel_maxiter_steps\r\n else:\r\n forest = [tree]\r\n maxiter = subtree_maxiter\r\n\r\n if progbar:\r\n import tqdm\r\n pbar = tqdm.tqdm(total=num_restarts)\r\n pbar.set_description(_describe_tree(tree))\r\n\r\n try:\r\n for _ in range(num_restarts):\r\n\r\n # on the next round take only the best trees\r\n forest = itertools.cycle(forest[:num_keep])\r\n\r\n # select some random configurations\r\n saplings = [{\r\n 'tree': next(forest),\r\n 'maxiter': maxiter,\r\n 'minimize': minimize,\r\n 'subtree_size': subtree_size,\r\n 'subtree_search': random.choice(subtree_search),\r\n 'select': random.choice(subtree_select),\r\n 'weight_pwr': random.choice(subtree_weight_pwr),\r\n 'weight_what': random.choice(subtree_weight_what),\r\n } for _ in range(num_trees)]\r\n\r\n if pool is None:\r\n forest = [_reconfigure_tree(**s) for s in saplings]\r\n res = [{'tree': t, **_get_tree_info(t)} for t in forest]\r\n else:\r\n # submit in smaller steps to saturate processes\r\n for _ in range(parallel_maxiter_steps):\r\n for s in saplings:\r\n s['tree'] = submit(pool, _reconfigure_tree, **s)\r\n\r\n # compute scores remotely then gather\r\n forest_futures = [s['tree'] for s in saplings]\r\n res_futures = [submit(pool, _get_tree_info, t)\r\n for t in forest_futures]\r\n res = [{'tree': tree_future, **res_future.result()}\r\n for tree_future, res_future in\r\n zip(forest_futures, res_futures)]\r\n\r\n # update the order of the new forest\r\n res.sort(key=score)\r\n forest = [r['tree'] for r in res]\r\n\r\n if progbar:\r\n pbar.update()\r\n if pool is None:\r\n d = _describe_tree(forest[0])\r\n else:\r\n d = submit(pool, _describe_tree, forest[0]).result()\r\n pbar.set_description(d)\r\n\r\n finally:\r\n if progbar:\r\n pbar.close()\r\n\r\n if pool is None:\r\n tree.set_state_from(forest[0])\r\n else:\r\n tree.set_state_from(forest[0].result())\r\n maybe_rejoin_pool(is_worker, pool)\r\n\r\n return tree",
"def testPredictionMultipleTree(self):\n with self.cached_session() as session:\n tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()\n text_format.Merge(\n \"\"\"\n trees {\n nodes {\n bucketized_split {\n feature_id: 1\n threshold: 28\n left_id: 1\n right_id: 2\n }\n metadata {\n gain: 7.62\n }\n }\n nodes {\n leaf {\n scalar: 1.14\n }\n }\n nodes {\n leaf {\n scalar: 8.79\n }\n }\n }\n trees {\n nodes {\n bucketized_split {\n feature_id: 1\n threshold: 26\n left_id: 1\n right_id: 2\n }\n }\n nodes {\n bucketized_split {\n feature_id: 0\n threshold: 50\n left_id: 3\n right_id: 4\n }\n }\n nodes {\n leaf {\n scalar: 7.0\n }\n }\n nodes {\n leaf {\n scalar: 5.0\n }\n }\n nodes {\n leaf {\n scalar: 6.0\n }\n }\n }\n trees {\n nodes {\n bucketized_split {\n feature_id: 0\n threshold: 34\n left_id: 1\n right_id: 2\n }\n }\n nodes {\n leaf {\n scalar: -7.0\n }\n }\n nodes {\n leaf {\n scalar: 5.0\n }\n }\n }\n tree_weights: 0.1\n tree_weights: 0.2\n tree_weights: 1.0\n \"\"\", tree_ensemble_config)\n\n # Create existing ensemble with one root split\n tree_ensemble = boosted_trees_ops.TreeEnsemble(\n 'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())\n tree_ensemble_handle = tree_ensemble.resource_handle\n resources.initialize_resources(resources.shared_resources()).run()\n\n feature_0_values = [36, 32]\n feature_1_values = [11, 27]\n\n # Example 1: tree 0: 1.14, tree 1: 5.0, tree 2: 5.0 = >\n # logit = 0.1*1.14+0.2*5.0+1*5\n # Example 2: tree 0: 1.14, tree 1: 7.0, tree 2: -7 = >\n # logit= 0.1*1.14+0.2*7.0-1*7.0\n expected_logits = [[6.114], [-5.486]]\n\n # Prediction should work fine.\n predict_op = boosted_trees_ops.predict(\n tree_ensemble_handle,\n bucketized_features=[feature_0_values, feature_1_values],\n logits_dimension=1)\n\n logits = session.run(predict_op)\n self.assertAllClose(expected_logits, logits)",
"def build_trees(self, model_data):\n self.trees = []\n\n for tree_data in model_data:\n self.trees.append(TreeEstimator(tree_data))",
"def grow_tree(self):\n\n decision_node = self.root\n internal_env = copy.copy(self.env)\n\n while (not decision_node.is_final) and decision_node.visits > 1:\n\n a = self.select(decision_node)\n\n new_random_node = decision_node.next_random_node(a, self._hash_action)\n\n (new_decision_node, r) = self.select_outcome(internal_env, new_random_node)\n\n new_decision_node = self.update_decision_node(new_decision_node, new_random_node, self._hash_space)\n\n new_decision_node.reward = r\n new_random_node.reward = r\n\n decision_node = new_decision_node\n\n decision_node.visits += 1\n cumulative_reward = self.evaluate(internal_env)\n\n while not decision_node.is_root:\n random_node = decision_node.father\n cumulative_reward += random_node.reward\n random_node.cumulative_reward += cumulative_reward\n random_node.visits += 1\n decision_node = random_node.father\n decision_node.visits += 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
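The heart of the `build` loop above is the AdaBoost-style update: the tree weight is `alpha = beta * log((1 - err) / err)`, misclassified events are scaled by `exp(alpha)`, correctly classified events by `exp(-alpha)`, and the sample weights are then renormalised to sum to 1. A minimal sketch of just that reweighting step, independent of the `Tree` class; the `boost_update` name and the toy arrays are illustrative.

```python
import numpy as np

def boost_update(weights, misclassified, beta=1.0):
    """One AdaBoost-style reweighting step.

    weights       -- current per-event weights, normalised to sum to 1
    misclassified -- boolean array, True where the tree got the event wrong
    Returns (alpha, new_weights), where alpha is the weight of this tree.
    """
    err = weights[misclassified].sum()
    # err must stay in (0, 1) for the log to be defined; ideally err < 0.5
    err = np.clip(err, 1e-20, 1.0 - 1e-20)
    alpha = beta * np.log((1.0 - err) / err)
    new_weights = np.where(misclassified,
                           weights * np.exp(alpha),    # weight wrong events up
                           weights * np.exp(-alpha))   # weight right events down
    return alpha, new_weights / new_weights.sum()

# Toy example: 10 events with uniform weights, 3 of them misclassified
w = np.full(10, 0.1)
wrong = np.zeros(10, dtype=bool)
wrong[:3] = True
alpha, w = boost_update(w, wrong)
print(alpha, w.sum())  # positive alpha, weights renormalised to 1.0
```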
Classify a given event. Iterates over each tree in the forest and returns the weighted average of the results. | def classify(self, event):
results = np.zeros(self.ntrees, dtype=float)
for i,dt in enumerate(self.dTrees):
results[i] = self.treeWeights[i]*dt.classify(event)
return np.sum(results)*(1.0/np.sum(self.treeWeights)) | [
"def analyze_tree(dataset,my_tree,column_class=-1):\n #get the relevant starting variables\n labels = dataset[:,column_class]\n N, class_num = np.shape(dataset)\n datapred = np.zeros(N)\n #now loop and get the predictions\n for i in range(N):\n prediction = predict_tree(dataset[i,:], my_tree)\n datapred[i] = prediction\n #now get the accuracy\n check_array = datapred == labels\n return np.sum(check_array)/(np.shape(check_array)[0])",
"def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs",
"def forest_classify(forest, obs):\n votes = {}\n for l in LABELS:\n votes[l] = 0\n for tree in forest:\n l = tree.classify(obs)\n votes[l] += 1\n return max(votes.keys(), key=(lambda key: votes[key]))\n # leader = None\n # for l in LABELS:\n # if leader == None:\n # leader = [l]\n # else:\n # if votes[l]>leader:\n # leader = [l]\n # elif votes[l]==leader:\n # leader.append(l)\n # return random.choice(leader)",
"def classify(series, tree):\n feature = tree[0]\n subtree = tree[1]\n\n answer = series[feature]\n response = subtree[answer]\n\n if type(response) != list: #base case\n return subtree[answer]\n else:\n return classify(series, response) #recursive case",
"def analyze_forest(dataset,forest,column_class=-1):\n #get the relevant starting variables\n labels = dataset[:,column_class]\n N, class_num = np.shape(dataset)\n datapred = np.zeros(N)\n #now loop and get the predictions\n for i in range(N):\n prediction = predict_forest(dataset[i,:], forest)\n datapred[i] = prediction\n #now get the accuracy\n check_array = datapred == labels\n return np.sum(check_array)/(np.shape(check_array)[0])",
"def classify(tree, example):\n while tree.children:\n print(\"Example: \", example)\n print(\"Tree.value:\", tree.value)\n print(\"Tree.children\", tree.children)\n tree = tree.children[example[tree.value]]\n return tree.value",
"def analyze_forest(self, **kwargs):\n # a forest must already exist in order to run analyze_forest\n self._check_prerequisites(prereq_list=['forest_exists'])\n\n # collect list of every branch in every tree in the forest\n branch_list = []\n for tree in self.forest:\n branch_list += self._follow_tree_analyze(current_tree=tree, current_branch_depth=1)\n\n # get unique list of features in branch_list\n # also store this as an instance variable to be saved in forest_with_metadata\n unique_features = set([x['feature'] for x in branch_list])\n self.features_in_forest = list(unique_features)\n\n # set up structure to store feature scores and initiate scores at 0\n feature_scores_raw = dict()\n feature_scores_relative = dict()\n for feature in self.features_in_forest:\n feature_scores_raw[feature] = 0\n feature_scores_relative[feature] = 0\n\n # iterate through branch_list to populate feature_scores with raw scores\n cumulative_raw_score = 0\n for branch in branch_list:\n feature = branch['feature']\n branch_depth = branch['branch_depth']\n\n # basic method to weight higher branches more than lower branches\n raw_score = 1 / branch_depth\n\n feature_scores_raw[feature] += raw_score\n cumulative_raw_score += raw_score\n\n # populate relative_score (relative scores sum to 1)\n for feature in self.features_in_forest:\n feature_scores_relative[feature] = feature_scores_raw[feature] / cumulative_raw_score\n\n # rank by relative score\n feature_scores_ranked = sorted(feature_scores_relative.items(), key=lambda x: x[1], reverse=True)\n\n # for clean display, represent it as a dataframe and print it\n feature_scores_ranked_df = pd.DataFrame(feature_scores_ranked, columns=['feature', 'importance'])\n feature_scores_ranked_df.index = [x + 1 for x in range(len(feature_scores_ranked))]\n feature_scores_ranked_df.index.name = 'rank'\n print('--------\\nAnalysis\\n--------\\n', feature_scores_ranked_df.to_string())\n\n # return data if requested via return_data arg\n if kwargs.get('return_data') == True:\n return feature_scores_relative",
"def classify(self, tree, datapoint):\n\n\t\tif type(tree) == type(\"string\"):\n\t\t\treturn tree\n\t\telse:\n\t\t\ta = list(tree.keys())[0]\n\t\t\tfor i in range(len(self.featureNames)):\n\t\t\t\tif self.featureNames[i]==a:\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\ttry:\n\t\t\t\tt = tree[a][datapoint[i]]\n\t\t\t\treturn self.classify(t,datapoint)\n\t\t\texcept:\n\t\t\t\treturn None",
"def classify(tree, inputs):\n\n # if this is a leaf node, return its value\n if tree in [True, False]:\n return tree\n\n # otherwise find the correct subtree\n attribute, subtree_dict = tree\n\n subtree_key = inputs.get(attribute) # None if input is missing attribute\n\n if subtree_key not in subtree_dict: # if no subtree for key,\n subtree_key = None # we'll use the None subtree\n\n subtree = subtree_dict[subtree_key] # choose the appropriate subtree\n return classify(subtree, inputs) # and use it to classify the input",
"def getMean(tree, i=0, flag=False):\n if tree.right_tree.value is not None:\n tree.right_tree.label_class = getMean(tree.right_tree, i=i+1)\n if tree.left_tree.value is not None:\n tree.left_tree.label_class = getMean(tree.left_tree, i=i+1)\n means = (tree.right_tree.label_class + tree.left_tree.label_class) / 2.0\n print(\"level is {} and value is {}\".format(i, means))\n return means",
"def classify(tree, input):\n\n #if this is a leaf node, return its value\n if tree in [True, False]:\n return tree\n\n #otherwise this tree consists of an attribute to split on\n #and a dict whose keys are values of that attribute\n #and whose values are subtrees to consider next\n attribute, subtree_dict = tree\n\n subtree_key = input.get(attribute) #None if input is missing\n\n if subtree_key not in subtree_dict: #if no subtree for key, use None\n subtree_key = None\n\n subtree = subtree_dict[subtree_key] # choose the appropriate subtree\n return classify(subtree, input) # and use it to classify the input",
"def traverse_tree(self, example):\n current_node = self.root\n while not current_node.is_leaf:\n feature_value = example[self.get_feature_index(current_node.feature)]\n current_node = current_node.children[feature_value]\n\n return current_node.pred",
"def classify(self, features):\n class_labels=[]\n pred_cnt=[{0:0,1:0} for x in range(0,len(features))]\n #creating a voting mechansim where the vote of each tree in the random forest is stored and the majority vote wins.\n for tree in self.trees:\n if tree.root:\n class_labels=[]\n for cnt,feature in enumerate(features):\n class_labels.append(tree.root.decide(feature))\n for ind,label in enumerate(class_labels):\n try:\n pred_cnt[ind][label]+=1\n except Exception as e:\n pass\n for i in range(0,len(pred_cnt)):\n if pred_cnt[i][0]>pred_cnt[i][1]:\n class_labels[i]=0\n else:\n class_labels[i]=1\n return class_labels",
"def classify(self, document, tree):\n if type(tree) is ClassTreeNode:\n return tree.c\n else:\n if tree.word in document.bag_of_words:\n return self.classify(document, tree.children[0])\n else:\n return self.classify(document, tree.children[1])",
"def class_analyzer():\n # Get the list of food words\n foods = []\n with open(os.path.join(TESTS, 'food_words.txt'), 'r') as fi:\n foods = fi.read().splitlines()\n fi.close()\n\n # Get the raw result list for all the files\n raw = {}\n with open(os.path.join(TESTS, 'file_tag_dict.txt'), 'r') as fi:\n raw = json.load(fi)\n fi.close()\n \n # Compile a dictionary of the tag sums for each category\n results = {}\n num_files = {}\n for file in raw:\n # Finds the first character in the name with a digit, then gets the category name using it, since all files named in the same format of category + number.jpg\n number_pos = re.search(\"\\d\", file) # Finds the first number\n category = file[:number_pos.start()]\n\n # Adds the tags into the category sum\n if not category in results:\n results[category] = {}\n for tag in raw[file]:\n if tag in foods:\n if tag in results[category]:\n results[category][tag] += math.exp(raw[file][tag]) - 1 # Natural exponential gives more power to tags with high confidence \n else:\n results[category][tag] = math.exp(raw[file][tag]) - 1\n\n # Stores the number of files for later use\n if category in num_files:\n num_files[category] += 1\n else:\n num_files[category] = 1\n\n # Convert the sum into an average\n for category in results:\n for tag in results[category]:\n sum = results[category][tag]\n results[category][tag] /= num_files[category]\n\n # Output averages to a file\n with open(os.path.join(TESTS, 'average_tags.txt'), 'w') as fi:\n fi.write(json.dumps(results))\n fi.close()",
"def weight(tree):\n return label(tree)",
"def classify(tree, data):\n data = data[:]\n classification = []\n \n for record in data:\n classification.append(get_classification(record, tree))\n\n return classification",
"def ensemble_average_energy(self):",
"def _get_prediction(self, row, tree):\n\n if tree.is_leaf():\n return tree.label\n if row[tree.label] > tree.mean:\n return self._get_prediction(row, tree.right)\n else:\n return self._get_prediction(row, tree.left)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
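The `classify` document above returns a weighted average of the per-tree scores, normalised by the sum of the tree weights. The same computation as a standalone function, with a stub tree class standing in for the trained `Tree` objects; both names below are illustrative.

```python
import numpy as np

class _StubTree:
    """Stand-in for a trained tree: classify returns a signed score."""
    def __init__(self, score):
        self.score = score
    def classify(self, event):
        return self.score

def ensemble_score(trees, tree_weights, event):
    """Weighted-average score of an event over the trees in the ensemble."""
    scores = np.array([t.classify(event) for t in trees], dtype=float)
    weights = np.asarray(tree_weights, dtype=float)
    return float(np.dot(weights, scores) / weights.sum())

# Two strong trees voting signal (+1) and one weak tree voting background (-1)
trees = [_StubTree(1.0), _StubTree(1.0), _StubTree(-1.0)]
print(ensemble_score(trees, [0.8, 0.7, 0.1], event=None))  # positive => signal
```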
Node frontiers generator using breadth-first search. | def bfs_nodes_generator(graph, source, reverse=...):
... | [
"def breadthfirst(self):\n import os\n cwd = os.getcwd()\n os.chdir('/Users/raj/Documents/algorithms_in_python/linked_lists/')\n from linked_collections import LinkedQueue\n os.chdir(cwd) # change to cwd\n if not self.is_empty():\n lq = LinkedQueue()\n lq.enqueue(self.root())\n while not lq.is_empty():\n p = lq.dequeue()\n yield p\n for c in self.children(p):\n lq.enqueue(c)",
"def breadthfirst(self):\n if not self.is_empty():\n fringe = LinkedQueue() # known positions not yet yielded\n fringe.enqueue(self.root()) # starting with the root\n while not fringe.is_empty():\n p = fringe.dequeue() # remove from front of the queue\n yield p # report this position\n for c in self.children(p):\n fringe.enqueue(c) # add children to back of queue",
"def breadth_first(self,start_node):\n visited = []\n output=[]\n q=Queue()\n visited.append(start_node)\n q.enqueue(start_node)\n graph=self.adjacency_list\n\n while len(q):\n current = q.dequeue()\n output.append(current)\n\n for neighbor in graph[current]:\n if neighbor not in visited:\n visited.append(neighbor)\n q.enqueue(neighbor)\n return output\n\n\n\n \n # print(\"Appending new method was succeeded \")",
"def breadth_first_traverse(self) -> Generator:\n assist_queue = deque()\n assist_queue.append(self.root_node)\n while assist_queue:\n current_node = assist_queue.popleft()\n yield current_node\n if current_node.children:\n for child in current_node.children:\n assist_queue.append(child)",
"def breadth_first_for_each(self, cb):\n\n # If I input [5, 3, 10, 4, 9, 11] with 5 to be the starting value\n #\n # 5[0]\n # 3[1] 10[2]\n # 4[3] 9[4] 11[5]\n #\n #\n # Step 1: [5] , []\n # Step 2: [3, 10] , [5]\n # Step 3: [10, 4], [5, 3]\n # Step 4: [4, 9, 11], [5, 3, 10]\n # Step 5: [9, 11], [5, 3, 10, 4]\n # Step 6: [11], [5, 3, 10, 4, 9]\n # Step 7: [], [5, 3, 10, 4, 9, 11]\n\n # Compare the parent node with the number we're searching for\n # Compare parent then add in child, starting from left to right, gotta check to make sure child is there\n # Optional I can use a while loop and just iterate until value equal cb\n\n\n # -- Iterative --\n\n # To keep track of numbers\n queue = []\n\n # Assigning current to the class node itself\n current = self\n\n # Append the first value into the queue [5], []\n queue.append(current.value)\n\n # While len is greater than 0\n while len(queue) > 0:\n # Compare the instance of BinarySearchTree class value attribute to the first index\n # of queue.\n if current.value == queue[0]:\n # If it is true, then we check both \"current\" left and right child nodes to be not None\n # and then append them to queue.\n if current.left is not None:\n queue.append(current.left.value)\n if current.right is not None:\n queue.append(current.right.value)\n # From here, I call the anonymous function \"cb\" with the current value\n cb(current.value)\n # Finally pop the first value of the list of queue\n queue.pop(0)\n else:\n # If Line 61 is false, then I check if the first index of Queue to be less than or\n # greater than of the current.value (Note: this will work on ordinary binary trees except for heaps)\n if queue[0] < current.value:\n # If there's no left node, then it'll reassign current back to self so it can start back at root.\n if not current.left:\n current = self\n # Else, reassign current to the its left child node.\n else:\n current = current.left\n else:\n # If there's no right node, then it'll reassign current back to self so it can start back at root.\n if not current.right:\n current = self\n # Else, reassign current to the its left child node.\n else:\n current = current.right\n\n # -- Recursion --\n # Step 1: append the first value into the queue [5], []\n # if current is None or not Queue:\n # return\n # else:\n # Bread.append(current)\n # # Step 2: Compare the first index to what we're searching for, it it is true, return the value, if it is\n # # false then we'll add in both child nodes. i + 1 left, i + 2 right for arrays, but we can just\n # # check self.left and self.right inside classes\n # if Queue[0] == cb(self.value):\n # return Queue[0]\n # # Step 3: If it doesn't match, remove the current node from Queue [], [5]\n # Queue.remove(current)\n # # Step 4: append the left child [3], [5]\n # Queue.append(self.left)\n # # Step 5: append the right child [3, 10], [5]\n # Queue.append(self.right)\n # # Step 6: Check left side first and repeat from step 2 down to 5 until we find the value or return None\n # if self.left == Bread[0] and not None:\n # self.current = self.left\n # # Remove duplicates from Queue\n # Queue.remove(self.left)\n # return self.breadth_first_for_each(cb)\n\n # elif self.right == Bread[0] and not None:\n # self.current = self.right\n # Queue.remove(self.right)\n # return self.breadth_first_for_each(cb)",
"def breadth_first(self):\n if self._root:\n nodes = [self._root]\n for node in nodes:\n try:\n nodes.append(node.left)\n except(AttributeError):\n pass\n try:\n nodes.append(node.right)\n except(AttributeError):\n pass\n try:\n yield node.val\n except(AttributeError):\n pass",
"def _bfs_nodes(cls, graph, source, size, **kwargs):\n if size < 1:\n return iter(())\n\n return itertools.chain(\n (source,),\n itertools.islice((v for u, v in nx.bfs_edges(graph, source)), size-1)\n )",
"def bfs_edges_generator(graph, source, reverse=...):\n ...",
"def breadth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n q = collections.deque(initial_nodes_iter)\n while q:\n node = q.popleft()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n q.extend(connected_to_functor(node))",
"def breadth_first_list(graph, current=\"a\"):\n queue = []\n queue.append(current)\n while queue:\n current = queue.pop(0)\n print(current)\n for node in graph.get(current):\n queue.append(node)",
"def breadth_first_search(self, departure):\n colors = {}\n for node in self.nodes:\n colors[node] = \"white\"\n \n parents = {}\n fifo = [departure]\n colors[departure] = \"grey\"\n parents[departure] = None\n \n for node in sorted(self.nodes):\n\n while fifo != []:\n in_progress = fifo.pop(0)\n for neighbour \\\n in sorted(self.adjacency_list[in_progress]):\n if colors[neighbour] == \"white\":\n parents[neighbour] = in_progress\n colors[neighbour] = \"grey\"\n fifo.append(neighbour)\n colors[in_progress] = \"black\"\n \n \n \n if colors[node] != \"white\":\n continue\n colors[node] = \"grey\"\n parents[node] = None\n fifo.append(node)\n \n return parents",
"def bfs_iterative(graph,start):\n\tvisited = set()\n\twatched = set()\n\tnodes_queue = [start] # List that helps as queue\n\twatched.add(start)\n\t\n\twhile nodes_queue:\n\t\tcurrent_node = nodes_queue.pop(0)\n\n\t\tprint(\"visiting\",current_node)\n\t\tvisited.add(current_node)\n\t\t\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append(adjacent_node)\n\t\t\t\t#path.add(adjacent_node)",
"def test_b_traversal_from_one_neighbor_loop_gets_two_node_list(empty_weight_graph):\n g = empty_weight_graph\n g.add_edge(3, 5, 5)\n g.add_edge(5, 3, 4)\n assert g.breadth_first_traversal(3) == [3, 5]",
"def breadth_first_traversal(graph_node, vertex_list):\n nodes_queue = deque()\n nodes_queue.append(graph_node)\n\n while len(nodes_queue) > 0:\n node = nodes_queue.popleft()\n\n if node.data not in vertex_list:\n vertex_list.append(node.data)\n\n for neighbor_node in node.neighbors:\n nodes_queue.append(neighbor_node)",
"def my_bfs_edges_m(G, source):\n \"\"\" This function is essentially identical to that one at \n http://networkx.github.com/documentation/latest/_modules/networkx/algorithms/traversal/breadth_first_search.html#bfs_edges \n There are two exceptiioins: \"\"\"\n \n \"\"\" 1.) In the original function provides a set called visited where all the\n nodes are stored which were, as stated, already visited. This is not needed\n here because the information wheter and when a node was visited is stored\n at the node level.\n 2.) The selection of nodes for bfs differs. A node is selected if:\n a) the node has no doi (date of infection), i.e. the node has never\n been selected before\n b) the node is selected again, when there is - in comparison to the old\n date of infection - an earlier date. DoC is the date of contact and if\n DoC < doi of the node, then the node is selected\n c) the node is selected if the infector of this node has changed\n d) The node will only be selected if the parent node has an doi, and if\n this date of infection is before the actual contact. This guarantees\n that the node can only be infected by an infected parent node \"\"\"\n\n stack = [(source, iter(G[source]))]\n while stack:\n parent, children = stack[0]\n try:\n child = next(children)\n e = (parent, child)\n sDoC = G.get_edge_data(*e)[\"trade_date\"]\n DoC = datetime.strptime(sDoC.title(), \"%d%b%Y\").date()\n \"\"\" this if clause is diffent from the original bfs search\n function. Everything else is identical \"\"\"\n if ('doi' not in G.node[child] or DoC < G.node[child]['doi'] or (DoC == G.node[child]['doi'] and G.node[child]['Infector'] != G.node[parent]['Infector'])) and (DoC > G.node[parent]['doi']):\n G.node[child]['k'] = G.node[parent]['k']+1\n G.node[child]['doi'] = DoC\n G.node[child]['ttc'] = G.node[parent]['ttc'] + (DoC - G.node[parent]['doi'])\n G.node[child]['Infector'] = G.node[parent]['Infector']\n yield parent, child\n stack.append((child, iter(G[child])))\n except StopIteration:\n stack.pop(0)",
"def topological_nodes_generator(graph, reverse=...):\n ...",
"def bft(self, starting_vertex):\n # Create a q and enqueue starting vertex\n qq = Queue()\n # Put the starting point in that\n qq.enqueue([starting_vertex])\n # Create a set of traversed vertices\n visited = set()\n # While queue is not empty:\n while qq.size() > 0:\n # dequeue/pop the first vertex\n path = qq.dequeue()\n # if not visited\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n print(path[-1])\n # mark as visited\n visited.add(path[-1])\n # enqueue all neightbors\n for next_vert in self.get_neighbors(path[-1]):\n new_path = list(path)\n new_path.append(next_vert)\n qq.enqueue(new_path)",
"def test_b_traversal_from_neighborless_node_gets_one_node_list(node_weight_graph):\n assert node_weight_graph.breadth_first_traversal(2) == [2]",
"def breadth_first_tree_search(problem):\r\n\r\n frontier = deque([Node(problem.initial)]) # FIFO queue\r\n\r\n while frontier:\r\n #print(frontier)\r\n node = frontier.popleft()\r\n if problem.goal_test(node.state):\r\n return node\r\n frontier.extend(node.expand(problem))\r\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Edge frontiers generator using breadth-first search. | def bfs_edges_generator(graph, source, reverse=...):
... | [
"def bfs_nodes_generator(graph, source, reverse=...):\n ...",
"def breadthfirst(self):\n import os\n cwd = os.getcwd()\n os.chdir('/Users/raj/Documents/algorithms_in_python/linked_lists/')\n from linked_collections import LinkedQueue\n os.chdir(cwd) # change to cwd\n if not self.is_empty():\n lq = LinkedQueue()\n lq.enqueue(self.root())\n while not lq.is_empty():\n p = lq.dequeue()\n yield p\n for c in self.children(p):\n lq.enqueue(c)",
"def breadthfirst(self):\n if not self.is_empty():\n fringe = LinkedQueue() # known positions not yet yielded\n fringe.enqueue(self.root()) # starting with the root\n while not fringe.is_empty():\n p = fringe.dequeue() # remove from front of the queue\n yield p # report this position\n for c in self.children(p):\n fringe.enqueue(c) # add children to back of queue",
"def dfs_edges_generator(graph, source, reverse=...):\n ...",
"def breadth_first(self,start_node):\n visited = []\n output=[]\n q=Queue()\n visited.append(start_node)\n q.enqueue(start_node)\n graph=self.adjacency_list\n\n while len(q):\n current = q.dequeue()\n output.append(current)\n\n for neighbor in graph[current]:\n if neighbor not in visited:\n visited.append(neighbor)\n q.enqueue(neighbor)\n return output\n\n\n\n \n # print(\"Appending new method was succeeded \")",
"def bft(self, starting_vertex):\n # create an empty queue and enqueue the starting vertex ID\n queue = Queue()\n queue.enqueue(starting_vertex)\n # create an emtpy Set to stoe the visited vertices\n visited = set()\n # while the queue is not empty ...\n while queue.size() > 0:\n # dequeue the first vertex\n vert = queue.dequeue()\n # if that vertex has not been visited..\n if vert not in visited:\n # mark it as visited\n visited.add(vert)\n print(vert)\n # then add all of its neighbors to the back of the queue\n for neighbor in self.vertices[vert]: # self.get_neighbors(vert)\n queue.enqueue(neighbor)",
"def my_bfs_edges_m(G, source):\n \"\"\" This function is essentially identical to that one at \n http://networkx.github.com/documentation/latest/_modules/networkx/algorithms/traversal/breadth_first_search.html#bfs_edges \n There are two exceptiioins: \"\"\"\n \n \"\"\" 1.) In the original function provides a set called visited where all the\n nodes are stored which were, as stated, already visited. This is not needed\n here because the information wheter and when a node was visited is stored\n at the node level.\n 2.) The selection of nodes for bfs differs. A node is selected if:\n a) the node has no doi (date of infection), i.e. the node has never\n been selected before\n b) the node is selected again, when there is - in comparison to the old\n date of infection - an earlier date. DoC is the date of contact and if\n DoC < doi of the node, then the node is selected\n c) the node is selected if the infector of this node has changed\n d) The node will only be selected if the parent node has an doi, and if\n this date of infection is before the actual contact. This guarantees\n that the node can only be infected by an infected parent node \"\"\"\n\n stack = [(source, iter(G[source]))]\n while stack:\n parent, children = stack[0]\n try:\n child = next(children)\n e = (parent, child)\n sDoC = G.get_edge_data(*e)[\"trade_date\"]\n DoC = datetime.strptime(sDoC.title(), \"%d%b%Y\").date()\n \"\"\" this if clause is diffent from the original bfs search\n function. Everything else is identical \"\"\"\n if ('doi' not in G.node[child] or DoC < G.node[child]['doi'] or (DoC == G.node[child]['doi'] and G.node[child]['Infector'] != G.node[parent]['Infector'])) and (DoC > G.node[parent]['doi']):\n G.node[child]['k'] = G.node[parent]['k']+1\n G.node[child]['doi'] = DoC\n G.node[child]['ttc'] = G.node[parent]['ttc'] + (DoC - G.node[parent]['doi'])\n G.node[child]['Infector'] = G.node[parent]['Infector']\n yield parent, child\n stack.append((child, iter(G[child])))\n except StopIteration:\n stack.pop(0)",
"def bft(self, starting_vertex):\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue \n q.enqueue(starting_vertex) # set enqueue with the starting vertex\n\n while q.size() > 0: # loop if the size is greater than 0\n v = q.dequeue() # dequeue and store \n\n if v not in visited: # if v has not in the set \n visited.add(v) # add v to the set \n print(v) \n # Then add all of its neighbors to the back of the queue\n for neighbor in self.vertices[v]: # loop through neighbors \n q.enqueue(neighbor) # add each neighbor to the end of the que ",
"def _bfs_nodes(cls, graph, source, size, **kwargs):\n if size < 1:\n return iter(())\n\n return itertools.chain(\n (source,),\n itertools.islice((v for u, v in nx.bfs_edges(graph, source)), size-1)\n )",
"def bfs_iterative(graph,start):\n\tvisited = set()\n\twatched = set()\n\tnodes_queue = [start] # List that helps as queue\n\twatched.add(start)\n\t\n\twhile nodes_queue:\n\t\tcurrent_node = nodes_queue.pop(0)\n\n\t\tprint(\"visiting\",current_node)\n\t\tvisited.add(current_node)\n\t\t\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append(adjacent_node)\n\t\t\t\t#path.add(adjacent_node)",
"def bft(self, starting_vertex):\n # Create a q and enqueue starting vertex\n qq = Queue()\n # Put the starting point in that\n qq.enqueue([starting_vertex])\n # Create a set of traversed vertices\n visited = set()\n # While queue is not empty:\n while qq.size() > 0:\n # dequeue/pop the first vertex\n path = qq.dequeue()\n # if not visited\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n print(path[-1])\n # mark as visited\n visited.add(path[-1])\n # enqueue all neightbors\n for next_vert in self.get_neighbors(path[-1]):\n new_path = list(path)\n new_path.append(next_vert)\n qq.enqueue(new_path)",
"def test_b_traversal_from_one_neighbor_loop_gets_two_node_list(empty_weight_graph):\n g = empty_weight_graph\n g.add_edge(3, 5, 5)\n g.add_edge(5, 3, 4)\n assert g.breadth_first_traversal(3) == [3, 5]",
"def edges(self):\n\t\tleftback = self.center + self.left*self.wr - self.forward*self.hr\n\t\tleftfront = self.center + self.left*self.wr + self.forward*self.hr\n\t\trightfront = self.center - self.left*self.wr + self.forward*self.hr\n\t\trightback = self.center - self.left*self.wr - self.forward*self.hr\n\t\tyield (leftback, leftfront)\n\t\tyield (leftfront, rightfront)\n\t\tyield (rightfront, rightback)\n\t\tyield (rightback, leftback)",
"def BreadthFirstSearch(graph, source):\r\n \r\n # Dictionary dataInfo will be used to store the information about each vertex. (Ancestors, descendants, distance from source, and color)\r\n dataInfo = {} \r\n \r\n # List queue will be used to store the vertices currently in the queue, these vertices will all be gray.\r\n queue = []\r\n \r\n # Loops through the vertices in the graph, creates a key in the dictionary for each vertice, with default values.\r\n for vertex in graph[\"V\"]:\r\n dataInfo[str(vertex)] = {\"ancestor\": \"\", \"descendants\": [], \"distance\": \"\", \"color\": \"white\"}\r\n \r\n # At key source (variable) in dataInfo dictionary, key ancestor is set to have no value other than \"NA\" (as it is the starting point), and distance to 0 (as it will always be zero as it is the source).\r\n dataInfo[str(source)][\"ancestor\"] = \"NA\"\r\n dataInfo[str(source)][\"distance\"] = 0\r\n\r\n def symmetricVertex(edge, otherVertex):\r\n \r\n \"\"\"\r\n Function symmetricVertex takes arguments edge, a list of an edge from the graph dictionary, and otherVertex, an integer that is the other vertex in the edge with the sourceVertex. The function will return the point other than the otherVertex, and will be used to find adjacent vertices relative to the current vertex in the queue. Example: edge ([1, 2]), otherVertex (1), the function will return 2.\r\n \"\"\"\r\n \r\n for num in edge:\r\n if num != otherVertex:\r\n return num\r\n \r\n\r\n def pathFinder(graph, sourceVertex):\r\n \r\n \"\"\"\r\n Function pathFinder takes arguments graph, a dictionary, with the same keys for the edges and the vertices and sourceVertex, an integer. The function will loop through all of the edges in the graph and find adjacent vertices relative to the current sourceVertex. sourceVertex values will be in the queue. The function will edit dictionaries and lists, not return any value.\r\n \"\"\"\r\n \r\n # List removeEdges will be used to store the edges that will be removed from the graph dictionary after the loop ends. Makes the code more efficient, as you don't want to loop through a million vertices every time, now do you?\r\n removeEdges = []\r\n \r\n # Loop through edges in the graph, will be used to find adjacent vertices.\r\n for edge in graph[\"E\"]:\r\n \r\n # If the sourceVertex is in the edge and the edge is not discovered yet, then edit and change values in the main dictionary, dataInfo.\r\n if (sourceVertex in edge) and (dataInfo[str(symmetricVertex(edge, sourceVertex))] != \"gray\"):\r\n otherVertex = symmetricVertex(edge, sourceVertex)\r\n \r\n # Adds variable otherVertex to the descendants of the sourceVertex.\r\n dataInfo[str(sourceVertex)][\"descendants\"].append(otherVertex)\r\n \r\n # Updates key(otherVertex) to correct values. Ancestor is always the sourceVertex, the distance is always the distance of sourceVertex incremented by one, and the color is updated to gray as it is added to the queue.\r\n dataInfo[str(otherVertex)] = {\"ancestor\": sourceVertex, \"descendants\": [], \"distance\": (dataInfo[str(sourceVertex)][\"distance\"] + 1), \"color\": \"gray\"}\r\n \r\n # Edge includes two discovered edges, so it will be removed to stop redundancy. 
It is added to the removeEdges list.\r\n removeEdges.append(edge)\r\n \r\n # Appends the discovered vertex to the queue.\r\n queue.append(otherVertex)\r\n \r\n # After the loop ends, the edges that contain the source vertex have been exhausted, so the color is updated to black.\r\n dataInfo[str(sourceVertex)][\"color\"] = \"black\" \r\n \r\n # If the sourceVertex is in the queue, it is removed, as all of the edges containing it have been exhausted.\r\n if sourceVertex in queue:\r\n queue.remove(sourceVertex)\r\n \r\n # Loop through the edges in the removeEdges list, each edge will be removed.\r\n for edge in removeEdges:\r\n graph[\"E\"].remove(edge)\r\n \r\n # The function pathFinder is called on the graph and the source vertex, which sets up the queue.\r\n pathFinder(graph, source)\r\n \r\n # While the list queue contains values, the pathFinder function is called on the graph, and the queue value at index 0.\r\n while len(queue) != 0:\r\n pathFinder(graph, queue[0])\r\n \r\n # Loop below is for formatting of the data, makes it easier to read.\r\n for key in dataInfo:\r\n print \"Vertex: \" + key + \", Distance: \" + str(dataInfo[key][\"distance\"]) + \", Ancestor: \" + str(dataInfo[key][\"ancestor\"]) + \", Descendants: \" + str(dataInfo[key][\"descendants\"]) + \", Color: \" + str(dataInfo[key][\"color\"]) + \".\" \r\n \r\n # Returns dictionary dataInfo.\r\n return dataInfo",
"def bft(self, starting_vertex):\n pass # TODO\n\n visited = {i: 0 for i in self.vertices}\n\n my_queue = Queue()\n my_queue.enqueue(starting_vertex)\n while my_queue.size() > 0:\n\n current_node = my_queue.dequeue()\n\n if visited[current_node] == 0:\n print(current_node)\n\n visited[current_node] = 1\n for node in self.vertices[current_node]:\n my_queue.enqueue(node)",
"def breadth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n q = collections.deque(initial_nodes_iter)\n while q:\n node = q.popleft()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n q.extend(connected_to_functor(node))",
"def bfs(graph,start):\n #keeps track of nodes to be visited\n queue = []\n #keeps track of nodes already visited\n explored = []\n queue.append(start)\n while queue:\n #remove first node from queue\n curr_node = queue.pop(0)\n #check if node is visited\n if curr_node not in explored:\n explored.append(curr_node)\n adjacent_nodes = graph[curr_node]\n #add adjacent nodes to queue\n for i in adjacent_nodes:\n queue.append(i)\n return explored",
"def breadth_first_traversal(graph_node, vertex_list):\n nodes_queue = deque()\n nodes_queue.append(graph_node)\n\n while len(nodes_queue) > 0:\n node = nodes_queue.popleft()\n\n if node.data not in vertex_list:\n vertex_list.append(node.data)\n\n for neighbor_node in node.neighbors:\n nodes_queue.append(neighbor_node)",
"def breadth_first_traverse(self) -> Generator:\n assist_queue = deque()\n assist_queue.append(self.root_node)\n while assist_queue:\n current_node = assist_queue.popleft()\n yield current_node\n if current_node.children:\n for child in current_node.children:\n assist_queue.append(child)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
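The bfs_edges_generator document above is only a stub (its body is elided), so as a rough, non-authoritative illustration of what an edge-frontier BFS generator can look like, here is a minimal self-contained sketch over a plain adjacency dict. The function name bfs_edge_frontiers and the dict-of-lists graph representation are assumptions made for this example only; they are not taken from the stub or from any particular library.

def bfs_edge_frontiers(adj, source):
    # Illustrative sketch (assumed behavior): yield one frontier per BFS
    # level, where each frontier is the list of (parent, child) edges
    # discovered while expanding that level.
    visited = {source}
    frontier = [source]
    while frontier:
        edges, next_frontier = [], []
        for u in frontier:
            for v in adj.get(u, ()):
                if v not in visited:
                    visited.add(v)
                    edges.append((u, v))
                    next_frontier.append(v)
        if edges:
            yield edges
        frontier = next_frontier

# Example usage on a small directed graph:
adj = {0: [1, 2], 1: [3], 2: [3], 3: []}
print(list(bfs_edge_frontiers(adj, 0)))  # [[(0, 1), (0, 2)], [(1, 3)]]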
Node frontiers generator using topological traversal. | def topological_nodes_generator(graph, reverse=...):
... | [
"def bfs_nodes_generator(graph, source, reverse=...):\n ...",
"def _get_front_nodes(self):\n ret = []\n node = self.root.getprevious()\n while node is not None:\n ret.append(node)\n node = node.getprevious()\n return ret",
"def breadth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n q = collections.deque(initial_nodes_iter)\n while q:\n node = q.popleft()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n q.extend(connected_to_functor(node))",
"def depth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n stack = list(initial_nodes_iter)\n while stack:\n node = stack.pop()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n stack.extend(connected_to_functor(node))",
"def dfs_edges_generator(graph, source, reverse=...):\n ...",
"def frontier_depth_iterative(self):\n frontier = []\n interior = [ self ]\n counter = 0\n while len(interior) > 0:\n node = interior.pop()\n if node.is_leaf():\n frontier.append( (node.value, counter) )\n interior.pop()\n continue\n counter += 1\n if node.left:\n interior.append(node.left)\n if node.right:\n interior.append(node.right)\n\n return frontier",
"def nodes_iter(topology):\n return topology.nodes_iter()",
"def nodes_iter(self) -> Generator:\n for n in self.graph.nodes(data=True):\n yield n",
"def _depth_first_iterate(graph, connected_to_functors, initial_nodes_iter):\n stack = list(initial_nodes_iter)\n while stack:\n node = stack.pop()\n node_attrs = graph.node[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n stack.extend(connected_to_functor(node))",
"def __iter__(self):\n # set current node to front node\n current = self.front\n # while current != None\n while current:\n # send out current node's data\n yield current.data\n # move to next node\n current = current.prior",
"def predecessors(self, node: Node):\n return iter(self.get_node(node_id) for node_id in node.in_nodes_ids)",
"def topological_order(self):\n count = {}\n children = {}\n for node in self.nodes():\n count[node] = 0\n\n for node in self.nodes():\n _children = sorted(node.children(),\n key=lambda j: j._node_index, reverse=True)\n children[node] = _children\n for successor in _children:\n count[successor] += 1\n\n ready = [node for node in self.nodes() if count[node] == 0]\n ready = sorted(ready, key=lambda j: j._node_index, reverse=True)\n while ready:\n node = ready.pop(-1)\n yield node\n for successor in children[node]:\n count[successor] -= 1\n if count[successor] == 0:\n ready.append(successor)",
"def nodes(self):\n next_node = self.head\n while next_node is not None:\n yield next_node\n next_node = next_node.link",
"def build_node_chains(self):\n\n self.node_chain_lookup = -np.ones(self.tri.npoints, dtype=np.int)\n self.node_chain_list = []\n\n node_chain_idx = 1\n\n self.node_chain_list.append([]) # placeholder for any isolated base-level nodes\n\n for node1 in self.node_high_to_low: \n if (self.node_chain_lookup[node1] != -1): \n continue\n\n junction, this_chain = self._node_walk_downhill(node1)\n\n if len(this_chain) > 1:\n self.node_chain_list.append(this_chain)\n \n self.node_chain_lookup[this_chain[0:-1]] = node_chain_idx \n if self.node_chain_lookup[this_chain[-1]] == -1:\n self.node_chain_lookup[this_chain[-1]] = node_chain_idx\n\n node_chain_idx += 1\n\n else: \n self.node_chain_list[0].append(this_chain[0])\n self.node_chain_lookup[this_chain[0]] = 0\n\n return",
"def forwarding(predecessor, source):\r\n pass # TODO\r",
"def walk_preorder(self):\n yield self\n for child in self.get_children():\n for descendant in child.walk_preorder():\n yield descendant",
"def get_predecessors(self, node): \n preds = []\n child_state = self.node_to_state(node)\n for it in self.predecessors:\n parent_node = (node[0] + it[0], node[1] + it[1])\n parent_state = self.node_to_state(parent_node)\n edge = self.interpolate(child_state, parent_state, self.distance_bw_states(child_state, parent_state)/self.path_resolution)\n preds.append([parent_node, edge])\n return preds",
"def dominance_frontier(self):\n\t\tcount = ctypes.c_ulonglong()\n\t\tblocks = core.BNGetBasicBlockDominanceFrontier(self.handle, count, False)\n\t\tresult = []\n\t\tfor i in range(0, count.value):\n\t\t\tresult.append(self._create_instance(core.BNNewBasicBlockReference(blocks[i]), self.view))\n\t\tcore.BNFreeBasicBlockList(blocks, count.value)\n\t\treturn result",
"def PreOrderTraversal(tree_node: NodeType) -> Iterator[NodeType]:\n stack = [tree_node]\n while stack:\n node = stack.pop()\n yield node\n stack.extend(node.children[::-1])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
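Similarly, the topological_nodes_generator stub above has an elided body. A plausible behavior for a topological node-frontier generator is a Kahn-style traversal that yields, at each step, the nodes whose remaining in-degree has dropped to zero. The sketch below is only an illustration under that assumption; the name topological_node_frontiers and the edge-list input format are invented for this example.

from collections import defaultdict

def topological_node_frontiers(edges):
    # Kahn-style traversal of a DAG (assumed interpretation): yield the
    # current set of zero in-degree nodes, remove them, and repeat.
    indeg = defaultdict(int)
    succ = defaultdict(list)
    nodes = set()
    for u, v in edges:
        succ[u].append(v)
        indeg[v] += 1
        nodes.update((u, v))
    frontier = sorted(n for n in nodes if indeg[n] == 0)
    while frontier:
        yield frontier
        nxt = []
        for u in frontier:
            for v in succ[u]:
                indeg[v] -= 1
                if indeg[v] == 0:
                    nxt.append(v)
        frontier = nxt

# Example usage: two roots feeding a shared child.
print(list(topological_node_frontiers([(0, 2), (1, 2), (2, 3)])))
# [[0, 1], [2], [3]]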
Edge frontiers generator using depth-first search (DFS). Multiple source nodes can be specified to start the DFS traversal. One needs to make sure that each source node belongs to a different connected component, so the frontiers can be easily merged. Otherwise, the behavior is undefined. | def dfs_edges_generator(graph, source, reverse=...):
... | [
"def bfs_nodes_generator(graph, source, reverse=...):\n ...",
"def breadth_first_search(self, source: int) -> list:\n # Time complexity: O(num_vertices + num_edges), aka O(V+E)\n \n # Note: This initialization is a must, since other methods may change defaults\n self.color = [Color.WHITE] * len(self.adjlist)\n # For source vertex and all undiscovered vertices, their parents are None\n self.parent = [None] * len(self.adjlist)\n # Distance (ie total num of edges) from source to the vertex\n self.distance = [None] * len(self.adjlist)\n \n # Source is discovered, but not all its neighbors are discovered, so gray\n self.color[source] = Color.GRAY\n self.distance[source] = 0\n queue = [] # Use-and-discard FIFO queue\n traversal = [] # Record the BFS traversal route\n queue.append(source)\n traversal.append(source)\n while queue:\n # We use queue as FIFO here\n u = queue.pop(0)\n for v in self.adjlist[u]:\n if self.color[v] == Color.WHITE:\n # White means undiscovered, so discover it\n self.color[v] = Color.GRAY\n self.distance[v] = self.distance[u] + 1\n self.parent[v] = u\n queue.append(v)\n traversal.append(v)\n # When u's adjlist is exhausted, turn u to black\n self.color[u] = Color.BLACK\n return traversal",
"def bfs_edges_generator(graph, source, reverse=...):\n ...",
"def bfs(self, source):\n \n if self.is_empty():\n raise Exception(\"Cannot perform BFS on an empty graph!\")\n \n if not source in self.verteces():\n raise Exception(\"Can't find vertex:\" + str(source))\n \n visit = Visit() \n \n queue = Queue() \n queue.put(source)\n\n while not queue.empty():\n vertex = queue.get()\n \n if not visit.is_discovered(vertex):\n # we just discovered the node\n visit.log(vertex).discovery_time = visit.last_time() + 1\n \n for neighbor in self.adj(vertex): \n neighbor_log = visit.log(neighbor)\n if neighbor_log.parent == None and neighbor != source:\n neighbor_log.parent = vertex\n queue.put(neighbor) \n \n return visit",
"def _bfs_nodes(cls, graph, source, size, **kwargs):\n if size < 1:\n return iter(())\n\n return itertools.chain(\n (source,),\n itertools.islice((v for u, v in nx.bfs_edges(graph, source)), size-1)\n )",
"def BreadthFirstSearch(graph, source):\r\n \r\n # Dictionary dataInfo will be used to store the information about each vertex. (Ancestors, descendants, distance from source, and color)\r\n dataInfo = {} \r\n \r\n # List queue will be used to store the vertices currently in the queue, these vertices will all be gray.\r\n queue = []\r\n \r\n # Loops through the vertices in the graph, creates a key in the dictionary for each vertice, with default values.\r\n for vertex in graph[\"V\"]:\r\n dataInfo[str(vertex)] = {\"ancestor\": \"\", \"descendants\": [], \"distance\": \"\", \"color\": \"white\"}\r\n \r\n # At key source (variable) in dataInfo dictionary, key ancestor is set to have no value other than \"NA\" (as it is the starting point), and distance to 0 (as it will always be zero as it is the source).\r\n dataInfo[str(source)][\"ancestor\"] = \"NA\"\r\n dataInfo[str(source)][\"distance\"] = 0\r\n\r\n def symmetricVertex(edge, otherVertex):\r\n \r\n \"\"\"\r\n Function symmetricVertex takes arguments edge, a list of an edge from the graph dictionary, and otherVertex, an integer that is the other vertex in the edge with the sourceVertex. The function will return the point other than the otherVertex, and will be used to find adjacent vertices relative to the current vertex in the queue. Example: edge ([1, 2]), otherVertex (1), the function will return 2.\r\n \"\"\"\r\n \r\n for num in edge:\r\n if num != otherVertex:\r\n return num\r\n \r\n\r\n def pathFinder(graph, sourceVertex):\r\n \r\n \"\"\"\r\n Function pathFinder takes arguments graph, a dictionary, with the same keys for the edges and the vertices and sourceVertex, an integer. The function will loop through all of the edges in the graph and find adjacent vertices relative to the current sourceVertex. sourceVertex values will be in the queue. The function will edit dictionaries and lists, not return any value.\r\n \"\"\"\r\n \r\n # List removeEdges will be used to store the edges that will be removed from the graph dictionary after the loop ends. Makes the code more efficient, as you don't want to loop through a million vertices every time, now do you?\r\n removeEdges = []\r\n \r\n # Loop through edges in the graph, will be used to find adjacent vertices.\r\n for edge in graph[\"E\"]:\r\n \r\n # If the sourceVertex is in the edge and the edge is not discovered yet, then edit and change values in the main dictionary, dataInfo.\r\n if (sourceVertex in edge) and (dataInfo[str(symmetricVertex(edge, sourceVertex))] != \"gray\"):\r\n otherVertex = symmetricVertex(edge, sourceVertex)\r\n \r\n # Adds variable otherVertex to the descendants of the sourceVertex.\r\n dataInfo[str(sourceVertex)][\"descendants\"].append(otherVertex)\r\n \r\n # Updates key(otherVertex) to correct values. Ancestor is always the sourceVertex, the distance is always the distance of sourceVertex incremented by one, and the color is updated to gray as it is added to the queue.\r\n dataInfo[str(otherVertex)] = {\"ancestor\": sourceVertex, \"descendants\": [], \"distance\": (dataInfo[str(sourceVertex)][\"distance\"] + 1), \"color\": \"gray\"}\r\n \r\n # Edge includes two discovered edges, so it will be removed to stop redundancy. 
It is added to the removeEdges list.\r\n removeEdges.append(edge)\r\n \r\n # Appends the discovered vertex to the queue.\r\n queue.append(otherVertex)\r\n \r\n # After the loop ends, the edges that contain the source vertex have been exhausted, so the color is updated to black.\r\n dataInfo[str(sourceVertex)][\"color\"] = \"black\" \r\n \r\n # If the sourceVertex is in the queue, it is removed, as all of the edges containing it have been exhausted.\r\n if sourceVertex in queue:\r\n queue.remove(sourceVertex)\r\n \r\n # Loop through the edges in the removeEdges list, each edge will be removed.\r\n for edge in removeEdges:\r\n graph[\"E\"].remove(edge)\r\n \r\n # The function pathFinder is called on the graph and the source vertex, which sets up the queue.\r\n pathFinder(graph, source)\r\n \r\n # While the list queue contains values, the pathFinder function is called on the graph, and the queue value at index 0.\r\n while len(queue) != 0:\r\n pathFinder(graph, queue[0])\r\n \r\n # Loop below is for formatting of the data, makes it easier to read.\r\n for key in dataInfo:\r\n print \"Vertex: \" + key + \", Distance: \" + str(dataInfo[key][\"distance\"]) + \", Ancestor: \" + str(dataInfo[key][\"ancestor\"]) + \", Descendants: \" + str(dataInfo[key][\"descendants\"]) + \", Color: \" + str(dataInfo[key][\"color\"]) + \".\" \r\n \r\n # Returns dictionary dataInfo.\r\n return dataInfo",
"def depth_first_traversal(self, start, end, visit, seen=None):\n\n if seen is None:\n seen = {start}\n\n vert = self.vertices[start]\n visit(vert)\n\n for neighbor in vert.get_neighbors():\n if neighbor not in seen:\n if neighbor is end:\n return visit(self.vertices[end])\n else:\n seen.add(neighbor)\n return self.depth_first_traversal(neighbor, end, visit, seen)",
"def test_limited_DFS(self):\n g = self.get_default_graph()\n g.dfs(vertex_set=[1])\n g.dfs(vertex_set=[1,3])\n g.dfs(vertex_set=[1,2])",
"def get_dfs_paths(self, starting_vertex, destination_vertex, path=None):\n if path is None:\n path = [starting_vertex]\n if starting_vertex == destination_vertex:\n yield path\n if starting_vertex in self.vertices:\n for v in self.vertices[starting_vertex] - set(path):\n yield from self.get_dfs_paths(v, destination_vertex, path + [v])",
"def depth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n stack = list(initial_nodes_iter)\n while stack:\n node = stack.pop()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n stack.extend(connected_to_functor(node))",
"def _dfs_cycle_forest(G, root=None):\n # Create a directed graph from the depth-first search tree with\n # root node `root` in which tree edges are directed toward the\n # root and nontree edges are directed away from the root. For\n # each node with an incident nontree edge, this creates a\n # directed cycle starting with the nontree edge and returning to\n # that node.\n #\n # The `parent` node attribute stores the parent of each node in\n # the DFS tree. The `nontree` edge attribute indicates whether\n # the edge is a tree edge or a nontree edge.\n #\n # We also store the order of the nodes found in the depth-first\n # search in the `nodes` list.\n H = nx.DiGraph()\n nodes = []\n for u, v, d in nx.dfs_labeled_edges(G, source=root):\n if d == 'forward':\n # `dfs_labeled_edges()` yields (root, root, 'forward')\n # if it is beginning the search on a new connected\n # component.\n if u == v:\n H.add_node(v, parent=None)\n nodes.append(v)\n else:\n H.add_node(v, parent=u)\n H.add_edge(v, u, nontree=False)\n nodes.append(v)\n # `dfs_labeled_edges` considers nontree edges in both\n # orientations, so we need to not add the edge if it its\n # other orientation has been added.\n elif d == 'nontree' and v not in H[u]:\n H.add_edge(v, u, nontree=True)\n else:\n # Do nothing on 'reverse' edges; we only care about\n # forward and nontree edges.\n pass\n return H, nodes",
"def dfs_nodes_attributes_iter(G, source=None, attribute=None, upstream=True,\n null_val=None):\n if source is None:\n # produce edges for all components\n nodes = G\n else:\n # produce edges for components with source\n nodes = [source]\n visited = set()\n for start in nodes:\n if start in visited:\n continue\n visited.add(start)\n if upstream:\n stack = [(iter(G.pred[start]), start)]\n else:\n stack = [(iter(G.succ[start]), start)]\n while stack:\n parents, child = stack[-1]\n try:\n parent = next(parents)\n node = G.nodes[parent]\n if attribute in node and node[attribute] is not null_val:\n # by not appending to the search stack, this reach is no\n # longer traversed\n yield parent, node\n else:\n # keep searching\n if upstream:\n stack.append((iter(G.pred[parent]), parent))\n else:\n stack.append((iter(G.succ[parent]), parent))\n\n if parent not in visited:\n visited.add(parent)\n\n except StopIteration:\n # print edge['facilityid'], 'terminal', [i[1] for i in stack]\n stack.pop()",
"def dfs(self, starting_vertex, destination_vertex):\n s = Stack()\n path = [starting_vertex]\n s.push(path)\n visited = set()\n while s.size() > 0:\n nextPath = s.pop()\n lastNode = nextPath[-1]\n if lastNode not in visited:\n if lastNode == destination_vertex:\n #print(\"dfs path found\")\n return nextPath\n visited.add(lastNode)\n for i in self.vertices[lastNode]:\n newPath = list(nextPath)\n newPath.append(i)\n s.push(newPath)",
"def dfs(visited: list, graph: AdjList, node: int):\n if node not in visited:\n visited.append(node)\n for neighbour in graph[node]:\n dfs(visited, graph, neighbour)",
"def my_bfs_edges_m(G, source):\n \"\"\" This function is essentially identical to that one at \n http://networkx.github.com/documentation/latest/_modules/networkx/algorithms/traversal/breadth_first_search.html#bfs_edges \n There are two exceptiioins: \"\"\"\n \n \"\"\" 1.) In the original function provides a set called visited where all the\n nodes are stored which were, as stated, already visited. This is not needed\n here because the information wheter and when a node was visited is stored\n at the node level.\n 2.) The selection of nodes for bfs differs. A node is selected if:\n a) the node has no doi (date of infection), i.e. the node has never\n been selected before\n b) the node is selected again, when there is - in comparison to the old\n date of infection - an earlier date. DoC is the date of contact and if\n DoC < doi of the node, then the node is selected\n c) the node is selected if the infector of this node has changed\n d) The node will only be selected if the parent node has an doi, and if\n this date of infection is before the actual contact. This guarantees\n that the node can only be infected by an infected parent node \"\"\"\n\n stack = [(source, iter(G[source]))]\n while stack:\n parent, children = stack[0]\n try:\n child = next(children)\n e = (parent, child)\n sDoC = G.get_edge_data(*e)[\"trade_date\"]\n DoC = datetime.strptime(sDoC.title(), \"%d%b%Y\").date()\n \"\"\" this if clause is diffent from the original bfs search\n function. Everything else is identical \"\"\"\n if ('doi' not in G.node[child] or DoC < G.node[child]['doi'] or (DoC == G.node[child]['doi'] and G.node[child]['Infector'] != G.node[parent]['Infector'])) and (DoC > G.node[parent]['doi']):\n G.node[child]['k'] = G.node[parent]['k']+1\n G.node[child]['doi'] = DoC\n G.node[child]['ttc'] = G.node[parent]['ttc'] + (DoC - G.node[parent]['doi'])\n G.node[child]['Infector'] = G.node[parent]['Infector']\n yield parent, child\n stack.append((child, iter(G[child])))\n except StopIteration:\n stack.pop(0)",
"def test_DFS(self):\n g = self.get_default_graph()\n g.dfs()",
"def dft(self, starting_vertex):\n \n visited = []\n stack = Stack()\n\n stack.add(starting_vertex)\n\n while len(stack):\n current = stack.pop()\n\n if current not in visited:\n print(current)\n visited.append(current)\n \n for child in self.vertices[current]:\n if child not in visited:\n stack.add(child)",
"def depth_first_traversal(graph_node, vertex_list):\n nodes_stack = Stack()\n nodes_stack.push(graph_node)\n\n while len(nodes_stack) > 0:\n node = nodes_stack.pop()\n\n if node.data not in vertex_list:\n vertex_list.append(node.data)\n\n for neighbor_node in node.neighbors:\n nodes_stack.push(neighbor_node)",
"def find_cycle(G, source=None, orientation=None):\n if not G.is_directed() or orientation in (None, 'original'):\n def tailhead(edge):\n return edge[:2]\n elif orientation == 'reverse':\n def tailhead(edge):\n return edge[1], edge[0]\n elif orientation == 'ignore':\n def tailhead(edge):\n if edge[-1] == 'reverse':\n return edge[1], edge[0]\n return edge[:2]\n\n explored = set()\n cycle = []\n final_node = None\n for start_node in G.nbunch_iter(source):\n if start_node in explored:\n # No loop is possible.\n continue\n\n edges = []\n # All nodes seen in this iteration of edge_dfs\n seen = {start_node}\n # Nodes in active path.\n active_nodes = {start_node}\n previous_head = None\n\n for edge in nx.edge_dfs(G, start_node, orientation):\n # Determine if this edge is a continuation of the active path.\n tail, head = tailhead(edge)\n if head in explored:\n # Then we've already explored it. No loop is possible.\n continue\n if previous_head is not None and tail != previous_head:\n # This edge results from backtracking.\n # Pop until we get a node whose head equals the current tail.\n # So for example, we might have:\n # (0, 1), (1, 2), (2, 3), (1, 4)\n # which must become:\n # (0, 1), (1, 4)\n while True:\n try:\n popped_edge = edges.pop()\n except IndexError:\n edges = []\n active_nodes = {tail}\n break\n else:\n popped_head = tailhead(popped_edge)[1]\n active_nodes.remove(popped_head)\n\n if edges:\n last_head = tailhead(edges[-1])[1]\n if tail == last_head:\n break\n edges.append(edge)\n\n if head in active_nodes:\n # We have a loop!\n cycle.extend(edges)\n final_node = head\n break\n else:\n seen.add(head)\n active_nodes.add(head)\n previous_head = head\n\n if cycle:\n break\n else:\n explored.update(seen)\n\n else:\n assert(len(cycle) == 0)\n raise nx.exception.NetworkXNoCycle('No cycle found.')\n\n # We now have a list of edges which ends on a cycle.\n # So we need to remove from the beginning edges that are not relevant.\n\n for i, edge in enumerate(cycle):\n tail, head = tailhead(edge)\n if tail == final_node:\n break\n\n return cycle[i:]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
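As with the BFS sketch earlier, the dfs_edges_generator stub above is elided, so the following is only one possible reading of it: a depth-first traversal that yields each tree edge as it is first crossed, started once per source node (assuming, as the query states, that the sources lie in different connected components). The name dfs_edge_frontiers and the adjacency-dict input are assumptions made for this illustration.

def dfs_edge_frontiers(adj, sources):
    # Iterative DFS (assumed interpretation): yield each tree edge
    # (parent, child) the first time it is traversed, restarting the
    # search from every provided source node.
    visited = set(sources)
    for src in sources:
        stack = [(src, iter(adj.get(src, ())))]
        while stack:
            parent, children = stack[-1]
            child = next(children, None)
            if child is None:
                stack.pop()
            elif child not in visited:
                visited.add(child)
                yield (parent, child)
                stack.append((child, iter(adj.get(child, ()))))

# Example usage: two components, one rooted at 0 and one at 3.
adj = {0: [1, 2], 1: [], 2: [], 3: [4], 4: []}
print(list(dfs_edge_frontiers(adj, [0, 3])))  # [(0, 1), (0, 2), (3, 4)]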
Find the feature to use for the next node split and also find where the split should be within that feature. This loops through the split options within a feature to find the best Gini score, then it loops through each feature to compare the optimal Gini scores. | def find_split(self, X, y):
choices = y.size
if choices <= 1:
return None, None
# find the number of each option in the current node.
options_parent = [np.sum(y == c) for c in range(self.num_outcomes)]
# find the gini of current node.
best_gini = 1.0 - sum((n / choices) ** 2 for n in options_parent)
best_idx, best_split = None, None
# loop through the features to get splits and options.
for idx in range(self.num_features):
splits, options = zip(*sorted(zip(X[:, idx], y)))
num_left = [0] * self.num_outcomes
num_right = options_parent.copy()
for i in range(1, choices):
c = options[i - 1]
num_left[c] += 1
num_right[c] -= 1
gini_left = 1.0 - sum(
(num_left[x] / i) ** 2 for x in range(self.num_outcomes)
)
gini_right = 1.0 - sum(
                (num_right[x] / (choices - i)) ** 2 for x in range(self.num_outcomes)
)
gini = (i * gini_left + (choices - i) * gini_right) / choices
if splits[i] == splits[i - 1]:
continue
if gini < best_gini:
best_gini = gini
best_idx = idx
best_split = (splits[i] + splits[i - 1]) / 2
return best_idx, best_split | [
"def find_best_split(data, feature_names, min_samples_leaf=5, random_subset=False, column_class=-1):\n N, f_n = np.shape(data)\n n = f_n - 1\n root_n = int(np.ceil(np.sqrt(n)))\n if (column_class == -1):\n column_class = f_n - 1\n if (random_subset == True):\n list_features = np.random.choice(np.arange(0, f_n-1, 1), root_n).tolist()\n feature_names_search = np.array(feature_names)[list_features].tolist()\n else:\n feature_names_search = feature_names\n G = gini(data)\n best_question = None\n #initialize the value to optimize\n info_best = -np.inf\n #begin the optimization loop\n for it_f in range(len(feature_names)):\n if (column_class == it_f):\n continue\n #functionality for limiting features to split on\n if (feature_names[it_f] not in feature_names_search):\n continue\n #the list for unique vals\n val_list = list()\n for sample in range(N):\n #get the unique value to create the question with\n val = data[sample,it_f]\n if (val not in val_list):\n val_list.append(val)\n #create the question\n question = Question(column=it_f , value=val, feature_names=feature_names)\n left, right = partition(data, question)\n #make sure the partition counts exceed the necessary number\n if (right is not None and left is not None):\n m_l,_ = np.shape(left)\n m_r,_ = np.shape(right)\n if (m_l >= min_samples_leaf and m_r >= min_samples_leaf):\n #compute the info gain\n gain = info_gain(left, right, G)\n #now check if it is the best\n if (gain > info_best):\n info_best = gain\n best_question = question\n return info_best, best_question",
"def _find_best_split(self, node):\n\n # init \n max_impurity_decrease = 0\n best_split_feature_idx = 0\n best_split_threshold = np.inf # all goes to the left\n \n for feature_idx in self._search_scope(node.X):\n x_feature = node.X[:,feature_idx]\n possible_thresholds = set(x_feature)\n\n for threshold in possible_thresholds:\n left_idx = x_feature < threshold\n right_idx = np.array([not i for i in left_idx])\n impurity_decrease = self._compute_impurity_decrease(node, left_idx, right_idx)\n\n if impurity_decrease > max_impurity_decrease:\n best_split_feature_idx = feature_idx\n best_split_threshold = threshold\n max_impurity_decrease = impurity_decrease\n \n return best_split_feature_idx, best_split_threshold, max_impurity_decrease",
"def __get_split_feature(self, data_set, target_feature, tree_features):\n\n if self.__criterion == 'entropy':\n feature_gains = {feature: self.__gain(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = max(feature_gains, key=feature_gains.get)\n return split_feature\n elif self.__criterion == 'gini':\n feature_ginis = {feature: self.__gini(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = min(feature_ginis, key=feature_ginis.get)\n return split_feature\n # TODO: I should check this (gini index).",
"def determine_best_split(data, potential_splits, mltask):\n\n first_iteration = True\n for column_index in potential_splits:\n for value in potential_splits[column_index]:\n data_below,data_above = split_data(data, column_index, value)\n \n if mltask == 'regression':\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_mse)\n \n # classification\n else:\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_entropy)\n \n \n if first_iteration or current_overall_metric <= best_overall_metric:\n first_iteration = False\n \n best_overall_metric = current_overall_metric\n best_split_column = column_index\n best_split_value = value\n \n \n return best_split_column,best_split_value",
"def _select_split_node(self, data):\n\n if self.impurity_measure == 'entropy':\n ig = self._info_gain(data)\n opt_ig, opt_col, opt_mean = 0, 0, 0\n for k in ig.keys():\n if ig[k]['ig'] > opt_ig:\n opt_ig = ig[k]['ig']\n opt_col = k\n opt_mean = ig[k]['mean']\n elif self.impurity_measure == 'gini':\n g = self._gini_index(data)\n opt_gini, opt_col, opt_mean = 1, 0, 0\n for k in g.keys():\n if g[k]['gini_idx'] < opt_gini:\n opt_gini = g[k]['gini_idx']\n opt_col = k\n opt_mean = g[k]['mean']\n return opt_col, opt_mean",
"def choose_best_split(self, X_subset, y_subset):\n # YOUR CODE HERE\n feature_index = None\n threshold = None\n best_G = np.inf\n N = len(X_subset)\n \n for current_feature in range(X_subset.shape[1]):\n thresholds = np.unique(X_subset[:, current_feature])\n \n for t in thresholds:\n y_left, y_right = self.make_split_only_y(current_feature, t, X_subset, y_subset)\n H_L = self.H(y_left)\n H_R = self.H(y_right)\n \n G = (len(y_left) / N) * H_L + (len(y_right) / N) * H_R\n \n if G < best_G:\n best_G = G\n feature_index = current_feature\n threshold = t\n \n return feature_index, threshold",
"def bestfeature_split(dataset):\n fea_nums=len(dataset[0])-1\n base_entro=dataentro_cal(dataset)\n best_gain=0.0\n for i in range (fea_nums): # iterate all the features\n con_entro=0.0\n fea_list=[sample[i] for sample in dataset ] # collect all the feature values\n fea_set=set(fea_list) # make the values unique to each other\n for value in fea_set: # iterate all the values of a feature to seperate the dataset\n sub_dataset=split_dataset(dataset,i,value)\n entropy=dataentro_cal(sub_dataset)\n prob_subdataset=len(sub_dataset)/float(len(dataset))\n con_entro+=prob_subdataset*entropy\n info_gain=base_entro-con_entro\n if (info_gain>best_gain):\n best_gain=info_gain\n best_feature=i\n return best_feature",
"def _reevaluate_best_split(self, node, parent, branch_index):\n stop_flag = False\n if not node.observed_class_distribution_is_pure():\n if self._split_criterion == self._GINI_SPLIT:\n split_criterion = GiniSplitCriterion()\n elif self._split_criterion == self._INFO_GAIN_SPLIT:\n split_criterion = InfoGainSplitCriterion()\n elif self._split_criterion == self._HELLINGER:\n split_criterion = HellingerDistanceCriterion()\n else:\n split_criterion = InfoGainSplitCriterion()\n\n best_split_suggestions = node.best_split_suggestions(split_criterion, self)\n if len(best_split_suggestions) > 0:\n # Sort the attribute accordingly to their split merit for each attribute\n # (except the null one)\n best_split_suggestions.sort(key=attrgetter('merit'))\n\n # x_best is the attribute with the highest merit\n x_best = best_split_suggestions[-1]\n id_best = x_best.split_test.attrs_test_depends_on()[0]\n\n # x_current is the current attribute used in this SplitNode\n id_current = node.split_test.attrs_test_depends_on()[0]\n x_current = node.find_attribute(id_current, best_split_suggestions)\n\n # Get x_null\n x_null = node.null_split(split_criterion)\n\n # Compute Hoeffding bound\n hoeffding_bound = self._hoeffding_bound(\n split_criterion.range_of_merit(node.stats), self.split_confidence,\n node.total_weight)\n\n if x_null.merit - x_best.merit > hoeffding_bound:\n # Kill subtree & replace the EFDTSplitNode by an EFDTLearningNode\n best_split = self._kill_subtree(node)\n\n # update EFDT\n if parent is None:\n # Root case : replace the root node by a new split node\n self._tree_root = best_split\n else:\n parent.set_child(branch_index, best_split)\n\n deleted_node_cnt = node.count_nodes()\n\n self._n_active_leaves += 1\n self._n_active_leaves -= deleted_node_cnt['leaf_nodes']\n self._n_decision_nodes -= deleted_node_cnt['decision_nodes']\n stop_flag = True\n\n # Manage memory\n self._enforce_size_limit()\n\n elif (x_best.merit - x_current.merit > hoeffding_bound or hoeffding_bound\n < self.tie_threshold) and (id_current != id_best):\n # Create a new branch\n new_split = self._new_split_node(x_best.split_test, node.stats, node.depth,\n node.attribute_observers)\n # Update weights in new_split\n new_split.last_split_reevaluation_at = node.total_weight\n\n # Update EFDT\n for i in range(x_best.num_splits()):\n new_child = self._new_learning_node(x_best.resulting_stats_from_split(i))\n new_split.set_child(i, new_child)\n\n deleted_node_cnt = node.count_nodes()\n\n self._n_active_leaves -= deleted_node_cnt['leaf_nodes']\n self._n_decision_nodes -= deleted_node_cnt['decision_nodes']\n self._n_decision_nodes += 1\n self._n_active_leaves += x_best.num_splits()\n\n if parent is None:\n # Root case : replace the root node by a new split node\n self._tree_root = new_split\n else:\n parent.set_child(branch_index, new_split)\n\n stop_flag = True\n\n # Manage memory\n self._enforce_size_limit()\n\n elif (x_best.merit - x_current.merit > hoeffding_bound or hoeffding_bound\n < self.tie_threshold) and (id_current == id_best):\n node._split_test = x_best.split_test\n\n return stop_flag",
"def decision_tree_split_regression(feature, label, method):\r\n # Number of samples\r\n total_count = len(feature)\r\n\r\n # Midpoint of the feature\r\n mid_point = np.mean(feature.values)\r\n\r\n # Subset of data < midpoint\r\n subset_before = label.values[np.where(feature.values < mid_point)]\r\n\r\n # Subset of data > midpoint\r\n subset_after = label.values[np.where(feature.values > mid_point)]\r\n\r\n # List to store the splitting method values\r\n tmp_list = []\r\n\r\n # Store the total splitting method values across all the classes\r\n weighted_method = 0\r\n\r\n # List of data containg subset < midpoint and subset > midpoint\r\n subset_list = [subset_before, subset_after]\r\n\r\n # Iterate through the two subsets\r\n for i in subset_list:\r\n # Method\r\n if method == 'MSE':\r\n # Entropy of the ith class\r\n mse_i = mse(i)\r\n\r\n # Append to list to find the minimum\r\n tmp_list.append(mse_i)\r\n\r\n # Weighted average across all classes\r\n weighted_method += ((mse_i * len(i)))\r\n\r\n # Find the index corresponding to the minimum decision splitting method value\r\n tmp_array = tmp_list\r\n min_idx = np.argmin(tmp_array)\r\n\r\n # Subset of data points < midpoint\r\n if min_idx == 0:\r\n\r\n # Optimal\r\n # Optimal split indices\r\n optimal_idx = np.where(feature < mid_point)\r\n\r\n # Optimal split data\r\n optimal_split_data = optimal_idx\r\n\r\n # Optimal split labels\r\n optimal_split_label = label.values[optimal_idx]\r\n\r\n # Majority label\r\n optimal_label = np.mean(optimal_split_label)\r\n\r\n # List of Optimal Label < Midpoint\r\n optimal_label = ['<', mid_point, optimal_label]\r\n\r\n # Suboptimal\r\n # Indices of the suboptimal splits\r\n suboptimal_idx = np.where(feature > mid_point)\r\n\r\n # Suboptimal split data\r\n suboptimal_split_data = suboptimal_idx\r\n\r\n # Suboptimal split labels\r\n suboptimal_split_label = label.values[suboptimal_idx]\r\n\r\n # Majority label\r\n suboptimal_label = np.mean(suboptimal_split_label)\r\n\r\n # List of Suboptimal Label > Midpoint\r\n suboptimal_label = ['>', mid_point, suboptimal_label]\r\n\r\n # Subset of data points > midpoint\r\n elif min_idx == 1:\r\n\r\n # Optimal\r\n # Optimal split indices\r\n optimal_idx = np.where(feature > mid_point)\r\n\r\n # Optimal split data\r\n optimal_split_data = optimal_idx\r\n\r\n # Optimal split labels\r\n optimal_split_label = label.values[optimal_idx]\r\n\r\n # Majority label\r\n optimal_label = np.mean(optimal_split_label)\r\n\r\n # List of Optimal Label > Midpoint\r\n optimal_label = ['>', mid_point, optimal_label]\r\n\r\n # Suboptimal\r\n # Indices of the suboptimal splits\r\n suboptimal_idx = np.where(feature < mid_point)\r\n\r\n # Suboptimal split data\r\n suboptimal_split_data = suboptimal_idx\r\n\r\n # Suboptimal split labels\r\n suboptimal_split_label = label.values[suboptimal_idx]\r\n\r\n # Majority label\r\n suboptimal_label = np.mean(suboptimal_split_label)\r\n\r\n # List of Suboptimal Label < Midpoint\r\n suboptimal_label = ['<', mid_point, suboptimal_label]\r\n\r\n return weighted_method, optimal_split_data, suboptimal_split_data, optimal_label, suboptimal_label",
"def split_next_best(self, pbar=None):\n assert hasattr(self, 'tree'), 'Must have started growth process already.'\n if self.leaf_impurity_sums == []: return False\n imp_norm = np.array(self.leaf_impurity_sums) / self.root_impurity_sums\n if self.split_by == 'action': best = np.argmax(imp_norm[:,0])\n elif self.split_by == 'value': best = np.argmax(imp_norm[:,1])\n elif self.split_by == 'derivative': best = np.argmax(imp_norm[:,2])\n # NOTE: For split_by='pick', sum normalised impurities and find argmax.\n elif self.split_by == 'pick': best = np.argmax(imp_norm.sum(axis=1))\n # NOTE: For split_by='weighted', take weighted sum instead. \n elif self.split_by == 'weighted': best = np.argmax(np.inner(imp_norm, self.impurity_weights))\n nint = self.untried_leaf_nints.pop(best)\n imp = self.leaf_impurity_sums.pop(best)\n node = self.node(nint)\n if self.split(node): # The split is tried here.\n if pbar: pbar.update(1)\n self.untried_leaf_nints.append(node.left.nint)\n self.leaf_impurity_sums.append(self.get_node_impurity_sums(node.left))\n self.untried_leaf_nints.append(node.right.nint)\n self.leaf_impurity_sums.append(self.get_node_impurity_sums(node.right))\n return True\n # If can't make a split, recurse to try the next best.\n else: return self.split_next_best()",
"def boosting_iteration(self):\n\n self.T.stamp('Start main loop %d'%i)\n\n feature_no=self.feature_no\n\n i=self.iteration\n\n BC_Split_Table = self.sc.broadcast(self.Split_Table)\n prop=self.PS[i].map(Find_weak).collect()\n self.proposals.append(prop)\n\n corrs=[p['Correlation'] for p in prop]\n best_splitter_index=np.argmax(np.abs(corrs))\n best_splitter = prop[best_splitter_index]\n corr=best_splitter['Correlation']\n best_splitter['alpha']=0.5*np.log((1+corr)/(1-corr))\n\n BC_best_splitter=sc.broadcast(best_splitter)\n self.Strong_Classifier.append(best_splitter)\n\n BC_Strong_Classifier=sc.broadcast(self.Strong_Classifier)\n self.T.stamp('found best splitter %d'%i)\n\n newPS=self.PS[i].map(update_weights).cache()\n newPS.count()\n self.PS.append(newPS)\n\n self.T.stamp('Updated Weights %d'%i)\n self.iteration+=1\n\n def Find_weak(A):\n \"\"\"Find the best split for a single feature on a single partition\n\n :param A: Partition data structure\n\n :returns: a dict describing the added weak classifier\n 'Feature_index': the index of the best feature\n 'Threshold_index': the index of the best treshold (the split point)\n 'Threshold': the value of the best treshold \n 'Correlation': the weighted correlation of the best weak rule\n 'SS': the weighted correlations of all of the split points.\n :rtype: dict\n \"\"\"\n index=A['index']%feature_no\n SP=BC_Splits_Table.value[index]\n\n M=A['M']\n weights=A['weights']\n weighted_Labels=weights*A['labels']\n SS=np.dot(M,weighted_Labels)/np.sum(weights)\n i_max=np.argmax(np.abs(SS))\n return {'Feature_index':A['index']%feature_no,\\\n 'Threshold_index':i_max,\\\n 'Threshold':SP[i_max],\\\n 'Correlation':SS[i_max],\\\n 'SS':SS\n }\n\n def update_weights(A):\n \"\"\"Update the weights of the examples belonging to this partition\n\n :param A: The partition data structure\n\n :returns: A partition data structure with updated weights\n :rtype: dict\n\n \"\"\"\n best_splitter=BC_best_splitter.value\n\n F_index=best_splitter['Feature_index']\n Thr=best_splitter['Threshold']\n alpha=best_splitter['alpha']\n y_hat=2*(A['feature_values'][F_index,:]<Thr)-1\n y=A['labels']\n weights=A['weights']*exp(-alpha*y_hat*y)\n weights /= sum(weights)\n\n A['weights']=weights\n return A",
"def pick_best_split(self,db,labels,ids,features=None):\n idlabels = [labels[id] for id in ids]\n if misclassification_error(idlabels) == 0:\n #base case: no misclassifications\n self.type = 'v'\n self.value = idlabels[0]\n return 0\n best = None\n bestCost = 0\n splitval = None\n discrete = True\n if features == None:\n if len(ids) < db.numFeatures():\n #look at all present features in the training set\n features = db.getPresentFeatures(ids)\n #print len(features),\"of\",db.numFeatures(),\"features selected\"\n else:\n features = range(db.numFeatures())\n elif callable(features):\n features = features()\n for i in features:\n if len(db.entryLists[i]) == 0: continue\n idiscrete = db.discreteFeature[i]\n if idiscrete:\n #count number of labels of a certain value\n splitter = defaultdict(lambda:defaultdict(int))\n #count of labels for missing values\n nmissing = defaultdict(int)\n for id in ids:\n val = db[i,id]\n if val is None:\n #missing values go down to all splits\n nmissing[labels[id]] += 1\n continue\n splitter[val][labels[id]] += 1\n if len(splitter) > continuous_variable_threshold:\n #print \"Determined to be a continuous variable\"\n idiscrete = False\n break\n if idiscrete:\n if len(splitter) <= 1:\n #only a single value\n continue\n #count number of missing values in all splits\n cmax = 0\n for k in splitter:\n for l,v in nmissing.iteritems():\n splitter[k][l] += v\n cmax = max(cmax,sum(splitter[k].values()))\n #shrink by fraction of (# of ids - largest child)/(# of ids)\n scale = (1.0-float(cmax)/float(len(ids)))*len(splitter)\n #evaluate cost\n cost = split_cost(splitter.values())*scale\n #print \"Split on\",i,\"information gain\",-cost,splitter.values()\n else:\n #continuous, need to learn the best split\n vals = []\n presentlabels = []\n nonelabels = []\n for id in ids:\n val = db[i,id]\n if val is None:\n nonelabels.append(labels[id])\n continue\n vals.append(val)\n presentlabels.append(labels[id])\n if len(vals) <= 1:\n print \"No values for feature\",i,\"?\"\n print vals\n continue\n #print \"Considering continuous split on\",i\n s,cost = best_split(vals,presentlabels,nonelabels)\n scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2\n cost *= scale\n #print \"Result\",s,\"Information gain\",-cost\n \n if cost < bestCost:\n best = i\n bestCost = cost\n discrete = idiscrete\n if not idiscrete:\n splitval = s\n \n if best is None:\n self.type = 'v'\n if len(ids) > 0:\n self.value = vote(idlabels)\n return misclassification_error(idlabels)\n else:\n self.value = None\n return 0\n else:\n self.feature = best\n #discrete or inequality split\n if discrete:\n self.type = 's'\n else:\n self.type = 'i'\n self.value = splitval\n return bestCost",
"def _split_continuous_feature(self, df, feature):\n\n sorted_df = df.sort_values(feature.name)\n last_entry_class = None\n max_gain = -1\n best_info_gain_ratio = 0\n best_split_point = None\n last_row = None\n for _, row in sorted_df.iterrows():\n if last_entry_class and row[self.label.name] != last_entry_class:\n # 感觉可以优化\n split_point = (row[feature.name] + last_row[feature.name]) / 2\n info_gain, info_gain_ratio = self._calculate_continuous_info_gain_and_ratio(sorted_df, split_point, feature)\n if max_gain < info_gain:\n max_gain = info_gain\n best_split_point = split_point\n best_info_gain_ratio = info_gain_ratio\n\n last_row = row\n last_entry_class = row[self.label.name]\n\n assert best_split_point is not None, 'Error:can\\' find best split point'\n return self._make_continous_tree(sorted_df, best_split_point, feature), best_info_gain_ratio",
"def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? 
\"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err",
"def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors",
"def split(self, node):\n assert node.left == None, 'Not a leaf node.'\n # Check whether able to skip consideration of action, value or normalised derivative entirely.\n if (node.action_impurity > 0) and (self.split_by == 'pick' or self.impurity_weights[0] > 0): do_action = True\n else: do_action = False\n if (node.value_impurity > 0) and (self.split_by == 'pick' or self.impurity_weights[1] > 0): do_value = True\n else: do_value = False\n if (node.derivative_impurity > 0) and (self.split_by == 'pick' or self.impurity_weights[2] > 0): do_derivatives = True\n else: do_derivatives = False\n if (not do_action) and (not do_value) and (not do_derivatives): return False\n\n # Iterate through features and find best split(s) for each.\n candidate_splits = []\n for f in range(self.num_features):\n candidate_splits += self.split_feature(node, f, do_action, do_value, do_derivatives)\n # If beneficial split found on at least one feature...\n if sum([s[3][0] != None for s in candidate_splits]) > 0: \n split_quality = [s[3][2] for s in candidate_splits] \n # Choose one feature to split on. \n if self.stochastic_splits:\n # Sample in proportion to relative impurity gain.\n chosen_split = np.random.choice(range(len(candidate_splits)), p=split_quality)\n else:\n # Deterministically choose the feature with greatest relative impurity gain.\n chosen_split = np.argmax(split_quality) # Ties broken by lowest index. \n # Unpack information for this split and create child leaves.\n node.feature_index, node.split_by, indices_sorted, (node.threshold, split_index, _, _, _, _) = candidate_splits[chosen_split] \n address = int_to_bits(node.nint)\n node.left = self.new_leaf(list(address)+[0], indices_sorted[:split_index])\n node.right = self.new_leaf(list(address)+[1], indices_sorted[split_index:]) \n self.num_leaves += 1\n # Store impurity gains, scaled by node.num_samples, to measure feature importance.\n node.feature_importance = np.zeros((4, self.num_features))\n if do_action:\n fi_action = np.array([s[3][3] for s in candidate_splits if s[1] in ('action','weighted')]) * node.num_samples \n node.feature_importance[2,:] = fi_action # Potential.\n node.feature_importance[0,node.feature_index] = max(fi_action) # Realised.\n if do_value:\n fi_value = np.array([s[3][4] for s in candidate_splits if s[1] in ('value','weighted')]) * node.num_samples \n node.feature_importance[3,:] = fi_value # Potential.\n node.feature_importance[1,node.feature_index] = max(fi_value) # Realised.\n # Back-propagate importances to all ancestors.\n while address != ():\n ancestor, address = self.parent(address)\n ancestor.feature_importance += node.feature_importance\n return True\n return False",
"def findBestValueSplitByGini(self, data, structure, colIndex):\n minGini, bestSplit = 1, []\n for i in range(0, len(data)-1):\n split = (float(data[i][colIndex]) + float(data[i+1][colIndex])) / 2\n giniSplit = self.calcGiniSplitBySplitValue(data, structure, colIndex, split)\n if giniSplit <= minGini:\n minGini = giniSplit\n bestSplit = [split, giniSplit]\n return bestSplit",
"def decision_tree_split_classification(feature, label, method):\r\n # Check if feature is string or categorical\r\n if (feature.dtype.name == 'str160') | (feature.dtype.name == 'object'):\r\n\r\n # Number of classes\r\n classes = np.unique(feature)\r\n\r\n # Number of samples\r\n total_count = int(len(feature))\r\n\r\n # List to store the splitting method values\r\n tmp_list = []\r\n\r\n # Store the total splitting method values across all the classes\r\n weighted_method = 0\r\n\r\n # Iterate through each class\r\n for i in classes:\r\n\r\n # Find the indices with correspond to each class\r\n idx = np.where(feature == i)\r\n\r\n # Subset the data by using the previous indices\r\n subset = feature.values[idx]\r\n\r\n # Splitting Method\r\n if method == 'Entropy':\r\n\r\n # Entropy of the ith class\r\n entropy_i = entropy(subset, total_count)\r\n\r\n # Append to list to find the minimum\r\n tmp_list.append(entropy_i)\r\n\r\n # Weighted average across all classes\r\n weighted_method += ((entropy_i * len(subset))/total_count)\r\n\r\n # Splitting Method\r\n elif method == 'Gini':\r\n\r\n # Gini measure of the ith class\r\n gini_i = gini(subset, total_count)\r\n\r\n # Append to list to find the minimum\r\n tmp_list.append(gini_i)\r\n\r\n # Weighted average across all classes\r\n weighted_method += ((gini_i * len(subset))/total_count)\r\n\r\n # Find the index corresponding to the minimum decision splitting method value\r\n tmp_array = tmp_list\r\n min_idx = np.argmin(tmp_array)\r\n\r\n # Optimal\r\n # Optimal split indices\r\n optimal_idx = np.where(feature == classes[min_idx])\r\n\r\n # Optimal split data\r\n optimal_split_data = optimal_idx\r\n\r\n # Optimal labels\r\n optimal_split_label = label.values[optimal_idx]\r\n\r\n # Majority label\r\n optimal_label = label_selection(optimal_split_label)\r\n\r\n # Create a list of the Optimal Class and Optimal Label\r\n optimal_label = [classes[min_idx], optimal_label]\r\n\r\n # Suboptimal\r\n # Function to create suboptimal_split_dict\r\n tmp_dict = suboptimal_classes_dict(classes, min_idx)\r\n\r\n # Iterate through each class of the dictionary\r\n for k in tmp_dict.keys():\r\n\r\n # Select the feature values which are the k class\r\n idx = np.where(feature == k)\r\n\r\n # Append those feature values to the dictionary\r\n tmp_dict[k].append(idx)\r\n\r\n # Assign suboptimal_split_data to the tmp dict\r\n suboptimal_split_data = tmp_dict\r\n\r\n # Suboptimal split labels\r\n tmp_dict = suboptimal_classes_dict(classes, min_idx)\r\n\r\n # Iterate through each class of the dictionary\r\n for k in tmp_dict.keys():\r\n\r\n # Select the feature values which are the k class\r\n idx = np.where(feature == k)\r\n\r\n # Labels for each suboptimal class\r\n suboptimal_split_label = label.values[idx]\r\n\r\n # Assign majority label\r\n suboptimal_label = label_selection(suboptimal_split_label)\r\n\r\n # Append values\r\n tmp_dict[k].append(suboptimal_label)\r\n\r\n # Assign suboptimal_label to the tmp dict\r\n suboptimal_label = tmp_dict\r\n\r\n # elif feature is continuous or integer dataype\r\n elif (feature.dtype.name != 'str160') & (feature.dtype.name != 'object'):\r\n # Number of samples\r\n total_count = len(feature)\r\n\r\n # Midpoint of the feature\r\n mid_point = np.mean(feature.values)\r\n\r\n # Subset of data < midpoint\r\n subset_before = feature.values[np.where(feature.values < mid_point)]\r\n\r\n # Subset of data > midpoint\r\n subset_after = feature.values[np.where(feature.values > mid_point)]\r\n\r\n # List to store the splitting method values\r\n 
tmp_list = []\r\n\r\n # Store the total splitting method values across all the classes\r\n weighted_method = 0\r\n\r\n # List of data containg subset < midpoint and subset > midpoint\r\n subset_list = [subset_before, subset_after]\r\n\r\n # Iterate through the two subsets\r\n for i in subset_list:\r\n\r\n # Method\r\n if method == 'Entropy':\r\n\r\n # Entropy of the ith class\r\n entropy_i = entropy(i, total_count)\r\n\r\n # Append to list to find the minimum\r\n tmp_list.append(entropy_i)\r\n\r\n # Weighted average across all classes\r\n weighted_method += ((entropy_i * len(i))/total_count)\r\n\r\n # Method\r\n elif method == 'Gini':\r\n\r\n # Gini of the ith class\r\n gini_i = gini(i, total_count)\r\n\r\n # Append to list to find the minimum\r\n tmp_list.append(gini_i)\r\n\r\n # Weighted average across all classes\r\n weighted_method += ((gini_i * len(i))/total_count)\r\n\r\n # Find the index corresponding to the minimum decision splitting method value\r\n tmp_array = tmp_list\r\n min_idx = np.argmin(tmp_array)\r\n\r\n # Subset of data points < midpoint\r\n if min_idx == 0:\r\n\r\n # Optimal\r\n # Optimal split indices\r\n optimal_idx = np.where(feature < mid_point)\r\n\r\n # Optimal split data\r\n optimal_split_data = optimal_idx\r\n\r\n # Optimal split labels\r\n optimal_split_label = label.values[optimal_idx]\r\n\r\n # Majority label\r\n optimal_label = label_selection(optimal_split_label)\r\n\r\n # List of Optimal Label < Midpoint\r\n optimal_label = ['<', mid_point, optimal_label]\r\n\r\n # Suboptimal\r\n # Indices of the suboptimal splits\r\n suboptimal_idx = np.where(feature > mid_point)\r\n\r\n # Suboptimal split data\r\n suboptimal_split_data = suboptimal_idx\r\n\r\n # Suboptimal split labels\r\n suboptimal_split_label = label.values[suboptimal_idx]\r\n\r\n # Majority label\r\n suboptimal_label = label_selection(suboptimal_split_label)\r\n\r\n # List of Suboptimal Label > Midpoint\r\n suboptimal_label = ['>', mid_point, suboptimal_label]\r\n\r\n # Subset of data points > midpoint\r\n elif min_idx == 1:\r\n\r\n # Optimal\r\n # Optimal split indices\r\n optimal_idx = np.where(feature > mid_point)\r\n\r\n # Optimal split data\r\n optimal_split_data = optimal_idx\r\n\r\n # Optimal split labels\r\n optimal_split_label = label.values[optimal_idx]\r\n\r\n # Majority label\r\n optimal_label = label_selection(optimal_split_label)\r\n\r\n # List of Optimal Label > Midpoint\r\n optimal_label = ['>', mid_point, optimal_label]\r\n\r\n # Suboptimal\r\n # Indices of the suboptimal splits\r\n suboptimal_idx = np.where(feature < mid_point)\r\n\r\n # Suboptimal split data\r\n suboptimal_split_data = suboptimal_idx\r\n\r\n # Suboptimal split labels\r\n suboptimal_split_label = label.values[suboptimal_idx]\r\n\r\n # Majority label\r\n suboptimal_label = label_selection(suboptimal_split_label)\r\n\r\n # List of Suboptimal Label < Midpoint\r\n suboptimal_label = ['<', mid_point, suboptimal_label]\r\n\r\n return weighted_method, optimal_split_data, suboptimal_split_data, optimal_label, suboptimal_label",
"def _find_best_split(self, X, y):\n\n def calculate_entropy(p):\n # _, counts = np.unique(y, return_counts=True)\n # entropy = 0.0\n # for prob in counts / float(len(y)):\n # entropy -= prob * math.log(prob, 2)\n # return entropy\n p = np.bincount(p) / float(p.shape[0])\n return stats.entropy(p)\n\n def calculate_information_gain(y, left_y, right_y):\n # p = len(left_y) / len(y)\n # return calculate_entropy(y) - p * \\\n # calculate_entropy(left_y) - (1 - p) * \\\n # calculate_entropy(right_y)\n return calculate_entropy(y) \\\n - calculate_entropy(left_y) * (float(left_y.shape[0]) / y.shape[0]) \\\n - calculate_entropy(right_y) * (float(right_y.shape[0]) / y.shape[0])\n\n def find_splits(x):\n \"\"\"Find all possible split values.\"\"\"\n split_values = set()\n\n # Get unique values in a sorted order\n x_unique = list(np.unique(x))\n for i in range(1, len(x_unique)):\n # Find a point between two values\n average = (x_unique[i - 1] + x_unique[i]) / 2.0\n split_values.add(average)\n\n return list(split_values)\n\n def split_mask(x, value):\n if isinstance(value, int) or isinstance(value, float):\n left_mask = (x >= value)\n right_mask = (x < value)\n else:\n left_mask = (x == value)\n right_mask = (x != value)\n return left_mask, right_mask\n\n max_gain, max_i_feature, max_value = None, None, None\n\n _, n_features = np.shape(X)\n for i_feature in range(n_features):\n column = X[:, i_feature]\n split_values = find_splits(column)\n for value in split_values:\n left_mask, right_mask = split_mask(column, value)\n gain = calculate_information_gain(y, y[left_mask], y[right_mask])\n\n if (max_gain is None) or (gain > max_gain):\n max_i_feature, max_value, max_gain = i_feature, value, gain\n \n if max_gain is None:\n return None, None, None, None, None, None, None\n \n left_mask, right_mask = split_mask(X[:, max_i_feature], max_value)\n return max_gain, max_i_feature, max_value, \\\n X[left_mask], X[right_mask], y[left_mask], y[right_mask]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A class without the key_fields annotation should raise a RuntimeError | def testNoKeyFields():
with pytest.raises(RuntimeError):
class AnnotatedNode(Node):
x: str
y: int
def __init__(self, x: str, y: int):
self.x = x
self.y = y
@property
def _display(self) -> str:
return self.x | [
"def test_key_init_inconsistent_fields(self):\n # Modify the class to add a bogus field name that is not in the DB\n TestMappings.index_fields.append('bad_field')\n tm = Keys(TestMappings)\n # Remove the field name again (or get horrible stuff in rest of tests)\n TestMappings.index_fields.remove('bad_field')\n # Careful if we end up going multi-language on error messages\n error_msg = str(NoFieldError('bad_field',TestMappings))\n self.assertEqual(str(tm.errors),error_msg)",
"def test_key_no_data(self):\n key = Key({})\n\n assert key.warning is None\n assert key.in_car is None",
"def _validate(self):\n fields, schema = self.__dict__, self._def.default\n extra_fields = fields.viewkeys() - schema.viewkeys()\n if len(extra_fields) > 0:\n raise AttributeError('Fields found that are not in the schema: %r' % (list(extra_fields)))\n for key in fields.iterkeys():\n if type(fields[key]) is not type(schema[key]):\n raise AttributeError('Invalid %s for field \"%s\", should be %s' %\n (type(fields[key]), key, type(schema[key])))",
"def test_missing_field(self):\n class TestSerializer(serializers.ModelSerializer):\n missing = serializers.ReadOnlyField()\n\n class Meta:\n model = RegularFieldsModel\n fields = ('auto_field',)\n\n with self.assertRaises(AssertionError) as excinfo:\n TestSerializer().fields\n expected = (\n \"The field 'missing' was declared on serializer TestSerializer, \"\n \"but has not been included in the 'fields' option.\"\n )\n assert str(excinfo.exception) == expected",
"def required(cls, key):\n raise cls('{path} requires key %s to be defined' % key)",
"def test_throws_base_price_missing_key(self):\n with pytest.raises(marshmallow.ValidationError):\n BasePrice.Schema().loads(json.dumps(base_price_missing_key))",
"def _check_key(self, key):\n if key not in self['_allowed_keys']:\n raise AttributeError('attribute \"%s\" not allowed' % key)",
"def testBadKeys(self):\n # Ignore access to protected members\n # pylint: disable=W0212\n self.assertRaises(DOLAPI._DOLAPIError,\n self.badauth.table,\n self.dataset,\n self.table)",
"def test_retrieving_nonexistant_AttributeError(self):\n with self.assertRaises(AttributeError):\n x = self.dict[self.key]",
"def is_invalid(self, key): # pragma: no cover\n\t\traise NotImplementedError",
"def test_register_model_without_primary_key(redis_store):\n\n class ModelWithoutPrimaryKey(Model):\n title: str\n\n with pytest.raises(AttributeError, match=r\"_primary_key_field\"):\n redis_store.register_model(ModelWithoutPrimaryKey)\n\n ModelWithoutPrimaryKey._primary_key_field = None\n\n with pytest.raises(Exception, match=r\"should have a _primary_key_field\"):\n redis_store.register_model(ModelWithoutPrimaryKey)",
"def test_defining_a_primary_key_counter_column_fails(self):\r\n with self.assertRaises(TypeError):\r\n class model(Model):\r\n partition = columns.UUID(primary_key=True, default=uuid4)\r\n cluster = columns.Counter(primary_ley=True)\r\n counter = columns.Counter()\r\n\r\n # force it\r\n with self.assertRaises(ModelDefinitionException):\r\n class model(Model):\r\n partition = columns.UUID(primary_key=True, default=uuid4)\r\n cluster = columns.Counter()\r\n cluster.primary_key = True\r\n counter = columns.Counter()",
"def test_keyerror(self):\n try:\n self.db['foo']\n except KeyError, e:\n assert \"no key 'foo' in database <SequenceFileDB\" in str(e), str(e)",
"def test_defining_only_or_defer_on_nonexistant_fields_fails(self):",
"def test_missing_required_field_raises_error():\n with pytest.raises(ValidationError):\n Entity()",
"def __missing__(self, key):\n return None",
"def DisableReflectionKey(*args, **kwargs): # real signature unknown\r\n pass",
"def test_raise_if_no_attr(self):\n self.assertRaises(AttributeError, self.Model.set_primary_key, 'asdf')",
"def test_invalid_field(self):\n class TestSerializer(serializers.ModelSerializer):\n class Meta:\n model = RegularFieldsModel\n fields = ('auto_field', 'invalid')\n\n with self.assertRaises(ImproperlyConfigured) as excinfo:\n TestSerializer().fields\n expected = 'Field name `invalid` is not valid for model `RegularFieldsModel`.'\n assert str(excinfo.exception) == expected"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
creates randomized colors of shape size_x by size_y | def create_world(size_x = 100, size_y=100):
colors = np.random.randint(0,2,(size_x,size_y)).tolist()
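# Relabel each randomly drawn 0/1 cell as a color: 1 -> 'R', 0 -> 'G'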
for row in range(len(colors)):
for col in range(len(colors[row])):
if colors[row][col] == 1:
colors[row][col] = 'R'
else:
colors[row][col] = 'G'
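# Per-channel intensity grids, initialised to a dim baseline value of 10.0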
r = [[10.0 for i in range(size_y)] for i in range(size_x)]
g = [[10.0 for i in range(size_y)] for i in range(size_x)]
b = [[10.0 for i in range(size_y)] for i in range(size_x)]
RGB = []
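# Flatten the grid cell by cell, appending channels in (b, r, g) order:
# 'R' cells get full red, all other cells get full blue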
for i in range(size_x):
for j in range(size_y):
if colors[i][j] == 'R':
r[i][j] = 255.0
else:
b[i][j] = 255.0
RGB.append(b[i][j])
RGB.append(r[i][j])
RGB.append(g[i][j])
RGB = np.array(RGB).reshape(size_x,size_y,3)
return RGB, colors | [
"def generate_random_colors(self):\n colors = [(random.randint(0, 200), random.randint(0, 200), random.randint(0, 255)) for _ in range(32)]\n grid = []\n for color in colors:\n grid.extend([color, color])\n grid[self.special_coord[0] + (self.special_coord[1] * 8)] = self.special_coord_color\n self.sense.set_pixels(grid)",
"def __init__(self, width, height, relevant_colors):\n self.color = relevant_colors[random.randint(0, len(relevant_colors)-1)]\n self.x = random.randint(0, width)\n self.y = random.randint(0, height)\n self.width = random.randint(0, width//2)\n self.height = random.randint(0, height//2)\n self.type = random.randint(0, 1)",
"def random_color_gen():\n r = randint(0, 255)\n g = randint(0, 255)\n b = randint(0, 255)\n return [r, g, b]",
"def mutate(self, size):\n rand = random.random()\n if rand <= 0.5:\n print u\"changing colour\"\n idx = random.randrange(0, 4)\n value = random.randrange(0, 256)\n colour = list(self.colour)\n colour[idx] = value\n self.colour = tuple(colour)\n else:\n print u\"changing point\"\n idx = random.randrange(0, len(self.points))\n point = generate_point(size[0], size[1])\n self.points[idx] = point",
"def create_random_color(self):\n # Create a list of n colors.\n n = 4\n dc = 1.0 / (n-1)\n color_list = [i*dc for i in range(n)]\n\n if self.is_scaffold:\n rgb = [1.0, 1.0, 1.0]\n else:\n rgb = [random.choice(color_list) for i in range(3)]\n # Don't generate blue (that's for a scaffold in cadnano) or black.\n if (rgb[0] == 0.0) and (rgb[1] == 0.0):\n rgb[0] = random.choice(color_list[1:])\n if rgb[2] == 0.0: \n rgb[2] = random.choice(color_list[1:]) \n #__if (rgb[0] == 0) and (rgb[1] == 0)\n #__if self.is_scaffold\n return rgb",
"def generate_random_colours_list(rng: random.Random, size: int) -> List[TupleInt3]:\n return [random_colour(rng) for _ in range(size)]",
"def random_color():\n\n rgbl=[255,0,0]\n random.shuffle(rgbl)\n return tuple(rgbl)",
"def new_color(self):\n o = []\n\n for i in self.color:\n \n i *= 255\n \n if i < 5:\n i += randint(10, 25)\n elif i > 250:\n i -= randint(10, 25)\n else:\n i += randint(-20, 25)\n\n o.append(i / 255)\n\n self.color = tuple(o)",
"def random_shape(height, width):\n # Shape\n shape = random.choice([\"square\", \"circle\", \"triangle\"])\n # Color\n color = tuple([random.randint(0, 255) for _ in range(3)])\n # Center x, y\n buffer = 20\n y = random.randint(buffer, height - buffer - 1)\n x = random.randint(buffer, width - buffer - 1)\n # Size\n s = random.randint(buffer, height // 4)\n return shape, color, (x, y, s)",
"def rand_color():\r\n R = random.random()\r\n G = random.random()\r\n B = random.random()\r\n tt.color(R, G, B)",
"def createPickColor():\n color_list = []\n\n for i in range(50, 450, 100): #Create the 4 shapes to show colors\n point1 = g.Point(50, i)\n point2 = g.Point(100, i+50)\n shape = g.Rectangle(point1, point2)\n color_list.append(shape)\n\n #Set the right colors\n color_list[0].setFill(\"Blue\")\n color_list[1].setFill(\"Green\")\n color_list[2].setFill(\"Yellow\")\n color_list[3].setFill(\"Red\")\n\n return color_list",
"def randomcolour(self):\n r = random.randrange(1, 255)\n g = random.randrange(1, 255)\n b = random.randrange(1, 255)\n self.colour((r,g,b))",
"def randomXY(size, XYdens=[25,3],XYrand=[1,1]):\n size_y, size_x = size\n x_gap = size_x//XYdens[0]\n y_gap = size_y//XYdens[1]\n XYmap = []\n for i in range(int(XYdens[1])):\n for j in range(int(XYdens[0])):\n XYmap.append([j*x_gap,i*y_gap])\n \n rand = np.random.rand(int(XYdens[0])*int(XYdens[1]),2)*XYrand*[x_gap,y_gap]\n XYmap = XYmap + rand\n return XYmap.tolist()",
"def randcolor():\r\n r = random(0.0, 1.0)\r\n g = random(0.0, 1.0)\r\n b = random(0.0, 1.0)\r\n return vec(r, g, b) # A color is a three-element vec\r",
"def _genRandomColor():\n b = random.randint(0, 255)\n g = random.randint(0, 255)\n r = random.randint(0, 255)\n return (b, g, r)",
"def test_color_gen(self):\n color_array = color_gen(10)\n self.assertEqual(len(color_array), 10)",
"def generateWorld(var_size):\n return [[int(round(random.random())) for x in range(var_size)] for y in range(var_size)]",
"def rand_shape(\n x_offset: float,\n y_offset: float,\n min_radius: float,\n max_radius: float,\n) -> List[int]:\n points = []\n angle_offset = random.random() * 2 * math.pi\n angle = 0\n while angle < 2 * math.pi:\n magnitude = random.uniform(min_radius, max_radius)\n x = math.cos(angle + angle_offset) * magnitude + x_offset\n y = math.sin(angle + angle_offset) * magnitude + y_offset\n points.append((x, y))\n angle += random.uniform(RAND_MIN_POINT_GEN_STEP_RADS,\n RAND_MAX_POINT_GEN_STEP_RADS)\n return points",
"def makeRandomGradient(size):\n\tx, y = np.meshgrid(np.linspace(0, 1, size[1]), np.linspace(0, 1, size[0]))\n\tgrad = x * np.random.uniform(-1, 1) + y * np.random.uniform(-1, 1)\n\tgrad = (grad - grad.mean()) / grad.std()\n\treturn grad"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds a dihedral angle adjacent to the selected atoms that includes a new atom | def _find_dihedral(selected):
atom_name = lambda atom: atom.fullName()
atom_mass = lambda atom: atom.mass()
# Loop over possible nearest neighbors
for a2 in selected:
# Find the new atom
attached_to_a2 = sorted([a for a in a2.bondedTo() \
if a not in selected], key=atom_name)
for a1 in sorted(attached_to_a2, key=atom_mass, reverse=True):
# Find the third atom
attached_to_a3 = sorted([a for a in a2.bondedTo() \
if (a in selected) and (a!=a1)], key=atom_name)
for a3 in sorted(attached_to_a3, key=atom_mass, reverse=True):
# Find the last atom
attached_to_a4 = sorted([a for a in a3.bondedTo() \
if (a in selected) and (a!=a2)], key=atom_name)
for a4 in sorted(attached_to_a4, key=atom_mass, reverse=True):
return (a1, a2, a3, a4)
print('Selected atoms:', selected)
raise Exception('No new dihedral angle found!') | [
"def dihedral_calculator():\n\n\t# Prime with first 3 points\n\tp1 = Vector3((yield None))\n\tp2 = Vector3((yield None))\n\tp3 = Vector3((yield None))\n\n\t# Set up for first angle\n\tlastpoint = p3\n\tlastdisp = p3 - p2\n\tlastnormal = ((p2 - p1) @ lastdisp).normalize()\n\n\tangle = None\n\n\t# For each point starting with the 4th, we can compute a new angle\n\twhile True:\n\n\t\t# Yield the last angle (None the first time), get the next point\n\t\tnextpoint = Vector3((yield angle))\n\n\t\t# Displacement from previous point to current\n\t\tnextdisp = nextpoint - lastpoint\n\n\t\t# Normal vector to plane containing last 3 points\n\t\tnextnormal = (lastdisp @ nextdisp).normalize()\n\n\t\t# This one's complicated... see step 3 in source.\n\t\tx = lastnormal * nextnormal\n\t\ty = (lastnormal @ lastdisp.normalize()) * nextnormal\n\t\tangle = -math.atan2(y, x)\n\n\t\t# Current values used as previous in next loop\n\t\tlastpoint = nextpoint\n\t\tlastdisp = nextdisp\n\t\tlastnormal = nextnormal",
"def get_angles_and_dihedrals(atoms):\n angles = []\n for center in atoms:\n if len(center.bonded) < 2:\n continue\n for i, a in enumerate(center.bonded):\n for b in center.bonded[i + 1:]:\n A = math.sqrt((center.z - b.z)**2 +\n (center.x - b.x)**2 +\n (center.y - b.y)**2)\n N = math.sqrt((a.z - b.z)**2 +\n (a.x - b.x)**2 +\n (a.y - b.y)**2)\n B = math.sqrt((center.z - a.z)**2 +\n (center.x - a.x)**2 +\n (center.y - a.y)**2)\n try:\n theta = 180 / math.pi * math.acos((A**2 + B**2 - N**2) /\n (2 * A * B))\n except:\n theta = 0.0\n angles.append(structures.Angle(a, center, b, theta=theta))\n\n # Updated to provide deterministic dihedral order with the same\n # time complexity\n dihedral_list = []\n dihedral_set = {}\n for angle in angles:\n for a in angle.atoms[0].bonded:\n if a is angle.atoms[1]:\n continue\n dihedral = (a,) + angle.atoms\n if tuple(reversed(dihedral)) not in dihedral_set:\n dihedral_set[dihedral] = True\n dihedral_list.append(dihedral)\n\n for b in angle.atoms[2].bonded:\n if b is angle.atoms[1]:\n continue\n dihedral = angle.atoms + (b,)\n if tuple(reversed(dihedral)) not in dihedral_set:\n dihedral_set[dihedral] = True\n dihedral_list.append(dihedral)\n dihedral_list = reduce_list(dihedral_list)\n dihedrals = [structures.Dihedral(*d) for d in dihedral_list]\n\n return angles, dihedrals",
"def optimize_dihedral(self, atom1, atom2, atom3, atom4, step=10):\n self._check_atom_number(atom1)\n self._check_atom_number(atom2)\n self._check_atom_number(atom3)\n self._check_atom_number(atom4)\n\n best_angle = 0\n best_dist = 0\n\n for angle in range(0, 360, step):\n self.set_dihedral(atom1, atom2, atom3, atom4, angle)\n if self.rms_distance_between_atoms() > best_dist:\n best_dist = self.rms_distance_between_atoms()\n best_angle = angle\n\n self.set_dihedral(atom1, atom2, atom3, atom4, best_angle)\n return best_angle",
"def testDihedralAngles(self):\n a1 = self.m.GetAtom(0)\n a2 = self.m.GetAtom(1)\n a3 = self.m.GetAtom(2)\n a4 = self.m.GetAtom(3)\n a5 = self.m.GetAtom(5)\n\n # Check for a dihedral angle that doesn't exist\n da = self.m.FindDihedralAngle(a1, a2, a3, a4)\n self.assertTrue(da is None)\n\n # Make a da and try to find it\n self.m.AddDihedralAngle(a1, a2, a3, a4, 90, 0, 0)\n da1 = self.m.FindDihedralAngle(a1, a2, a3, a4)\n da2 = self.m.FindDihedralAngle(a1, a2, a3, a4)\n self.assertTrue(da1 is not None)\n self.assertEqual(da1.GetName(), da2.GetName())\n\n\n # Remove an atom, the dihedral angle should disappear as well.\n self.m.RemoveAtom(a1)\n da4 = self.m.FindDihedralAngle(a2, a1, a3, a4)\n self.assertTrue(da4 is None)\n\n # Try to find a dihedral angle from an atom outside of the molecule.\n m = makeC60().GetScatterer(\"c60\")\n b1 = m.GetAtom(0)\n b2 = m.GetAtom(1)\n b3 = m.GetAtom(1)\n b4 = m.GetAtom(1)\n da5 = self.m.FindDihedralAngle(b1, b2, b3, b4)\n self.assertTrue(da5 is None)\n\n # make a good dihedral angle\n da6 = self.m.AddDihedralAngle(a2, a3, a4, a5, 5, 0, 0)\n da7 = self.m.GetDihedralAngle(0)\n self.assertEqual(da6.GetName(), da7.GetName())\n\n # Delete some dihedral angles and see what happens\n name = da6.GetName()\n del da6\n del da7\n da8 = self.m.GetDihedralAngle(0)\n self.assertEqual(name, da8.GetName())\n\n # Try to get a dihedral angle that doesn't exist by index\n self.assertRaises(IndexError, self.m.GetDihedralAngle, 1)\n\n # Remove the dihedral angle\n angles = self.m.GetDihedralAngleList()\n self.assertEquals(1, len(angles))\n self.m.RemoveDihedralAngle(angles[0])\n # is the object still in existance?\n self.assertEqual(name, da8.GetName())\n # Can we get it from the engine?\n self.assertRaises(IndexError, self.m.GetDihedralAngle, 0)\n da9 = self.m.FindDihedralAngle(a2, a3, a4, a5)\n self.assertTrue(da9 is None)\n\n # make a good dihedral angle again\n da10 = self.m.AddDihedralAngle(a2, a3, a4, a5, 5, 0, 0)\n # Get an atom from that\n a = da10.GetAtom1()\n # Try to remove that atom\n self.m.RemoveAtom(a)\n self.assertEquals(0, self.m.GetNbDihedralAngles())\n\n return",
"def get_dihedral_angles(self):\n mol = self.m\n c1 = mol.GetConformer(-1)\n torsma = '[!$(*#*)&!D1]~[!$(*#*)&!D1]'\n q = Chem.MolFromSmarts(torsma)\n matches = mol.GetSubstructMatches(q)\n nmat = len(matches)\n dic = {}\n for match in matches:\n j = match[0]\n k = match[1]\n bond = mol.GetBondBetweenAtoms(j, k)\n aj = mol.GetAtomWithIdx(j)\n ak = mol.GetAtomWithIdx(k)\n hj, hk = [ _hyb[_a.GetHybridization()] for _a in [aj,ak] ]\n iok1 = ( hj not in [2,3] )\n iok2 = ( hk not in [2,3] )\n if iok1 or iok2: continue\n for b1 in aj.GetBonds():\n if (b1.GetIdx() == bond.GetIdx()):\n continue\n i = b1.GetOtherAtomIdx(j)\n for b2 in ak.GetBonds():\n if (b2.GetIdx() == bond.GetIdx()) or (b2.GetIdx() == b1.GetIdx()):\n continue\n l = b2.GetOtherAtomIdx(k)\n # skip 3-membered rings\n if (l == i):\n continue\n _dang = rdMolTransforms.GetDihedralDeg(c1, i,j,k,l)\n dang = abs(_dang)\n assert dang <= 180.0\n ias4 = (i,j,k,l)\n if not self.wH:\n if np.any([ self.zs[iaa]==1 for iaa in ias4 ]):\n continue\n if self.key in ['z']:\n #print('atsi=',ias4, 'zsi=', [_zs[iaa] for iaa in ias4])\n zi,zj,zk,zl = [ self.zs[iaa] for iaa in ias4 ]\n if (zj==zk and zi>zl) or (zj>zk):\n ias4 = (l,k,j,i)\n #torsions.append(ias4)\n #_zi,_zj,_zk,_zl = [ zs[_] for _ in ias4 ]\n #typez = '%d-%d-%d-%d'%(_zi,_zj,_zk,_zl)\n type4 = tuple([self.zs[iaa] for iaa in ias4])\n if type4 in list(dic.keys()):\n dic[type4] += [dang]\n else:\n dic[type4] = [dang]\n elif self.key in ['ia','i']:\n type4 = ias4\n dic[type4] = dang\n else:\n raise Exception('#unknown key')\n return dic",
"def set_dihedral(self, atom1=None, atom2=None, atom3=None, atom4=None, dihedral=None, move=\"group34\", check_result=True, atoms=None):\n\n if (atom1 is None) and (atom2 is None) and (atom3 is None) and (atom4 is None):\n assert isinstance(atoms, (list, np.ndarray)), \"atom numbers need to come from fields or list!\"\n assert len(atoms) == 4, \"need 4 atom numbers to set dihedral\"\n atom1 = atoms[0]\n atom2 = atoms[1]\n atom3 = atoms[2]\n atom4 = atoms[3]\n\n assert isinstance(dihedral, (float, int, np.number)), \"need angle to set dihedral angle\"\n\n # check atom numbers\n self._check_atom_number(atom1)\n self._check_atom_number(atom2)\n self._check_atom_number(atom3)\n self._check_atom_number(atom4)\n\n # check there is bond connectivity information\n assert len(self.bonds) > 0, \"no bond connectivity information\"\n\n # check for collinearity\n angle = self.get_angle(atom1, atom2, atom3, check=False)\n assert 0.0001 < angle < 179.9999, f\"1/2/3 atoms {atom1}-{atom2}-{atom3} are collinear (angle={angle:.8f})\"\n angle = self.get_angle(atom2, atom3, atom4, check=False)\n assert 0.0001 < angle < 179.9999, f\"2/3/4 atoms {atom2}-{atom3}-{atom4} are collinear (angle={angle:.8f})\"\n\n for x in [atom1, atom2, atom3, atom4]:\n for y in [atom1, atom2, atom3, atom4]:\n if x <= y:\n continue\n else:\n if self.get_sq_distance(x, y, check=False) < 0.001:\n raise ValueError(f\"atom {x} and atom {y} are too close!\")\n\n try:\n dihedral = float(dihedral)\n except Exception as e:\n raise TypeError(f\"dihedral angle {dihedral} cannot be converted to float!\")\n\n if (not isinstance(dihedral, float)) or ((dihedral < 0) or (dihedral > 360)):\n raise ValueError(f\"invalid value {dihedral} for dihedral angle!\")\n\n atoms_to_move = []\n if move == \"group34\":\n #### add atom3's fragment to atom4\n if self.get_bond_order(atom2, atom3):\n _, atoms_to_move = self._get_bond_fragments(atom2, atom3)\n elif self.are_connected(atom2, atom3):\n raise ValueError(\n f\"atom {atom2} and atom {atom3} are connected but not bonded -- cannot adjust dihedral angle! try manually removing one or more bonds.\"\n )\n else:\n atoms_to_move = self._get_fragment_containing(atom3)\n\n #### and make sure atom4 is in there too!\n if atom4 not in atoms_to_move:\n atoms_to_move += self._get_fragment_containing(atom4)\n elif move == \"group4\":\n if self.get_bond_order(atom3, atom4):\n _, atoms_to_move = self._get_bond_fragments(atom3, atom4)\n elif self.are_connected(atom3, atom4):\n raise ValueError(\n f\"atom {atom3} and atom {atom4} are connected but not bonded -- cannot adjust dihedral angle! try manually removing one or more bonds.\"\n )\n else:\n atoms_to_move = self._get_fragment_containing(atom4)\n elif move == \"atom\":\n atoms_to_move = [atom4]\n else:\n raise ValueError(f\"Invalid option {move} for parameter 'move'!\")\n\n if atom1 in atoms_to_move:\n raise ValueError(\n f\"atom {atom1} and atom {atom4} are connected in multiple ways -- cannot adjust dihedral angle! try manually removing one or more bonds.\"\n )\n\n if atom2 in atoms_to_move:\n raise ValueError(\n f\"atom {atom2} and atom {atom4} are connected in multiple ways -- cannot adjust dihedral angle! try manually removing one or more bonds.\"\n )\n\n if atom4 not in atoms_to_move:\n raise ValueError(f\"atom {atom4} is not going to be moved... 
this operation is doomed to fail!\")\n\n current_dihedral = self.get_dihedral(atom1, atom2, atom3, atom4, check=False)\n delta = (dihedral - current_dihedral) % 360\n\n if np.abs(delta) < 0.001:\n return self\n\n #### now the real work begins...\n #### move everything to place atom2 at the origin\n v3 = self.get_vector(atom3, check=False)\n self.translate_molecule(-v3)\n\n #### perform the actual rotation\n rot_matrix = compute_rotation_matrix(-self.get_vector(atom2, check=False), delta)\n\n for atom in atoms_to_move:\n self.geometry[atom] = np.dot(rot_matrix, self.get_vector(atom, check=False))\n\n #### and move it back!\n self.translate_molecule(v3)\n\n if check_result:\n final_dihedral = self.get_dihedral(atom1, atom2, atom3, atom4, check=False)\n\n #### need to compare cosines to prevent insidious phase difficulties (like 0.00 and 359.99)\n #### this will throw ValueError for differences of about 2 degrees\n if np.abs(math.cos(math.radians(final_dihedral)) - math.cos(math.radians(dihedral))) > 0.001:\n raise ValueError(f\"Error rotating atoms -- expected dihedral angle {dihedral}, got {final_dihedral} -- operation failed!\")\n\n return self",
"def dihedral_angle(a, b, c, d):\n \n v = b - c\n m = numpy.cross((a - b), v)\n m /= norm(m)\n n = numpy.cross((d - c), v)\n n /= norm(n)\n\n c = numpy.dot(m, n)\n s = numpy.dot(numpy.cross(n, m), v) / norm(v)\n \n angle = math.degrees(math.atan2(s, c)) \n\n if angle > 0:\n return numpy.fmod(angle + 180, 360) - 180\n else:\n return numpy.fmod(angle - 180, 360) + 180",
"def calc_dihedral(self, atom1, atom2, atom3, atom4):\n # Vectors between 4 atoms\n b1 = self.atomcoords[atom2] - self.atomcoords[atom1]\n b2 = self.atomcoords[atom2] - self.atomcoords[atom3]\n b3 = self.atomcoords[atom4] - self.atomcoords[atom3]\n\n # Normal vector of plane containing b1,b2\n n1 = np.cross(b1, b2)\n un1 = n1 / norm(n1)\n\n # Normal vector of plane containing b1,b2\n n2 = np.cross(b2, b3)\n un2 = n2 / norm(n2)\n\n # un1, ub2, and m1 form orthonormal frame\n ub2 = b2 / norm(b2)\n um1 = np.cross(un1, ub2)\n\n # dot(ub2, n2) is always zero\n x = np.dot(un1, un2)\n y = np.dot(um1, un2)\n\n dihedral = np.arctan2(y, x)*(180.0/np.pi)\n if dihedral < 0:\n dihedral = 360.0 + dihedral\n return dihedral",
"def single_frame_dihedral_calc(ag, dih_list):\n TPA = u.selectAtoms(\"resname TPA\")\n #Select the 1st, 2nd, 3rd and 4th atoms in the dihedral groups\n r1_TPA = TPA[dih_list[:,0]].positions\n r2_TPA = TPA[dih_list[:,1]].positions\n r3_TPA = TPA[dih_list[:,2]].positions\n r4_TPA = TPA[dih_list[:,3]].positions\n\n\n dih_angle = dihedral_calc(r1_TPA, r2_TPA, r3_TPA, r4_TPA)\n dih_angle_deg = np.rad2deg(dih_angle) \n \n for angle in range(np.shape(dih_angle_deg)[0]):\n dih_file.write(str(dih_angle_deg[angle]) + \" \")\n \n dih_file.write(\"\\n\") \n\n return dih_angle_deg",
"def sp2_dihedrals(atoms):\n\n #problems with atoms inbuilt dihedral method (doesn't match gaussview/jmol at all)\n #so we'll use one taken from http://stackoverflow.com/questions/20305272/dihedral-torsion-angle-from-four-points-in-cartesian-coordinates-in-python\n def get_dihedral(p):\n b = p[:-1] - p[1:]\n b[0] *= -1\n v = np.array([v - (v.dot(b[1])/b[1].dot(b[1])) * b[1] for v in [b[0], b[2]]])\n # Normalize vectors\n v /= np.sqrt(np.einsum('...i,...i', v, v)).reshape(-1,1)\n b1 = b[1] / np.linalg.norm(b[1])\n x = np.dot(v[0], v[1])\n m = np.cross(v[0], b1)\n y = np.dot(m, v[1])\n return np.degrees(np.arctan2(y, x))\n\n mol = to_molmod(atoms)\n data = []\n\n for i in range(len(atoms)):\n if len(mol.graph.neighbors[i]) == 3:\n atom_indices = [i] + list(mol.graph.neighbors[i])\n atom_positions = np.array([atoms[temp_index].position for temp_index in atom_indices])\n #dihedral = atoms.get_dihedral(atom_indices)\n dihedral = get_dihedral(atom_positions)\n result = (i, dihedral)\n data.append(result)\n\n return data",
"def _infer_dihedral(a2, a3=None):\n if a3 is None: # assume bond-like\n bond = a2\n a2, a3 = bond.a1, bond.a2\n a1 = _pick_atom(a2, a3)\n a4 = _pick_atom(a3, a2)\n return a1, a2, a3, a4",
"def calc_bond_dihedral(at1, at2, at3, at4):\n abv = at1.coord - at2.coord\n cbv = at3.coord - at2.coord\n dbv = at4.coord - at3.coord\n uvec = np.cross(abv, cbv)\n vvec = np.cross(dbv, cbv)\n wvec = np.cross(uvec, vvec)\n angle_uv = _calc_v_angle(uvec, vvec)\n if norm(wvec) == 0.:\n angle_cbw = 0.\n else:\n angle_cbw = _calc_v_angle(cbv, wvec)\n try:\n if angle_cbw > 0.001:\n angle_uv = -angle_uv\n except ZeroDivisionError:\n pass\n return angle_uv",
"def addDihedral(self, atom):\n self.dihedralatoms.append(atom)",
"def calculate_dihedral_angles(mol, dihedral_atom_sets):\n\n # Create list for the dihedrals (to be ordered in the same order as the input dihedral sets)\n dihedral_angles = []\n # Now calculate the dihedral angles between the sets identified previously\n conf = mol.GetConformer()\n # Loop through the angles => 2-3 is the rotatable bonds, 1,4 are the neighbours of 2,3 respectively\n for at1, at2, at3, at4 in dihedral_atom_sets:\n # Get the coordinates of the positions\n pos1 = conf.GetAtomPosition(at1)\n pos2 = conf.GetAtomPosition(at2)\n pos3 = conf.GetAtomPosition(at3)\n pos4 = conf.GetAtomPosition(at4)\n # Need to calculate three vectors 1->2, 2->3, 3->4\n vec1 = pos2 - pos1\n vec2 = pos3 - pos2\n vec3 = pos4 - pos3\n # Get the normals to the two planes (vec1-vec2 plane and vec2-vec3 plane))\n cross12 = vec1.CrossProduct(vec2)\n cross23 = vec2.CrossProduct(vec3)\n # Normalise the normals\n cross12.Normalize()\n cross23.Normalize()\n # Calculate dot-product and then inverse cosine to get the angle\n dot_prod = cross12.DotProduct(cross23)\n dihedral_rad = math.acos(dot_prod)\n dihedral_deg = 180*dihedral_rad/math.pi\n dihedral_angles.append(dihedral_deg)\n return dihedral_angles",
"def get_bond_angle(mol, r_group, debug=False):\n\n # create necessary variables\n r_group_map = create_r_group_map(mol)\n dm = Draw.PrepareMolForDrawing(mol)\n\n # get r group atom id\n atom_id = r_group_map[str(r_group)]\n atom = mol.GetAtomWithIdx(atom_id)\n\n # find the coordinates of the atom\n atom_pos = Geometry.Point2D(dm.GetConformer().GetAtomPosition(atom_id))\n\n # get the atoms neighbours, should only be one\n neighbours = atom.GetNeighbors()\n if len(neighbours) != 1:\n print(f\"Warning - atom {atom_id} has {len(neighbours)} neighbours\")\n\n # get the coordinates of the neighbour\n first_neighbour = neighbours[0]\n first_neighbour_id = first_neighbour.GetIdx()\n neighbour_pos = Geometry.Point2D(dm.GetConformer().GetAtomPosition(first_neighbour_id))\n\n # calculate the vector for the bond between the atoms, using the coordinates\n direction_vector = Geometry.Point2D.DirectionVector(atom_pos, neighbour_pos)\n\n # angle of vector - taken from https://stackoverflow.com/questions/6247153/angle-from-2d-unit-vector\n angle = math.atan2(direction_vector.x, direction_vector.y ) * 180 / math.pi\n\n # print commands for debugging if its not working\n if debug == True:\n print(f\"R group: {r_group}, Atom_ID: {atom_id}\")\n print(f\"X: {atom_pos.x}, Y: {atom_pos.y}\")\n print(f\"Number of neighbours = {len(neighbours)}\")\n print(f\"Neighbour_ID: {first_neighbour_id}, Neighbour x: {neighbour_pos.x}, Neighbour y: {neighbour_pos.y}\")\n print(f\"Angle = {angle}\")\n\n return angle",
"def calc_dihedral(v1, v2, v3, v4):\n ab = v1 - v2\n cb = v3 - v2\n db = v4 - v3\n u = ab ** cb\n v = db ** cb\n w = u ** v\n angle = u.angle(v)\n # Determine sign of angle\n try:\n if cb.angle(w) > 0.001:\n angle = -angle\n except ZeroDivisionError:\n # dihedral=pi\n pass\n return angle",
"def dihedral(self,\n w: int,\n x: int,\n y: int,\n z: int) -> Angle:\n if not self._idxs_are_present(w, x, y, z):\n raise ValueError(f'Cannot calculate the dihedral angle involving '\n f'atoms {z}-{w}-{x}-{y}. At least one atom not '\n f'present')\n\n vec_xw = self.atoms[w].coord - self.atoms[x].coord\n vec_yz = self.atoms[z].coord - self.atoms[y].coord\n vec_xy = self.atoms[y].coord - self.atoms[x].coord\n\n vec1, vec2 = np.cross(vec_xw, vec_xy), np.cross(-vec_xy, vec_yz)\n\n # Normalise and ensure no zero vectors, for which the dihedral is not\n # defined\n for vec in (vec1, vec2, vec_xy):\n norm = np.linalg.norm(vec)\n\n if np.isclose(norm, 0.0):\n raise ValueError(f'Cannot calculate the dihedral angle '\n f'{z}-{w}-{x}-{y} - one zero vector')\n vec /= norm\n\n \"\"\"\n Dihedral angles are defined as from the IUPAC gold book: \"the torsion \n angle between groups A and D is then considered to be positive if \n the bond A-B is rotated in a clockwise direction through less than\n 180 degrees\"\n \"\"\"\n value = -np.arctan2(np.dot(np.cross(vec1, vec_xy), vec2),\n np.dot(vec1, vec2))\n\n return Angle(value)",
"def compute_torsion(self, prev_residue, next_residue, strict=True): \n if prev_residue is None and next_residue is None:\n raise ValueError('At least one neighboring residue is required to compute the torsion.')\n \n angles = TorsionAngles(None, None, None, units=AngleUnits.Degrees)\n \n for residue in (self, prev_residue, next_residue):\n if residue is not None and not residue.has_structure:\n if strict:\n raise Missing3DStructureError(repr(residue))\n elif residue is self:\n return angles\n \n try:\n n = self._atoms['N'].vector\n ca = self._atoms['CA'].vector\n c = self._atoms['C'].vector\n except csb.core.ItemNotFoundError as missing_atom:\n if strict:\n raise Broken3DStructureError('Could not retrieve {0} atom from the current residue {1!r}.'.format(\n missing_atom, self))\n else:\n return angles\n \n try:\n if prev_residue is not None and prev_residue.has_structure:\n prev_c = prev_residue._atoms['C'].vector\n angles.phi = csb.numeric.dihedral_angle(prev_c, n, ca, c)\n except csb.core.ItemNotFoundError as missing_prevatom:\n if strict:\n raise Broken3DStructureError('Could not retrieve {0} atom from the i-1 residue {1!r}.'.format(\n missing_prevatom, prev_residue)) \n try:\n if next_residue is not None and next_residue.has_structure: \n next_n = next_residue._atoms['N'].vector\n angles.psi = csb.numeric.dihedral_angle(n, ca, c, next_n)\n next_ca = next_residue._atoms['CA'].vector\n angles.omega = csb.numeric.dihedral_angle(ca, c, next_n, next_ca)\n except csb.core.ItemNotFoundError as missing_nextatom:\n if strict:\n raise Broken3DStructureError('Could not retrieve {0} atom from the i+1 residue {1!r}.'.format(\n missing_nextatom, next_residue)) \n \n return angles",
"def addDihedralBond(a1, a2, length, angleInfo, dihedInfo):\n\n\tif a1.molecule == a2.molecule:\n\t\traise ValueError(\"Atoms to be bonded must be in different models\")\n\n\t# first, get the distance correct\n\tfrom chimera import Xform, cross, angle, Point\n\tdvector = a1.xformCoord() - a2.xformCoord()\n\tdvector.length = dvector.length + length\n\topenState = a2.molecule.openState\n\topenState.globalXform(Xform.translation(dvector))\n\n\t# then angle\n\tif angleInfo:\n\t\tatoms, angleVal = angleInfo\n\t\tp1, p2, p3 = [a.xformCoord() for a in atoms]\n\t\taxis = cross(p1-p2, p2-p3)\n\t\tcurAngle = angle(p1, p2, p3)\n\t\tdelta = angleVal - curAngle\n\t\tv2 = p2 - Point(0.0, 0.0, 0.0)\n\t\ttrans1 = Xform.translation(v2)\n\t\tv2.negate()\n\t\ttrans2 = Xform.translation(v2)\n\t\ttrans1.multiply(Xform.rotation(axis, delta))\n\t\ttrans1.multiply(trans2)\n\t\topenState.globalXform(trans1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Conversion from (internal or extended) BondAngleTorsion to Cartesian coordinates | def Cartesian(self, BAT):
# Arrange BAT coordinates in convenient arrays
offset = 6 if len(BAT) == (3 * self.natoms) else 0
bonds = BAT[offset + 3::3]
angles = BAT[offset + 4::3]
phase_torsions = BAT[offset + 5::3]
torsions = [(phase_torsions[n] + phase_torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else phase_torsions[n] \
for n in range(self.ntorsions)]
p1 = np.array([0., 0., 0.])
p2 = np.array([0., 0., BAT[offset]])
p3 = np.array([BAT[offset+1]*np.sin(BAT[offset+2]), 0., \
BAT[offset]-BAT[offset+1]*np.cos(BAT[offset+2])])
# If appropriate, rotate and translate the first three atoms
if offset == 6:
# Rotate the third atom by the appropriate value
(phi, theta, omega) = BAT[3:6]
co = np.cos(omega)
so = np.sin(omega)
Romega = np.array([[co, -so, 0], [so, co, 0], [0, 0, 1]])
p3 = Romega.dot(p3)
# Rotate the second two atoms to point in the right direction
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Re = np.array([[cp * ct, -sp, cp * st], [ct * sp, cp, sp * st],
[-st, 0, ct]])
p2 = Re.dot(p2)
p3 = Re.dot(p3)
# Translate the first three atoms by the origin
origin = np.array(BAT[:3])
p1 += origin
p2 += origin
p3 += origin
XYZ = np.zeros((self.natoms, 3))
XYZ[self.rootInd[0]] = p1
XYZ[self.rootInd[1]] = p2
XYZ[self.rootInd[2]] = p3
for ((a1,a2,a3,a4), bond, angle, torsion) in \
zip(self._torsionIndL,bonds,angles,torsions):
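# Place atom a1 at the intersection of a sphere of radius `bond` about a2,
# a cone of half-angle `angle` about the a2->a3 axis, and the plane through
# a2, a3, a4, then rotate it about the a2-a3 axis by the torsion angle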
sphere = Sphere(Vector(XYZ[a2]), bond)
cone = Cone(Vector(XYZ[a2]), Vector(XYZ[a3] - XYZ[a2]), angle)
plane123 = Plane(Vector(XYZ[a4]), Vector(XYZ[a3]), Vector(XYZ[a2]))
points = sphere.intersectWith(cone).intersectWith(plane123)
p = points[0] if (Plane(Vector(XYZ[a3]), Vector(
XYZ[a2]), points[0]).normal * plane123.normal) > 0 else points[1]
p = rotatePoint(Vector(p),
Line(Vector(XYZ[a2]), Vector(XYZ[a2] - XYZ[a3])),
torsion)
XYZ[a1] = p.array
return XYZ
for ((a1,a2,a3,a4), bond, angle, torsion) in \
zip(self._torsionIndL,bonds,angles,torsions):
p2 = XYZ[a2]
p3 = XYZ[a3]
p4 = XYZ[a4]
# circle = sphere.intersectWith(cone)
n23 = normalize(p3 - p2)
# points = circle.intersectWith(plane123)
# plane.intersectWith(Plane(circle.center, circle.normal)) is a line
# line_direction = cross(normalize(cross(p4-p3,n23)),n23)
# Rotate the point about the p2-p3 axis by the torsion angle
v21 = (bond * np.cos(angle)) * n23 - (bond * np.sin(angle)) * cross(
normalize(cross(p4 - p3, n23)), n23)
s = np.sin(torsion)
c = np.cos(torsion)
XYZ[a1] = p2 - cross(n23, v21) * s + np.sum(
n23 * v21) * n23 * (1.0 - c) + v21 * c | [
"def cartesian2polar(cartesian):\n cartesian = np.array(cartesian).squeeze()\n x, y = cartesian\n r = np.linalg.norm([x, y])\n azimuth = np.arctan2(y, x)\n return np.array([r, azimuth])",
"def to_cartesian(self):\n w = 1.73205 # sqrt(3)\n h = 2\n dx = 0.5 * w if self.y % 2 == 1 else 0\n x = 0.5 * w + self.x * w + dx\n y = 0.5 * h + 0.75 * self.y * h\n return (x, y)",
"def to_cartesian(self):\n\n if self.cartesian is None:\n theta = math.radians(self.lat)\n phi = math.radians(self.long)\n x = R_EARTH * math.cos(theta) * math.cos(phi)\n y = R_EARTH * math.cos(theta) * math.sin(phi)\n z = R_EARTH * math.sin(theta)\n self.cartesian = CartesianPoint(x, y, z)\n return self.cartesian",
"def polar2cartesian(polar):\n polar = np.array(polar).squeeze()\n r, azimuth = polar\n x = r * np.cos(azimuth)\n y = r * np.sin(azimuth)\n return np.array([x, y])",
"def cartesian_coordinates(self):\n # extract RA items\n ra_hours, ra_minutes, ra_seconds = RA_RE.match(str(self.ra)).groups()\n # then cast\n ra_hours = int(ra_hours)\n ra_minutes = int(ra_minutes)\n ra_seconds = float(ra_seconds)\n\n # extract DEC items\n dec_sign, dec_degrees, dec_minutes, dec_seconds = DEC_RE.match(str(self.dec)).groups()\n # then cast\n dec_sign = -1 if dec_sign == '-' else 1\n dec_degrees = int(dec_degrees)\n dec_minutes = int(dec_minutes)\n dec_seconds = float(dec_seconds)\n\n # to degrees\n a = (ra_hours*15) + (ra_minutes*0.25) + (ra_seconds*0.004166)\n b = abs(dec_degrees + dec_minutes/60 + dec_seconds/3600) * dec_sign\n\n # to radians\n a = math.radians(a)\n b = math.radians(b)\n\n distance = float(self.distance)\n\n x = (distance * math.cos(b)) * math.cos(a)\n y = (distance * math.cos(b)) * math.sin(a)\n z = distance * math.sin(b)\n\n return x, y, z",
"def _position_cylindrical2cartesian(pos):\n \n rho=pos[:,0]\n theta=pos[:,1]\n z=pos[:,2]\n\n x=rho*np.cos(theta)\n y=rho*np.sin(theta)\n z=z\n\n return np.dstack((x,y,z))[0]",
"def toCartesian(ra, dec):\n x = cos(radians(ra)) * sin(radians(90 - dec))\n y = sin(radians(ra)) * sin(radians(90 - dec))\n z = cos(radians(90 - dec))\n return x,y,z",
"def cartesianToPolar(x,y):\n r = np.sqrt(x**2 + y**2)\n theta = np.arctan2(y,x)\n\n return r,theta",
"def cartesian(self):\n radius = self.magnitude()\n cos_phi = self.__cos(self.phi())\n sin_phi = self.__sin(self.phi())\n cos_theta = self.__cos(self.theta())\n sin_theta = self.__sin(self.theta())\n x = radius*sin_phi*cos_theta\n y = radius*sin_phi*sin_theta\n z = radius*cos_phi\n return [x, y, z]",
"def angles_from_cartesian(self, x_mm, y_mm, z_mm):\n print(\"< Calculating Cylindrical Coordinates from Cartesian... >\")\n # TODO prevent use of coordinates out of range\n # calculate radius\n radius = math.sqrt(x_mm * x_mm + z_mm * z_mm)\n print(\"Radius: \" + str(radius))\n # calculate theta\n theta = 0\n if x_mm > 0:\n theta += math.atan(z_mm / x_mm) * self.rad_to_deg # theta += arcsin(opposite / adjacent)\n else:\n theta = 90 - math.atan(x_mm / z_mm) * self.rad_to_deg\n print(\" Theta: \" + str(theta))\n # print height\n print(\"Height: \" + str(y_mm))\n # now use cylindrical function\n print(\"< Done >\")\n return self.angles_from_cylindrical(radius, theta, y_mm)",
"def _position_cartesian2cylindrical(pos):\n\n \n #save cartesian position of each particle\n x=pos[:,0]\n y=pos[:,1]\n z=pos[:,2]\n\n rho= np.sqrt(x**2+y**2)\n theta=np.arctan2(y,x)\n\n\n return np.dstack((rho,theta,z))[0]",
"def polarToCartesian(r,theta):\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n return x,y",
"def to_cartesian(r, phi):\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n return x, y",
"def azimuth_to_cartesian_angle(azimuth: float, radians: bool = False) -> float:\n cartesian_angle = (90 - azimuth) % 360\n\n if radians:\n cartesian_angle = np.deg2rad(cartesian_angle)\n\n return cartesian_angle",
"def to_cartesian(polar_vector):\n length, angle = polar_vector[0], polar_vector[1] # this is like destructuring\n return (length * cos(angle), length * sin(angle))",
"def _position_spherical2cartesian(pos):\n \n r=pos[:,0]\n theta=pos[:,1]\n phi=pos[:,2]\n\n if any(theta>np.pi) or any(theta<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. Exiting.\"\n\n\n x=r*np.sin(theta)*np.cos(phi)\n y=r*np.sin(theta)*np.sin(phi)\n z=r*np.cos(theta)\n\n return np.dstack((x,y,z))[0]",
"def cart2polar3d(cartesian):\n radius = np.linalg.norm(cartesian)\n theta = np.cos",
"def _GetArcCoords(radians):\r\n return [c[0] + r*math.cos(radians), c[1] + r*math.sin(radians)]",
"def cartesian_to_polar(x, y):\n\n r = np.sqrt(x**2 + y**2)\n thr = np.arctan2(y,x)\n\n return r, thr"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Opens the molecule in VMD | def showMolecule(self, colorBy=None, label=False, dcdFN=None):
# Write PDB file
# To set Occupancy, change atom.occupancy
# To set Beta, change atom.temperature_factor
import os.path
pdbFN = os.path.join(MMTK.Database.molecule_types.directory,
'showMolecule.pdb')
outF = MMTK.PDB.PDBOutputFile(pdbFN)
outF.write(self.molecule)
outF.close()
# Write VMD script
script = 'set ligand [mol new ' + pdbFN + ']\n'
if colorBy is not None:
script += 'mol modcolor 0 $ligand ' + colorBy + '\n'
script += 'mol modstyle 0 0 CPK 1.000000 0.300000 10.000000 10.000000\n'
if label:
script += """
proc label_atoms { molid seltext } {
set sel [atomselect $molid $seltext]
set atomlist [$sel list]
foreach {atom} $atomlist {
set atomlabel [format "%d/%d" $molid $atom]
label add Atoms $atomlabel
}
$sel delete
}
label_atoms 0 all
"""
if dcdFN is not None:
script += 'animate delete all $ligand\n'
script += 'mol addfile ' + dcdFN + ' type dcd waitfor all\n'
scriptF = open('showMolecule.vmd', 'w')
scriptF.write(script)
scriptF.close()
# Find and run vmd
import AlGDock
vmdCommand = AlGDock.findPath(AlGDock.search_paths['vmd'])
import subprocess
subprocess.call([vmdCommand, '-e', 'showMolecule.vmd'])
# Remove files
os.remove(pdbFN)
os.remove('showMolecule.vmd') | [
"def open_mdd(self):\n import webbrowser\n\n mdd, *_ = self.simulation_dir.files(\"*.mdd\")\n\n webbrowser.open(mdd.abspath())",
"def viewNMDinVMD(filename):\n\n vmd = pathVMD()\n if vmd:\n os.system('{0} -e {1}'.format(vmd, abspath(filename)))",
"def _vmd_script_molecule(mole, filename=\"molecule.xyz\"):\n output = \"# load new molecule\\n\"\n if len(mole.atom) == 0:\n raise ValueError(\"Need at least one molecule file with coordinates.\")\n atoms = mole.atom\n natoms = len(mole.atom[0:, 0])\n f = open(filename, \"w\")\n f.write(str(natoms) + \"\\n\\n\")\n for i in range(0, natoms):\n symb = str(atoms[i, 0])\n coord = \" \".join(map(str, atoms[i, 1].tolist()))\n f.write(symb + \" \" + coord + \"\\n\")\n f.close()\n output += (\n \"mol {0} {1} type {2} first 0 last -1 step 1 filebonds 1 autobonds 1 waitfor all\"\n \"\\n\".format(\"new\", filename, \"{xyz}\")\n )\n output += \"#\\n\" \"# representation of the atoms\\n\"\n output += \"mol representation CPK 1.000000 0.300000 118.000000 131.000000\\n\"\n output += (\n \"mol delrep 0 top\\n\"\n \"mol color Element\\n\"\n \"mol selection {{all}}\\n\"\n \"mol material Opaque\\n\"\n \"mol addrep top\\n\"\n \"#\\n\"\n )\n return output",
"def open_database(app):\n app.status.message(\"Opening DICOM folder..\")\n path = app.dialog.directory(\"Select a DICOM folder\")\n if path == '':\n app.status.message('') \n return\n app.status.cursorToHourglass()\n app.close()\n app.open(path)\n app.status.hide()\n app.status.cursorToNormal()",
"def open_molecule(code):\n filename = mol_file_basename(code)+'cat'\n try:\n # Has the file been stored locally?\n if filename[4:5] == '0':\n mol_path = cat_path+'jpl/'+filename\n elif filename[4:5] == '5':\n mol_path = cat_path+'koln/'+filename\n else:\n return None\n mol_fd = open(mol_path)\n except:\n # See which catalog it is in\n if filename[4:5] == '0':\n mol_path = jpl_url+filename\n elif filename[4:5] == '5':\n mol_path = koln_url+filename\n else:\n return None\n print(mol_path)\n mol_fd = urllib.request.urlopen(mol_path)\n return mol_fd",
"def OpenDicomSerie(dirname=None):\n\tglobal volume, dim_x, dim_y, dim_z, spacing, origin, CT_open, filename_CT, dir_ini\n ct_swapY, ct_swapZ = False, False\n \n\tprint 'Opening DICOM serie ... '\n\n\t# Opening file\n\tif(dirname==None):\n\t\tfile_path = tkFileDialog.askopenfilename(initialdir = dir_ini, filetypes = [('DICOM files', '*.dcm')])\n\t\tfilelist = os.listdir(os.path.dirname(file_path))\n\telse:\n\t\tfilelist = os.listdir(dirname)\n\t\tfile_path = dirname + filelist[0]\n\n\tfilename_CT = file_path\n dir_ini = str(file_path.rsplit('/', 1)[0])+'/'\n\n\t# Getting dimensions\n\tds = pydicom.read_file(file_path)\n\tsp = ds.PixelSpacing\n\tds.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian\n\tct_swapZ =(ds.ImageOrientationPatient[0:3] == [1, 0, 0])\n\tct_swapY =(ds.ImageOrientationPatient[3:6] == [0, 1, 0])\n\n dim_x = 0\n for f in filelist:\n if f.endswith(\".dcm\"): dim_x = dim_x + 1 \n\n\tdim_y, dim_z = np.shape(ds.pixel_array)[1], np.shape(ds.pixel_array)[0]\n \n\tvolume = np.zeros((dim_x, dim_y,dim_z))\n slicelocation = np.zeros(dim_x)\n\n\t# creating volume\n\tfor f,i in zip(filelist,range(dim_x)):\n\t\tif f.endswith(\".dcm\"):\n\t\t\tds = pydicom.read_file(os.path.dirname(file_path)+'/'+f)\n\t\t\tds.file_meta.transfersyntaxuid = pydicom.uid.ImplicitVRLittleEndian \n\t\t\tvolume[i,:,:] = ds.pixel_array\n\t\t\tif('slicelocation' in ds):\tslicelocation[i] = ds.SliceLocation\n\t\t\telse:\tslicelocation[i] = ds.ImagePositionPatient[2]\n \n\torder = np.argsort(slicelocation)\n slicelocation = slicelocation[order] # slicelocation is now sorted\n \n\tspacing = [float(slicelocation[1] - slicelocation[0]),float(sp[1]), float(sp[0])]\n\torigin = [float(slicelocation[0]),float(ds.ImagePositionPatient[1]),float(ds.ImagePositionPatient[0])]\n\tvolume = volume[order,:,:] # volume is now sorted\n\n\tif (\"RescaleSlope\" in ds):\tvolume = float(ds.RescaleSlope)*volume\n\tif (\"RescaleIntercept\" in ds):\tvolume = volume + float(ds.RescaleIntercept)\n\n\t# Dealing with image orientation\n print ' ct_swapY, ct_swapZ :', ct_swapY, ct_swapZ\n\tif(ct_swapY == True):\n volume = np.flip(volume,1) # flip volume, Y direction\n origin[1] = origin[1] + dim_y*spacing[1] \n if(ct_swapZ == True):\n volume = np.flip(volume,2) # flip volume, Z direction\n origin[2] = origin[2] + dim_z*spacing[2] \n if(ct_swapZ == True)and(ct_swapY == True): spacing[1], spacing[2] = spacing[2], spacing[1]\n\n\tSet_axes_lim_init()\n\tSet_scales()\n\tCT_open = True\n\tUpdate_all()\n\n\tprint(' file successfully opened!')",
"def do_open(self, args):\n \n args = self.ParseArguments(args)\n \n if len(args) == 0:\n self.perror(\"No version specified.\")\n return\n if len(args) == 1:\n self.perror(\"No freecad specified.\")\n return\n\n conn = fiepipelib.assetdata.assetdatabasemanager.GetConnection(self.GetGitWorkingAsset())\n db = self.GetMultiManager()\n man = self.GetManager(db)\n db.AttachToConnection(conn)\n \n version = self.GetItemByName(args[0], man, conn)\n \n if version == None:\n self.perror(\"Version does not exist.\")\n return\n \n if not version.FileExists():\n self.perror(\"File does not exist.\")\n return\n \n fcman = fiepipefreecad.freecad.FreeCADLocalManager(self.GetAssetShell()._localUser)\n freecads = fcman.get_by_name(args[1])\n\n if len(freecads) == 0:\n self.perror(\"No such freecad.\")\n return\n \n freecad = freecads[0]\n assert isinstance(freecad, fiepipefreecad.freecad.FreeCAD)\n freecad.LaunchInteractive(filepaths=[version.GetAbsolutePath()])",
"def open_idf(self):\n\n self.save()\n\n filepath = self.idfname\n\n import os\n import platform\n import subprocess\n\n if platform.system() == \"Darwin\": # macOS\n subprocess.call((\"open\", filepath))\n elif platform.system() == \"Windows\": # Windows\n os.startfile(filepath)\n else: # linux variants\n subprocess.call((\"xdg-open\", filepath))",
"def on_open_uv_editor():\n cmds.TextureViewWindow()",
"def open(self):\n super(Nodzgraph, self).open(dockable=self.configuration.maya.docked,\n area=self.configuration.maya.dock_area,\n allowedArea=self.configuration.maya.allowed_dock_areas,\n floating=self.configuration.maya.floating,\n width=self.configuration.maya.width,\n height=self.configuration.maya.height\n )",
"def dicom_cli():",
"def make_vmd(vmd_file, lichem_in_xyz, all_QM, out_qm_pdb):\n with open(vmd_file, \"w+\") as vmd_out:\n vmd_out.write(\"#!/usr/local/bin/vmd\\n\")\n vmd_out.write(\"# VMD Script generated for LICHEM QM region\\n\")\n vmd_out.write(\"mol new {} type xyz \\n\".format(lichem_in_xyz))\n vmd_out.write(\"set qm [atomselect top \\\"index \")\n vmd_out.write(\" \".join([str(i) for i in all_QM]))\n vmd_out.write(\" \\\"]\\n\")\n vmd_out.write(\"# Save the last frame of optimization\\n\")\n vmd_out.write(\"$qm frame last\\n\")\n vmd_out.write(\"$qm writepdb {}\\n\".format(out_qm_pdb))\n vmd_out.write(\"# Change to CPK\\n\")\n vmd_out.write(\"mol selection {index \")\n vmd_out.write(\" \".join([str(i) for i in all_QM]))\n vmd_out.write(\" }\\n\")\n vmd_out.write(\"mol representation CPK\\n\")\n vmd_out.write(\"mol addrep top\\n\")\n vmd_out.write(\"# Remove lines view\\n\")\n vmd_out.write(\"mol delrep 0 top\\n\")\n vmd_out.write(\"# Showcase QM\\n\")\n vmd_out.write(\"display resetview\\n\")\n vmd_out.close()",
"def open(self):\n super().open()\n self.filename_input_win.show()",
"def showOpenDialog(self):\n \n self.filename, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open File', '/project/avd/iris/resources/public_sample_data_1.0')\n self.clearAll()\n \n QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n self.statusBar().showMessage('Loading Cube')\n try:\n self.cubes = iris.load(self.filename)\n except ValueError as e:\n flags = QtGui.QMessageBox.StandardButton.Ok\n response = QtGui.QMessageBox.critical(self, 'Unable to Load Cube: File type could not be read', str(e), flags) \n self.statusBar().showMessage('Load Failed')\n QApplication.restoreOverrideCursor()\n QApplication.restoreOverrideCursor()\n \n for self.cube in self.cubes:\n self.selectCube.addItem(self.cube.name()) #fills the selectCube combo box with the cubes from the file\n if len(self.cubes) == 1:\n self.selectCube.setEnabled(False)\n else:\n self.selectCube.setEnabled(True)\n self.cubeLoaded = True\n \n self.printCubeBrowser.setText(str(self.cube))\n\n self.setDimensionCombos()\n self.update()",
"def _load_molecule(self):\n self.pymol = pybel.readstring(self.input_format, self.file_dic['input'])",
"def OpenDosi(filename=None):\n\tglobal dosi, spacing_dosi, dim_x_dosi, dim_y_dosi, dim_z_dosi, dosi_open, isodose_show, origin_dosi, filename_dosi\n\tdosi_swapY,dosi_swapZ = False, False\n\n\ttypes = [('All files', '*.dcm *.mhd'), ('DCM files', '*.dcm'), ('MHD files', '*.mhd')]\n\n\tif(filename==None):\tfile_path = tkFileDialog.askopenfilename(initialdir = dir_ini, filetypes = types)\n\telse:\tfile_path = filename\n\n\tfilename_dosi = file_path\n\n\tprint('Opening RD file ...')\n\n\t### .dcm file ###\n\tif(file_path.endswith('.dcm')):\n\t\tds = pydicom.read_file(file_path)\n\t\tds.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian \n\t\tscaling_dosi = float(ds.DoseGridScaling)\n\t\tdosi = scaling_dosi*ds.pixel_array\n\t\tsp = ds.PixelSpacing\n\t\tspacing_dosi = [ float(ds.GridFrameOffsetVector[1] - ds.GridFrameOffsetVector[0]), float(sp[1]),float(sp[0])]\n\t\torigin_dosi = ds.ImagePositionPatient\n\t\torigin_dosi = [float(origin_dosi[2]),float(origin_dosi[1]),float(origin_dosi[0])]\n\t\tdosi_swapZ =(ds.ImageOrientationPatient[0:3] == [1, 0, 0])\n dosi_swapY =(ds.ImageOrientationPatient[3:6] == [0, 1, 0])\n\n\t\t#if ds.SeriesDescription=='PatientLETScorer [MeV/mm/(g/cm3)]':\tSetIntensityRange(dosi,0,15)\n\n\t### .mhd file ###\n\tif(file_path.endswith('.mhd')):\t\n \t\titkimage = sitk.ReadImage(file_path) \t\t\t\t# Reads the image using SimpleITK\n \t\tdosi = sitk.GetArrayFromImage(itkimage)\n\t\tspacing_dosi = np.array(list(reversed(itkimage.GetSpacing()))) \t# Read the spacing along each dimension\n\t\torigin_dosi = np.array(list(reversed((itkimage.GetOrigin()))))\t\t# Read the origin\n\t\ttext_file = open(file_path, \"r\")\n\t\ttmp = text_file.readlines()\n\t\tdosi_swap = (tmp[8][-4:-1] == 'RAI')\n\n\tif(len(np.shape(volume))==3):\tdim_x_dosi, dim_y_dosi, dim_z_dosi = np.shape(dosi)[0], np.shape(dosi)[1], np.shape(dosi)[2]\n\n\tif(len(np.shape(volume))==2):\tdim_x_dosi, dim_y_dosi, dim_z_dosi = np.shape(dosi)[0], np.shape(dosi)[1], 1\n\n\t#print 'dosi type', dosi.dtype\n\t\n\t# Dealing with image orientation\n\tif(dosi_swapY == True):\n\t\tdosi = np.flip(dosi,1) # flip volume\n\t\torigin_dosi[1] = origin_dosi[1] + dim_y_dosi*spacing_dosi[1]\t\t\n\tif(dosi_swapZ == True):\n\t\tdosi = np.flip(dosi,2) # flip volume\n\t\torigin_dosi[2] = origin_dosi[2] + dim_z_dosi*spacing_dosi[2]\n\tif(dosi_swapY == True)and(dosi_swapZ == True):\n\t\tspacing_dosi[1], spacing_dosi[2] = spacing_dosi[2], spacing_dosi[1]\n\n print ' dosi_swapY, dosi_swapZ :', dosi_swapY, dosi_swapZ\n\n\tdosi_open = True\n\tisodose_show = True\n\tcheck1.select()\n\tUpdate_all()\n\n\tprint(' file successfully opened!')",
"def vol_open_path(volpath, open_flags=VMDK_OPEN_DEFAULT):\n dhandle = get_uint(0)\n ihandle = get_uint(0)\n key = c_uint32(0)\n\n res = lib.DiskLib_OpenWithInfo(volpath.encode(), open_flags,\n byref(key), byref(dhandle),\n byref(ihandle))\n if res != 0:\n logging.warning(\"Open %s failed - %x\", volpath, res)\n return dhandle",
"def open(self) -> None:\n if not self.__opened:\n if self.path is None:\n self.path = HID.enumerate_devices(self.vendor_id)[0]\n self.device.open_path(self.path)\n self.device.set_nonblocking(True)\n self.__opened = True",
"def _vmd_script_start():\n return (\n \"#!/usr/local/bin/vmd\\n\"\n \"# VMD version: 1.8.6\\n\"\n \"#\\n\"\n \"# Display settings\\n\"\n \"display projection Perspective\\n\"\n \"display nearclip set 0.000000\\n\"\n \"display shadow off\\n\"\n \"color Element {C} silver\\n\"\n \"color Element {Cl} green\\n\"\n \"axes location Off\\n\"\n \"color Display Background white\\n\"\n \"light 2 on\\n\"\n \"light 3 on\\n\"\n \"#\\n\"\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test read and write ints. | def test_message_int():
result = True
message = msg.Message()
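    # Phase 1: append num_it integers, checking that the reported length grows by
    # intStruct.size for each appended value.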
for i in range(num_it):
message.appendInt(i)
if message.length != msg.HEADER_SIZE + (i+1)*msg.intStruct.size:
print("Size is ", message.length, " but should be ", msg.HEADER_SIZE + (i+1)*msg.intStruct.size)
print("Error : message.appendInt")
result = False
message.resetCursor()
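    # Phase 2: rewind the cursor and read the integers back, verifying the round trip.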
for i in range(num_it):
r = message.readInt()
if r != i:
print(r, " vs ", i)
print("Error : message.read/appendInt")
result = False
return result | [
"def testSimpleReadWrite(self):\n write_data = self.GetRandomIntegers(2048)\n read_data = []\n # Writer writes some data.\n asserts.assertTrue(\n self._queue1_writer.write(write_data, 2048), \"Write queue failed.\")\n # Check reader reads them back correctly.\n read_success = self._queue1_reader.read(read_data, 2048)\n asserts.assertTrue(read_success, \"Read queue failed.\")\n asserts.assertEqual(read_data, write_data)",
"def test_write_read_int_array(self):\n array_manager = GeneralArrayManager(self.TEST_BLOCK_SIZE,\n self.TEST_STRING_BLOCK_SIZE)\n\n # Number of items in an array with the current block size\n array_size = self.TEST_BLOCK_SIZE / 4\n # Random number of items\n rand_size = randint(1, 3) * array_size + randint(0, array_size - 1)\n # Create an array and add it to the GeneralArrayStore\n array = [randint(-2**31, 2**31 - 1) for _ in range(0, rand_size + 1)]\n array_idx = array_manager.write_array(array,\n Property.PropertyType.intArray)\n\n # Read it back from the array manager\n array_file = array_manager.read_array_at_index(array_idx)\n # Make sure they are the same\n self.assertEqual(array, array_file)",
"def test_int_field():",
"def testConsecutiveReadWrite(self):\n for i in range(64):\n write_data = self.GetRandomIntegers(2048)\n asserts.assertTrue(\n self._queue1_writer.write(write_data, 2048),\n \"Writer should write successfully.\")\n read_data = []\n read_success = self._queue1_reader.read(read_data, 2048)\n asserts.assertTrue(read_success,\n \"Reader should read successfully.\")\n asserts.assertEqual(write_data, read_data)\n\n # Reader should have no more available to read.\n asserts.assertEqual(0, self._queue1_reader.availableToRead())",
"def testManyReadWrite(self):\n def test(clock, reset, data_in, data_out, address, write, enable,\n init_data, width, size):\n for loc in [random.randrange(0, size) for i in range(size)]:\n clock.next = 0\n yield delay(10)\n \n number = random.randrange(2**WIDTH)\n \n enable.next = True\n write.next = True\n address.next = loc\n data_in.next = number\n\n clock.next = 1\n yield delay(10)\n\n clock.next = 0\n yield delay(10)\n\n write.next = False\n\n clock.next = 1\n yield delay(10)\n\n self.assertEqual(int(data_out), number)\n\n self.runTests(test, 1)",
"def testLargeWriteRead(self):\n for i in range(5):\n # Writes five integers.\n write_data = [random.randint(0, 100) for j in range(10)]\n write_data_str = str(bytearray(write_data))\n # Start writing at offset i * 5.\n self._mem_obj.updateRange(i * 5, len(write_data_str))\n self._mem_obj.updateBytes(write_data_str, len(write_data_str),\n i * 5)\n self._mem_obj.commit()\n\n # Reads data back.\n self._mem_obj.readRange(i * 5, len(write_data_str))\n read_data_str = self._mem_obj.readBytes(len(write_data_str), i * 5)\n read_data = list(bytearray(read_data_str))\n # Check if read data is correct.\n asserts.assertEqual(write_data, read_data)",
"def test_read_and_write_works(self):\n with rika.ScopedFile() as file:\n file.write('123')\n read = file.read()\n self.assertEqual('123', read)\n\n with rika.ScopedFile() as file:\n file.write(b'123', True)\n read = file.read(True)\n self.assertEqual(b'123', read)",
"def check_for_int(check):",
"def test_readInteger(self):\n oParse = InternalParse_Test.oParse\n oParse.parsedStream(\"12 333 44444444444444444444444444\", sName = \"root\")\n self.assertTrue(\n oParse.setTag('n1') and oParse.readInteger(),\n 'failed in readInteger for n1')\n n1 = oParse.getTag('n1')\n self.assertEqual(n1, \"12\", \"failed in capture n1\")\n self.assertTrue(\n oParse.setTag('n2') and oParse.readInteger(),\n 'failed in readInteger for n2')\n n2 = oParse.getTag('n2')\n self.assertEqual(n2, \"333\", \"failed in capture n2\")\n self.assertTrue(\n oParse.setTag('n3') and oParse.readInteger(),\n 'failed in readInteger for n3')\n n3 = oParse.getTag('n3')\n self.assertEqual(n3, \"44444444444444444444444444\", \"failed in capture n3\")",
"def test_int(self):\n from random import randint\n from ctypes import byref, c_int\n # back up array.\n a_orig = self.a.copy()\n # run FORTRAN subroutine.\n tval = randint(0,10000000)\n self.args[0] = byref(c_int(tval))\n self.lib_c_ctypes.ctypes_test(*self.args)\n # revert in Python and test.\n self.a -= tval\n for i in range(len(self.a)):\n self.assertEqual(self.a[i], a_orig[i])",
"def test_toint(number, expected, cond):\n assert toInt(number, cond=cond) == expected",
"def test_integer(self):\n conn = self.database.connection()\n cursor = conn.cursor()\n dialect = self.database.dialect()\n dbapi = self.database.dbapi()\n query = dialect.translate('DROP TABLE test_integer')\n try:\n cursor.execute(query)\n except dbapi.Error:\n conn.rollback()\n query = dialect.translate('CREATE TABLE test_integer ' \\\n '( value INTEGER NOT NULL )')\n cursor.execute(query)\n data = []\n query = 'INSERT INTO test_integer VALUES (%s)'\n for i in range(100):\n item = random.randrange(-sys.maxint, sys.maxint)\n data.append(item)\n cursor.execute(query, (item,))\n query = 'SELECT * FROM test_integer'\n cursor.execute(query)\n result = cursor.fetchall()\n for row in result:\n item = row[0]\n assert isinstance(item, int) or isinstance(item, long)\n assert item in data\n data.remove(item)\n query = dialect.translate('DELETE FROM test_integer')\n cursor.execute(query)\n query = dialect.translate('DROP TABLE test_integer')\n cursor.execute(query)\n conn.commit()",
"def test_integers(self):\n for const in [\n SSL_ST_CONNECT,\n SSL_ST_ACCEPT,\n SSL_ST_MASK,\n SSL_CB_LOOP,\n SSL_CB_EXIT,\n SSL_CB_READ,\n SSL_CB_WRITE,\n SSL_CB_ALERT,\n SSL_CB_READ_ALERT,\n SSL_CB_WRITE_ALERT,\n SSL_CB_ACCEPT_LOOP,\n SSL_CB_ACCEPT_EXIT,\n SSL_CB_CONNECT_LOOP,\n SSL_CB_CONNECT_EXIT,\n SSL_CB_HANDSHAKE_START,\n SSL_CB_HANDSHAKE_DONE,\n ]:\n assert isinstance(const, int)\n\n # These constants don't exist on OpenSSL 1.1.0\n for const in [\n SSL_ST_INIT,\n SSL_ST_BEFORE,\n SSL_ST_OK,\n SSL_ST_RENEGOTIATE,\n ]:\n assert const is None or isinstance(const, int)",
"def test_roundtrip_signed_int():\n for num in (0, -0, -1, 2, -178, 300, -BIG_NUMBER, BIG_NUMBER):\n num2 = SignedInt.read(SignedInt.to_bytes(num))\n assert num2 == num",
"def testSimpleWriteRead(self):\n write_data = \"abcdef\"\n # Write data into memory.\n self._mem_obj.update()\n self._mem_obj.updateBytes(write_data, len(write_data))\n self._mem_obj.commit()\n\n # Read data from memory.\n self._mem_obj.read()\n read_data = self._mem_obj.readBytes(len(write_data))\n asserts.assertEqual(write_data, read_data)",
"def test_get_value_int(self):\n val = self.setting_int.get_value()\n self.assertIsInstance(val, int)\n self.assertEqual(val, 170)",
"def test_int32():\n assert 999 == fastparquet.encoding.read_plain(\n struct.pack(b\"<i\", 999),\n parquet_thrift.Type.INT32, 1)",
"def testUnsynchronizedReadWrite(self):\n # Prepare write data.\n write_data = self.GetRandomFloats(2048)\n read_data1 = []\n read_data2 = []\n asserts.assertTrue(\n self._queue3_writer.write(write_data, 2048),\n \"Writer should write successfully.\")\n read_success1 = self._queue3_reader1.read(read_data1, 2048)\n read_success2 = self._queue3_reader2.read(read_data2, 2048)\n asserts.assertTrue(read_success1, \"Reader 1 should read successfully.\")\n asserts.assertTrue(read_success2, \"Reader 2 should read successfully.\")\n asserts.assertEqual(write_data, read_data1)\n asserts.assertEqual(write_data, read_data2)",
"def test_int(self):\n htype = h5t.py_create('i')\n self.assertIsInstance(htype, h5t.TypeIntegerID)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handles a leave game request. Deletes the user from the game. | def leave_game(players_cursor, states_cursor, user, room_id):
leave_query = '''DELETE FROM players_table WHERE user = ? AND room_id = ?'''
players_cursor.execute(leave_query, (user, room_id))
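    # Re-render the game view for the room after the player row is removed
    # (FRAMES appears to collect display frames for later broadcast).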
FRAMES.append(display_game(players_cursor, states_cursor, user, room_id)) | [
"def leave(msg: telebot.types.Message):\n if utils.in_menu(msg.from_user):\n bot.reply_to(\n msg,\n 'This command outside of game is useless.'\n )\n return\n\n game, user, opponent = utils.get_game_user_opponent(msg.from_user)\n if not game or not user:\n # todo log something\n return\n\n user.state = states.USER_IN_MENU\n user.losses += 1\n utils.update_user(user)\n bot.send_message(\n user.user_id,\n 'You surrendered.'\n )\n\n if opponent:\n opponent.state = states.USER_IN_MENU\n opponent.wins += 1\n utils.update_user(opponent)\n bot.send_message(\n opponent.user_id,\n 'Your opponent surrendered'\n )\n\n field = json.loads(game.field)\n sig = 1 if user == game.user1 else 2\n\n # changes users emojis to poop\n for i in range(len(field)):\n for j in range(len(field[i])):\n if field[i][j] == sig:\n field[i][j] = 4\n\n if opponent:\n utils.send_updated_field(bot, field, game, opponent)\n Game.delete_by_id(game.id)",
"def leave(ctx):\n game = ctx.storage.games[ctx.client.name, ctx.target]\n\n if ctx.origin not in game.players:\n ctx.respond(ctx._(\"You're not in this game.\"))\n return\n\n game_over = game.leave(ctx.origin)\n\n if game_over:\n do_game_over(ctx)\n return\n\n ctx.message(ctx._(\"{origin} left the game.\").format(origin=ctx.origin))\n\n if game.started:\n send_summary(ctx)",
"def LeaveLobby(request, response, handler):\r\n\r\n user = server.users.GetCurrentUser(handler)\r\n server.lobbies.RemoveUserFromLobby(user)",
"async def leave(self, ctx):\n game = get_game(ctx)\n mode = get_channel_mode(ctx)\n player = await get_player_by_id(ctx, mode, ctx.author.id)\n await ctx.send(embed=Embed(color=0x00FF00,\n description=game.queues[mode].remove_player(player)))",
"def delete_game():\n\n # check if there was an html manipulation\n if not request.form.get('game_id') or not request.form.get('game_id').isnumeric():\n flash(\"Invalid request\")\n return redirect(\"/games\")\n\n game_id = int(request.form.get('game_id'))\n\n # check if the game exists\n rows = db(f\"SELECT * FROM games WHERE id = {game_id}\")\n\n if len(rows) == 0:\n flash(\"Invalid request\")\n return redirect(\"/games\")\n\n # check if the user is the admin of the game\n if not rows[0]['admin_id'] == session['user_id']:\n flash(\"You dont have permission to do that\")\n return redirect(\"/games\")\n\n # after confirming admin, proceeding to delete everything associated with the game\n db(f\"DELETE FROM games WHERE id = {game_id}\")\n db(f\"DELETE FROM game_req WHERE game_id = {game_id}\")\n db(f\"DELETE FROM par WHERE game_id = {game_id}\")\n\n flash(\"Game deleted successfully\")\n return redirect(\"/games\")",
"def _leave(self, *args):\n if not self.game:\n raise ServerException('not playing a game')\n self.game.leave(self)\n self.game = self.player = None",
"def on_leave(data):\n username = request.sid\n room = data\n leave_room(room)\n logging.info(username + ' has left the room.')\n send(username + ' has left the room.', room=room)",
"def on_leave(self, room, user):\n pass",
"def leave(self):\n is_group_conversation = (self._conversation.type ==\n hangouts_pb2.CONVERSATION_TYPE_GROUP)\n try:\n if is_group_conversation:\n yield from self._client.remove_user(\n hangouts_pb2.RemoveUserRequest(\n request_header=self._client.get_request_header(),\n event_request_header=self._get_event_request_header(),\n )\n )\n else:\n yield from self._client.delete_conversation(\n hangouts_pb2.DeleteConversationRequest(\n request_header=self._client.get_request_header(),\n conversation_id=hangouts_pb2.ConversationId(\n id=self.id_\n ),\n delete_upper_bound_timestamp=parsers.to_timestamp(\n datetime.datetime.now(tz=datetime.timezone.utc)\n )\n )\n )\n except exceptions.NetworkError as e:\n logger.warning('Failed to leave conversation: {}'.format(e))\n raise",
"def leave(self, message, db_session):\n username = self.ts.get_user(message)\n user = db_session.query(db.User).filter(db.User.name == username).one_or_none()\n if not user:\n user = db.User(name=username)\n db_session.add(user)\n for tup in self.player_queue.queue:\n if tup[0] == username:\n self.player_queue.queue.remove(tup)\n self._add_to_whisper_queue(username, \"You've left the queue.\")\n user.times_played -= 1\n break\n else:\n self._add_to_whisper_queue(username, \"You're not in the queue and must join before leaving.\")",
"def handle_leave_room(self, lobby_command, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n words = lobby_command.split()\n roomname = words[1]\n print(f\"Handling leave room {roomname} for {user}\")\n for _room in self.rooms:\n if _room.name == roomname:\n print(\"Requested roomname found..\")\n if user not in _room.room_attrbts['members']:\n msg = f\"Client {user} is already NOT a member of room {_room.name}\"\n self.log_and_send(client_socket, msg)\n return\n else:\n _room.room_attrbts['members'].remove(user)\n msg = f\"User {user} successfully removed from room {roomname}\"\n self.log_and_send(client_socket, msg)\n return\n msg = f'Client {user} passed invalid room. Could not join room {roomname}'\n self.log_and_send(client_socket, msg)\n return",
"def leave_farm(self, request, pk):\n farm = self.get_object()\n user = request.user\n farm.remove_member(user)\n return Response({}, status=status.HTTP_204_NO_CONTENT)",
"async def chat_leave(self, event):\n print(\"PrivateChatConsumer\", \"chat_leave\")\n if event[\"username\"]:\n await self.send_json({\n \"msg_type\": MSG_TYPE_LEAVE,\n \"room_id\": event[\"room_id\"],\n \"profile_image\": event[\"profile_image\"],\n \"username\": event[\"username\"],\n \"message\": event[\"username\"] + \" disconnected.\"\n })",
"def leave(ctx, network):\n return _leave(ctx.obj['client'], network)",
"def leave(self, *args, **kwargs):\n return self.bot.leave_chat(self.id, *args, **kwargs)",
"def cancel_game(self, request):\n game = get_by_urlsafe(request.urlsafe_key, Game)\n if game and not game.game_over:\n game.key.delete()\n return StringMessage(message='Game with key: {} deleted.'.\n format(request.urlsafe_key))\n elif game and game.game_over:\n raise endpoints.BadRequestException('Game is already over!')\n else:\n raise endpoints.NotFoundException('Game not found!')",
"async def chat_leave(self, event):\n await self.send_json(\n return_value(\n ACTION_WENT_OFFLINE,\n event['label'],\n event['username'],\n MSG_LEAVE,\n NO_MESSAGE\n )\n )",
"async def leave(ctx, *, check=\"\"):\r\n # if botv.isAdmin(ctx.message.author) and check == \"now, bot\":\r\n # if necessary, save checks can go here; check presently commented out because botv can\r\n # fail to initialize in testing\r\n await bot.say(\"Allan, please add dialogue!\")\r\n quit()",
"async def leave_channel(\n app_user_id: Optional[str] = Header(None),\n app_channel_id: Optional[str] = Header(None),\n):\n try:\n response = user_leave(app_channel_id, app_user_id)\n return response\n except ReturnExceptions as err:\n raise HTTPException(\n status_code=Code.error_enum_http[err.error_code], detail=str(err)\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Select Relationships associated with specified fact_id. | def select_by_fact_id(cls, fact_id):
return db.session.query(cls).filter_by(fact_id=fact_id).all() | [
"def read_relationships(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(relationship_query, (person_id,)) # note a tuple is needed as a parameter value for SQLITE\n\n relation_list = []\n for row in c:\n _relation = Relationship()\n _relation.person_id = row[\"personid\"]\n _relation.person.first_name = row[\"firstname\"]\n _relation.person.last_name = row[\"lastname\"]\n _relation.person.middle_initial = row[\"middleinitial\"]\n _relation.related_person_id = row[\"related_personid\"]\n _relation.relationship_id = row[\"relationshipid\"]\n _relation.relationship_type = row[\"relationshiptype\"]\n _relation.relationship_type_description = row[\"key\"]\n relation_list.append(_relation)\n conn.close()\n return relation_list\n except:\n return []",
"def get_filtered_relationship(self, **lookups):\n raise NotImplementedError(\"Method has to be implemented\")",
"def fn_get_relationship(_id):\n\n if request.method == \"OPTIONS\":\n return\n\n response_doc = JsonAPIResponse(request.url)\n\n entry = self.model.select().where(\n self.model._meta.primary_key == _id\n ).get()\n\n relation = getattr(entry, relationship)\n # non existant relationships must return successful with data: null\n data, included = self.__entry_to_resource(\n relation,\n include=request.query.include.split(\",\"),\n fields=util.parse_fields_parameter(),\n linkage=linkage\n ) if relation else (None, [])\n\n response_doc.data = data\n response_doc.included = included\n\n return json.dumps(dict(response_doc), sort_keys=True)",
"def _get_given_object_id(self, object_id, predicate=None):\n LOGGER.debug('Getting relationships related to object_id=%s and '\n 'predicate=%s.', object_id, predicate)\n query = schema.SubjectFromObject.objects.filter(object_id=object_id)\n if predicate:\n # pylint: disable=fixme\n # TODO: If predicate query table implemented, change. SIBO-145\n query = query.filter(predicate=predicate)\n return self._build_relationships(query.allow_filtering().all())",
"def get_relationship_properties(self, id):\n raise NotImplementedError(\"Method has to be implemented\")",
"def get_fact(self, fact):\n return self.get_facts(fact)",
"def get_relationships(self):\n raise NotImplementedError(\n 'operation get_relationships(...) not yet implemented')",
"def get_relationship(self, guid):\n results = None\n atlas_endpoint = self.endpoint_url + f\"/relationship/guid/{guid}\"\n\n getResponse = requests.get(\n atlas_endpoint,\n headers=self.authentication.get_authentication_headers()\n )\n\n results = self._handle_response(getResponse)\n\n return results",
"def get_relationship(self, id: str) -> Optional[Relationship]:\n return self._relationships_by_id.get(id)",
"def selected_relationships(self):\n return self._selected_relationships",
"def relationships(self):",
"def get_relationships_for_destination(self, destination_id):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_for_destination\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'destinationId': str(destination_id)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)",
"def get_all_relationship(self, include_properties=False):\n raise NotImplementedError(\"Method has to be implemented\")",
"def get_node_relationships(self, id, incoming=False, outgoing=False,\n include_properties=False, label=None):\n raise NotImplementedError(\"Method has to be implemented\")",
"def select_by_foreign_keys(cls, subject_id=None, object_id=None, relationship_type_id=None):\n filter_clause = sa.and_(\n sa.and_(cls.subject_id == subject_id, cls.object_id == object_id),\n cls.relationship_type_id == relationship_type_id)\n return db.session.query(cls).filter(filter_clause).first()",
"def find_relationships_for(\n self, name: str\n ) -> Generator[Tuple[Person, Relationship, Person], None, None]:\n return (relation for relation in self._relations if relation[0].name == name)",
"async def get_relationship(\n self,\n digital_twin_id: str,\n relationship_id: str,\n **kwargs\n ) -> Dict[str, object]:\n return await self._client.digital_twins.get_relationship_by_id(\n digital_twin_id,\n relationship_id,\n **kwargs\n )",
"def get_relationship_query(self):\n # Implemented from template for\n # osid.resource.ResourceQuerySession.get_resource_query_template\n return queries.RelationshipQuery(runtime=self._runtime)",
"def relationship(self,id):\n return Relationship(id,self.key)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Select Relationship with specified subject, object and relationship type. | def select_by_foreign_keys(cls, subject_id=None, object_id=None, relationship_type_id=None):
filter_clause = sa.and_(
sa.and_(cls.subject_id == subject_id, cls.object_id == object_id),
cls.relationship_type_id == relationship_type_id)
return db.session.query(cls).filter(filter_clause).first() | [
"def select_by_values(cls, relationship_type_name=None, relationship_number=None,\n subject_name=None, object_name=None):\n query = db.session.query(cls).\\\n join(RelationshipType).\\\n filter(RelationshipType.relationship_type_name==relationship_type_name)\n if relationship_number:\n query = query.filter(Relationship.count==relationship_number)\n if subject_name: \n subject_concept = sa_orm.aliased(Concept)\n query = query.\\\n join(subject_concept, Relationship.subject_id==subject_concept.concept_id).\\\n filter(subject_concept.concept_name==subject_name)\n if object_name:\n object_concept = sa_orm.aliased(Concept)\n query = query.\\\n join(object_concept, Relationship.object_id==object_concept.concept_id).\\\n filter(object_concept.concept_name==object_name)\n return query.all()",
"def _get_given_object_id(self, object_id, predicate=None):\n LOGGER.debug('Getting relationships related to object_id=%s and '\n 'predicate=%s.', object_id, predicate)\n query = schema.SubjectFromObject.objects.filter(object_id=object_id)\n if predicate:\n # pylint: disable=fixme\n # TODO: If predicate query table implemented, change. SIBO-145\n query = query.filter(predicate=predicate)\n return self._build_relationships(query.allow_filtering().all())",
"def get_relationship_type(self, type):\n raise NotImplementedError(\"Method has to be implemented\")",
"def test_filter_relationships_by_concept_type__object(self):\n # Set up mocks and test data\n concept_type = 'birds'\n mock_match_0 = Mock(name='mock_match_0',\n object=Mock(name='mock_subject_0',\n concept_name='mock_subject_0',\n concept_types=['birds', 'snakes']))\n\n mock_match_1 = Mock(name='mock_match_1',\n object=Mock(name='mock_subject_1',\n concept_name='mock_subject_1',\n concept_types=['snakes', 'turtles']))\n\n mock_match_2 = Mock(name='mock_match_2',\n object=Mock(name='mock_subject_2',\n concept_name='mock_subject_2',\n concept_types=['snakes', 'birds']))\n\n mock_matches = [mock_match_0, mock_match_1, mock_match_2]\n\n # Make call\n filtered_matches = FactQuery._filter_relationships_by_concept_type(\n mock_matches, concept_type, relationship_attr='object')\n\n # Verify results\n self.assertEqual([mock_match_0, mock_match_2], filtered_matches)",
"def get_association(relation: str, subject_node: Node, object_node: Node, is_negated: bool, pmid: str = '',\n association_config: dict = None):\n if association_config is None:\n association_config = associations\n most_relevant_relation = list(get_biolink_association(subject_node, object_node, association_config).items())[0][0]\n biolink_relation = most_relevant_relation(id=0,\n subject=subject_node['id'],\n relation=relation,\n object=object_node['id'],\n negated=is_negated,\n publications=[pmid])\n return get_relationship_from_biolink(subject_node, biolink_relation, object_node)",
"def relationships(self, obj, relationship_type=None, source_only=False, target_only=False):\n results = []\n filters = [Filter('type', '=', 'relationship')]\n\n try:\n obj_id = obj['id']\n except KeyError:\n raise ValueError(\"STIX object has no 'id' property\")\n except TypeError:\n # Assume `obj` is an ID string\n obj_id = obj\n\n if relationship_type:\n filters.append(Filter('relationship_type', '=', relationship_type))\n\n if source_only and target_only:\n raise ValueError(\"Search either source only or target only, but not both\")\n\n if not target_only:\n results.extend(self.query(filters + [Filter('source_ref', '=', obj_id)]))\n if not source_only:\n results.extend(self.query(filters + [Filter('target_ref', '=', obj_id)]))\n\n return results",
"def test_select_by_concept_type(self, select_relationships):\n select_relationships.return_value = [Mock(subject='hello'), Mock(subject='kitty')]\n mock_concept_type = Mock(name='concept_type')\n\n result = FactQuery._select_by_concept_type(mock_concept_type)\n self.assertEqual(['hello', 'kitty'], result)\n select_relationships.assert_called_once_with('is', object_name=mock_concept_type)",
"def set_relationship_type(self, id, type):\n raise NotImplementedError(\"Method has to be implemented\")",
"def _get_given_subject_id(self, subject_id, predicate=None):\n LOGGER.debug('Getting relationships related to subject_id=%s and '\n 'predicate=%s.', subject_id, predicate)\n query = (schema.ObjectFromSubject.objects\n .filter(subject_id=subject_id))\n if predicate:\n query = query.filter(predicate=predicate)\n return self._build_relationships(query.all())",
"def test_filter_relationships_by_concept_type__subject(self):\n # Set up mocks and test data\n concept_type = 'birds'\n mock_match_0 = Mock(name='mock_match_0',\n subject=Mock(name='mock_subject_0',\n concept_name='mock_subject_0',\n concept_types=['birds', 'snakes']))\n\n mock_match_1 = Mock(name='mock_match_1',\n subject=Mock(name='mock_subject_1',\n concept_name='mock_subject_1',\n concept_types=['snakes', 'turtles']))\n\n mock_match_2 = Mock(name='mock_match_2',\n subject=Mock(name='mock_subject_2',\n concept_name='mock_subject_2',\n concept_types=['snakes', 'birds']))\n\n mock_matches = [mock_match_0, mock_match_1, mock_match_2]\n\n # Make call\n filtered_matches = FactQuery._filter_relationships_by_concept_type(\n mock_matches, concept_type, relationship_attr='subject')\n\n # Verify results\n self.assertEqual([mock_match_0, mock_match_2], filtered_matches)",
"def _get_given_predicate(self, predicate):\n LOGGER.debug('Getting relationships related to predicate=%s.', predicate)\n # pylint: disable=fixme\n # TODO: If predicate query table implemented, change. SIBO-145\n query = schema.ObjectFromSubject.objects.filter(predicate=predicate)\n return self._build_relationships(query.allow_filtering().all())",
"def get_relationships_by_record_type(self, relationship_record_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_record_type\n # STILL NEED TO IMPLEMENT!!!\n return objects.RelationshipList([])",
"def get_relationship_from_biolink(biolink_subject: Node,\n biolink_association: Association,\n biolink_object: Node):\n properties = {key: value for key, value in biolink_association.__dict__.items() if key != 'id'}\n return Relationship(biolink_subject,\n get_pythonic_name(biolink_association.__class__.__name__),\n biolink_object,\n **properties)",
"def get_relationship_search_record(self, relationship_search_record_type):\n raise errors.Unimplemented()",
"def get_related_content_type(self, content_type):\n return self.storyrelation_set.filter(\n content_type__name=content_type)",
"def query_Relationship(self,\n started_at_time: Union[str, MatchExpression] = None,\n ended_at_time: Union[str, MatchExpression] = None,\n related_to: Union[str, MatchExpression] = None,\n type: Union[str, MatchExpression] = None,\n \n _extra: Any = None) -> List[Relationship]:\n results = self.query_engine.simple_query(Relationship.class_name,\n \n started_at_time=started_at_time,\n \n ended_at_time=ended_at_time,\n \n related_to=related_to,\n \n type=type,\n \n _extra=_extra)\n return results",
"def relationship(\n self, required_type: Type[UML.Element], head: relation, tail: relation\n ) -> Optional[UML.Element]:\n line = self.line\n\n line_head = self.get_connected(line.head)\n line_tail = self.get_connected(line.tail)\n assert line_head\n assert line_tail\n head_subject = line_head.subject\n tail_subject = line_tail.subject\n\n # First check if the right subject is already connected:\n if (\n line.subject\n and getattr(line.subject, head.name) is head_subject\n and getattr(line.subject, tail.name) is tail_subject\n ):\n return line.subject\n\n # Try to find a relationship, that is already created, but not\n # yet displayed in the diagram.\n assert isinstance(head, (association, redefine)), f\"head is {head}\"\n assert isinstance(tail, (association, redefine)), f\"tail is {tail}\"\n assert tail.opposite, f\"Tail end of {line} has no opposite definition\"\n gen: UML.Element\n for gen in getattr(tail_subject, tail.opposite):\n if not isinstance(gen, required_type):\n continue\n\n gen_head = getattr(gen, head.name)\n try:\n if head_subject not in gen_head:\n continue\n except TypeError:\n if gen_head is not head_subject:\n continue\n\n # Check for this entry on line.canvas\n item: Union[ElementPresentation, LinePresentation]\n for item in gen.presentation:\n # Allow line to be returned. Avoids strange\n # behaviour during loading\n if item.canvas is line.canvas and item is not line:\n break\n else:\n return gen\n return None",
"def related_to(self, obj, relationship_type=None, source_only=False, target_only=False, filters=None):\n results = []\n rels = self.relationships(obj, relationship_type, source_only, target_only)\n\n try:\n obj_id = obj['id']\n except TypeError:\n # Assume `obj` is an ID string\n obj_id = obj\n\n # Get all unique ids from the relationships except that of the object\n ids = set()\n for r in rels:\n ids.update((r.source_ref, r.target_ref))\n ids.discard(obj_id)\n\n # Assemble filters\n filter_list = FilterSet(filters)\n\n for i in ids:\n results.extend(self.query([f for f in filter_list] + [Filter('id', '=', i)]))\n\n return results",
"def fn_get_relationship(_id):\n\n if request.method == \"OPTIONS\":\n return\n\n response_doc = JsonAPIResponse(request.url)\n\n entry = self.model.select().where(\n self.model._meta.primary_key == _id\n ).get()\n\n relation = getattr(entry, relationship)\n # non existant relationships must return successful with data: null\n data, included = self.__entry_to_resource(\n relation,\n include=request.query.include.split(\",\"),\n fields=util.parse_fields_parameter(),\n linkage=linkage\n ) if relation else (None, [])\n\n response_doc.data = data\n response_doc.included = included\n\n return json.dumps(dict(response_doc), sort_keys=True)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Select Relationships with specified relationship_type, count, subject, and object. | def select_by_values(cls, relationship_type_name=None, relationship_number=None,
subject_name=None, object_name=None):
query = db.session.query(cls).\
join(RelationshipType).\
filter(RelationshipType.relationship_type_name==relationship_type_name)
if relationship_number:
query = query.filter(Relationship.count==relationship_number)
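    # Subject and object both reference Concept, so each optional join gets its own
    # alias to keep the two name filters independent.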
if subject_name:
subject_concept = sa_orm.aliased(Concept)
query = query.\
join(subject_concept, Relationship.subject_id==subject_concept.concept_id).\
filter(subject_concept.concept_name==subject_name)
if object_name:
object_concept = sa_orm.aliased(Concept)
query = query.\
join(object_concept, Relationship.object_id==object_concept.concept_id).\
filter(object_concept.concept_name==object_name)
return query.all() | [
"def select_by_foreign_keys(cls, subject_id=None, object_id=None, relationship_type_id=None):\n filter_clause = sa.and_(\n sa.and_(cls.subject_id == subject_id, cls.object_id == object_id),\n cls.relationship_type_id == relationship_type_id)\n return db.session.query(cls).filter(filter_clause).first()",
"def test_filter_relationships_by_concept_type__object(self):\n # Set up mocks and test data\n concept_type = 'birds'\n mock_match_0 = Mock(name='mock_match_0',\n object=Mock(name='mock_subject_0',\n concept_name='mock_subject_0',\n concept_types=['birds', 'snakes']))\n\n mock_match_1 = Mock(name='mock_match_1',\n object=Mock(name='mock_subject_1',\n concept_name='mock_subject_1',\n concept_types=['snakes', 'turtles']))\n\n mock_match_2 = Mock(name='mock_match_2',\n object=Mock(name='mock_subject_2',\n concept_name='mock_subject_2',\n concept_types=['snakes', 'birds']))\n\n mock_matches = [mock_match_0, mock_match_1, mock_match_2]\n\n # Make call\n filtered_matches = FactQuery._filter_relationships_by_concept_type(\n mock_matches, concept_type, relationship_attr='object')\n\n # Verify results\n self.assertEqual([mock_match_0, mock_match_2], filtered_matches)",
"def test_filter_relationships_by_concept_type__subject(self):\n # Set up mocks and test data\n concept_type = 'birds'\n mock_match_0 = Mock(name='mock_match_0',\n subject=Mock(name='mock_subject_0',\n concept_name='mock_subject_0',\n concept_types=['birds', 'snakes']))\n\n mock_match_1 = Mock(name='mock_match_1',\n subject=Mock(name='mock_subject_1',\n concept_name='mock_subject_1',\n concept_types=['snakes', 'turtles']))\n\n mock_match_2 = Mock(name='mock_match_2',\n subject=Mock(name='mock_subject_2',\n concept_name='mock_subject_2',\n concept_types=['snakes', 'birds']))\n\n mock_matches = [mock_match_0, mock_match_1, mock_match_2]\n\n # Make call\n filtered_matches = FactQuery._filter_relationships_by_concept_type(\n mock_matches, concept_type, relationship_attr='subject')\n\n # Verify results\n self.assertEqual([mock_match_0, mock_match_2], filtered_matches)",
"def relationships(self, obj, relationship_type=None, source_only=False, target_only=False):\n results = []\n filters = [Filter('type', '=', 'relationship')]\n\n try:\n obj_id = obj['id']\n except KeyError:\n raise ValueError(\"STIX object has no 'id' property\")\n except TypeError:\n # Assume `obj` is an ID string\n obj_id = obj\n\n if relationship_type:\n filters.append(Filter('relationship_type', '=', relationship_type))\n\n if source_only and target_only:\n raise ValueError(\"Search either source only or target only, but not both\")\n\n if not target_only:\n results.extend(self.query(filters + [Filter('source_ref', '=', obj_id)]))\n if not source_only:\n results.extend(self.query(filters + [Filter('target_ref', '=', obj_id)]))\n\n return results",
"def _get_given_object_id(self, object_id, predicate=None):\n LOGGER.debug('Getting relationships related to object_id=%s and '\n 'predicate=%s.', object_id, predicate)\n query = schema.SubjectFromObject.objects.filter(object_id=object_id)\n if predicate:\n # pylint: disable=fixme\n # TODO: If predicate query table implemented, change. SIBO-145\n query = query.filter(predicate=predicate)\n return self._build_relationships(query.allow_filtering().all())",
"def get_relationships_by_record_type(self, relationship_record_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_record_type\n # STILL NEED TO IMPLEMENT!!!\n return objects.RelationshipList([])",
"def get_relationship_search_results_record(self, relationship_search_record_type):\n raise errors.Unimplemented()",
"def test_select_by_concept_type(self, select_relationships):\n select_relationships.return_value = [Mock(subject='hello'), Mock(subject='kitty')]\n mock_concept_type = Mock(name='concept_type')\n\n result = FactQuery._select_by_concept_type(mock_concept_type)\n self.assertEqual(['hello', 'kitty'], result)\n select_relationships.assert_called_once_with('is', object_name=mock_concept_type)",
"def query_Relationship(self,\n started_at_time: Union[str, MatchExpression] = None,\n ended_at_time: Union[str, MatchExpression] = None,\n related_to: Union[str, MatchExpression] = None,\n type: Union[str, MatchExpression] = None,\n \n _extra: Any = None) -> List[Relationship]:\n results = self.query_engine.simple_query(Relationship.class_name,\n \n started_at_time=started_at_time,\n \n ended_at_time=ended_at_time,\n \n related_to=related_to,\n \n type=type,\n \n _extra=_extra)\n return results",
"def get_ids_related_to(cls, object_type, related_type, related_ids=[]):\n\n if isinstance(related_ids, (int, long)):\n related_ids = [related_ids]\n\n if not related_ids:\n return db.session.query(Relationship.source_id).filter(sql.false())\n\n destination_ids = db.session.query(Relationship.destination_id).filter(\n and_(\n Relationship.destination_type == object_type,\n Relationship.source_type == related_type,\n Relationship.source_id.in_(related_ids),\n )\n )\n source_ids = db.session.query(Relationship.source_id).filter(\n and_(\n Relationship.source_type == object_type,\n Relationship.destination_type == related_type,\n Relationship.destination_id.in_(related_ids),\n )\n )\n\n queries = [destination_ids, source_ids]\n queries.extend(cls.get_extension_mappings(\n object_type, related_type, related_ids))\n queries.extend(cls.get_special_mappings(\n object_type, related_type, related_ids))\n\n return cls._array_union(queries)",
"def related_to(self, obj, relationship_type=None, source_only=False, target_only=False, filters=None):\n results = []\n rels = self.relationships(obj, relationship_type, source_only, target_only)\n\n try:\n obj_id = obj['id']\n except TypeError:\n # Assume `obj` is an ID string\n obj_id = obj\n\n # Get all unique ids from the relationships except that of the object\n ids = set()\n for r in rels:\n ids.update((r.source_ref, r.target_ref))\n ids.discard(obj_id)\n\n # Assemble filters\n filter_list = FilterSet(filters)\n\n for i in ids:\n results.extend(self.query([f for f in filter_list] + [Filter('id', '=', i)]))\n\n return results",
"def relationship_count(self, r_type=None, n_ids=()):\n if r_type is None and not n_ids:\n return len(self._relationships)\n elif not n_ids:\n return len(self._relationships_by_type.get(r_type, ()))\n else:\n return sum(1 for _ in self.relationships(r_type, n_ids))",
"def _get_given_predicate(self, predicate):\n LOGGER.debug('Getting relationships related to predicate=%s.', predicate)\n # pylint: disable=fixme\n # TODO: If predicate query table implemented, change. SIBO-145\n query = schema.ObjectFromSubject.objects.filter(predicate=predicate)\n return self._build_relationships(query.allow_filtering().all())",
"def _get_relationship_types(self):\n return self.civicrm.get('RelationshipType', limit=999)",
"def get_relationship_search_record(self, relationship_search_record_type):\n raise errors.Unimplemented()",
"def by_type(self, type):\n return self.filter(related_type__title=type)",
"def _get_given_subject_id(self, subject_id, predicate=None):\n LOGGER.debug('Getting relationships related to subject_id=%s and '\n 'predicate=%s.', subject_id, predicate)\n query = (schema.ObjectFromSubject.objects\n .filter(subject_id=subject_id))\n if predicate:\n query = query.filter(predicate=predicate)\n return self._build_relationships(query.all())",
"def get_filtered_relationship(self, **lookups):\n raise NotImplementedError(\"Method has to be implemented\")",
"def get_related_content_type(self, content_type):\n return self.storyrelation_set.filter(\n content_type__name=content_type)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorator that validates requests with Cerberus | def validate_request_cerberus(schema):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
body_json = request.get_json()
current_app.logger.info(body_json)
v = Validator(schema, require_all=True)
v.allow_unknown = True # TODO: allow request params other than the ones defined on the schema level
if not v.validate(body_json):
valid_params_list = ', '.join(schema.keys())
return response_fail(f"You must call with all request params: {valid_params_list}")
return func(*args, **kwargs)
return wrapper
return decorator | [
"def validate_request(self, func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if current_app.TESTING:\n return func(*args, **kwargs)\n url = request.base_url\n vars = request.values\n signature = request.header.get('X-Twilio-Signature')\n if not self.validator.validate(url, vars, signature):\n return abort(403)\n return func(*args, **kwargs)\n return decorated_view",
"def validate_twilio_request(func):\n @wraps(func)\n def decorated_function(request, *args, **kwargs):\n # Create an instance of the RequestValidator class\n validator = RequestValidator(os.environ.get('TWILIO_AUTH_TOKEN'))\n\n # Validate the request using its URL, POST data,\n # and X-TWILIO-SIGNATURE header\n request_valid = validator.validate(\n request.build_absolute_uri(),\n request.POST,\n request.META.get('HTTP_X_TWILIO_SIGNATURE', ''))\n\n # Continue processing the request if it's valid, return a 403 error if\n # it's not\n if request_valid:\n return func(request, *args, **kwargs)\n else:\n return HttpResponseForbidden()\n return decorated_function",
"def check_request(request_schema):\n def decorator(f):\n @functools.wraps(f)\n def wrapper(self, addr, request):\n data, err = request_schema.load(request)\n if err:\n return Header.ERROR, Error.WRONG_REQUEST\n else:\n return f(self, addr, data)\n\n return wrapper\n return decorator",
"def validate_twilio_request(f):\n @wraps(f)\n def decorated_function(request, *args, **kwargs):\n # Create an instance of the RequestValidator class\n validator = RequestValidator(settings.TWILIO_AUTH_TOKEN)\n absolute_url = request.build_absolute_uri()\n absolute_url = absolute_url.replace(\"http://\", \"https://\")\n # Validate the request using its URL, POST data,\n # and X-TWILIO-SIGNATURE header\n request_valid = validator.validate(\n absolute_url,\n request.POST,\n request.META.get('HTTP_X_TWILIO_SIGNATURE', ''))\n \n # Continue processing the request if it's valid, return a 403 error if\n # it's not\n if request_valid or settings.DEBUG:\n return f(request, *args, **kwargs)\n else:\n return HttpResponseForbidden()\n return decorated_function",
"def validate_fields(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n # non-JSON requests\n if not request.json:\n raise RequestFieldException(\"Request body isn't JSON!\")\n # Absent request fields\n if \"url\" not in request.json:\n raise RequestFieldException(\"Request has not all required fields!\")\n # Empty request `url` field\n if str.strip(request.json[\"url\"]) == \"\":\n raise RequestFieldException(\"URL field is empty!\")\n # Non-valid url\n if re.match(url_regex, request.json[\"url\"]) is None:\n raise RequestFieldException(\"URL isn't valid!\")\n\n # If there was provided `lifeterm` value\n if \"lifeterm\" in request.json:\n # Non-int `lifeterm` field\n if not isinstance(request.json[\"lifeterm\"], int):\n raise RequestFieldException(\"Lifeterm isn't integer!\")\n if request.json[\"lifeterm\"] < 1 or request.json[\"lifeterm\"] > 365:\n raise RequestFieldException(\"Lifeterm has to be in range [1, 365] days!\")\n return f(*args, **kwargs)\n return decorated_function",
"def request_validation(serializer):\n def request_validation_inner(handler):\n\n @functools.wraps(handler)\n def _wrapper(view, request: Request, *args, **kwargs):\n if request.method == 'GET':\n validation_serializer = serializer(data=request.query_params)\n else:\n validation_serializer = serializer(data=request.data)\n if not validation_serializer.is_valid():\n return ResponseValidationError(\n message='Request body is invalid.',\n invalid_keys=list(validation_serializer.errors.keys())\n )\n return handler(view, request, *args, **kwargs)\n\n return _wrapper\n\n return request_validation_inner",
"def wants_request(f):\n\n @wraps(f)\n def wrapper(user):\n return f(user, request)\n\n return wrapper",
"def validate_connection(func: Callable) -> Callable:\n\n def validate(*args: list):\n request = create_request(args[0], 5)\n\n if request is False:\n print(current_time(), \"target website seems to be down\")\n else:\n return func(*args)\n\n return validate",
"def request_is_valid(request):\n return 'method' in request",
"def human_required(view_func, field_name=None):\n\n def wrapped(request, *args, **kwargs):\n if is_recaptcha_valid(request, field_name=(field_name or 'recaptcha')):\n return view_func(request, *args, **kwargs)\n else:\n return HttpResponseForbidden()\n\n return wrapped",
"def protect_api(f):\n ## allow in debug mode\n if settings.DEBUG:\n return f\n\n @wraps(f)\n def decorated(request, *args, **kwargs):\n try:\n if validate_request(DjangoRequest(request), consumer_getter=get_consumer):\n return f(request, *args, **kwargs)\n return HttpResponse('Unauthorized', status=401)\n except InvalidRequest as e:\n return HttpResponse('Unauthorized: {}'.format(e), status=401)\n except Exception as e:\n print e\n return decorated",
"def validate_request(req):\n # print out the request to the terminal window if in debug mode\n # this item is set in the set tings, in the __init__.py file\n if settings.LTI_DEBUG:\n for item in sorted(req.POST.dict()):\n debug_printer('DEBUG - %s: %s \\r' % (item, req.POST[item]))\n\n # verifies that request contains the information needed\n if 'oauth_consumer_key' not in req.POST:\n debug_printer('DEBUG - Consumer Key was not present in request.')\n raise PermissionDenied()\n if 'user_id' not in req.POST:\n debug_printer('DEBUG - Anonymous ID was not present in request.')\n raise PermissionDenied()",
"def validate(**vkargs):\n def decorator(func):\n def wrapper(**kargs):\n for key in kargs:\n if key not in kargs:\n abort(400, 'Missing parameter: %s' % key)\n try:\n kargs[key] = vkargs[key](kargs[key])\n except ValueError:\n abort(400, 'Wrong parameter form at for: %s' % key)\n return func(**kargs)\n return decorator",
"async def validate_request(request: web.Request, spec: OpenApiSpec):\n req = await AiohttpOpenAPIRequest.create(request)\n\n validator = RequestValidator(spec)\n result = validator.validate(req)\n\n return result.parameters, result.body, result.errors",
"def post_required(func):\n def post_wrapper(request,*args,**kwds):\n res = http.ResponseBuilder()\n if request.method != 'POST':\n return res.error(\"post is required\").build_json()\n return func(request,*args,**kwds)\n return post_wrapper",
"def check(self):\n invalid = []\n\n if not self.route:\n invalid.append(('route', 'missing'))\n elif not self.route[1] in ['GET', 'POST', 'PUT']:\n invalid.append(('route', 'invalid method: %s' % self.route[1]))\n\n has_2xx = False\n for rcode in self.return_codes:\n code = rcode[0]\n if code >= 200 and code < 300:\n has_2xx = True\n break\n if not has_2xx:\n invalid.append(('return_codes', 'Missing succes return code doc'))\n\n if self.client_auth is None:\n invalid.append(\n ('client_auth', 'Please provide client auth requirement'))\n\n if self.user_auth is None:\n invalid.append(\n ('user_auth', 'Please provide user auth requirement'))\n\n if invalid:\n msgs = []\n for error in invalid:\n msgs.append(\"%s: %s\" % error)\n raise ValueError(\n \"APIFunc for %s is invalid: %s\"\n % (self.viewfunc.__name__,\n ', '.join(msgs)))",
"def validate(**vkargs):\n def decorator(func):\n def wrapper(**kargs):\n for key, value in vkargs.iteritems():\n if key not in kargs:\n abort(403, 'Missing parameter: %s' % key)\n try:\n kargs[key] = value(kargs[key])\n except ValueError as e:\n abort(403, 'Wrong parameter format for: %s (error %s)' % (key, str(e)))\n return func(**kargs)\n return wrapper\n return decorator",
"def validate_params(*req_params):\n def decorator(func):\n @wraps(func)\n def wrapper(*event_context):\n # see if the event has already been decoded (via 2nd order call). JSON-encoded bodies are recognised as str\n try:\n if type(event_context[0]) == dict and 'body' in event_context[0].keys():\n event_body = json.loads(event_context[0]['body'])\n else:\n event_body = event_context[0]\n except (json.JSONDecodeError, KeyError) as e:\n print(e)\n return APIResponseError(message=e).send()\n\n for req_param in req_params:\n if req_param not in event_body.keys() or len(str(event_body[req_param])) == 0:\n msg = f\"Bad required parameter: {req_param}\"\n print(msg)\n return APIResponseError(message=msg).send()\n return func(event_body, event_context[1])\n return wrapper\n return decorator",
"def request_fields(*req_args):\n\tdef decorator(f):\n\t\t@wraps(f)\n\t\tdef decorated(*args, **kwargs):\n\t\t\tif not g.req: return json_response(dict(description='JSON object must be passed as HTTP body with this request'), 422)\n\t\t\tmissing = []\n\t\t\tfor arg in req_args:\n\t\t\t\tif not g.req.has_key(arg): missing.append(arg)\n\t\t\tif missing: return json_response(dict(description='Mandatory request fields missing', missing_fields=missing), 422)\n\t\t\treturn f(*args, **kwargs)\n\t\treturn decorated\n\treturn decorator"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plots the graph. If the nodes have a position, the nodes will be placed there. Otherwise, they will be placed in a random but elegant manner. | def plot_graph(self) -> None: | [
"def plot_graph(self):\r\n x = []\r\n y = []\r\n\r\n for n in self.graph.get_all_v().values():\r\n if(n.get_pos() != None):\r\n x.append(n.get_pos().get_x())\r\n y.append(n.get_pos().get_y())\r\n else:\r\n x_random = random.random()\r\n y_random = random.random()\r\n n.set_pos(x_random, y_random, 0)\r\n x.append(x_random)\r\n y.append(y_random)\r\n fig, ax = plt.subplots()\r\n ax.scatter(x, y, 60, \"red\")\r\n for xi in self.graph.get_all_v().values():\r\n for yi in self.graph.all_out_edges_of_node(xi.get_key()):\r\n src = (xi.get_pos().get_x(), xi.get_pos().get_y())\r\n dest = (self.graph.get_node(yi).get_pos().get_x(), self.graph.get_node(yi).get_pos().get_y())\r\n plt.annotate(\"\", dest, src, arrowprops=dict(edgecolor=\"black\", arrowstyle=\"->\"))\r\n\r\n plt.title(\"OOP - Ex3\")\r\n plt.xlabel(\"x axis\")\r\n plt.ylabel(\"y axis\")\r\n plt.show()",
"def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()",
"def plot(self, show=True):\n \n curve_x = [node.x for node in self.nodeList]\n curve_y = [node.y for node in self.nodeList]\n pl.plot(curve_x, curve_y)\n \n if show:\n pl.show()",
"def nx_plot(self, **kwargs):\n nx.draw(self.graph, node_size=500, with_labels=True, node_color=\"white\", **kwargs)",
"def plot_nodes(nodes):\n x = [node.x for node in nodes]\n y = [node.y for node in nodes]\n plt.plot(x, y, 'k.')\n# plot_nodes_id(nodes)\n plot_nodes_energy(nodes)",
"def draw_nodes(self):\n pass",
"def plot_nodes(self,x_shift,y_shift):\n\n if not self.nodes: return # Bounce if option not selected\n\n self.ax.scatter(self.node_crds[:,0]+x_shift*self.pbc[0],self.node_crds[:,1]+y_shift*self.pbc[1],\n marker=\"o\",s=self.ms,c=self.mc,zorder=1)\n\n # for i,c in enumerate(self.node_crds):\n # self.ax.text(c[0],c[1],i,size=8)",
"def draw(self):\n\t\tnx_graph = self.parse_graph()\n\t\tpos = nx.spring_layout(nx_graph, k=0.15, iterations=20) # to spread out the nodes\n\n\t\tnx.draw(nx_graph, pos, edge_color=\"black\", width=1, linewidths=1, node_size=500, node_color=\"pink\", alpha=0.9, with_labels=True)\n\n\t\tedge_labels = {(edge[0], edge[1]):edge[2] for edge in self.edges}\n\t\tnx.draw_networkx_edge_labels(nx_graph, pos, edge_labels=edge_labels, font_color='red')\n\n\t\tplt.show()",
"def draw_nodes(self, *args, **kwargs):\n return self.assembly_plotter.draw_nodes(*args, **kwargs)",
"def paint(self):\n x = []\n y = []\n plt.figure(figsize=(10, 5), facecolor=\"silver\")\n ax = plt.axes()\n for node in self.graph.nodes.values():\n x.append(node.get_pos()[0])\n y.append(node.get_pos()[1])\n ax.scatter(x, y, color=\"black\", s=50)\n xl = ax.get_xlim()[1] - ax.get_xlim()[0]\n yl = ax.get_ylim()[1] - ax.get_ylim()[0]\n for nd in self.graph.nodes.values():\n for ed in self.graph.all_out_edges_of_node(Node.get_key(nd)).keys():\n desti: Node = self.graph.get_node(ed)\n destx = desti.get_pos()[0] - nd.get_pos()[0]\n desty = desti.get_pos()[1] - nd.get_pos()[1]\n ax.arrow(nd.get_pos()[0], nd.get_pos()[1], destx, desty, head_width=xl * 0.007,\n length_includes_head=True,\n head_length=yl * 0.02, width=xl * 0.0001 * yl, color='grey')\n plt.title(\"Your graph!\")\n plt.show()",
"def draw_graph(self):\r\n G=nx.Graph()\r\n \r\n list_location1 = []\r\n list_location2 = []\r\n list_location3 = []\r\n list_location4 = []\r\n \r\n for citizen in self.citizens:\r\n G.add_node(citizen.id)\r\n if citizen.location == 1:\r\n list_location1.append(citizen.id)\r\n elif citizen.location == 2:\r\n list_location2.append(citizen.id)\r\n elif citizen.location == 3:\r\n list_location3.append(citizen.id)\r\n else: \r\n list_location4.append(citizen.id)\r\n\r\n for citizen in self.citizens:\r\n for friend in citizen.friends:\r\n G.add_edge(citizen.id,friend.id)\r\n\r\n pos = nx.random_layout(G)\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location1, node_color='r')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location2, node_color='g')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location3, node_color='b')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location4, node_color='y')\r\n nx.draw_networkx_edges(G,pos, width=1)\r\n\r\n plt.show()",
"def draw( self, **kwargs ):\n\t\t\n\t\tnetworkx.draw(self.graph, **kwargs)\n\t\tpyplot.show()",
"def plot_graph(precincts, graph, window=None, node_size=0.1, line_size=0.05, dpi=400, size=7, save=False, savetitle=None):\n\n # Load in the shapefiles\n if type(precincts) == str:\n precincts = gp.read_file(precincts)\n else:\n precincts = precincts.copy()\n\n # Load in the graph\n if type(graph) == str:\n graph = Graph.from_json(graph)\n\n # Obtain a graph coloring\n d = nx.greedy_color(graph, strategy='largest_first')\n coloring = np.array([d[i] for i in range(len(graph))])\n\n precincts['color'] = coloring\n precincts['center'] = precincts.centroid # the location of the nodes\n nodes = gp.GeoDataFrame(geometry=precincts.center) # make a GeoDataFrame of nodes\n E = [LineString([precincts.loc[a,'center'],precincts.loc[b,'center']])\n for a,b in list(graph.edges)] # Construct a line for each edge\n edges = gp.GeoDataFrame(list(graph.edges), geometry=E) # make a geoDataFrame of edges\n\n fig = plt.figure(dpi=dpi) # Set up the figure\n fig.set_size_inches(size,size*2) # Make it have the same proportions as Utah\n ax = plt.subplot(1,1,1)\n\n precincts.plot('color', cmap='tab20', ax=ax, alpha=0.5) # Plot precincts\n nodes.plot(ax=ax,color='k', markersize=node_size) # Plot nodes\n edges.plot(ax=ax, lw=line_size, color='k') # plot edges\n if window is None:\n plt.axis('off') # no coordinates\n else:\n plt.axis(window)\n\n if save: plt.savefig(savetitle, bbox_inches='tight', dpi=dpi) # Save it",
"def drawNodes(self) -> None:\n pad_r, pad_l, pad_bot, pad_top = self.padding\n\n # finding how many nodes are possible to place\n partition = Node.size + self.gap\n\n h_available_space = self.window.get_width() - pad_r - pad_l\n v_available_space = self.window.get_height() - pad_bot - pad_top\n self.horizontal_nodes = h_available_space // partition\n self.vertical_nodes = v_available_space // partition\n \n # Finding the vertical and horizontal starting points that centers the nodes\n x0 = (self.window.get_width() - (self.horizontal_nodes * partition)) // 2\n y0 = (self.window.get_height() - (self.vertical_nodes * partition)) // 2\n\n # Generating the nodes\n self.grid = [\n Square(row, col, partition, x0, y0, self.window)\n for row in range(self.vertical_nodes) for col in range(self.horizontal_nodes)\n ]",
"def draw_random_graph(i):\n g_random = nx.gnp_random_graph(2**i,2*i/(2**i))\n nx.draw(g_random,node_size=20)\n plt.savefig(\"./random_graph.svg\")\n plt.close()\n # plt.show()",
"def draw_graph(G, # graph\n pos, # position of nodes\n aware_color = '#f63f89',\n not_aware_color = '#58f258',\n legend = True):\n \n # Create variables for store nodes numbers \n color_map_1 = [] \n color_map_2 = []\n # Create list of nodes numbers which are 'aware'\n awarelist = [i for i, d in G.nodes.data() if d['state'] == 'aware' ]\n # Create list of nodes numbers which are not 'aware'\n notawarelist = [i for i in range(len(G.nodes.data())) if i not in awarelist]\n # Append strings about colors to color_map lists\n for node in G:\n if node in awarelist:\n color_map_1.append(aware_color) # aware\n else: color_map_2.append(not_aware_color) # not aware\n # Draw the graph\n plt.title(\"Graph\")\n nx.draw_networkx_nodes(G,pos = pos, nodelist = awarelist, \n node_color = color_map_1, with_labels = True, \n label='Aware agent', alpha = 0.7)\n nx.draw_networkx_nodes(G,pos = pos, nodelist = notawarelist, \n node_color = color_map_2, with_labels = True, \n label='Not aware agent', alpha = 0.7)\n nx.draw_networkx_labels(G, pos = pos, font_size=12, font_color='k', \n font_family='sans-serif', font_weight='normal', \n alpha=1.0)\n nx.draw_networkx_edges(G,pos=pos)\n \n # optional legend\n if legend == True:\n plt.legend(numpoints = 1)",
"def plot_nodes(self, filename, **kwargs):\n\n g = graph.create_nx_graph(self.es, filename=filename, **kwargs)\n\n return g",
"def display_graph(variables, relations):\n graph = as_networkx_graph(variables, relations)\n\n # Do not crash if matplotlib is not installed\n try:\n import matplotlib.pyplot as plt\n\n nx.draw_networkx(graph, with_labels=True)\n # nx.draw_random(graph)\n # nx.draw_circular(graph)\n # nx.draw_spectral(graph)\n plt.show()\n except ImportError:\n print(\"ERROR: cannot display graph, matplotlib is not installed\")",
"def addVisibilityGraph(data, times, dataName='G1S1', coords=[0.05,0.95,0.05,0.95], \r\n numberOfVGs=1, groups_ac_colors=['b'], fig=None, numberOfCommunities=6, printCommunities=False, \r\n fontsize=None, nodesize=None, level=0.55, commLineWidth=0.5, lineWidth=1.0,\r\n withLabel=True, withTitle=False, layout='circle', radius=0.07, noplot=False, horizontal=False, communities=None, minNumberOfCommunities=2, communitiesMethod='betweenness_centrality', direction='left', weight='distance'):\r\n\r\n if len(data.shape)>1:\r\n data = extendedDataFrame.DataFrame(data=data).imputeMissingWithMedian().apply(lambda data: np.sum(data[data > 0.0]) / len(data), axis=0).values\r\n if communities is None:\r\n communities, graph_nx = clusteringFunctions.getCommunitiesOfTimeSeries(data, times, minNumberOfCommunities=minNumberOfCommunities, horizontal=horizontal, method=communitiesMethod, direction=direction, weight=weight) \r\n else:\r\n communities, graph_nx = communities\r\n\r\n if printCommunities:\r\n print('Communities:')\r\n [print(community) for community in communities]\r\n print('\\n')\r\n\r\n if noplot:\r\n return graph_nx, data, communities\r\n\r\n group = int(dataName[:dataName.find('S')].strip('G'))\r\n\r\n if fontsize is None:\r\n fontsize = 4. * (8. + 5.) / (numberOfVGs + 5.)\r\n \r\n if nodesize is None:\r\n nodesize = 30. * (8. + 5.) / (numberOfVGs + 5.)\r\n\r\n (x1,x2,y1,y2) = coords\r\n \r\n axisVG = fig.add_axes([x1,y1,x2 - x1,y2 - y1])\r\n cmap = matplotlib.colors.LinearSegmentedColormap.from_list('GR', [(0, 1, 0), (1, 0, 0)], N=1000)\r\n\r\n if layout=='line':\r\n pos = {i:[float(i)/float(len(graph_nx)), 0.5] for i in range(len(graph_nx))}\r\n else:\r\n pos = nx.circular_layout(graph_nx)\r\n keys = np.array(list(pos.keys())[::-1])\r\n values = np.array(list(pos.values()))\r\n values = (values - np.min(values, axis=0))/(np.max(values, axis=0)-np.min(values, axis=0))\r\n keys = np.roll(keys, np.argmax(values.T[1]) - np.argmin(keys))\r\n pos = dict(zip(keys, values))\r\n\r\n keys = np.array(list(pos.keys()))\r\n values = np.array(list(pos.values()))\r\n\r\n shortest_path = nx.shortest_path(graph_nx, source=min(keys), target=max(keys))\r\n shortest_path_edges = [(shortest_path[i],shortest_path[i + 1]) for i in range(len(shortest_path) - 1)]\r\n\r\n if layout=='line':\r\n for edge in graph_nx.edges:\r\n l = np.array(pos[edge[0]])\r\n r = np.array(pos[edge[1]])\r\n\r\n if edge in shortest_path_edges:\r\n axisVG.add_artist(matplotlib.patches.Wedge((l+r)/2., 0.5*np.sqrt((l-r)[0]*(l-r)[0]+(l-r)[1]*(l-r)[1]), 0, 180, fill=False, edgecolor='y', linewidth=0.5*3.*lineWidth, alpha=0.7, width=0.001))\r\n\r\n axisVG.add_artist(matplotlib.patches.Wedge((l+r)/2., 0.5*np.sqrt((l-r)[0]*(l-r)[0]+(l-r)[1]*(l-r)[1]), 0, 180, fill=False, edgecolor='k', linewidth=0.5*lineWidth, alpha=0.7, width=0.001))\r\n\r\n nx.draw_networkx(graph_nx, pos=pos, ax=axisVG, node_color='y', edge_color='y', node_size=nodesize * 1.7, width=0., nodelist=shortest_path, edgelist=shortest_path_edges, with_labels=False)\r\n nx.draw_networkx(graph_nx, pos=pos, ax=axisVG, node_color=data, cmap=cmap, alpha=1.0, font_size=fontsize, width=0., font_color='k', node_size=nodesize)\r\n else:\r\n nx.draw_networkx(graph_nx, pos=pos, ax=axisVG, node_color='y', edge_color='y', node_size=nodesize * 1.7, width=3.0*lineWidth, nodelist=shortest_path, edgelist=shortest_path_edges, with_labels=False)\r\n nx.draw_networkx(graph_nx, pos=pos, ax=axisVG, node_color=data, cmap=cmap, alpha=1.0, font_size=fontsize, width=lineWidth, font_color='k', 
node_size=nodesize)\r\n\r\n if layout=='line':\r\n xmin, xmax = (-1.,1.)\r\n ymin, ymax = (-1.,1.)\r\n else:\r\n xmin, xmax = axisVG.get_xlim()\r\n ymin, ymax = axisVG.get_ylim()\r\n\r\n X, Y = np.meshgrid(np.arange(xmin, xmax, (xmax - xmin) / 300.), np.arange(ymin, ymax, (ymax - ymin) / 300.))\r\n\r\n def smooth(Z, N=7.):\r\n for ix in range(1,Z.shape[0]-1,1):\r\n Z[ix] = ((N-1.)*Z[ix] + (Z[ix-1] + Z[ix+1])/2.)/N\r\n return Z\r\n\r\n for icommunity, community in enumerate(communities):\r\n Z = np.exp(X ** 2 - Y ** 2) * 0.\r\n nX, nY = tuple(np.array([pos[node] for node in community]).T)\r\n for i in range(len(community)-1):\r\n p1, p2 = np.array([nX[i], nY[i]]), np.array([nX[i+1], nY[i+1]])\r\n\r\n for j in range(-2, 32):\r\n pm = p1 + (p2-p1)*float(j)/30.\r\n Z[np.where((X-pm[0])**2+(Y-pm[1])**2<=radius**2)] = 1.\r\n \r\n for _ in range(20):\r\n Z = smooth(smooth(Z).T).T\r\n\r\n CS = axisVG.contour(X, Y, Z, [level], linewidths=commLineWidth, alpha=0.8, colors=groups_ac_colors[group - 1])\r\n #axisVG.clabel(CS, inline=True,fontsize=4,colors=group_colors[group-1], fmt ={level:'C%s'%icommunity})\r\n\r\n if layout=='line':\r\n axisVG.set_xlim(-0.1,1.)\r\n axisVG.set_ylim(-0.1,1.)\r\n\r\n axisVG.spines['left'].set_visible(False)\r\n axisVG.spines['right'].set_visible(False)\r\n axisVG.spines['top'].set_visible(False)\r\n axisVG.spines['bottom'].set_visible(False)\r\n axisVG.set_xticklabels([])\r\n axisVG.set_yticklabels([])\r\n axisVG.set_xticks([])\r\n axisVG.set_yticks([])\r\n\r\n if withLabel:\r\n axisVG.text(axisVG.get_xlim()[1], (axisVG.get_ylim()[1] + axisVG.get_ylim()[0]) * 0.5, dataName, ha='left', va='center',\r\n fontsize=8).set_path_effects([path_effects.Stroke(linewidth=0.4, foreground=groups_ac_colors[group - 1]),path_effects.Normal()])\r\n\r\n if withTitle:\r\n titleText = dataName + ' (size: ' + str(data.shape[0]) + ')' + ' min=%s max=%s' % (np.round(min(data),2), np.round(max(data),2))\r\n axisVG.set_title(titleText, fontsize=10)\r\n\r\n return graph_nx, data, communities"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Formats comparison as a string | def format_comparison(objs):
def formatter(comp):
if not isinstance(comp, tuple):
return str(comp)
output = []
return "\n".join([comp.type] + [" "+errmessage for errmessage in output])
results = map(formatter,objs)
return "\n".join(results)
#obj1,obj2 = comp
### Sections
#for i,s1,s2 in diffs:
# if s1 and s2:
# output.append(f"Section {i} does not match:")
# result = compare_sections(s1,s2)
# output.extend(almethods.linepadder(result))
# else:
# if s1:
# output.append(f"Door 2 missing Section {i}")
# else:
# output.append(f"Door 1 missing Section {i}") | [
"def generate_comparison_output_string(comparisons: List[Dict[str, Any]]) -> str:\n result_dict = generate_comparison_dict(comparisons)\n result_string = json.dumps(result_dict, sort_keys=True, indent=4)\n return result_string",
"def format_condition(self, key, val1, val2):\n if val1 is not None and val2 is not None:\n condition = '{:.2f} < {:s} < {:.2f}'.format(val1, key, val2)\n elif val2 is None:\n condition = '{:s} == {:s}'.format(key, str(val1))\n return condition",
"def _get_formatted_output(self, expected, actual):\n msg = ''\n mismatch_format = '{}: Expected {} Actual {}. '\n if actual is None:\n raise BTUtilsError('None is not expected.')\n for key in expected.keys():\n if expected[key] != actual[key]:\n msg += mismatch_format.format(key, expected[key], actual[key])\n return msg",
"def print_conditions(self):\n _outstr = \"\"\n first = True\n for cond in self._conditions:\n if not first:\n _outstr += \", \"\n if cond in ThresholdCheck._default_min_conditions:\n _outstr += \"{:s}={:.2e}\".format(cond, self._conditions[cond])\n elif cond in ThresholdCheck._default_max_conditions:\n _outstr += \"{:s}={:d}\".format(cond, self._conditions[cond])\n first = False\n return _outstr",
"def htmlTableComparisonColorCode(value1,value2):\n if value1 > value2:\n output=\"<span class=\\\"positive\\\"> {:03,.2f}</span>\".format(value1) #this formattting uses a comma as a separator for numbers, and outputs only two decimal places of the number\n elif value1 <value2:\n output=\"<span class=\\\"negative\\\"> {:03,.2f}</span>\".format(value1)\n else:\n output=\"{:03,.2f}\".format(value1)\n return output",
"def __str__(self):\n if self.attribute_type == 'numerical':\n return self.attribute + ' ' + self.operator + ' ' + str(self.split_value)\n else:\n return self.attribute + ' = ' + str(self.split_value)",
"def _repr_(self):\n s = 'An inequality '\n have_A = not self.A().is_zero()\n if have_A:\n s += repr(self.A()) + ' x '\n if self.b()>=0:\n if have_A:\n s += '+'\n else:\n s += '-'\n if have_A:\n s += ' '\n s += repr(abs(self.b())) + ' >= 0'\n return s",
"def condition2string(cond, format):\n if cond == None:\n return ''\n if format == CONDSTR_MSVC:\n return' && '.join(['\"$(%s)\" == \"%s\"' % (x.option.name,x.value) \\\n for x in cond.exprs])\n if format == CONDSTR_UNIXTEST:\n return ' -a '.join(['\"x$%s\" = \"x%s\"' % (x.option.name,x.value) \\\n for x in cond.exprs])\n raise errors.Error('unknown format')",
"def _diff_message(lhs, rhs):\r\n lhs = _to_characters(lhs)\r\n rhs = _to_characters(rhs)\r\n\r\n message = u'Diff:\\nl: %s\\nr: %s' % stringdiffer.highlight(lhs, rhs)\r\n # Python2 exceptions require bytes.\r\n return message.encode('UTF-8')",
"def print_test_comparison(test_name, expected, result):\n line = \"\\n\"\n line += \"-\" * 60 + \"\\n\"\n line += \"{}\\n\".format(test_name)\n line += \"-\" * 60 + \"\\n\"\n line += \"-\" * 26 + \"EXPECTED\" + \"-\" * 26 + \"\\n\"\n line += \"{}\\n\".format(expected)\n line += \"-\" * 28 + \"END\" + \"-\" * 29 + \"\\n\"\n line += \"-\" * 27 + \"RESULT\" + \"-\" * 27 + \"\\n\"\n line += \"{}\\n\".format(result)\n line += \"-\" * 28 + \"END\" + \"-\" * 29 + \"\\n\"\n line += \"\\n\"\n return line",
"def __repr__(self) -> str:\n result = \"Equal\" if self.got is None else \"Unequal\"\n return f\"<TestResultFile {self.test_id},{self.regression_test_id},{self.regression_test_output_id}: {result}>\"",
"def format_errordict(self, errordict):\n errormsg = f'Comparison between {self.ad1.filename} and {self.ad2.filename}'\n for k, v in errordict.items():\n errormsg += f'\\nComparison failure in {k}'\n errormsg += '\\n' + ('-' * (22 + len(k))) + '\\n'\n errormsg += '\\n '.join(v)\n return errormsg",
"def __repr__(self) -> str:\n summary = \"----- Comparison metadata -----\\n\"\n summary += f\"Name: {self.name}\\n\"\n # probabilistic or only deterministic\n if not self.probabilistic:\n summary += \"Kind: deterministic\\n\"\n else:\n summary += \"Kind: deterministic and probabilistic\\n\"\n summary += f\"long_name: {self.long_name}\\n\"\n # doc\n summary += f\"Function: {self.function.__doc__}\\n\"\n return summary",
"def __str__(self):\n\t\treturn \"{min} ~ {max}\".format(min=str(self.min), max=str(self.max))",
"def compare(self, other: Optional['PDFState']) -> str:\n ret_value = ''\n if (\n other is None or self.font_family != other.font_family or\n self.font_mode != other.font_mode or self.size != other.size\n ):\n ret_value += ' /{} {} Tf'.format(self.font.ref, round(self.size, 3))\n if other is None or self.color != other.color:\n ret_value += ' ' + str(self.color)\n if other is None or self.rise != other.rise:\n ret_value += ' {} Ts'.format(round(self.rise, 3))\n\n return ret_value",
"def __str__(self):\n left = str(self.get_left())\n right = str(self.get_right())\n\n if type(self.get_left()) == LogicNode and not self.get_left().has_not():\n left = \"(\" + left + \")\"\n if type(self.get_right()) == LogicNode and not self.get_right().has_not():\n right = \"(\" + right + \")\"\n\n operator = self.get_operator()\n\n if self.has_not():\n return \"~({} {} {})\".format(\n left, operator, right\n )\n \n return \"{} {} {}\".format(\n left, operator, right\n )",
"def _xml_comparison_string(element):\n def format_value(value):\n try:\n return \"%.9g\" % float(value)\n except:\n return value\n\n def write_element(element, buffer, indent):\n buffer.write(u\"%s<%s\" % (indent, element.tag))\n for key, value in element.items():\n if key in [\"id\", \"clip-path\"]:\n continue\n if key == \"d\" and element.tag == \"{http://www.w3.org/2000/svg}path\":\n buffer.write(u\" %s='%s'\" % (key, \" \".join([format_value(d) for d in value.split(\" \")])))\n elif key == \"transform\":\n buffer.write(u\" %s='%s'\" % (key, \"\".join([format_value(d) for d in re.split(\"(,|\\(|\\))\", value)])))\n elif key == \"points\" and element.tag == \"{http://www.w3.org/2000/svg}polygon\":\n buffer.write(u\" %s='%s'\" % (key, \" \".join([\",\".join([format_value(i) for i in p.split(\",\")]) for p in value.split(\" \")])))\n else:\n buffer.write(u\" %s='%s'\" % (key, format_value(value)))\n\n text = element.text if element.text is not None else \"\"\n if element.tag in [\"{http://www.sandia.gov/toyplot}data-table\", \"{http://www.sandia.gov/toyplot}axes\"]:\n text = str(_json_comparison_string(json.loads(element.text)))\n buffer.write(u\">%s\\n\" % text)\n for child in list(element):\n write_element(child, buffer, indent+\" \")\n buffer.write(u\"%s</%s>\\n\" % (indent, element.tag))\n\n buffer = io.StringIO()\n write_element(element, buffer, indent=\"\")\n return buffer.getvalue()",
"def print_comparison(name, dates, times, orig_data, comp_data):\n\n # Output comparison of data\n print(' ORIGINAL COMPUTED')\n print(f' DATE TIME {name.upper():>9} {name.upper():>9} DIFFERENCE')\n print('------- ------ --------- --------- ----------')\n zip_data = zip(dates, times, orig_data, comp_data)\n for date, time, orig, comp in zip_data:\n diff = orig - comp\n print(f'{date} {time:>6} {orig:9.6f} {comp:9.6f} {diff:10.6f}')",
"def get_compare_value_texts(self):\n return self.compare_value_texts"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Catches a difference when one or both of the objects are None (since it is handled the same across methods) | def none_comparison(func):
@functools.wraps(func)
def inner(obj1,obj2):
if obj1 is not None and obj2 is not None:
return func(obj1, obj2)
if obj1 is None and obj2 is None:
return []
if obj1 is not None and obj2 is None:
return Difference(f"Second {obj1.__class__.__name__} is None",(obj1,None))
return Difference(f"First {obj2.__class__.__name__} is None",(None,obj2))
return inner | [
"def ifnone(a: Any, b: Any) -> Any:\n return b if a is None else a",
"def _check_none(self) -> PossibleResult[T]:\n if self.constructor == type(None):\n if not self.obj is None:\n raise DeserializeError(\n type(None), self.obj, self.new_depth, self.key\n )\n return self.obj # type: ignore\n return NO_RESULT",
"def IsNone(self) -> bool:",
"def compare_with_none():\n value = {};\n if value is not None:\n print(\"value is not none\")\n else:\n print(\"value is none\")",
"def __eq__(self, other: Any) -> bool:\n return isinstance(other, Nothing)",
"def _merge_sanity_check(self, other):\n if self._fields is not None and (\n set(self.query.values_select) != set(other.query.values_select)\n or set(self.query.extra_select) != set(other.query.extra_select)\n or set(self.query.annotation_select) != set(other.query.annotation_select)\n ):\n raise TypeError(\n \"Merging '%s' classes must involve the same values in each case.\"\n % self.__class__.__name__\n )",
"def whose_dependent_on_who(self, obj1, obj2):\n if obj1 is obj2:\n return None\n elif self.direction == ONETOMANY:\n return (obj1, obj2)\n else:\n return (obj2, obj1)",
"def check_ne(obj1, obj2, message=None):\n if obj1 == obj2:\n if message is None:\n message = \"Check failed: %s == %s\" % (str(obj1), str(obj2))\n check_failed(message)",
"def fun(a, b):\n if a is None or b is None:\n return None\n return a + b",
"def return_none() -> None:\n pass",
"def test_no_rhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = object()\n rhs = None\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(lhs, result)",
"def _no_none_min(a, b):\n\n if a is None:\n return b\n elif b is None:\n return a\n else:\n return min(a, b)",
"def _check_union(self) -> PossibleResult[T]:\n if _is_union(self.constructor):\n args = get_args(self.constructor)\n is_optional = len(args) == 2 and type(None) in args\n is_optional_property = len(args) == 2 and Undefined in args\n if is_optional and self.obj is None:\n return None # type: ignore\n if is_optional_property and self.obj is UNDEFINED:\n return UNDEFINED # type: ignore\n for argument in args:\n convert_primitives = self.convert_primitives and (\n (is_optional and argument != type(None))\n or (is_optional_property and argument != Undefined)\n )\n try:\n return Deserialize(\n obj=self.obj,\n constructor=argument,\n depth=self.new_depth,\n convert_primitives=convert_primitives,\n ).run()\n except DeserializeError:\n pass\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n return NO_RESULT",
"def callFunctionIfNotNone(f, a, b):\n if a == None:\n return b\n elif b == None:\n return a\n else:\n return f(a, b)",
"def _system_check_non_empty(a, b):\n raise NotImplementedError",
"def _checkWithNone(self, val1, val2):\n\t\tif val1 is None or val2 is None:\n\t\t\treturn True\n\t\treturn val1==val2",
"def __ne__(self, other):\r\n\t\treturn (self.type != other.type or self.value != other.value)",
"def test_no_two_incident_are_separate_and_different(self):\n self.assertNotEqual(self.incident_one, self.incident_two)",
"def _assertIsNone(self, obj, msg=None):\n if obj is not None:\n standardMsg = '%s is not None' % (safe_repr(obj),)\n self.fail(self._formatMessage(msg, standardMsg))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compares attributes between 2 objects via getattr, returning the attribute values as a tuple if they do not match | def attr_comparison(obj1,obj2,attrs):
return [Difference(f"{obj1.__class__.__name__}.{attr}",(result1,result2)) for attr in attrs if (result1 := getattr(obj1,attr)) != (result2 := getattr(obj2,attr))] | [
"def attrs_to_tuple(obj):\n return tuple(getattr(obj, a) for a in attrs)",
"def CheckAttribs(a, b, attrs, assertEquals):\n # For Stop objects (and maybe others in the future) Validate converts some\n # attributes from string to native type\n a.Validate()\n b.Validate()\n for k in attrs:\n assertEquals(getattr(a, k), getattr(b, k))",
"def compare(current_formation):\n\n attribute_tuple = ()\n for attr in attributes:\n\n if attr in current_formation:\n attribute_tuple += (current_formation[attr],)\n elif attr in ['position_all']:\n position_list = list(current_formation['positions'].keys())\n attribute_tuple += (position_list,)\n else:\n print \"Invalid Attribute: %s\" % attr\n\n return attribute_tuple",
"def is_attr_equal(o1, o2, attrs):\n for attr in attrs:\n if getattr(o1, attr) != getattr(o2, attr):\n return False\n return True",
"def attr_equal(self, *args):\n return all(getattr(self.src, key) == getattr(self.dst, key)\n for key in args)",
"def _compare_attributes(self, first: Node, second: Node) -> bool:\n # If opsets of nodes are different, then nodes have different attributes.\n fst_opset = first.get_opset()\n snd_opset = second.get_opset()\n if fst_opset != snd_opset:\n return False\n\n if fst_opset not in ['opset1', 'opset4']:\n fst_name = first.soft_get('name', first.id)\n snd_name = second.soft_get('name', second.id)\n raise Error('Unsupported opset {} for nodes with names {} and {}'.format(fst_opset, fst_name, snd_name))\n\n if fst_opset == 'opset1':\n return self._compare_attributes_of_interpolate1(first, second)\n else:\n return self._compare_attributes_of_interpolate4(first, second)",
"def compare(a, b, attrs, f):\n for attr in attrs:\n if not f(getattr(a, attr), getattr(b, attr)):\n return False\n return True",
"def attrs_eq(received, **expected):\n for k, v in expected.iteritems():\n eq_(v, getattr(received, k))",
"def sub_comparison(obj1,obj2,translate):\n return [Difference(f\"{obj1.__class__.__name__} > {meth.__name__}\",result) for (meth,attr) in translate if (result := meth(getattr(obj1,attr),getattr(obj2,attr))) is not None]",
"def attr_both(self, *args):\n return all((getattr(self.src, key) and getattr(self.dst, key))\n for key in args)",
"def equiv_attrs(self, one, two):\n tups = self.tuple_relations()\n return find_closure(tups, [one]) == find_closure(tups, [two])",
"def check_attributes(test_object, attribute_map):\n for attr_name, exp_val in iteritems_(attribute_map):\n obj_val = getattr(test_object, attr_name)\n if obj_val != exp_val:\n raise AssertionError('Values for attribute %s differ! (expected: '\n '%s, found: %s)' %\n (attr_name, exp_val, obj_val))",
"def compare(current_team):\n\n attribute_tuple = ()\n for attr in attributes:\n\n if attr in current_team:\n attribute_tuple += (current_team[attr],)\n elif attr in ['style']:\n attribute_tuple += (current_team['formation'][attr],)\n elif attr in ['manager_league', 'manager_nation']:\n attribute_tuple += (current_team['manager'][attr[8:]],)\n elif attr in ['player']:\n # Get list of player names\n player_names = []\n for position in current_team['formation']['positions'].itervalues():\n player = position['player']\n player_names.append(player['name'] + player['commonName'] +\n player['firstName'] + player['lastName'])\n attribute_tuple += (player_names,)\n elif attr in ['total_skillMoves']:\n # Calculate total\n total = 0\n for position in current_team['formation']['positions'].itervalues():\n player = position['player']\n total += player[attr[6:]]\n attribute_tuple += (total,)\n elif attr in ['total_PAC', 'total_SHO', 'total_PAS', 'total_DRI', 'total_DEF', 'total_PHY']:\n # Calculate total\n total = 0\n index = ['PAC', 'SHO', 'PAS', 'DRI', 'DEF', 'PHY'].index(attr[6:])\n for position in current_team['formation']['positions'].itervalues():\n player = position['player']\n if not player['isGK']:\n total += player['attributes'][index]['value']\n attribute_tuple += (total,)\n else:\n print \"Invalid Attribute: %s\" % attr\n\n return attribute_tuple",
"def compare_element_attributes(self, elem1, elem2, attr_list):\n diff_list = []\n\n for attr in attr_list:\n val1 = elem1.get(attr)\n val2 = elem2.get(attr)\n if (val1 != val2):\n diff_list.append(\n self.Diff(elem1.tag + \":\" + attr, val1, val2))\n\n return diff_list",
"def assert_attributes_equal(self, video, attrs):\r\n for key, value in attrs.items():\r\n self.assertEquals(getattr(video, key), value)",
"def cmpAttributeValues(self, dcObj, ignoreOrder=True, **kwargs):\n rL = []\n floatRelTolerance = kwargs.get(\"floatRelTolerance\", 1.0e-05)\n floatAbsTolerance = kwargs.get(\"floatAbsTolerance\", 1.0e-04)\n try:\n sa = set(self.getAttributeList())\n sb = set(dcObj.getAttributeList())\n atNameComList = list(sa & sb)\n #\n lenEq = self.getRowCount() == dcObj.getRowCount()\n if not lenEq:\n return [(atName, False) for atName in atNameComList]\n #\n for atName in atNameComList:\n dataType, _ = self.__getAttributeInfo(atName)\n if dataType in [\"string\", \"integer\"]:\n if ignoreOrder:\n same = sorted(self.getAttributeValueList(atName)) == sorted(dcObj.getAttributeValueList(atName))\n else:\n same = self.getAttributeValueList(atName) == dcObj.getAttributeValueList(atName)\n elif dataType in [\"float\"]:\n aVL = self.getAttributeValueList(atName)\n bVL = dcObj.getAttributeValueList(atName)\n if ignoreOrder:\n for aV, bV in zip(sorted(aVL), sorted(bVL)):\n same = self.__isClose(aV, bV, relTol=floatRelTolerance, absTol=floatAbsTolerance)\n if not same:\n break\n else:\n for aV, bV in zip(aVL, bVL):\n same = self.__isClose(aV, bV, relTol=floatRelTolerance, absTol=floatAbsTolerance)\n if not same:\n logger.info(\"%s %s (rel=%r) (abs=%r) %r (%r)\", self.getName(), atName, aV * floatRelTolerance, floatAbsTolerance, aV, abs(aV - bV))\n break\n rL.append((atName, same))\n #\n return rL\n except Exception as e:\n if self._raiseExceptions:\n raise e\n return rL",
"def _PairUpAttributes(attributes):\n names = sorted(set(attr.id for attr in attributes))\n getters = {}\n setters = {}\n for attr in attributes:\n if attr.is_fc_getter:\n getters[attr.id] = attr\n elif attr.is_fc_setter and 'Replaceable' not in attr.ext_attrs:\n setters[attr.id] = attr\n return [(getters.get(id), setters.get(id)) for id in names]",
"def is_attrs_equal(cls, attr_name, self_attr_value, other_attr_value):\n is_equal = False\n if attr_name == \"custom_attributes\":\n is_equal = cls.compare_cas(self_attr_value, other_attr_value)\n elif attr_name in [\"updated_at\", \"created_at\"]:\n is_equal = cls.compare_datetime(self_attr_value, other_attr_value)\n elif attr_name == \"comments\":\n is_equal = cls.compare_comments(self_attr_value, other_attr_value)\n else:\n is_equal = self_attr_value == other_attr_value\n return is_equal",
"def with_cmp(attrs):\n def attrs_to_tuple(obj):\n \"\"\"\n Create a tuple of all values of *obj*'s *attrs*.\n \"\"\"\n return tuple(getattr(obj, a) for a in attrs)\n\n def eq(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) == attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def ne(self, other):\n result = eq(self, other)\n if result is NotImplemented:\n return NotImplemented\n else:\n return not result\n\n def lt(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) < attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def le(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) <= attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def gt(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) > attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def ge(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) >= attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def hash_(self):\n return hash(attrs_to_tuple(self))\n\n def wrap(cl):\n cl.__eq__ = eq\n cl.__ne__ = ne\n cl.__lt__ = lt\n cl.__le__ = le\n cl.__gt__ = gt\n cl.__ge__ = ge\n cl.__hash__ = hash_\n\n return cl\n return wrap"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a list of tuples comprised of (subcomparison method, attr name for comparison), returns any Difference tuple returned by each method using the given attr of obj1 and obj2 as arguments (if that method is not None) | def sub_comparison(obj1,obj2,translate):
return [Difference(f"{obj1.__class__.__name__} > {meth.__name__}",result) for (meth,attr) in translate if (result := meth(getattr(obj1,attr),getattr(obj2,attr))) is not None] | [
"def attr_comparison(obj1,obj2,attrs):\n return [Difference(f\"{obj1.__class__.__name__}.{attr}\",(result1,result2)) for attr in attrs if (result1 := getattr(obj1,attr)) != (result2 := getattr(obj2,attr))]",
"def sortByMethodCall(objList, tag):\n # replaces sortByName and sortByQualName\n \n ll = [(getattr(x,tag)(), x) for x in objList]\n ll.sort()\n return [x[1] for x in ll]",
"def get_field_comparisons(self):\n comparisons = []\n\n if self.is_addition() or self.is_deletion():\n # Display the fields without diff as one of the versions are missing\n obj = self.obj_a or self.obj_b\n\n for field_comparison in self.field_comparisons:\n comparisons.append(field_comparison(obj, obj))\n else:\n for field_comparison in self.field_comparisons:\n comparisons.append(field_comparison(self.obj_a, self.obj_b))\n\n return comparisons",
"def get_child_comparisons(self):\n objs_a = list(self.val_a.all())\n objs_b = list(self.val_b.all())\n\n map_forwards, map_backwards, added, deleted = self.get_mapping(objs_a, objs_b)\n objs_a = dict(enumerate(objs_a))\n objs_b = dict(enumerate(objs_b))\n\n comparisons = []\n\n for b_idx, b_child in objs_b.items():\n if b_idx in added:\n comparisons.append(self.get_child_comparison(None, b_child))\n else:\n comparisons.append(\n self.get_child_comparison(objs_a[map_backwards[b_idx]], b_child)\n )\n\n for a_idx, a_child in objs_a.items():\n if a_idx in deleted:\n comparisons.append(self.get_child_comparison(a_child, None))\n\n return comparisons",
"def with_cmp(attrs):\n def attrs_to_tuple(obj):\n \"\"\"\n Create a tuple of all values of *obj*'s *attrs*.\n \"\"\"\n return tuple(getattr(obj, a) for a in attrs)\n\n def eq(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) == attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def ne(self, other):\n result = eq(self, other)\n if result is NotImplemented:\n return NotImplemented\n else:\n return not result\n\n def lt(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) < attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def le(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) <= attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def gt(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) > attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def ge(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) >= attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def hash_(self):\n return hash(attrs_to_tuple(self))\n\n def wrap(cl):\n cl.__eq__ = eq\n cl.__ne__ = ne\n cl.__lt__ = lt\n cl.__le__ = le\n cl.__gt__ = gt\n cl.__ge__ = ge\n cl.__hash__ = hash_\n\n return cl\n return wrap",
"def deep_cmp(obj1, obj2):\n pass",
"def diffs(self):\n diffs = []\n # XXX i know, we are using the ZODB, so sorry for the cheesy eval()\n # uhm, some logic is not right here as well, we need to look at keys\n # in both the before and after sets :(\n if not self.before or not self.after:\n return []\n before = eval(self.before)\n # pfft!\n if not before:\n return []\n after = eval(self.after)\n for k,v in before.items():\n if k in ['objectClass','userPassword']:\n continue\n try:\n if k == 'uniqueMember':\n added, removed = uniqueMemberDiff(\n v, after['uniqueMember'] )\n diffs.append( {'attribute' : k,\n 'added' : added,\n 'removed' : removed,\n }\n )\n elif str(v) != str(after[k]):\n diffs.append( { 'attribute' : k,\n 'before' : before[k],\n 'after' : after[k] }\n )\n except KeyError:\n pass\n return diffs",
"def diff(*args):\n return reduce(lambda x, y: x - y, args)",
"def DataDiff(source, target, compare_list_as_value=True, depth=0, no_difference_value=None):\n # Ensure recursion doesnt go out of control\n if depth > 150:\n raise Exception('DataDiff recurlsion depth has hit limit (50), aborting.')\n\n # If we are not working with 2 different containers we can inspect, then do a simple check\n if type(source) not in (list, tuple, dict) or type(target) not in (list, tuple, dict):\n # If the types are different, the data is different (and cant be compared more)\n if type(source) != type(target):\n return (source, target)\n # Else, theyre the same types, if the values are different\n elif source != target:\n return (source, target)\n # Else, theyre the same types and value\n else:\n # This should only happen if this is a fresh DataDiff() call, depth==0\n if depth == 0:\n return (no_difference_value, no_difference_value)\n else:\n raise Exception('This should never happen, having a mismatching value different in anywhere but depth=0')\n\n\n if type(source) in (list, tuple):\n source_diff = []\n elif type(source) == dict:\n source_diff = {}\n else:\n raise Exception('Unhandled source_diff data type: %s' % type(source))\n\n if type(target) in (list, tuple):\n target_diff = []\n elif type(target) == dict:\n target_diff = {}\n else:\n raise Exception('Unhandled target_diff data type: %s' % type(target))\n\n # Check for incompatible types, and just return them both as theyre totally different\n if type(source_diff) != type(target_diff):\n return (source, target)\n\n # If we're handling a Dictionary compare\n if type(source_diff) == dict:\n # Process the source keys first\n for key in source.keys():\n _CompareDictValue(key, source, target, source_diff, target_diff, compare_list_as_value, no_difference_value, depth)\n\n # Process the target keys next, skipping any source keys we already processed\n for key in target.keys():\n # Skip any keys we already processed in source\n if key in source:\n continue\n\n # Reverse target/source, so that the reverse comparison/set is done\n _CompareDictValue(key, target, source, target_diff, source_diff, compare_list_as_value, no_difference_value, depth)\n\n # Else, if we're handling a List compare\n elif type(source_diff) == list:\n # If lists must be compared in total because the order of a list is important\n if compare_list_as_value:\n if source != target:\n return (list(source), list(target))\n\n # Else, compare each element of the list\n else:\n for count in range(0, len(source)):\n if count >= len(target):\n source_diff.append(source[count])\n elif source[count] != target[count]:\n source_diff.append(source[count])\n target_diff.append(target[count])\n\n # If the target has more elements than the source, add the rest \n if len(target) > len(source):\n target_diff += target[-(len(source) - len(target)):]\n\n else:\n raise Exception('Unspecified type handler for data: %s. Only dict and list/tuple types are accepted.')\n\n return (source_diff, target_diff)",
"def _compare(self, instance1, instance2, shortcut, rtol, atol, output):\n v1 = getattr(instance1, self.name)\n v2 = getattr(instance2, self.name)\n name = getComparisonName(\n _joinNamePath(instance1._name, self.name),\n _joinNamePath(instance2._name, self.name)\n )\n return compareScalars(name, v1, v2, dtype=self.dtype, rtol=rtol, atol=atol, output=output)",
"def sortIdenticalObjects(list_objects, method_comparison):\n list_results = []\n number_of_objects = len(list_objects)\n if number_of_objects == 0:\n pass\n elif number_of_objects == 1:\n list_results.append(list_objects)\n else:\n # More than 1 object\n list_remaining_objects = list_objects\n while len(list_remaining_objects) > 0:\n index = 0\n current_object = list_remaining_objects[0]\n list_remaining_objects = list_remaining_objects[1:]\n list_sorted_objects = []\n list_sorted_objects.append(current_object)\n # print \"Before loop: \", current_object, list_remaining_objects, iNumberOfRemainingObjects\n while index < len(list_remaining_objects):\n compare_object = list_remaining_objects[index]\n are_equal = method_comparison(current_object, compare_object)\n # print \" In the loop:\", index, compare_object, are_equal, list_sorted_objects, list_remaining_objects\n if are_equal:\n list_sorted_objects.append(compare_object)\n list_remaining_objects_tmp = list_remaining_objects[0:index]\n if index < len(list_remaining_objects):\n list_remaining_objects_tmp.extend(\n list_remaining_objects[index + 1 :]\n )\n list_remaining_objects = list_remaining_objects_tmp\n else:\n index += 1\n list_results.append(list_sorted_objects)\n return list_results",
"def compare_element_attributes(self, elem1, elem2, attr_list):\n diff_list = []\n\n for attr in attr_list:\n val1 = elem1.get(attr)\n val2 = elem2.get(attr)\n if (val1 != val2):\n diff_list.append(\n self.Diff(elem1.tag + \":\" + attr, val1, val2))\n\n return diff_list",
"def transformCompare(*args, **kwargs):\n\n pass",
"def compare(a, b, attrs, f):\n for attr in attrs:\n if not f(getattr(a, attr), getattr(b, attr)):\n return False\n return True",
"def _list_subtract(list1, list2):\r\n\r\n # call val_subtract on all items that are not the same\r\n res_list = [_val_subtract(val1, val2, _dict_subtract, _list_subtract)\r\n for val1, val2 in zip(list1, list2) if val1 != val2]\r\n\r\n # now append items that come after any item in list1\r\n res_list += list1[len(list2):]\r\n\r\n # return a tuple of list1 is a tuple\r\n if isinstance(list1, tuple):\r\n return tuple(res_list)\r\n else:\r\n return res_list",
"def equalObjs(obj1,obj2,allowedDiff,ignore=[], where=None, fname=None,\n fname2=None ):\n if type(fname)==str:\n fname = fname.replace( os.getcwd(), \".\" )\n if type(obj1) in [ float, int ] and type ( obj2) in [ float, int ]:\n obj1,obj2=float(obj1),float(obj2)\n\n if type(obj1) != type(obj2):\n logger.warning(\"Data types differ: (%s,%s) <-> (%s,%s) in %s:%s\" %(obj1,type(obj1),obj2,type(obj2),where,fname))\n return False\n\n if isinstance(obj1,unum.Unum):\n if obj1 == obj2:\n return True\n diff = 2.*abs(obj1-obj2)/abs(obj1+obj2)\n return diff.asNumber() < allowedDiff\n elif isinstance(obj1,float):\n if obj1 == obj2:\n return True\n diff = 2.*abs(obj1-obj2)/abs(obj1+obj2)\n if diff > allowedDiff:\n logger.error ( \"values %s and %s differ by %s in %s:%s\" % ( obj1, obj2, diff, where, fname) )\n return diff < allowedDiff\n elif isinstance(obj1,str):\n if obj1 != obj2:\n logger.error ( \"strings ``%s'' and ``%s'' differ in %s:%s\" % ( obj1, obj2, where, fname ) )\n return obj1 == obj2\n elif isinstance(obj1,dict):\n for key in obj1:\n if key in ignore: continue\n if not key in obj2:\n if where == None:\n where = \"unspecified\"\n if fname2 == None:\n fname2 = \"unspecified\"\n logger.warning(\"Key ``%s'' missing in %s:%s\" % (key, where, fname2 ) )\n return False\n if not equalObjs(obj1[key],obj2[key],allowedDiff, ignore=ignore, where=key, fname = fname, fname2 = fname2 ):\n return False\n elif isinstance(obj1,list):\n if len(obj1) != len(obj2):\n logger.warning('Lists differ in length:\\n %i (this run)\\n and\\n %i (default)' %\\\n (len(obj1),len(obj2)))\n return False\n for ival,val in enumerate(obj1):\n if not equalObjs(val,obj2[ival],allowedDiff, fname = fname,\n fname2 = fname2 ):\n #logger.warning('Lists differ:\\n %s (this run)\\n and\\n %s (default)' %\\\n # (str(val),str(obj2[ival])))\n return False\n else:\n return obj1 == obj2\n\n return True",
"def cmpAttributeValues(self, dcObj, ignoreOrder=True, **kwargs):\n rL = []\n floatRelTolerance = kwargs.get(\"floatRelTolerance\", 1.0e-05)\n floatAbsTolerance = kwargs.get(\"floatAbsTolerance\", 1.0e-04)\n try:\n sa = set(self.getAttributeList())\n sb = set(dcObj.getAttributeList())\n atNameComList = list(sa & sb)\n #\n lenEq = self.getRowCount() == dcObj.getRowCount()\n if not lenEq:\n return [(atName, False) for atName in atNameComList]\n #\n for atName in atNameComList:\n dataType, _ = self.__getAttributeInfo(atName)\n if dataType in [\"string\", \"integer\"]:\n if ignoreOrder:\n same = sorted(self.getAttributeValueList(atName)) == sorted(dcObj.getAttributeValueList(atName))\n else:\n same = self.getAttributeValueList(atName) == dcObj.getAttributeValueList(atName)\n elif dataType in [\"float\"]:\n aVL = self.getAttributeValueList(atName)\n bVL = dcObj.getAttributeValueList(atName)\n if ignoreOrder:\n for aV, bV in zip(sorted(aVL), sorted(bVL)):\n same = self.__isClose(aV, bV, relTol=floatRelTolerance, absTol=floatAbsTolerance)\n if not same:\n break\n else:\n for aV, bV in zip(aVL, bVL):\n same = self.__isClose(aV, bV, relTol=floatRelTolerance, absTol=floatAbsTolerance)\n if not same:\n logger.info(\"%s %s (rel=%r) (abs=%r) %r (%r)\", self.getName(), atName, aV * floatRelTolerance, floatAbsTolerance, aV, abs(aV - bV))\n break\n rL.append((atName, same))\n #\n return rL\n except Exception as e:\n if self._raiseExceptions:\n raise e\n return rL",
"def diff(self, other):\n element_diffs = []\n for self_elem, other_elem in zip(\n (self.compile_setup, self.relations, self.stamps, self.apis,\n self.source_infos, self.compilations),\n (other.compile_setup, other.relations, other.stamps, other.apis,\n other.source_infos, other.compilations)):\n element_diff = self_elem.diff(other_elem)\n if element_diff.is_different():\n element_diffs.append(element_diff)\n return element_diffs",
"def whose_dependent_on_who(self, obj1, obj2):\n if obj1 is obj2:\n return None\n elif self.direction == ONETOMANY:\n return (obj1, obj2)\n else:\n return (obj2, obj1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
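Editor's note: the sortIdenticalObjects negative at the top of this block groups a list into buckets of pairwise-equal elements via a caller-supplied comparison callback. Below is a minimal, self-contained sketch of that grouping idea in present-day Python; the function name and the modulo-3 comparison are illustrative assumptions, not part of the dataset.

def group_identical(items, are_equal):
    # Collect items into buckets whose members compare equal under are_equal.
    groups = []
    for item in items:
        for group in groups:
            if are_equal(group[0], item):
                group.append(item)
                break
        else:
            groups.append([item])
    return groups

if __name__ == "__main__":
    # Group integers that are congruent modulo 3.
    print(group_identical([1, 4, 2, 7, 5, 3], lambda a, b: a % 3 == b % 3))
    # -> [[1, 4, 7], [2, 5], [3]]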
Postmortem, using a custom debug function if passed | def post_mortem(*args, debug_fn: Optional[Callable] = None, **kwargs) -> None:
if debug_fn is None:
import pdb
debug_fn = pdb.post_mortem
debug_fn() | [
"def debugger_step_over():",
"def debug():",
"def debugTestRunner(post_mortem=None):\n if post_mortem is None:\n post_mortem = pdb.post_mortem\n class DebugTestResult(unittest.TextTestResult):\n def addError(self, test, err):\n # called before tearDown()\n traceback.print_exception(*err)\n post_mortem(err[2])\n super(DebugTestResult, self).addError(test, err)\n def addFailure(self, test, err):\n traceback.print_exception(*err)\n post_mortem(err[2])\n super(DebugTestResult, self).addFailure(test, err)\n return unittest.TextTestRunner(resultclass=DebugTestResult)",
"def debug():\n\n return",
"def debugger_disassemble():",
"def xpm(Pdb=Pdb):\n info = sys.exc_info()\n print(traceback.format_exc())\n post_mortem(info[2], Pdb)",
"def after_step(context, step):\n if context.config.userdata.getbool(\"debug\") and step.status == \"failed\":\n spost_mortem(step.exc_traceback)",
"def debugger_step_line():",
"def debug(state: bool, /) -> None:",
"def debugger_break_now():",
"def debugger_step_into():",
"def pm(conn):\r\n #pdb.post_mortem(conn.root.getconn()._last_traceback)\r\n with redirected_stdio(conn):\r\n conn.modules.pdb.post_mortem(conn.root.getconn()._last_traceback)",
"def do_debug(self, arg):\n orig_trace = sys.gettrace()\n if orig_trace:\n sys.settrace(None)\n globals = self.curframe.f_globals\n locals = self.curframe_locals\n Config = self.ConfigFactory\n\n class PdbppWithConfig(self.__class__):\n def __init__(self_withcfg, *args, **kwargs):\n kwargs.setdefault(\"Config\", Config)\n super(PdbppWithConfig, self_withcfg).__init__(*args, **kwargs)\n\n # Backport of fix for bpo-31078 (not yet merged).\n self_withcfg.use_rawinput = self.use_rawinput\n\n local.GLOBAL_PDB = self_withcfg\n local.GLOBAL_PDB._use_global_pdb_for_class = self.__class__\n\n prev_pdb = local.GLOBAL_PDB\n p = PdbppWithConfig(self.completekey, self.stdin, self.stdout)\n p._prompt = \"({}) \".format(self._prompt.strip())\n self.message(\"ENTERING RECURSIVE DEBUGGER\")\n self._flush_sticky_messages()\n try:\n with self._custom_completer():\n sys.call_tracing(p.run, (arg, globals, locals))\n except Exception:\n exc_info = sys.exc_info()[:2]\n self.error(traceback.format_exception_only(*exc_info)[-1].strip())\n finally:\n local.GLOBAL_PDB = prev_pdb\n self.message(\"LEAVING RECURSIVE DEBUGGER\")\n\n if orig_trace:\n sys.settrace(orig_trace)\n self.lastcmd = p.lastcmd",
"def debugger_enable_breakpoint():",
"def debugger_continue():",
"def pm(conn):\n #pdb.post_mortem(conn.root.getconn()._last_traceback)\n redir = redirected_stdio(conn)\n try:\n conn.modules.pdb.post_mortem(conn.root.getconn()._last_traceback)\n finally:\n redir.restore()",
"def debugger_add_hw_breakpoint():",
"def debugger_show_breakpoints():",
"def debugger_step_over_line():"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
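Editor's sketch for the post_mortem entry above: how a post-mortem helper with a pluggable debug function might be exercised. The lambda passed as debug_fn is an assumption used only to keep the example non-interactive; calling the helper with no argument would drop into pdb.post_mortem.

import pdb
from typing import Callable, Optional

def post_mortem(*args, debug_fn: Optional[Callable] = None, **kwargs) -> None:
    # Default to pdb's post-mortem debugger when no custom function is supplied.
    if debug_fn is None:
        debug_fn = pdb.post_mortem
    debug_fn()

try:
    1 / 0
except ZeroDivisionError:
    # A stub debug_fn so the sketch runs without opening an interactive session.
    post_mortem(debug_fn=lambda: print("would enter the debugger here"))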
Simple forward step with crossentropy loss. | def _cross_entropy_forward_step(batch, model):
timers = get_timers()
# Get the batch.
timers('batch-generator', log_level=2).start()
try:
batch_ = next(batch)
except BaseException:
batch_ = batch
tokens, types, labels, attention_mask = process_batch(batch_)
timers('batch-generator').stop()
# Forward model.
output_tensor = model(tokens, attention_mask, tokentype_ids=types)
return output_tensor, partial(cross_entropy_loss_func, labels) | [
"def forward_train(self, *args, **kwargs):\n pass",
"def forward(self, X, training=False):\n pass",
"def forward_pass(self, x, targets=None):\n self.x = x\n self.targets = targets\n \n # Input layer\n out = self.layers[0].forward_pass(x)\n \n # Forward...\n for layer in self.layers[1:]:\n out = layer.forward_pass(out)\n \n # Softmax\n self.y = softmax(out)\n \n # Cross-entropy loss\n if targets is not None:\n return self.loss_func(self.y, targets), self.y\n else: \n return None, self.y",
"def train_step(self, loss: torch.Tensor) -> None:\n raise NotImplementedError",
"def train_step(self, x, y):\r\n\t\t# Sets model to train mode\r\n\t\tself.model.train().to(self.device)\r\n\r\n\t\t# Makes predictions\r\n\t\tyhat = self.model(x).to(self.device)\r\n\r\n\t\t# Computes loss\r\n\t\tloss = self.loss_fn(y, yhat)\r\n\r\n\t\t# Computes gradients\r\n\t\tloss.backward()\r\n\r\n\t\t# Updates parameters and zeroes gradients\r\n\t\tself.optimizer.step()\r\n\t\tself.optimizer.zero_grad()\r\n\r\n\t\t# Returns the loss\r\n\t\treturn loss.item()",
"def nll_forward(target_pred, target_true):\n ones = np.ones(target_true.shape)\n mult1 = np.multiply((ones-target_pred), np.log(ones-target_pred))\n mult2 = np.multiply(target_pred, np.log(target_pred))\n result = mult1 + mult2\n output = np.mean(-result) \n return output",
"def forward_propagation(self):\n pred_y = argmax(self.model.predict(train_x), axis=1)\n\n accuracy_func = Accuracy()\n accuracy_func.update_state(pred_y, train_y)\n self.accuracy = accuracy_func.result().numpy()",
"def cross_entropy_loss():\n return nn.CrossEntropyLoss()",
"def forward(self,y_out, y_truth): \n result = None\n #########################################################################\n # TODO: #\n # Implement the forward pass and return the output of the BCE loss. #\n #########################################################################\n\n result = -1 * (np.multiply(y_truth, np.log(y_out)) + np.multiply((1 - y_truth), np.log(1 - y_out)))\n \n \n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n \n return result",
"def forward(self, x):\n for layer in self.hidden_layers:\n x = F.relu(layer(x))\n x = self.dropout(x)\n x = self.output(x)\n\n return F.log_softmax(x, dim=1)",
"def forward(self, x):\n # Flatten\n x = x.view(-1, 28*28)\n\n # FC->ReLU \n x = F.relu(self.fc1(x))\n if self.apply_drop:\n x = self.drop1(x)\n\n # FC->ReLU \n x = F.relu(self.fc2(x))\n if self.apply_drop:\n x = self.drop2(x)\n\n # FC->ReLU \n x = F.relu(self.fc3(x))\n if self.apply_drop:\n x = self.drop3(x)\n\n # FC\n x = self.fc4(x)\n \n return x",
"def forward(self, inputs, target_oneHot):\n\n N = inputs.size()[0]\n\n # predicted probabilities for each pixel along channel\n inputs = F.softmax(inputs, dim=1)\n\n # Numerator Product\n inter = inputs * target_oneHot\n # Sum over all pixels N x C x H x W => N x C\n inter = inter.view(N, self.classes, -1).sum(2)\n\n # Denominator\n union = inputs + target_oneHot - (inputs * target_oneHot)\n # Sum over all pixels N x C x H x W => N x C\n union = union.view(N, self.classes, -1).sum(2)\n\n loss = inter / union\n\n ## Return average loss over classes and batch\n # return 1 - loss.mean()\n return -(loss.mean() - 1.)",
"def forward_pass(self, x, targets=None):\n self.x = x\n if targets is None:\n loss = None\n else:\n self.targets = targets\n\n result = x\n for layer in self.layers:\n result = layer.forward_pass(result)\n\n # softamax activation on input\n self.y = softmax(result)\n\n if targets is not None:\n loss = self.loss_func(self.y, self.targets)\n\n return loss, self.y",
"def forward(inputs,weights,function=sigmoid,step=-1):\n if step == 0:\n return inputs\n elif step == -1:\n step = len(weights) #go to output layer \n output = np.append(1, inputs)\n for i in range(step):\n output = np.append(1, function(np.dot(weights[i], output))) #calculating activation\n return output[1:]",
"def adversarial_step_ll(model, image, num_classes):\n image = tf.cast(image, tf.float32)\n\n with tf.GradientTape() as tape:\n tape.watch(image)\n prediction = model(image)\n y_ll = model(image).numpy().argmin()\n y_ll = labels_to_one_hot([y_ll], num_classes)[0]\n loss = tf.keras.losses.MSE(y_ll, prediction)\n\n signed_gradient = tape.gradient(loss, image)\n\n return -1 * tf.sign(signed_gradient)",
"def forward(self,x):\n x = self.conv(x)\n x = self.bn(x)\n\n if self.activation:\n x = activation_relu()(x)\n\n return x",
"def forward(self, x):\n\n # CNN\n x = self.conv1(x)\n if self.apply_drop:\n x = self.drop1a(x)\n # CNN->ReLU\n x = self.conv1b(x)\n x = F.relu(x)\n if self.apply_drop:\n x = self.drop1b(x)\n\n # CNN\n x = self.conv2(x)\n if self.apply_drop:\n x = self.drop2a(x)\n # CNN->ReLU\n x = self.conv2b(x)\n x = F.relu(x)\n if self.apply_drop:\n x = self.drop2b(x)\n \n # Flatten\n x = x.view(-1, 16*4*4)\n \n # FC->ReLU\n x = F.relu(self.fc1(x))\n if self.apply_drop:\n x = self.fc_drop1(x)\n \n # FC->ReLU\n x = F.relu(self.fc2(x))\n if self.apply_drop:\n x = self.fc_drop2(x)\n \n # FC\n x = self.fc3(x)\n \n return x",
"def forward(self, x, target=None):\n for i in self.layers:\n y = i.forward(x)\n x = y\n\n if target is None:\n out = y\n else:\n out = self.loss.forward(y,target)\n return out",
"def add_loss_op(self, pred):\n ### YOUR CODE HERE\n loss = cross_entropy_loss(self.labels_placeholder,pred)\n ### END YOUR CODE\n return loss"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
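Editor's sketch for the cross-entropy forward step above: the Megatron-LM helpers it relies on (get_timers, process_batch, cross_entropy_loss_func) are not reproduced here, so this is plain PyTorch illustrating only the output-plus-loss-closure pattern; all names below are assumptions.

from functools import partial

import torch
import torch.nn.functional as F

def loss_func(labels, output):
    # Cross-entropy between logits of shape (batch, classes) and integer labels.
    return F.cross_entropy(output, labels)

def forward_step(batch, model):
    tokens, labels = batch
    output = model(tokens)
    # Return the raw output together with a loss closure, mirroring the entry above.
    return output, partial(loss_func, labels)

model = torch.nn.Linear(8, 3)
batch = (torch.randn(4, 8), torch.randint(0, 3, (4,)))
output, loss_fn = forward_step(batch, model)
print(loss_fn(output))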
Build a looped dataloader with infinite size. | def _build_infinite_size_dataloader(dataloader):
iterator = dataloader.__iter__()
while True:
try:
yield iterator.__next__()
except StopIteration:
iterator = dataloader.__iter__() | [
"def create_reader_op(self, feed_list):\n reader = fluid.io.DataLoader.from_generator(feed_list=feed_list, \n capacity=256, iterable=True, use_double_buffer=True)\n\n return reader",
"def data_gen_infinite(self, batch_sz):\n g = self.data_gen_finite(batch_sz)\n while True:\n try:\n *ret, = next(g)\n except StopIteration:\n # restart generator\n g = self.data_gen_finite(batch_sz)\n *ret, = next(g)\n yield ret",
"def build_data():\n i = 0\n while i < N_SAMPLE :\n sys.stdout.write(\"\\rLoading file \" + str(i+1) + \"/\" + str(N_SAMPLE))\n sys.stdout.flush()\n # There might be errors when sampling files (because file is too big\n # or because of a git clone error or because there are no c files in \n # the repo). If an error is encountered, we just\n # go to the next iteration and don't download the file.\n try: \n file_sampler()\n except:\n i = i - 1\n i = i + 1",
"def create_dataloader(self):\n if self.num_workers:\n dataloader = torch.utils.data.DataLoader(\n self,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n drop_last=self.drop_last,\n prefetch_factor=self.prefetch_factor,\n persistent_workers=self.persistent_workers,\n )\n else:\n dataloader = torch.utils.data.DataLoader(\n self, batch_size=self.batch_size, drop_last=self.drop_last,\n )\n\n return dataloader",
"def create_dataloader(data):\r\n input_ids = torch.LongTensor([sent['input_ids'] for sent in data])\r\n input_mask = torch.LongTensor([sent['input_mask'] for sent in data])\r\n segment_ids = torch.LongTensor([sent['segment_ids'] for sent in data])\r\n label_ids = torch.LongTensor([sent['label_ids'] for sent in data])\r\n\r\n dataset = TensorDataset(input_ids, input_mask, segment_ids, label_ids)\r\n\r\n train_sampler = RandomSampler(dataset)\r\n dataloader = DataLoader(dataset, sampler=train_sampler, batch_size=BATCH_SIZE)\r\n\r\n return dataloader",
"def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader",
"def iterate_data(batch_size, **kwargs):\n batch = []\n for item in read_data(**kwargs):\n batch.append(item)\n if len(batch) == batch_size:\n yield batch\n batch = []\n\n if batch:\n yield batch",
"def nextBatch(self, batch_size):\r\n ## Make sure we have enough data to generate\r\n if len(self.toGenerate) < batch_size:\r\n self.toGenerate = random.sample(self.datas, len(self.datas))\r\n ## Pop out batch size number of data\r\n batch = self.toGenerate[:batch_size]\r\n ## Remove those data from list\r\n self.toGenerate = self.toGenerate[batch_size:]\r\n return DataContainer(batch)",
"def inf_loop(data_loader):\n for loader in repeat(data_loader):\n yield from loader",
"def open_dataset(dataset_path, batch_size, img_shape, infinite=True):\n dataset = generate_paths()\n\n dataset_gen = dataset_generator(\n dataset,\n batch_size=batch_size, infinite=infinite,\n img_shape=img_shape\n )\n steps = len(dataset) // batch_size\n return dataset_gen, steps",
"def __batch_data(self, batch_size):\n first = True\n batch_container_list = []\n while True:\n for i in range(batch_size):\n data_list = self.__data_queue.get()\n if first:\n ## init the batch_list ##\n for i in range(len(data_list)):\n batch_container_list.append([])\n first = False\n\n for batch_container,data_item in zip(batch_container_list,data_list):\n batch_container.append(data_item)\n\n ## put the batch data into batch_queue ##\n self.__batch_queue.put(copy.deepcopy(batch_container_list))\n\n for batch_container in batch_container_list:\n batch_container.clear()",
"def nextEpoch(self, batch_size):\r\n numBatches = len(self) // batch_size\r\n toGenerate = random.sample(self.datas, len(self.datas))\r\n for i in range(numBatches):\r\n batch = toGenerate[:batch_size]\r\n toGenerate = toGenerate[batch_size:]\r\n yield DataContainer(batch)",
"def inf_batches(dataloader, with_info=True):\n epoch = 0\n while True:\n for i, _batch in enumerate(dataloader):\n if with_info:\n yield _batch, (i, epoch)\n else:\n yield _batch\n epoch += 1",
"def repeat(data_loader):\n for loader in itertools.repeat(data_loader):\n for batch in loader:\n yield batch",
"def __init__(self, max_count):\n self._max_count = max_count if max_count > 1 else 1 # Minimum is 1.\n self._data = collections.deque()",
"def _get_dataloader(samples, batch_size):\n print(\"Cogiendo dataloader\")\n return DataLoader(samples, shuffle=True, batch_size=batch_size)",
"def dynamic_dataset(iterated):\n if not isinstance(iterated, dataset_ops.Dataset):\n return iterated\n\n def epoch_dataset_number_helper(i):\n return dataset_ops.Dataset.zip(\n (dataset_ops.Dataset.from_tensors(i).repeat(), iterated))\n\n epoch_numbers = dataset_ops.Dataset.range(2)\n return epoch_numbers.flat_map(epoch_dataset_number_helper)",
"def make_batches(dataset, batch_size):\n for i in range(0, len(dataset), batch_size):\n yield dataset[i:i + batch_size]",
"def train_dataloader(self) -> DataLoader[Any]:\n return DataLoader(\n self.train_dataset,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n shuffle=True,\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
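Editor's sketch for the looped-dataloader entry above, rewritten with the iter()/next() builtins and driven by a small torch DataLoader; the loader contents and batch size are assumptions.

import itertools

from torch.utils.data import DataLoader

def build_infinite_dataloader(dataloader):
    # Restart the underlying iterator whenever it is exhausted.
    iterator = iter(dataloader)
    while True:
        try:
            yield next(iterator)
        except StopIteration:
            iterator = iter(dataloader)

loader = DataLoader(list(range(5)), batch_size=2)
# One pass yields 3 batches; asking for 6 forces the generator to loop around.
for batch in itertools.islice(build_infinite_dataloader(loader), 6):
    print(batch)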
Construct solver from Caffe solver prototxt file. | def from_caffe_solver_protoxt(cls, caffe_solver_prototxt_file: Path):
solver_param = caffe_pb2.SolverParameter()
with open(caffe_solver_prototxt_file, 'rt') as f:
pb2.text_format.Merge(f.read(), solver_param)
dictionary = {'lr_policy': solver_param.lr_policy,
'base_lr': solver_param.base_lr,
'gamma': solver_param.gamma,
'momentum': solver_param.momentum,
'max_iter': solver_param.max_iter,
'stepsize': solver_param.stepsize,
'stepvalues': solver_param.stepvalue,
'weight_decay': solver_param.weight_decay,
'iter_size': solver_param.iter_size,
'from_prototxt': caffe_solver_prototxt_file}
return cls(**dictionary) | [
"def from_file(self, file_path):\n clauses = []\n varnum = 0\n with open(file_path, 'r') as file:\n for line in file:\n if line.startswith(\"c\"):\n continue\n if line.startswith(\"p\"):\n tmp = line.split()\n varnum = int(tmp[2])\n continue\n\n variables = list(map(int, line.split()))[:-1]\n clauses.append(Clause.from_list(variables))\n return Cnf.from_list(clauses, varnum)",
"def get_solver(self):\n # Create a temporary solver file.\n fname = '__solver__.prototxt'\n f = open(fname, 'w')\n f.write(self.to_proto())\n f.close()\n # Get solver from file.\n solver = caffe.get_solver_from_file(fname)\n # Remove the temporary solver file and return solver.\n os.remove(fname)\n return solver",
"def _generateSolverFile(self, filepath, neural_network_id):\n # Cteni konfigurace solveru z databaze\n result = self.config.backend.proxy.solver_config.get(neural_network_id)\n config = result['data']\n \n solver_proto = caffe_pb2.SolverParameter()\n \n # Mapovani z property SolverParameter tridy na nazev databazoveho sloupecku\n # V zasade jsou nazvy stejne, ale to se muze casem zmenit (zmena definice v caffe)\n solver_db_mapping = {\n 'net': 'net',\n 'test_iter': 'test_iter',\n 'test_interval': 'test_interval',\n 'test_compute_loss': 'test_compute_loss',\n 'base_lr': 'base_lr',\n 'display': 'display',\n 'max_iter': 'max_iter',\n 'lr_policy': 'lr_policy',\n 'gamma': 'gamma',\n 'power': 'power',\n 'momentum': 'momentum',\n 'weight_decay': 'weight_decay',\n 'stepsize': 'stepsize',\n 'snapshot': 'snapshot',\n 'snapshot_prefix': 'snapshot_prefix',\n 'snapshot_diff': 'snapshot_diff',\n 'snapshot_after_train': 'snapshot_after_train',\n 'solver_mode': 'solver_mode',\n 'device_id': 'device_id',\n 'random_seed': 'random_seed',\n 'debug_info': 'debug_info',\n }\n \n message_descriptor = solver_proto.DESCRIPTOR \n for solver_property in solver_db_mapping:\n db_field = solver_db_mapping[solver_property]\n value = config[db_field]\n if value:\n field = message_descriptor.fields_by_name.get(solver_property, None)\n if field:\n # prevedeni enum value z retezce na int\n if field.type == descriptor.FieldDescriptor.TYPE_ENUM:\n value = field.containing_type.enum_values_by_name[value].number\n \n if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: \n property = getattr(solver_proto, solver_property)\n property.append(value)\n else: \n setattr(solver_proto, solver_property, value)\n #endif\n #endif\n #endif\n #endfor\n\n file_content = text_format.MessageToString(solver_proto, as_utf8=True)\n file = open(filepath, 'w')\n if not file:\n raise self.ProcessException(\"Nemuzu vytvorit solver soubor (\" + filepath + \")!\")\n file.write(file_content)\n file.close()",
"def from_CMTSOLUTION_file(self, filename):\n with open(filename, \"rt\") as f:\n f.readline()\n f.readline()\n time_shift = float(f.readline().strip().split()[-1])\n f.readline()\n latitude = float(f.readline().strip().split()[-1])\n longitude = float(f.readline().strip().split()[-1])\n depth_in_m = float(f.readline().strip().split()[-1]) * 1e3\n\n m_rr = float(f.readline().strip().split()[-1]) / 1e7\n m_tt = float(f.readline().strip().split()[-1]) / 1e7\n m_pp = float(f.readline().strip().split()[-1]) / 1e7\n m_rt = float(f.readline().strip().split()[-1]) / 1e7\n m_rp = float(f.readline().strip().split()[-1]) / 1e7\n m_tp = float(f.readline().strip().split()[-1]) / 1e7\n\n return self(latitude, longitude, depth_in_m, m_rr, m_tt, m_pp, m_rt,\n m_rp, m_tp, time_shift)",
"def __init__(self, fname):\n with open(fname, \"r\") as f:\n lines = f.readlines()\n # Parse the dimension from the first line\n self.n_dim = int(lines[0])\n # Parse the example from the second line\n self.example = [float(x) for x in lines[1].split(\" \")[0:self.n_dim]]\n\n # Run through the rest of the lines and compile the constraints\n self.exprs = []\n for i in range(2, len(lines)):\n # support comments in the first line\n if lines[i][0] == \"#\":\n continue\n self.exprs.append(compile(lines[i], \"<string>\", \"eval\"))\n return",
"def from_file(csp_file_name):\n\n with open(csp_file_name, 'r') as problem_file:\n file_lines = problem_file.readlines()\n variables = {}\n constraints = Constraints()\n largest_value = 0\n # Make a list of variable names.\n for line in file_lines:\n words = line.split()\n next_variable = CSP.__get_variable_from_dictionary(variables, words[0])\n next_relation = Relation.as_function(words[1])\n next_value = words[2]\n if next_value.isdigit():\n next_value = int(next_value)\n if next_value > largest_value:\n largest_value = next_value\n constraints.add_unary_constraint(next_variable, next_relation, next_value)\n else:\n next_value = CSP.__get_variable_from_dictionary(variables, next_value)\n constraints.add_binary_constraint(next_variable, next_relation, next_value)\n # Find d and v.\n d = len(variables)\n v = largest_value\n # Set domains.\n for var in variables.values():\n var.domain = set(xrange(max(d, (v - 1))))\n new_csp = CSP(variables.values(), constraints)\n return new_csp",
"def __init__(self, fname):\n\t\twith open(fname, \"r\") as f:\n\t\t\tlines = f.readlines()\n\t\t# Parse the dimension from the first line\n\t\tself.n_dim = int(lines[0])\n\t\t# Parse the example from the second line\n\t\tself.example = [float(x) for x in lines[1].split(\" \")[0:self.n_dim]]\n\n\t\t# Run through the rest of the lines and compile the constraints\n\t\tself.exprs = []\n\t\tfor i in range(2, len(lines)):\n\t\t\t# support comments in the first line\n\t\t\tif lines[i][0] == \"#\":\n\t\t\t\tcontinue\n\t\t\tself.exprs.append(compile(lines[i], \"<string>\", \"eval\"))\n\t\t\n\t\t# see the related functions for the variable meaning\n\t\t[self.var_constrs, id_vars, self.related_vars] = self.get_var_constraints(lines)\n\t\tself.bounds = self.get_bounds()\n\t\tself.exprs_batch = self.make_exprs_batch(lines)\n\t\t\n\t\treturn",
"def construct_solver(c, assumptions=None):\n formula, variables = cnf(c)\n if assumptions:\n add_assumptions(formula, variables, assumptions)\n solver = Cadical(bootstrap_with=formula)\n return solver, variables",
"def from_file(cls, fn):\n dct = store.get_dict(fn, 'trainalgorithm')\n return cls.from_dict(dct)",
"def createSolver(self):\n try:\n solver = self._solver_type(self._opts.file, name=self._opts.output)\n except BaseException:\n raise\n return solver",
"def load_solution_from_file(path):\n with open(path, 'r') as stream:\n reader = DatasetReader(stream)\n raise NotImplementedError()",
"def loadCTMC(file_path: str) -> CTMC:\n\tf = open(file_path,'r')\n\tl = f.readline()[:-1] \n\tif l != \"CTMC\":\n\t\tprint(\"ERROR: this file doesn't describe an CTMC: it describes a \"+l)\n\tlabelling = literal_eval(f.readline()[:-1])\n\tname = f.readline()[:-1]\n\tinitial_state = array(literal_eval(f.readline()[:-1]))\n\tmatrix = literal_eval(f.readline()[:-1])\n\tmatrix = array(matrix)\n\tf.close()\n\treturn CTMC(matrix, labelling, name)",
"def convert_to_caffe(mge_fpath, prototxt=\"out.prototxt\", caffemodel=\"out.caffemodel\"):\n\n assert isinstance(mge_fpath, str), \"mge_fpath must be string\"\n net = TopologyNetwork(mge_fpath)\n converter = CaffeConverter(net)\n converter.convert()\n assert isinstance(prototxt, str) and isinstance(\n caffemodel, str\n ), \"'prototxt' and 'caffemodel' must be string\"\n converter.dump(prototxt, caffemodel)",
"def setup_solver(self):\n H_expr = self.get_cost_expr()\n A_expr, Blb_expr, Bub_expr = self.get_constraints_expr()\n self.solver = cs.conic(\"solver\",\n self.options[\"solver_name\"],\n {\"h\": H_expr.sparsity(),\n \"a\": A_expr.sparsity()},\n self.options[\"solver_opts\"])",
"def _CreateSolver(self):\n ## Solver construction\n with open('RomParameters.json') as rom_parameters:\n rom_settings = KratosMultiphysics.Parameters(rom_parameters.read())\n self.project_parameters[\"solver_settings\"].AddValue(\"rom_settings\", rom_settings[\"rom_settings\"])\n return solver_wrapper.CreateSolverByParameters(self.model, self.project_parameters[\"solver_settings\"],self.project_parameters[\"problem_data\"][\"parallel_type\"].GetString())",
"def init_solver(self):\n self.solver = TfSolver(\n loss_scalar=self.loss_scalar,\n solver_name=self._hyperparams['solver_type'],\n base_lr=self._hyperparams['lr'],\n lr_policy=self._hyperparams['lr_policy'],\n momentum=self._hyperparams['momentum'],\n weight_decay=self._hyperparams['weight_decay']\n )",
"def buildModelFromFile(fname):\n directory = os.path.dirname(fname)\n\n f = open(fname, \"r\")\n in_map = yaml.safe_load(f)\n f.close()\n\n expression = \"\"\n\n return build_model_from_dict(in_map)",
"def __init__(self, config_file: str):\n # Read data from JSON file\n with open(config_file) as f:\n config = json.load(f)\n # Declaring variables\n self.objective: str = config['objective']\n self.c1: float = config['c1']\n self.c2: float = config['c2']\n self.max_w: float = config['max_w']\n self.min_w: float = config['min_w']\n self.particle_num: int = config['particle_num']\n self.max_iterations: int = config['max_iterations']\n self.min_avg_velocity: float = config['min_avg_velocity']\n self.cube_count: int = config['cube_count']\n self.solution_count: int = config['solution_count']\n self.variables: list[float] = config['variables']\n self.optimization_type: list = config['optimization_type']\n # Convert objective to method\n self.convert_to_method(self.objective)\n # Convert the variables to min/max lists\n self.max = []\n self.min = []\n self.split_variables_into_max_min()\n # Convert the min / max values to include infinity\n self.convert_min_max_to_inf()\n # Validation\n self.validate_config()",
"def _create_solver(self):\n solver_module = importlib.import_module(\n 'fipy.solvers.{}'.format(self.fipy_solver))\n Solver = getattr(solver_module, 'DefaultSolver')\n self._solver = Solver()\n self.logger.debug(\n 'Created fipy {} solver: {}'.format(self.fipy_solver, self._solver))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
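Editor's note on the Caffe solver entry above: the snippet depends on caffe_pb2 and protobuf text_format. As a rough, hedged illustration only, the flat key: value fields of a solver.prototxt can be peeked at without those bindings; this naive reader ignores nested messages and is not a substitute for the protobuf parse shown in the entry.

def read_simple_prototxt(path):
    # Collect flat `key: value` fields; repeated keys (e.g. stepvalue) accumulate.
    params = {}
    with open(path, "rt") as f:
        for line in f:
            line = line.split("#", 1)[0].strip()  # drop comments
            if ":" not in line:
                continue
            key, value = (part.strip() for part in line.split(":", 1))
            params.setdefault(key, []).append(value.strip('"'))
    return params

# e.g. read_simple_prototxt("solver.prototxt").get("base_lr") -> ['0.01']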
Refreshes the Job's details by querying the workspace. | def refresh(self):
self.details = self.workspace.get_job(self.id).details | [
"def refresh(self): # noqa\n data = self.connection.hgetall(self.key)\n if not data:\n raise NoSuchJobError('No such job: {0}'.format(self.key))\n self.restore(data)",
"def _reload(self):\n pr = Project(path=self._job_project_path)\n self._job = pr.load(self._job_name)\n self._job.interactive_open()\n self._job.interactive_initialize_interface()",
"def refresh_data(self):\n if self.result is None:\n self.result = JobResult(self.id.value, client=self.manager.client)\n\n if self.circuit is None:\n self.circuit = JobCircuit(self.id.value, client=self.manager.client)",
"def refresh(self):\n r = fapi.get_workspace(self.namespace, self.name, self.api_url)\n fapi._check_response_code(r, 200)\n self.data = r.json()\n return self",
"def refresh_details(self) -> None:\n data = request(\n 'get',\n f'/api/v0/projects/{self.id}/',\n ).json()\n self.data.update(data)",
"def refresh_submission_status(self):\n if self.job_id:\n self.submission_status = SubmissionStatus(\n db=self.project.db, job_id=self.job_id\n )\n self.submission_status.refresh()",
"def refresh_jobs(self):\n\n jobs = self.backend.get_jobs()\n\n if not isinstance(jobs, list):\n warning(self.iface, \"Error loading Jobs from the backend (Response status code not 200)\")\n jobs = []\n\n if not self.jobs_changed(jobs):\n return\n\n self.init_jobs()\n self.jobsTableWidget.setSortingEnabled(False)\n self.jobsTableWidget.setRowCount(len(jobs))\n row = 0\n self.jobs_table = {}\n for job in jobs:\n\n if job.updated:\n str_date = job.updated.strftime(\"%Y-%m-%d_%H-%M-%S\")\n qitem = QTableWidgetItem(str_date)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n elif job.created:\n str_date = job.created.strftime(\"%Y-%m-%d_%H-%M-%S\")\n qitem = QTableWidgetItem(str_date)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n\n if not job.title:\n qitem = QTableWidgetItem(\"Untitled Job!\")\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 0, qitem)\n else:\n qitem = QTableWidgetItem(job.title)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 0, qitem)\n\n exec_btn = QPushButton(self.jobsTableWidget)\n exec_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/execute_icon.svg')))\n\n if job.status:\n qitem = QTableWidgetItem(job.status)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 2, qitem)\n\n if job.status == \"finished\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(75, 254, 40, 160))\n disp_btn = QPushButton(self.jobsTableWidget)\n disp_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/display_icon.svg')))\n disp_btn.setIconSize(QSize(29, 29))\n self.jobsTableWidget.setCellWidget(row, 4, disp_btn)\n disp_btn.clicked.connect(lambda *args, job_id=job.id: self.job_display(job_id))\n disp_btn = QPushButton(self.jobsTableWidget)\n disp_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/download.png')))\n disp_btn.setIconSize(QSize(29, 29))\n disp_btn.clicked.connect(lambda *args, job_id=job.id: self.job_download(job_id))\n self.jobsTableWidget.setCellWidget(row, 5, disp_btn)\n iface.actionZoomIn().trigger()\n elif job.status == \"running\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(254, 178, 76, 200))\n exec_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/stop-button.png')))\n elif job.status == \"canceled\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(254, 178, 76, 200))\n elif job.status == \"error\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(254, 100, 100, 200))\n\n exec_btn.setIconSize(QSize(21, 21))\n self.jobsTableWidget.setCellWidget(row, 3, exec_btn)\n\n if job.status == \"running\":\n exec_btn.clicked.connect(lambda *args, job_id=job.id: self.job_stop(job_id))\n else:\n exec_btn.clicked.connect(lambda *args, job_id=job.id: self.job_execute(job_id))\n\n info_btn2 = QPushButton(self.jobsTableWidget)\n info_btn2.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/edit_icon.png')))\n info_btn2.setIconSize(QSize(25, 25))\n self.jobsTableWidget.setCellWidget(row, 6, info_btn2)\n info_btn2.clicked.connect(lambda *args, job_id=job.id: self.adapt_job(job_id))\n\n info_btn3 = QPushButton(self.jobsTableWidget)\n info_btn3.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/info_icon.png')))\n info_btn3.setIconSize(QSize(25, 25))\n self.jobsTableWidget.setCellWidget(row, 7, info_btn3)\n info_btn3.clicked.connect(lambda *args, job_id=job.id: self.job_info(job_id))\n\n info_btn4 = 
QPushButton(self.jobsTableWidget)\n info_btn4.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/deleteFinalBtn.png')))\n info_btn4.setIconSize(QSize(25, 25))\n self.jobsTableWidget.setCellWidget(row, 8, info_btn4)\n info_btn4.clicked.connect(lambda *args, job_id=job.id: self.delete_job_final(job_id))\n\n self.refreshButton.setEnabled(True)\n self.refreshButton_service.setEnabled(True)\n\n self.jobs_table[row] = job\n\n row += 1\n\n self.jobsTableWidget.setSortingEnabled(True)",
"def on_job_update(_job):\n nonlocal job\n job = _job",
"def update(self) -> None:\n self.previous_status = self.status\n\n jobs = self._client.describe_jobs(jobs = [ self.id ])[\"jobs\"]\n\n try:\n self.state = jobs[0]\n except IndexError:\n raise ValueError(\"Invalid or unknown job id %s\" % self.id) from None",
"def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n aq_data.add_aq_to_db()\n DB.session.commit()\n return 'Data refreshed!'",
"def refresh_jobs(self):\n jobs = self.connection.user_jobs()\n\n self.init_jobs()\n self.jobsTableWidget.setRowCount(len(jobs))\n row = 0\n for val in jobs:\n\n if \"id\" in val:\n qitem = QTableWidgetItem(val[\"id\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 0, qitem)\n\n if \"error\" in val:\n if val[\"error\"]:\n if \"message\" in val[\"error\"]:\n qitem = QTableWidgetItem(val[\"error\"][\"message\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n elif \"description\" in val:\n qitem = QTableWidgetItem(val[\"description\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n\n if \"submitted\" in val:\n qitem = QTableWidgetItem(val[\"submitted\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 2, qitem)\n\n execBtn = QPushButton(self.jobsTableWidget)\n execBtn.setText('Execute')\n\n if \"status\" in val:\n qitem = QTableWidgetItem(val[\"status\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 3, qitem)\n\n if val[\"status\"] == \"finished\":\n dispBtn = QPushButton(self.jobsTableWidget)\n dispBtn.setText('Display')\n self.jobsTableWidget.setCellWidget(row, 5, dispBtn)\n dispBtn.clicked.connect(lambda *args, row=row: self.job_display(row))\n\n self.jobsTableWidget.setCellWidget(row, 4, execBtn)\n execBtn.clicked.connect(lambda *args, row=row: self.job_execute(row))\n\n row += 1",
"def fetchJob(self):\n \n mpDlg = MultipleValDialog(title='Get Job',\n initialvalues=('','my job1'),\n labels=('ID','Your label',),\n types=('string','string'),\n parent=self.mainwin)\n if mpDlg.result == True:\n jobid = mpDlg.results[0]\n name = mpDlg.results[1]\n else:\n return\n job = PEATSA.WebApp.Data.Job(jobid, self.connection) \n if job != None: \n print 'adding job id %s to list' %job.identification\n self.storeJob(name, job)\n self.updateJobs()\n return",
"async def refresh(ctx):\n await update_tournament_list()\n res = await refresh_algorithm()\n if res == True:\n await ctx.send(\"Successfully refreshed data from sheet.\")\n else:\n await ctx.send(\":warning: Unsuccessfully refreshed data from sheet.\")",
"def update(self):\n self._log.debug(\"About to update job {0}\".format(self.id))\n resp = self._api.get_job(self.id)\n\n if resp.success:\n self.submission = self._format_submission(resp.result)\n return True\n\n else:\n raise resp.result",
"def refresh(self):\n connection = self._connection\n with self._refresh_lock:\n self._aiexperiment = connection.aiexperiments(self.id).fetch()",
"def reschedule(self):\n # TODO: use rq scheduler\n self._update_dumped_params()\n logger.info('Rescheduling {}'.format(self))\n service = InternalService(self.service_name)\n job = service.run_async(job_id=self.id)\n return job",
"def refresh(self):\n # pylint: disable=protected-access\n self.invalidate()\n full = not self._manager._list_has_name\n res_list = self._manager.list(full_properties=full)\n self.update_from(res_list)",
"def RefreshReport(self):\r\n report = self.data.getRefreshReport()\r\n if report: showInfo(self,report,self.data.title)",
"def refresh(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
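Editor's sketch for the job refresh entry above: the workspace/job API it calls is not reproduced, so stand-in classes (an assumption) are used to show the cache-and-requery pattern.

class Workspace:
    def __init__(self, details_by_id):
        self._details = details_by_id

    def get_job(self, job_id):
        # Pretend to query the service and return a freshly populated job.
        return Job(job_id, self, self._details[job_id])

class Job:
    def __init__(self, job_id, workspace, details):
        self.id, self.workspace, self.details = job_id, workspace, details

    def refresh(self):
        # Re-query the workspace and replace the cached details, as in the entry above.
        self.details = self.workspace.get_job(self.id).details

ws = Workspace({"job-1": {"status": "Waiting"}})
job = ws.get_job("job-1")
ws._details["job-1"] = {"status": "Succeeded"}
job.refresh()
print(job.details)  # {'status': 'Succeeded'}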
Create a unique id for a new job. | def create_job_id() -> str:
return str(uuid.uuid1()) | [
"def assign_job_id(self):\n num_string = str(randint(0, 10000)).zfill(5)\n job_id = self.jobname + str(num_string) + datetime.today().strftime(\"%Y%m%d\")\n return job_id",
"def _generate_job_id():\n # CAIP job id can contains only numbers, letters and underscores.\n unique_tag = str(uuid.uuid4()).replace(\"-\", \"_\")\n return \"tf_cloud_train_{}\".format(unique_tag)",
"def create_task_id():\n return str(int(round(time.time() * 10**9)))",
"def generate_job_id() -> str:\n rand = random.SystemRandom()\n possible_characters = list(\n set(string.ascii_letters + string.digits)\n - {\"I\", \"l\", \"o\", \"O\", \"0\"} # No confusing characters\n )\n id_part = \"\".join(rand.choices(possible_characters, k=16))\n return f\"raysubmit_{id_part}\"",
"def create_new_job(self, search_id: Hashable) -> Hashable:\n partial_id = (\n self._redis.incr(f\"search:{search_id}.job_id_counter\", amount=1) - 1\n )\n partial_id = f\"{partial_id}\" # converting to str\n job_id = f\"{search_id}.{partial_id}\"\n self._redis.rpush(f\"search:{search_id}.job_id_list\", job_id)\n self._redis.json().set(\n f\"job:{job_id}\", \".\", {\"in\": None, \"metadata\": {}, \"out\": None}\n )\n return job_id",
"def get_random_job_id() -> str:\n return get_uuid_str()",
"def job_create(self, sender, name=None):\n self._require_running()\n name = name or self.DEFAULT_JOB_NAME\n job_id = uuid.uuid4().hex\n assert job_id not in self._jobs\n assert sender is not None\n assert sender.connection\n job = Job(\n job_id,\n name,\n self._session_root.joinpath(job_id),\n sender,\n self._loop\n )\n self._jobs[job_id] = job\n self._jobs_by_connection[sender.connection][job_id] = job\n self._log.debug('Created job %s', job)\n return job_id",
"def make_key(\n cls,\n task_id: str,\n job_id: T.Optional[str] = None,\n ) -> str:\n JOB_ID = cls.JOB_ID if job_id is None else job_id\n return f\"{JOB_ID}{cls.SEP}{task_id}\"",
"def generate_job_id(*args):\n md5 = hashlib.md5()\n for arg in args:\n md5.update(arg.encode(\"utf-8\"))\n return md5.hexdigest()",
"async def create_job(response: Response,\n request: Request,\n job: Job = Body(\n ...,\n example={\n \"id_video\": \"bbb_0.mp4\",\n \"bitrate\": 7000,\n \"speed\": \"ultrafast\",\n },\n )\n ): \n \n\n # get an ID and return to client\n id_job = mngr.getID()\n logger.debug(\"got id_job %s\" %id_job)\n resp = [\"http:/\"]\n resp.append(request.headers['host'])\n resp.append(id_job)\n response.headers[\"Location\"] = \"/\".join(resp)\n\n # create the task\n mngr.newJob(id_job, \n job.id_video, \n job.bitrate, \n job.speed)\n\n return id_job",
"def getJobId() -> str:\n s = time.strftime(r\"%Y%m%d-%H%M%S\", time.localtime())\n return \"%s-%d\" % (s, random.randint(100, 999))",
"def job_id(self) -> str:\n return self._job['id']",
"def job_id(camera_id='0'):\n def timestamp_ms(): return int(round(time.time() * 1000))\n return '_'.join([str(timestamp_ms), str(camera_id)])",
"def job_id(self):\n return self._job.id",
"def _generate_submit_id():\n timestamp = int(time())\n return \"%d-%05d\" % (timestamp, random.randint(0, 99999))",
"def _get_job_id(self) -> int:\n with self._job_id_lock:\n job_id = self._next_job_id\n self._next_job_id += 1\n return job_id",
"def gen_id() -> str:\n # id is set according to the current unix time\n return f'cli-reminder-{time.time()}'",
"def job_id(self):\n return self.__id",
"def create_job(project, description):\n randomnames = open(os.path.join(\"Anemone\", \"templates\", \"namegen.html\")).readlines()\n jobname = (\"Quick.\" +\n random.choice(randomnames)[:-1] + # for some reason choice gives extra space\n random.choice(randomnames)[:-1]) # for some reason choice gives extra space\n\n newjob = Job.create(project=project, name=jobname, description=description)\n newjob.name = newjob.name + \".{0:0=3d}\".format(newjob.id)\n newjob.save()\n return newjob"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
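Editor's note on the job-id entry above: uuid.uuid1() derives its value from the host identifier and the current timestamp, while uuid4() is purely random. A tiny sketch of both, for comparison; the second helper is an illustrative addition, not part of the dataset.

import uuid

def create_job_id() -> str:
    # Time/host-based id, as in the entry above.
    return str(uuid.uuid1())

def create_random_job_id() -> str:
    # Fully random alternative when host/time leakage is undesirable.
    return str(uuid.uuid4())

print(create_job_id())
print(create_random_job_id())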
Applies post processing to all the outputs in the provided run results. This is a convenience function to avoid the need for manual iteration over the run_results dictionary. | def postprocess(run_results, postprocess_func):
G_LOGGER.start(f"Applying post-processing to outputs: {postprocess_func.__name__}")
for _, iteration_results in run_results:
for index, iter_res in enumerate(iteration_results):
iteration_results[index] = postprocess_func(iter_res)
G_LOGGER.finish("Finished applying post-processing")
return run_results | [
"def postprocess(self, results):\n return results",
"def process_results(self, results):\n\t\traise NotImplementedError()",
"def postprocess_result(self):\n pass",
"def process(self, results):\n raise NotImplementedError",
"def postprocess_model_results(results, model_data, timings):\n log_time(logger, timings, \"post_process_start\", comment=\"Postprocessing: started\")\n\n run_config = model_data.attrs[\"run_config\"]\n results[\"capacity_factor\"] = capacity_factor(results, model_data)\n results[\"systemwide_capacity_factor\"] = capacity_factor(\n results, model_data, systemwide=True\n )\n results[\"systemwide_levelised_cost\"] = systemwide_levelised_cost(\n results, model_data\n )\n results[\"total_levelised_cost\"] = systemwide_levelised_cost(\n results, model_data, total=True\n )\n results = clean_results(results, run_config.get(\"zero_threshold\", 0), timings)\n\n for var_data in results.data_vars.values():\n if \"is_result\" not in var_data.attrs.keys():\n var_data.attrs[\"is_result\"] = 1\n\n log_time(\n logger,\n timings,\n \"post_process_end\",\n time_since_solve_start=True,\n comment=\"Postprocessing: ended\",\n )\n\n if \"run_solution_returned\" in timings.keys():\n results.attrs[\"solution_time\"] = (\n timings[\"run_solution_returned\"] - timings[\"run_start\"]\n ).total_seconds()\n results.attrs[\"time_finished\"] = timings[\"run_solution_returned\"].strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n\n return results",
"def _post_process_results(detailed_results):\n # Adding [Gr]SearchFailure (== 1. iff. logprob(label) > logprob(greedy) and greedy != label)\n # Adding [TF]Acc_Player and [Gr]Acc_Player\n # Removing LogProbsDetails\n\n # Make sure the detailed results have the correct key (i.e. they have not yet been post-processed)\n for prefix in ['[TF]', '[Gr]']:\n assert prefix + 'LogProbsDetails' in detailed_results\n\n # Building a dictionary {request_id: (log_probs, mismatch)}\n tf_items, gr_items = {}, {}\n for tf_item in detailed_results['[TF]LogProbsDetails']:\n tf_items.update(tf_item)\n for gr_item in detailed_results['[Gr]LogProbsDetails']:\n gr_items.update(gr_item)\n\n # Making sure we have processed the same number of TF items and Gr items\n tf_nb_items = len(tf_items)\n gr_nb_items = len(gr_items)\n if tf_nb_items != gr_nb_items:\n LOGGER.warning('Got a different number of items between [TF] (%d items) and [Gr] (%d items)',\n tf_nb_items, gr_nb_items)\n\n # Computing search failure and mismatch\n search_failure, gr_acc_player, tf_acc_player = [], [], []\n for request_id in tf_items:\n if request_id not in gr_items:\n LOGGER.warning('Item %s was computed using [TF], but is missing for [Gr]. Skipping.', request_id)\n continue\n\n tf_logprobs, tf_mismatch = tf_items[request_id]\n gr_logprobs, gr_mismatch = gr_items[request_id]\n\n # Computing stats\n if gr_mismatch:\n search_failure += [int(tf_logprobs > gr_logprobs)]\n tf_acc_player += [int(not tf_mismatch)]\n gr_acc_player += [int(not gr_mismatch)]\n\n # Removing extra keys and adding new keys\n detailed_results['[Gr]SearchFailure'] = search_failure\n detailed_results['[TF]Acc_Player'] = tf_acc_player\n detailed_results['[Gr]Acc_Player'] = gr_acc_player\n del detailed_results['[TF]LogProbsDetails']\n del detailed_results['[Gr]LogProbsDetails']\n\n # Returning post-processed results\n return detailed_results",
"def process_results(self, response, results):\n return results",
"def decode_results(self, outputs):\n ...",
"def postprocess_run(store, stack, runner, par_bins=None, evid_kernel=None,\n post_kernel=None, evid_weight=True):\n aggregate_run_attributes(store)\n convolve_evidence(store, evid_kernel)\n aggregate_run_products(store)\n aggregate_run_pdfs(store, par_bins=par_bins)\n convolve_post_pdfs(store, post_kernel, evid_weight=evid_weight)\n quantize_conv_marginals(store)\n deblend_hf_intensity(store, stack, runner)\n generate_predicted_profiles(store, stack, runner)",
"def _collectResult(self, poolResults):\n for result in poolResults:\n # get() return a list of tuple contain result\n valueDict, normals, tumors = result.get()[0]\n \n # combine result calculated from different process\n for key in valueDict:\n if key in self._results:\n self._results[key] += valueDict[key]\n else:\n self._results[key] = valueDict[key]\n \n self._normalSamples += normals\n self._tumorSamples += tumors",
"def process_group_results(self, process_group_results):\n\n self._process_group_results = process_group_results",
"def process_pr_results(self, results_files, custom_report):\n \n\n \n output_file = open(os.path.join(self.path, 'raw_results.txt'), 'w')\n \n #Keep track of the last read line before a newline; this will be the best value from an optimization run\n last_line = ''\n #Match a string of the format ( 0.0995749 0.101685 0.108192 0.091224 ) 0.091224 0 100\n #Contains parameter values, the best optimization value, the cpu time, and some other values, e.g. particle numbers that Copasi likes to add. These could be removed, but they seem useful.\n output_string = r'.*\\(\\s(?P<params>.+)\\s\\)\\s+(?P<best_value>\\S+)\\s+(?P<cpu_time>\\S+)\\s+(?P<function_evals>\\S+)\\.*'\n output_re = re.compile(output_string)\n \n best_value = None\n best_line = None\n \n #Copy the contents of the first file to results.txt\n for line in open(os.path.join(self.path, results_files[0]), 'r'):\n output_file.write(line)\n try:\n if line != '\\n':\n if output_re.match(line):\n current_value = float(output_re.match(line).groupdict()['best_value'])\n if best_value != None:\n if current_value < best_value:\n best_value = current_value\n best_line = line\n elif best_value == None:\n best_value = current_value\n best_line = line\n else:\n pass\n except Exception as e:\n if custom_report:\n pass\n else:\n raise e\n \n #And for all other files, copy everything but the last line\n for filename in results_files[1:]:\n firstLine = True\n for line in open(os.path.join(self.path, filename), 'r'):\n if not firstLine:\n output_file.write(line)\n try:\n if line != '\\n':\n if output_re.match(line):\n current_value = float(output_re.match(line).groupdict()['best_value'])\n if current_value < best_value:\n best_value = current_value\n best_line = line\n else:\n pass\n except Exception as e:\n if custom_report:\n pass\n else:\n raise e\n firstLine = False\n \n \n output_file.close()\n \n #Write the best value to results.txt\n output_file = open(os.path.join(self.path, 'results.txt'), 'w')\n \n output_file.write('Best value\\tCPU time\\tFunction evals\\t')\n \n for parameter in self.get_parameter_estimation_parameters():\n\n output_file.write(parameter[0].encode('utf8'))\n output_file.write('\\t')\n output_file.write('\\n')\n\n best_line_dict = output_re.match(best_line).groupdict()\n\n output_file.write(best_line_dict['best_value'])\n output_file.write('\\t')\n output_file.write(best_line_dict['cpu_time'])\n output_file.write('\\t')\n output_file.write(best_line_dict['function_evals'])\n output_file.write('\\t')\n \n for parameter in best_line_dict['params'].split('\\t'):\n output_file.write(parameter)\n output_file.write('\\t')\n output_file.close()\n \n if best_value != None:\n return True\n else:\n return False",
"def __call__(self, results):\n for key in results.get(\"img_fields\", [\"img\"]):\n results[key] = imnormalize(results[key], self.mean, self.std, self.to_rgb)\n results[\"img_norm_cfg\"] = dict(mean=self.mean, std=self.std, to_rgb=self.to_rgb)\n return results",
"def _post_process(self, outs):\n\n detect_results = []\n\n for detection in outs[0, 0]:\n confidence = float(detection[2])\n if confidence > self.dnn_cfg.threshold:\n class_idx = int(detection[1]) # skip background ?\n xyxy = detection[3:7]\n bbox = BBox.from_xyxy_norm(*xyxy, *self.frame_dim_orig)\n label = self.labels[class_idx] if self.labels else \"\"\n detect_result = DetectResult(class_idx, confidence, bbox, label)\n detect_results.append(detect_result)\n\n if self.dnn_cfg.nms:\n detect_results = self._nms(detect_results)\n\n return DetectResults(detect_results)",
"def transform(self, results: Dict) -> Dict:\n\n # Apply mapping\n inputs = self._map_input(results, self.mapping)\n # Apply wrapped transforms\n outputs = self._apply_transforms(inputs)\n # Apply remapping\n outputs = self._map_output(outputs, self.remapping)\n\n results.update(outputs) # type: ignore\n return results",
"def update_results(self):\n try:\n self._run_openmc()\n self._test_output_created()\n results = self._get_results()\n self._write_results(results)\n self._overwrite_results()\n finally:\n self._cleanup()",
"def finalize_consolidate_results(ret):\n\n from parallel.rflag_post_proc import finalize_agg_rflag_thresholds, is_rflag_report\n\n for key, item in ret.items():\n if isinstance(item, dict) and is_rflag_report(item):\n ret[key] = finalize_agg_rflag_thresholds(item)\n\n return ret",
"def processor_results(self, processor_results):\n\n self._processor_results = processor_results",
"def make_outputs(self, cwl_result):\n # type: (CWLResults) -> None\n for output_id in self.request.outputs: # iterate over original WPS outputs, extra such as logs are dropped\n # TODO: adjust output for glob patterns (https://github.com/crim-ca/weaver/issues/24)\n if isinstance(cwl_result[output_id], list) and not isinstance(self.response.outputs[output_id], list):\n if len(cwl_result[output_id]) > 1:\n self.logger.warning(\n \"Dropping additional output values (%s total), only 1 supported per identifier.\",\n len(cwl_result[output_id])\n )\n cwl_result[output_id] = cwl_result[output_id][0] # expect only one output\n\n if \"location\" not in cwl_result[output_id] and os.path.isfile(str(cwl_result[output_id])):\n raise PackageTypeError(\"Process output '{}' defines CWL type other than 'File'. \".format(output_id) +\n \"Application output results must use 'File' type to return file references.\")\n if \"location\" in cwl_result[output_id]:\n self.make_location_output(cwl_result, output_id)\n continue\n\n # data output\n self.response.outputs[output_id].data = cwl_result[output_id]\n self.response.outputs[output_id].as_reference = False\n self.logger.info(\"Resolved WPS output [%s] as literal data\", output_id)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
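Editor's sketch for the post-processing entry above: Polygraphy's RunResults/IterationResult types and G_LOGGER are not reproduced, so plain tuples, lists, and dicts (an assumption) stand in to show the in-place rewrite of every output.

def postprocess(run_results, postprocess_func):
    # Rewrite every iteration result in place, exactly like the loop in the entry above.
    for _, iteration_results in run_results:
        for index, iter_res in enumerate(iteration_results):
            iteration_results[index] = postprocess_func(iter_res)
    return run_results

results = [("runner-0", [{"out": 1.0}, {"out": 2.0}])]
postprocess(results, lambda res: {k: v * 10 for k, v in res.items()})
print(results)  # [('runner-0', [{'out': 10.0}, {'out': 20.0}])]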
Turns Freshbooks tickets from the past x days into Toggl projects. | def sync(self, no_of_days=1):
zd = Zendesk()
tg = Toggl()
try:
self.print("Syncing...")
self.print_divider(30)
tickets = zd.get_tickets(no_of_days)
for ticket in tickets:
project_title = self.format_title(ticket.id, ticket.subject)
if ticket.organization:
client_id = tg.get_client_id(name=ticket.organization.name)
if not client_id:
new_client = tg.create_client(ticket.organization.name)
client_id = new_client['id']
else:
client_id = False
self.print("Ticket '%s' has no associated organization!" % (project_title))
all_projects = tg.get_projects()
if not self.already_created(ticket.id, all_projects):
self.print("Creating project '%s'..." % (project_title))
result = tg.create_project(project_title, client_id, is_private=False)
self.print("Toggl response:")
self.log(result, silent=False)
else:
self.print("There is already a Toggl project for Zendesk ticket #%s!" % ticket.id)
pass
# TODO: edit Toggl project
# tg.edit_project(project_id, name=ticket.subject)
self.print_divider(30)
self.print("Done!")
except:
self.log(traceback.format_exc(), silent=False) | [
"def get_project_tickets(\n jira: Jira,\n project: str,\n insert_blank_tickets: bool = True,\n verbose: bool = True,\n) -> List[JiraTicket]:\n # Offsets for the API\n init = 0\n size = 100\n\n # Store all the API tickets to sort through later. Keys for this are\n # ticket number. Values are the ticket object we get from the Jira\n # SDK/API library.\n api_tickets_dict = {}\n\n # Fetch from the API until there's no tickets left\n while True:\n start = init * size\n\n api_tickets = jira.search_issues(\"project = %s\" % project, start, size)\n\n # Check if we've reached the end\n if not api_tickets:\n break\n\n # Add the tickets\n for ticket in api_tickets:\n ticket_num = int(ticket.key.split(\"-\")[-1])\n api_tickets_dict[ticket_num] = ticket\n\n # Move to next API page for next round\n init += 1\n\n # Keep track of what the ticket number \"should\" be if we're\n # inserting blank tickets\n ticket_counter = 1\n\n # Store JiraTicket objects for our tickets in here\n tickets = []\n\n # Create JiraTicket objects from the tickets collected above\n for ticket_num, ticket in sorted(api_tickets_dict.items()):\n if verbose:\n print(\"...loading %s\" % ticket.key)\n\n # Insert blank tickets as necessary\n while insert_blank_tickets and ticket_counter < ticket_num:\n tickets.append(create_blank_ticket(project))\n\n ticket_counter += 1\n\n ticket_counter += 1\n\n # Insert *this* ticket. First deal with attributes that we\n # have to be careful with Nones with. Then make the ticket.\n description = ticket.fields.description\n\n if description is None:\n description = \"\"\n\n resolution = ticket.fields.resolution\n\n if resolution is not None:\n resolution = resolution.name\n\n tickets.append(\n JiraTicket(\n description=description,\n priority=translate_priority(ticket.fields.priority.name),\n project=project,\n resolution=resolution,\n source_link=ticket.permalink(),\n summary=ticket.fields.summary,\n )\n )\n\n return tickets",
"def do_chart22( self, tickets ) :\n from zeta.lib.base import BaseController\n\n data = {}\n cntlr = BaseController()\n for t in tickets :\n ts = t.statushistory[-1]\n data.setdefault( t.project.id, {}\n ).setdefault( ts.owner.username, []\n ).append(\n [ t.type.tck_typename, t.severity.tck_severityname,\n ts.status.tck_statusname ]\n )\n chart22_data = {}\n for p, udict in data.iteritems() :\n chart22_data[p] = []\n for u, vals in udict.iteritems() :\n chart22_data[p].append(\n [ u,\n sorted(\n list(h.computecount( vals, lambda x : x[0] ).items()),\n key=lambda x : x[0]\n ),\n sorted(\n list(h.computecount( vals, lambda x : x[1] ).items()),\n key=lambda x : x[0]\n ),\n sorted(\n list(h.computecount( vals, lambda x : x[2] ).items()),\n key=lambda x : x[0]\n )\n ]\n )\n chart22_usrs = dict([ ( p,\n [ [ u, cntlr.url_user(u) ]\n for u in map( lambda x : x[0], vals ) ]\n ) for p, vals in chart22_data.iteritems() ])\n return chart22_data, chart22_usrs",
"def getProjectStories(epic):\n try:\n rally = initRally()\n except Exception as e:\n return 'N', str(e)\n\n \"\"\"\n TODO\n Currently, any exception encountered just gets you redirected to the\n generic error page. I should fix that some day.\n \"\"\"\n \"\"\"\n 2016-12-12 - pyral 1.2.2 no longer likes reference to BusinessEpic\n level from User Story. Will look up name separately based on epic\n \"\"\"\n\n try:\n q='FormattedId = \"%s\"' % (epic)\n f=\"Name\"\n data = rally.get(\n 'BusinessEpic',\n query=q,\n fetch=f)\n except Exception as e:\n return 'N', str(e)\n\n if data.resultCount == 0:\n return 'N', \"Invalid project identifier: %s\" % (epic)\n\n prj = data.next()\n project = str(prj.Name)\n\n try:\n q='Feature.Parent.FormattedId = \"%s\"' % (epic)\n f=\"FormattedID,Name,ScheduleState,PlanEstimate,Feature,Owner\"\n data = rally.get(\n 'User Story',\n query=q, \n fetch=f,\n order=\"Feature\")\n except Exception as e:\n return 'N', str(e)\n\n if data.resultCount == 0:\n return 'N', \"Invalid project identifier: %s\" % (epic)\n\n results = []\n for item in data:\n rec = {\n 'id': item.FormattedID,\n 'feature': item.Feature.Name,\n 'featureid': item.Feature.FormattedID,\n 'project': project,\n 'name': item.Name,\n 'status': item.ScheduleState,\n 'points': int(item.PlanEstimate) if item.PlanEstimate else 0,\n 'owner': item.Owner.Name,\n }\n results.append(rec)\n\n return 'Y', results",
"def someday_projects():\n cfg = load_or_install_prjct()\n someday_projects_list = cfg['someday_projects'] if cfg['someday_projects'] else []\n return sort_project_list(someday_projects_list)",
"def do_chart25( self, tickets ) :\n data = {}\n for t in tickets :\n ts = t.statushistory[-1]\n if not t.versions :\n continue\n data.setdefault( t.project.id, {}\n ).setdefault( t.versions[0].version_name, []\n ).append(\n [ t.type.tck_typename, t.severity.tck_severityname,\n ts.status.tck_statusname ]\n )\n chart25_data = {}\n for p, vdict in data.iteritems() :\n chart25_data[p] = []\n for ver, vals in vdict.iteritems() :\n chart25_data[p].append(\n [ ver,\n sorted(\n h.computecount( vals, lambda x : x[0] ).items(),\n key=lambda x : x[0] \n ),\n sorted(\n h.computecount( vals, lambda x : x[1] ).items(),\n key=lambda x : x[0]\n ),\n sorted(\n h.computecount( vals, lambda x : x[2] ).items(),\n key=lambda x : x[0]\n )\n ]\n )\n return chart25_data",
"def ticketToTrade(ticket):\n\tt = {}\n\tt['Portfolio_code'] = getAccountNumber(ticket['Fund'])\n\tt['Txn_type'] = 'REPO'\n\tt['Txn_sub_type'] = 'Close' if ticket['Repo Sta'] == 'Closed' else \\\n\t\t\t\t\t\t'Change Rate' if ticket['Trd Dt'] > ticket['Stl Date'] else \\\n\t\t\t\t\t\t'Open'\n\tt['Trade_date'] = toDateTimeString(ticket['Trd Dt']) if t['Txn_sub_type'] == 'Open' \\\n\t\t\t\t\t\telse ''\n\tt['Settle_date'] = toDateTimeString(ticket['Stl Date']) if t['Txn_sub_type'] == 'Open' \\\n\t\t\t\t\t\telse ''\n\tt['Mature_date'] = '' if t['Txn_sub_type'] == 'Change Rate' else \\\n\t\t\t\t\t\ttoDateTimeString(ticket['Trd Dt']) if t['Txn_sub_type'] == 'Close' else \\\n\t\t\t\t\t\t'31/12/2049' if ticket['Trm Date'] == 'OPEN' else \\\n\t\t\t\t\t\ttoDateTimeString(ticket['Trm Date'])\n\tt['Loan_ccy'] = '' if t['Txn_sub_type'] == 'Change Rate' else ticket['Crcy']\n\tt['Amount'] = '' if t['Txn_sub_type'] == 'Change Rate' else ticket['Loan Amount']\n\tt['Eff_date'] = '' if t['Txn_sub_type'] == 'Close' else \\\n\t\t\t\t\ttoDateTimeString(ticket['Stl Date']) if t['Txn_sub_type'] == 'Open' \\\n\t\t\t\t\telse toDateTimeString(ticket['Trd Dt'])\n\tt['Int_rate'] = '' if t['Txn_sub_type'] == 'Close' else ticket['Repo Rte']\n\tt['Int_mode'] = '' if t['Txn_sub_type'] == 'Close' else 'ACT/360'\n\tt['Col_ISIN'] = '' if t['Txn_sub_type'] == 'Change Rate' else ticket['ISIN']\n\tt['Col_Qty'] = '' if t['Txn_sub_type'] == 'Change Rate' else \\\n\t\t\t\t\t1000 * toNumber(ticket['Amount'])\n\tt['Broker'] = '' if t['Txn_sub_type'] == 'Change Rate' else ticket['Broker ID']\n\tt['Cust_ref'] = toStringIfFloat(ticket['Orig Tkt']) if t['Txn_sub_type'] == 'Close' \\\n\t\t\t\t\telse toStringIfFloat(ticket['Tkt #'])\n\n\n\treturn mergeDictionary( t\n\t\t\t\t\t\t , { 'Col_SEDOL': ''\n\t\t\t\t\t\t , 'Col_Bloomberg': ''\n\t\t\t\t\t\t , 'Col_LocalCode': ''\n\t\t\t\t\t\t , 'Col_CMUCode': ''\n\t\t\t\t\t\t , 'Col_desc': ''\n\t\t\t\t\t\t , 'Exchange': ''\n\t\t\t\t\t\t }\n\t\t\t\t\t\t )",
"def do_chart14( self, projects ) :\n from zeta.config.environment import \\\n projcomp, tckcomp, vcscomp, revcomp, wikicomp\n\n chart14_data = dict([ ( p.projectname,\n { 'ticket' : 0,\n 'vcs' : 0,\n 'review' : 0,\n 'wiki' : 0,\n 'admin' : 0\n }\n ) for p in projcomp.get_project() ])\n\n for t in tckcomp.get_ticket( attrload=['project'] ) :\n d = chart14_data[ t.project.projectname ]\n d['ticket'] += len(t.logs)\n for v in vcscomp.get_vcs( attrload=['project'] ) :\n d = chart14_data[ v.project.projectname ]\n d['vcs'] += len(v.logs)\n for r in revcomp.get_review( attrload=['project'] ) :\n d = chart14_data[ r.project.projectname ]\n d['review'] += len(r.logs)\n for w in wikicomp.get_wiki( attrload=['project'] ) :\n d = chart14_data[ w.project.projectname ]\n d['wiki'] += len(w.logs)\n for p in projcomp.get_project() :\n d = chart14_data[ p.projectname ]\n d['admin'] += len(p.logs)\n return chart14_data",
"def push_historic_data(project):\n defects = []\n\n logger.info(\"Starting {}...\".format(project))\n jira_issues = get_jira_defects(project)\n last_upload = datetime.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0) + relativedelta(weekday=SA(-1))\n logger.debug(\"Fetched {} issues successfully for {}\".format(len(jira_issues), project))\n for issue in jira_issues:\n try:\n created = datetime.datetime.strptime(issue.fields.created, DATE_FORMAT)\n jira_dict = jira_obj_to_dict(issue, datetime.datetime.utcnow().strftime(DATE_FORMAT))\n\n historic_data = []\n # Last Friday of the report ran\n report_date = last_upload\n while(report_date > created):\n jira_dict = jira_for_date(jira_dict, issue.changelog, report_date)\n historic_data.insert(0, create_defect(jira_dict, issue))\n report_date -= datetime.timedelta(weeks=1)\n defects.append(historic_data)\n except Exception as e:\n logger.debug(\"Exception processing {} {}\".format(jira_dict[\"key\"], e))\n logger.exception(\"Exception\")\n logger.debug(\"Missing values {}\".format(str(jira_dict)))\n pass\n if len(defects) < len(jira_issues):\n logger.debug(\"{delta} defects not added in the {} report\".format(project, delta=len(jira_issues) - len(defects)))\n defects_as_list = []\n for defect in defects:\n defects_as_list.extend(defect)\n return post_defects(project, jira_issues, defects_as_list)",
"def append_today_label_for_yesterday_unresolved_tickets(oz_jira, workDir):\n day_offset = 1\n weekday = today.isoweekday()\n if weekday == 6:\n # Saturday\n day_offset = 1\n elif weekday ==7:\n # Sunday\n day_offset=2\n elif weekday == 1:\n # monday, usually Sat and Sun were reset days\n day_offset = 3\n\n yesterday_label=(today - timedelta(days=day_offset)).strftime(dateFormat)\n yesterday_unresolved_tickets = oz_jira.search_issues(oz_jira_yesterday_not_resolved_query % yesterday_label)\n print 'yesterday_unresolved_tickets:', yesterday_unresolved_tickets.__len__()\n if yesterday_unresolved_tickets.__len__() > 0:\n log.info(\">>>>>>>>> There are %i tickets will append today's label: %s\", yesterday_unresolved_tickets.__len__(), label)\n for issue in yesterday_unresolved_tickets:\n cmc_ticket_num = get_cmc_ticket_number(issue)\n print 'cmc_ticket_num:', cmc_ticket_num, issue.key\n print 'issue.fields.status.name', issue.fields.status, issue.fields.status.name\n append_label(oz_jira, issue, workDir, cmc_ticket_num, issue.key)\n if force_update_flag:\n # update oz ticket's summary, description, attachments and branches\n forceUpdateOzTicket(oz_jira, cmc_jira, issue.key, cmc_ticket_num, workDir)",
"def time_tracking(self):\n fb = FreshBooks()\n tg = Toggl()\n self.print_splash()\n self.print(\"Tip: You can always enter 'skip' when you want to skip a time entry.\", format='warn')\n days = self.get_interactive_days() # number of days to go back\n self.print(\"OK, I'll run you through the Toggl time entries of the past %i day(s).\" % (days))\n timestamp = self.get_timestamp(days) # unix timestamp including tz\n time_entries = tg.get_time_entries(timestamp)\n if len(time_entries) == 0:\n self.print(\"No Toggl entries in this time span!\", 'warn')\n return False\n time_entries = self.merge_toggl_time_entries(time_entries) # merge Toggl entries\n fb_projects = fb.get_projects()\n # Loop through merged Toggl time entries:\n for entry in time_entries:\n # Get and convert all necessary info:\n client_id = tg.get_client_id(project_id=entry.get('pid'))\n client_name = tg.get_client_name(client_id)\n project = tg.get_project(entry.get('pid'))\n duration = int(entry['duration']) / 60 / 60 # convert duration to hours\n duration = round(duration * 4 ) / 4 # round hours to nearest .25\n description = self.format_description(project['name'], entry['description'])\n date = str(parser.parse(entry['start']).date())\n # Print info in a nice way:\n self.print_divider(30)\n self.print(\"Description: \" + description)\n self.print(\"Date: \" + date)\n self.print(\"Hours spent: \" + str(duration))\n # Skip if Toggl entry is already booked:\n if entry.get('tags') and tg.BOOKED_TAG in entry['tags']:\n self.print(\"Skipping this entry because it is already in Freshbooks.\", 'cross')\n # Skip if duration is below 0.25:\n elif duration < 0.25:\n self.print(\"Skipping this entry because there are less than 0.25 hours spent.\", 'cross')\n # If billable, add to Freshbooks:\n elif entry['billable']:\n # Get FreshBooks project name through interactive search:\n try:\n self.print(\"Project: \\U0001F50D \")\n fb_project_name = self.interactive_search(fb_projects.keys(), client_name)\n # Handle KeyboardInterrupt\n except KeyboardInterrupt:\n answer = input(\"\\nKeyboardInterrupt! Skip current entry or quit time tracking? (S/q) \")\n if answer.lower() == 's' or answer == '':\n self.clear_lines(1)\n self.print(\"Skipping this entry.\", 'cross')\n continue\n else:\n self.clear_lines(1)\n self.print(\"Ok, stopping time tracking.\", 'cross')\n sys.exit()\n # If user requests so, skip this entry:\n self.clear_lines(1)\n if not fb_project_name:\n self.print(\"Skipping this entry.\", 'cross')\n continue\n # Otherwise, add entry to FreshBooks and tag Toggl entry/entries:\n self.print(\"Project: \" + fb_project_name)\n project_id = fb.get_project_id(fb_project_name)\n fb.add_entry(project_id, duration, description, date)\n tg.tag_projects(entry['merged_ids'], tg.BOOKED_TAG)\n # If not billable, skip entry:\n else:\n self.print(\"Skipping this entry because it is not billable.\", 'cross')\n self.print_divider(30)\n answer = input(\"All done! Open FreshBooks in browser to verify? (Y/n) \")\n if answer.lower() == 'y' or answer == '':\n webbrowser.open('https://%s.freshbooks.com/timesheet' % fb.fb_creds['subdomain'])",
"def do_chart24( self, tickets ) :\n data = {}\n for t in tickets :\n if not t.milestones :\n continue\n ts = t.statushistory[-1]\n data.setdefault( t.project.id, {}\n ).setdefault( t.milestones[0].milestone_name, []\n ).append(\n [ t.type.tck_typename, t.severity.tck_severityname,\n ts.status.tck_statusname ]\n )\n chart24_data = {}\n for p, mdict in data.iteritems() :\n chart24_data[p] = []\n for mstn, vals in mdict.iteritems() :\n chart24_data[p].append(\n [ mstn,\n sorted(\n list(h.computecount( vals, lambda x : x[0] ).items()),\n key=lambda x : x[0] \n ),\n sorted(\n list(h.computecount( vals, lambda x : x[1] ).items()),\n key=lambda x : x[0]\n ),\n sorted(\n list(h.computecount( vals, lambda x : x[2] ).items()),\n key=lambda x : x[0]\n )\n ]\n )\n return chart24_data",
"def do_chart23( self, tickets ) :\n data = {}\n for t in tickets :\n if not t.components :\n continue\n ts = t.statushistory[-1]\n data.setdefault( t.project.id, {}\n ).setdefault( t.components[0].componentname, []\n ).append(\n [ t.type.tck_typename, t.severity.tck_severityname,\n ts.status.tck_statusname ]\n )\n chart23_data = {}\n for p, cdict in data.iteritems() :\n chart23_data[p] = []\n for comp, vals in cdict.iteritems() :\n chart23_data[p].append(\n [ comp,\n sorted(\n list(h.computecount( vals, lambda x : x[0] ).items()),\n key=lambda x : x[0] \n ),\n sorted(\n list(h.computecount( vals, lambda x : x[1] ).items()),\n key=lambda x : x[0]\n ),\n sorted(\n list(h.computecount( vals, lambda x : x[2] ).items()),\n key=lambda x : x[0]\n )\n ]\n )\n return chart23_data",
"def getPastProjects(self)->list:\n returnList=[]\n for i in range(0,randint(1, 10)):\n randumProjectId = randint(0, 109)\n if randumProjectId not in returnList:\n returnList.append(randumProjectId)\n\n return returnList",
"def scrum(project):\r\n\r\n stories = project.in_progress_stories()\r\n stories_by_owner = group_stories_by_owner(stories)\r\n\r\n print bold(\"{} SCRUM -- {}\".format(project.name, pretty_date()))\r\n print\r\n\r\n for owner in stories_by_owner:\r\n print bold(owner)\r\n for story in stories_by_owner[owner]:\r\n print \" #{:12s}{:9s} {:7s} {}\".format(story.story_id,\r\n estimate_visual(story.estimate),\r\n story.story_type,\r\n story.name)\r\n\r\n print\r\n\r\n print bold(\"Bugs\")\r\n bugs = project.open_bugs()\r\n if len(bugs) == 0:\r\n print 'Not sure that I believe it, but there are no bugs'\r\n for bug in bugs:\r\n print \" #{:12s} {:4s} {}\".format(bug.story_id,\r\n initials(bug.owned_by),\r\n bug.name)",
"def main_conference_tickets_sold():\n TUTORIAL_TICKET_TYPES = [10, 11, 12, 14]\n return ticket_count(exclude=TUTORIAL_TICKET_TYPES)",
"def main():\n parser = argparse.ArgumentParser(description='Creates tickets for release certification')\n parser.add_argument('-u', '--username', help='jira username', default='admin')\n parser.add_argument('-p', '--password', help='jira password', default='admin')\n parser.add_argument('-c', '--config', help='path to config file', default='./options.ini')\n parser.add_argument('-j', '--jira', help='url of jira server', default='http://localhost:8080')\n\n args = parser.parse_args()\n\n jira_user = args.username\n jira_pass = args.password\n jira_server = args.jira\n config_file_path = args.config\n CONFIG.read(config_file_path)\n\n parent_ticket = config_map('JiraOptions')['parent_ticket']\n apprenda_version = config_map('VersionInfo')['to_version']\n jira_project = config_map('JiraOptions')['project']\n jira_issue_type = config_map('JiraOptions')['issue_type']\n jira = JIRA(jira_server, basic_auth=(jira_user, jira_pass))\n\n parent_issue = jira.issue(parent_ticket)\n ticket_list = []\n\n # create clean install tickets\n clean_strings = config_map('CleanInstallSection')\n for cloud in ['single', 'hybrid']:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(clean_strings['summary'], apprenda_version, cloud)\n ticket_to_add.format_description(clean_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # create upgrade tickets\n from_versions = json.loads(config_map('VersionInfo')['from_versions'])\n upgrade_strings = config_map('UpgradeSection')\n\n # single cloud\n for version in from_versions:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(upgrade_strings['summary'], apprenda_version, version,\n \"single\")\n ticket_to_add.format_description(upgrade_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # hybrid cloud\n for version in from_versions:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(upgrade_strings['summary'], apprenda_version, version,\n \"hybrid\")\n ticket_to_add.format_description(upgrade_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # create testing tickets for other tasks\n for section in CONFIG.sections():\n if 'Ticket' in section:\n strings = config_map(section)\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(strings['summary'], apprenda_version)\n ticket_to_add.format_description(strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n print 'Created {0} tickets, now sending them to Jira'.format(len(ticket_list))\n # send issues to jira and create tickets and links\n issues = jira.create_issues(field_list=ticket_list)\n\n for item in issues:\n jira.create_issue_link(\n type=\"Task of Story\",\n outwardIssue=item['issue'].key,\n inwardIssue=parent_issue.key,\n )\n\n print 'Finished linking issues, exiting.'",
"def get_newhire_tickets(group_id):\n url = f\"{BASE_URL}/api/v2/tickets\"\n headers = {\"AUTHorization\": f\"Basic {AUTH}\"}\n r = requests.get(url, headers=headers)\n if r.ok:\n print(f\"Got list of all new hire tickets.\")\n else:\n logging.debug(f\"Error - {r.status_code} - {r.content}\")\n tickets = r.json()[\"tickets\"]\n ticket_ids = set()\n last_hour = datetime.now() - timedelta(hours=1)\n\n for ticket in tickets:\n update_time = datetime.strptime(ticket[\"updated_at\"], \"%Y-%m-%dT%H:%M:%SZ\")\n # Check for tickets modified in the last hour\n if update_time > last_hour:\n # Verify the subject and group are related to New Hire Onboarding\n if \"New Hire\" in ticket[\"subject\"] and ticket[\"group_id\"] == group_id:\n start_date = get_start_date(ticket[\"id\"])\n # Check to see if ticket due date was already updated\n if start_date == ticket[\"due_by\"][0:10]:\n print(f'Ticket {ticket[\"id\"]} already updated.')\n else:\n ticket_ids.add(ticket[\"id\"])\n add_ticket_note(ticket[\"id\"], ticket[\"due_by\"][0:10])\n\n return ticket_ids",
"def get_tickets(request):\n tickets = Ticket.objects.filter(published_date__lte=timezone.now\n ())\n return render(request, \"issuetrackertickets.html\", {'tickets':tickets})",
"def getTicketList(fromDates=[date.today()-timedelta(days=30)], untilDates=[date.today()]):\n global ticketList\n ticketList = []\n for x in range(len(fromDates)):\n fromDate = fromDates[x]\n untilDate = untilDates[x]\n y = 0\n while True:\n # Add 1 to y each loop for page increment\n y = y + 1\n toAppend = backend.fetchTicketList(y, fromDate, untilDate)\n incrementRequestsSentCount()\n if len(toAppend) == 0:\n break\n for item in toAppend:\n ticketList.append(item)\n filterView()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Starts interactive time tracking session. Updates Freshbooks based on Toggl entries. | def time_tracking(self):
fb = FreshBooks()
tg = Toggl()
self.print_splash()
self.print("Tip: You can always enter 'skip' when you want to skip a time entry.", format='warn')
days = self.get_interactive_days() # number of days to go back
self.print("OK, I'll run you through the Toggl time entries of the past %i day(s)." % (days))
timestamp = self.get_timestamp(days) # unix timestamp including tz
time_entries = tg.get_time_entries(timestamp)
if len(time_entries) == 0:
self.print("No Toggl entries in this time span!", 'warn')
return False
time_entries = self.merge_toggl_time_entries(time_entries) # merge Toggl entries
fb_projects = fb.get_projects()
# Loop through merged Toggl time entries:
for entry in time_entries:
# Get and convert all necessary info:
client_id = tg.get_client_id(project_id=entry.get('pid'))
client_name = tg.get_client_name(client_id)
project = tg.get_project(entry.get('pid'))
duration = int(entry['duration']) / 60 / 60 # convert duration to hours
duration = round(duration * 4 ) / 4 # round hours to nearest .25
description = self.format_description(project['name'], entry['description'])
date = str(parser.parse(entry['start']).date())
# Print info in a nice way:
self.print_divider(30)
self.print("Description: " + description)
self.print("Date: " + date)
self.print("Hours spent: " + str(duration))
# Skip if Toggl entry is already booked:
if entry.get('tags') and tg.BOOKED_TAG in entry['tags']:
self.print("Skipping this entry because it is already in Freshbooks.", 'cross')
# Skip if duration is below 0.25:
elif duration < 0.25:
self.print("Skipping this entry because there are less than 0.25 hours spent.", 'cross')
# If billable, add to Freshbooks:
elif entry['billable']:
# Get FreshBooks project name through interactive search:
try:
self.print("Project: \U0001F50D ")
fb_project_name = self.interactive_search(fb_projects.keys(), client_name)
# Handle KeyboardInterrupt
except KeyboardInterrupt:
answer = input("\nKeyboardInterrupt! Skip current entry or quit time tracking? (S/q) ")
if answer.lower() == 's' or answer == '':
self.clear_lines(1)
self.print("Skipping this entry.", 'cross')
continue
else:
self.clear_lines(1)
self.print("Ok, stopping time tracking.", 'cross')
sys.exit()
# If user requests so, skip this entry:
self.clear_lines(1)
if not fb_project_name:
self.print("Skipping this entry.", 'cross')
continue
# Otherwise, add entry to FreshBooks and tag Toggl entry/entries:
self.print("Project: " + fb_project_name)
project_id = fb.get_project_id(fb_project_name)
fb.add_entry(project_id, duration, description, date)
tg.tag_projects(entry['merged_ids'], tg.BOOKED_TAG)
# If not billable, skip entry:
else:
self.print("Skipping this entry because it is not billable.", 'cross')
self.print_divider(30)
answer = input("All done! Open FreshBooks in browser to verify? (Y/n) ")
if answer.lower() == 'y' or answer == '':
webbrowser.open('https://%s.freshbooks.com/timesheet' % fb.fb_creds['subdomain']) | [
"def setTrackStartTime() :\n s.startTrack()",
"def start():\n start_tracking()",
"def main():\n\n # Step 0:\n # Print lovley greetings.\n print(\"\"\"\n ------\n > Welcome to Værmelder!\n > Store your room climate into your OneDrive!\n ------\"\"\")\n\n # Step 1.\n # Get access token via device token\n print(\"\"\"\n ------\n Step 1:\n Get user's code.\n ------\"\"\")\n session = get_session()\n if not session:\n handle_error()\n\n # Step 2.\n # Get worksheet from OneDrive.\n print(\"\"\"\n ------\n Step 2:\n Get worksheet ID\n \"\"\")\n worksheet_id = get_worksheet_id(session)\n if not worksheet_id:\n handle_error()\n\n # Step 3.\n # Starting repeating timer.\n print(\"\"\"\n ------\n Step 3:\n Starting timer with 10 minutes delay.\n \"\"\")\n timer = RepeatingTimer(timer_tick, 60 * 10, session, worksheet_id)\n timer.start()",
"def time_automation_listener(now):\n action()",
"def main():\n \n ## Determine whether to query for the sunset or sunrise\n if datetime.now().hour >= 20:\n ## Run sunrise tweets after 8PM\n type = 'sunrise'\n else:\n ## Any earlier, run sunset tweets (by default run at 12PM)\n type = 'sunset'\n \n ## Iterate through the time series and states\n log_df = TWEET_HISTORY_DF.copy()\n for loc in c.LOCATIONS.keys():\n \n ## Instantiate a class to do the tweetin'\n MySunTweeter = SunTweeter(loc, type, log_df)\n MySunTweeter.send_tweet()\n \n ## Save the log to use in the next iteration of the loop\n log_df = MySunTweeter.log_df\n \n ## Overwrite the log with the updated records\n log_df.to_csv(\"log/SunsetWx_full_tweet_log.csv\",\n index = False)",
"def start(self):\n self.current_time = time.perf_counter()",
"def do_upt(self, arg):\n self.do_timesheet('update today')",
"def time_automation_listener(now):\n hass.async_add_job(action, {\n 'trigger': {\n 'platform': 'time',\n 'now': now,\n },\n })",
"def hourly():\r\n\r\n # get the full history for any new stock added to the database\r\n CompanyHistory().init()",
"def begin():\n prefix = \"pygmt-session\"\n with Session() as lib:\n lib.call_module(module=\"begin\", args=prefix)\n # pygmt relies on GMT modern mode with GMT_COMPATIBILITY at version 6\n lib.call_module(module=\"set\", args=\"GMT_COMPATIBILITY 6\")",
"def run(self):\n self.timestamp['start'] = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')\n\n for point in self._prepare_grid():\n graph = self._prepare_graph(**point)\n env = self._prepare_env(graph, **point)\n log = self._prepare_logger(graph, env, **point)\n\n try:\n env.run(until=self.runtime)\n except Exception as e:\n print(e)\n log.close()\n\n # self.timestamp[grid.hash_grid_point(point)].append(datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))\n\n self.timestamp['end'] = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')",
"def init():\n scheduler.start()\n scheduler.add_job(\n func=refresh_data,\n trigger=IntervalTrigger(days=1),\n id=\"refresh_data_job\",\n name=\"pull data from course academic timetable\",\n replace_existing=True,\n )\n atexit.register(lambda: scheduler.shutdown())\n # delay launching the app until there is some data available\n refresh_data()",
"def insert_time(self):\n if self.controller.shared_data.obj_track.size == 0:\n message = 'There is no loaded track to insert timestamp'\n messagebox.showwarning(title='Insert Time Assistant',\n message=message)\n return\n\n self.timestamp = dt.datetime(2000, 1, 1, 0, 0, 0)\n self.speed = 0\n\n spinbox_options = {'year': [1990, 2030, 2000],\n 'month': [1, 12, 1],\n 'day': [1, 31, 1],\n 'hour': [0, 23, 0],\n 'minute': [0, 59, 0],\n 'second': [0, 59, 0]}\n\n top = tk.Toplevel()\n top.title('Insert Time Assistant')\n\n # Insert data frame\n frm_form = tk.Frame(top, relief=tk.FLAT, borderwidth=3)\n frm_form.pack() # insert frame to use grid on it\n spn_time = collections.defaultdict()\n\n for i, entry in enumerate(spinbox_options):\n # This allow resize the window\n top.columnconfigure(i, weight=1, minsize=75)\n top.rowconfigure(i, weight=1, minsize=50)\n\n # Create widgets\n var = tk.StringVar(top)\n var.set(spinbox_options[entry][2])\n\n spn_time[entry] = tk.Spinbox(from_=spinbox_options[entry][0],\n to=spinbox_options[entry][1],\n master=frm_form,\n width=8,\n textvariable=var,\n justify=tk.RIGHT,\n relief=tk.FLAT)\n\n lbl_label = tk.Label(master=frm_form, text=f'{entry}', anchor='w')\n\n # Grid\n lbl_label.grid(row=i, column=0) # grid attached to frame\n spn_time[entry].grid(row=i, column=1)\n\n # Insert speed\n i = len(spn_time)\n top.columnconfigure(i, weight=1, minsize=75)\n top.rowconfigure(i, weight=1, minsize=50)\n spn_speed = tk.Spinbox(from_=0, to=2000,\n master=frm_form,\n width=8,\n justify=tk.RIGHT,\n relief=tk.FLAT)\n lbl_label = tk.Label(master=frm_form, text='speed (km/h)', anchor='w')\n lbl_label.grid(row=i, column=0, pady=10)\n spn_speed.grid(row=i, column=1)\n\n def _insert_timestamp():\n # Check input data and insert timestamp\n try:\n self.timestamp = dt.datetime(int(spn_time['year'].get()),\n int(spn_time['month'].get()),\n int(spn_time['day'].get()),\n int(spn_time['hour'].get()),\n int(spn_time['minute'].get()),\n int(spn_time['second'].get()))\n self.speed = float(spn_speed.get())\n if self.speed <= 0:\n raise ValueError('Speed must be a positive number.')\n\n # Insert timestamp\n self.controller.shared_data.obj_track.\\\n insert_timestamp(self.timestamp, self.speed)\n top.destroy()\n\n except (ValueError, OverflowError) as e:\n messagebox.showerror('Input Error', e)\n\n def _clear_box():\n for s in spn_time:\n spn_time[s].delete(0, 8)\n spn_time[s].insert(0, spinbox_options[s][2])\n spn_speed.delete(0, 8)\n spn_speed.insert(0, 0)\n\n # Button frame\n frm_button = tk.Frame(top)\n frm_button.pack(fill=tk.X, padx=5,\n pady=5) # fill in horizontal direction\n\n btn_clear = tk.Button(master=frm_button, text='Clear',\n command=_clear_box)\n btn_submit = tk.Button(master=frm_button, text='Submit',\n command=_insert_timestamp)\n btn_clear.pack(side=tk.RIGHT, padx=10)\n btn_submit.pack(side=tk.RIGHT, padx=10)",
"def tic():\n import time\n global startTime_for_tictoc\n startTime_for_tictoc = time.time()",
"def start_session(self):\n Geant4.StartUISession()",
"def start_program():\n\n today = date.today()\n current_date = today.strftime(\"%d/%m/%Y\")\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n print('Starting Program : Customer_Search_Name.py - at : ' + current_time + ' on : ' + current_date)",
"def update_on_timestep(self):\n raise NotImplementedError",
"def meeting_times():\n app.logger.debug(\"Checking credentials for Google calendar access...\")\n credentials = valid_credentials()\n if not credentials:\n app.logger.debug(\"Redirecting to authorization...\")\n return flask.redirect(flask.url_for('oauth2callback'))\n\n gcal_service = get_gcal_service(credentials)\n app.logger.debug(\"Returned from get_gcal_service.\")\n\n ## TODO: Update to windows\n flask.session['windows'] = part2.windows(gcal_service, request.form)\n\n return flask.redirect(flask.url_for(\"index\"))",
"def time(lancet, issue):\n issue = get_issue(lancet, issue)\n\n with taskstatus(\"Starting harvest timer\") as ts:\n lancet.timer.start(issue)\n ts.ok(\"Started harvest timer\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Starts interactive search, allows user to make a selection. Accepts array of strings and optional (user) query. Returns string chosen by user. | def interactive_search(self, choices, query=None):
if query:
match = self.get_interactive_match(choices, query)
if match:
self.print("Matched query to '%s'." % (match))
answer = input("Is that correct? (Y/n) ")
self.clear_lines(1)
if answer.lower() == 'y' or answer == '':
self.clear_lines(1)
return match
else:
self.clear_lines(1)
return self.interactive_search(choices)
else:
return None
else:
query = input("Please type a query: ")
self.clear_lines(1)
return self.interactive_search(choices, query) | [
"def launch_search(self):\n \n # gets the active selections from pymol\n active_selections = cmd.get_names('selections', 1)\n if len(active_selections) == 0:\n cmd.get_wizard().set_status('no selection')\n else:\n\n selection = active_selections[0]\n print \"The active selections are \" + str(selection)\n pdbstr = cmd.get_pdbstr(selection)\n print 'pdbstr is', pdbstr\n self.stop_search()\n \n# print cmd.get_wizard(), self.win.rmsd.get(), self.win.num_structs.get(), self.win.full_match, self.win.datab, pdbstr, self.win.serverURL, cmd.get_wizard().cmd, self.win.jobIDs\n\n self.win.searchThread = SearchThread(cmd.get_wizard(), self.win.rmsd.get(),\n self.win.num_structs.get(), self.win.full_match,\n self.win.datab, pdbstr, self.win.serverURL, cmd.get_wizard().cmd, self.win.jobIDs)\n self.win.searchThread.start()\n cmd.get_wizard().set_status('search launched')\n cmd.get_wizard().searchProgress = 0\n \n cmd.refresh_wizard()",
"def search():\n try:\n query = request.args.get(\"q\").lower()\n except AttributeError:\n query = request.args.get(\"q\")\n\n # Adding browse functionality\n browse = request.args.get(\"browse\")\n\n if browse is None:\n # Select all rows with a column value that includes query\n results = db.execute(\"SELECT * FROM books \"\n \"WHERE LOWER(isbn) LIKE CONCAT('%', :q, '%')\"\n \"OR LOWER(title) LIKE CONCAT('%', :q, '%') \"\n \"OR LOWER(author) LIKE CONCAT('%', :q, '%') \"\n \"ORDER BY title LIMIT 100\", {'q': query}).fetchall()\n else:\n # Select titles starting with letter\n results = db.execute(\n \"SELECT * FROM books \"\n \"WHERE LOWER(title) LIKE CONCAT(:q, '%') \"\n \"ORDER BY title\", {'q': query}).fetchall()\n\n return render_template(\"search.html\", browse=browse, query=query, results=results)",
"def search_by_selected(self):\n select_str = self.GetSelectedText()\n self.parent.search_by_str(select_str)",
"def search_prompt():\n search_field = [\n {\n 'type': 'input',\n 'name': 'search',\n 'message': 'Enter a search term: ',\n },\n ]\n search_query = prompt(search_field)['search']\n return search_query",
"def search():\n import booksearch as bs\n\n opt = var.get()\n term = searchBox.get()\n term2 = dateBox.get()\n\n # Case statement (substitute) for different search areas\n # Each key is an option in the OptionMenu\n searchBy = {\n \"Title & Author\" : bs.search(term),\n \"ID\" : bs.bookID(term),\n \"Date\" : bs.dateRange(term, term2),\n }\n query = searchBy[opt] # Make & stores a query (2D list)\n\n # Repopulates table\n if term != \"\":\n populate(query)",
"def _search(self, mdb, query, filename, season_num, episode_num, auto=False):\n choices = []\n for datasource, movie in mdb.search(query, season=season_num, episode=episode_num):\n if auto:\n return datasource, movie\n fmt = '<b>{title}</b> - <b>{ep}</b> S{season:02d}E{episode:02d} [{datasource}]'\n choices.append(option((datasource, movie), fmt, title=movie['title'],\n ep=movie['episode_title'],\n season=movie['season'],\n episode=movie['episode'],\n datasource=datasource.name))\n\n if not choices:\n printer.p('No results to display for the file: {fn}', fn=filename)\n return None, None\n\n choices.append(option(('manual', None), 'Enter information manually'))\n choices.append(option(('abort', None), 'None of these'))\n printer.p('Please choose the relevant result for the file: {fn}', fn=filename, end='\\n\\n')\n return printer.choice(choices)",
"def search_results():\n new_option = Common.search_menu()\n if new_option == 'a':\n list_employees()\n results = search_employee(input(\"Name of employee: \"))\n elif new_option == 'b':\n results = search_date()\n elif new_option == 'c':\n results = search_duration(Common.get_minutes())\n elif new_option == 'd':\n results = text_search(input(\"What text to search for: \"))\n else:\n results = None\n return results",
"def book_search(library: list) -> None:\n options = ['Author', 'Title', 'Publisher', 'Shelf', 'Category', 'Subject']\n prompt = '\\nWhat option would you like to search by?'\n choice = get_user_choice(options, prompt)\n if choice == '1':\n search_by_chosen_option(library, options[0])\n elif choice == '2':\n search_by_chosen_option(library, options[1])\n elif choice == '3':\n search_by_chosen_option(library, options[2])\n elif choice == '4':\n search_by_shelf(library)\n elif choice == '5':\n search_by_chosen_option(library, options[4])\n elif choice == '6':\n search_by_chosen_option(library, options[5])",
"def search_term():\n search = input(\"Enter term or string: \")\n entries = select_entries()\n entries = entries.where(\n (Entry.task_name.contains(search)) |\n (Entry.notes.contains(search)))\n view_entries(entries)\n return entries",
"def other_search(self):\n test = self.ask_zoekarg.text()\n if test:\n self.parent().search_arg = test\n self.parent().do_select()",
"def __ui_choose_search_criteria_for_persons(self):\n print(\"By which criteria do you want to search persons?\\n\"\n \" 1. By name\\n\"\n \" 2. By phone number\\n\")\n user_choice = input(\"Type your option: \").strip()\n if user_choice == \"1\":\n self.__ui_search_persons_by_name()\n elif user_choice == \"2\":\n self.__ui_search_persons_by_phone_number()\n else:\n print(\"Invalid option!\\n\")\n return",
"def user_search(self):\n while True:\n # prompt choose search method\n search_option = input('please choose a search option: ')\n # if user provides a invalid response to prompt\n if search_option not in ('a', 'b', 'c', 'd', 'e'):\n print('this is not an available option')\n # if response valid loop will break\n else:\n break\n\n # if date prompt for date get input\n if search_option == 'a':\n clear()\n # begin loop for entry search by date data collection\n\n while True:\n\n date = input(\n 'please input a date to search '\n 'in the format yyyy/mm/dd: '\n )\n # tests input against regex pattern\n pattern=re.compile(\"(\\d{4}\\/\\d{2}\\/\\d{2})\")\n match = pattern.fullmatch(date)\n # if pattern fails test\n # messsage will prompt until correct format recieved\n if not match:\n print('this is not an appropriate format')\n continue\n # if usre input valid\n elif match:\n\n # Utility object instantiated\n utility = Utility()\n # string -> datetime object\n str2date = utility.str2date(date)\n # Worklog object instantiated\n search = Inspector()\n # call to WorkLog.search_by_date mentod\n # method handles search logic/ display of relevant entry\n try:\n clear()\n \"\\n\"\n print('here are the matching entries: \\n')\n search_results = search.search_by_date(str2date)\n app()\n except ValueError:\n print('It looks like there is no matching results')\n app()\n\n # if duration prompt for duration get input\n elif search_option == 'b':\n # begin loop for entry search by duration data collection\n while True:\n\n duration = input(\n 'please input the duration of the task '\n 'that you want to search: '\n )\n # tests input against regex pattern\n durpattern = re.compile(\"(\\d+)\")\n durmatch = durpattern.fullmatch(duration)\n\n # if pattern fails test\n # messsage will prompt until correct format recieved\n if not durmatch:\n print('this is not an appropriate format')\n continue\n # if user input valid\n elif durmatch:\n # Utility object instantiated\n utility = Utility()\n # string -> timedelta\n str2time = utility.str2time(duration)\n # Worklog object instantiated\n search = Inspector()\n # call to WorkLog.search_by_duration\n # method handles search logic/ display of relevant entry\n try:\n clear()\n \"\\n\"\n print('here are the matching entries: \\n')\n search_results = search.search_by_duration(str2time)\n app()\n except ValueError:\n print('It looks like there is no matching results')\n app()\n # if string prompt for string get input\n elif search_option == 'c':\n while True:\n string = input(\n 'please type string and we'\n 'will search against it: '\n )\n # Worklog object instantiated\n search = Inspector()\n # call to WorkLog.search_by_duration\n # method handles search logic/ display of relevant entry\n try:\n clear()\n \"\\n\"\n print('here are the matching entries: ')\n search_results = search.search_by_string(string)\n # if now matching entries\n app()\n except ValueError:\n print('It looks like there is no matching results')\n app()\n\n elif search_option == 'd':\n while True:\n string = input(\n 'please type employee name to search for: '\n )\n # Worklog object instantiated\n search = Inspector()\n # call to WorkLog.search_by_duration\n # method handles search logic/ display of relevant entry\n try:\n clear()\n \"\\n\"\n print('here are the matching entries: \\n')\n search_results = search.search_by_employee(string)\n app()\n except ValueError:\n print('It looks like there is no matching results')\n app()\n elif search_option == 'e':\n clear()\n app()",
"def _search(self, mdb, query, filename, year=None, auto=False):\n choices = []\n for datasource, movie in mdb.search(query, year=year):\n if auto:\n return datasource, movie\n if movie.get('directors'):\n directors = ' by '\n if len(movie['directors']) > 1:\n directors += '%s and %s' % (', '.join(movie['directors'][0:-1]),\n movie['directors'][-1])\n else:\n directors += movie['directors'][0]\n else:\n directors = ''\n fmt = '<b>{title}</b> ({year}){directors} [{datasource}]'\n choices.append(option((datasource, movie), fmt, title=movie['title'],\n year=movie.get('year', 'Unknown'),\n directors=directors,\n datasource=datasource.name))\n\n if not choices:\n printer.p('No results to display for the file: {fn}', fn=filename)\n return None, None\n\n choices.append(option(('manual', None), 'Enter information manually'))\n choices.append(option(('abort', None), 'None of these'))\n printer.p('Please choose the relevant movie for the file: {fn}', fn=filename, end='\\n\\n')\n return printer.choice(choices)",
"def submit_query(self, *args):\n query_string = self.query_item.get().upper()\n query_type = self.get_query_type(self.search_type_default.get()) # change dropdown text to match db fields\n\n select_statement = \"\"\" SELECT * FROM MUSIC WHERE {0} LIKE '%{1}%' ORDER BY 'cdname' \"\"\".format(query_type, query_string)\n # Using the LIKE statement allows searching for substrings\n # allows input of \"mer\" to match \"summer, mermaid, hammering\"\n self.cursor.execute(select_statement)\n\n results = []\n for row in self.cursor:\n result_str = \"\"\n for i in row:\n result_str += str(i) + \" \"\n results.append(result_str)\n\n self.result_field.delete(0, END)\n for i in range(len(results)):\n self.result_field.insert(END, results[i])",
"def __search_student(self):\n menu_string = \"Search for a student:\\n\"\n menu_string += \"\\t1. by ID\\n\"\n menu_string += \"\\t2. by discipline_name\\n\"\n menu_string += \"\\t0. Exit\\n\"\n\n stop = False\n while not stop:\n command_list = \\\n {\n '1': self.__ui_search_student_by_id,\n '2': self.__ui_search_student_by_name,\n '0': self.__no_command\n }\n command = self.__ui_read_command(menu_string)\n\n if command == '0':\n return\n\n search = input(\"Enter search_substring string: \")\n if len(search) == 0:\n print(\"Search string cannot be empty!\")\n return\n\n if command in command_list.keys():\n command_list[command](search)\n else:\n print(\"Invalid command!\")",
"def get_interactive_match(self, choices, query):\n if query in self.SKIP_KEYWORDS:\n return None\n results = process.extract(query, choices, limit=10) # fuzzy string matching\n best_match = results[0]\n second_best_match = results[1]\n if best_match[1] == second_best_match[1] or best_match[1] < 50: # if inconclusive or low score\n self.print(\"Couldn't find a conclusive match for '%s'. Best matches:\" % (query))\n i = 0\n for result in results:\n i += 1\n print(\" [%i] %s\" % (i, result[0]))\n answer = input(\"Choose one or specify a less ambiguous query: \")\n self.clear_lines(2 + len(results))\n if answer.isdigit() and int(answer) <= len(results):\n return results[int(answer) - 1][0]\n else:\n return self.get_interactive_match(choices, answer)\n else:\n return best_match[0]",
"def interactive_select(space, current):\n print \"Type an element name, an element index, or an unambiguous prefix to add to your selection.\"\n print \"Type '\" + color_code(MAGENTA) + \"list\" + CLEAR_COLOR +\"' to see the list of valid selections/indices.\"\n print \"Type '\" + color_code(MAGENTA) + \"clear\" + CLEAR_COLOR +\"' to clear selection.\"\n print \"Enter an empty line when done.\\n\"\n \n done = False\n while not done:\n print color_code(BLACK, bold=True), \"\\nCurrent selection\" + CLEAR_COLOR + \":\", (current if current else \"None\")\n tentative = raw_input(color_code(YELLOW) + \"Selection or Command\" + CLEAR_COLOR + \": \")\n matches = [el for el in space if el.startswith(tentative)]\n try: index = int(tentative)\n except ValueError: index = None\n if tentative == 'list':\n for i,el in enumerate(space):\n print \"\\t\", color_code(BLUE, bold=True), i, CLEAR_COLOR, el\n print \"\\n\"\n elif tentative == 'clear':\n current = []\n elif tentative == '':\n if current:\n print color_code(GREEN), \"\\nFinal selection\" + CLEAR_COLOR + \":\", current, \"\\n\\n\"\n done = True\n else:\n print_error(\"Must select at least one\")\n elif len(matches) > 1:\n print_error(\"Multiple matches found for `{}' ({})\".format(tentative, matches))\n elif len(matches):\n if matches[0] in current:\n print_warning(\"{} was already selected\".format(matches[0]))\n else:\n current.append(matches[0])\n elif index is not None:\n if index < 0 or index >= len(space):\n print_error(\"Invalid index {}\".format(index))\n elif space[index] in current:\n print_warning(\"{} was already selected\".format(space[index]))\n else:\n current.append(space[index])\n else:\n print_error(\"Unknown token: {}\".format(tentative))\n \n return current",
"def enter_search_console():\n logger.output(\"SEARCH MODE\")\n while True:\n command = logger.prompt(\"search\", 1).split()\n if not command: continue\n if command[0] == 'quit': break\n \n # Breakdown of command (for clarity).\n try:\n category_name = command[0]\n notepack_name = command[1]\n except IndexError:\n logger.output(f\"Improper input. Try again.\", 1)\n continue\n\n # Create the Path objects for each.\n category_path = utility.get_root_path().joinpath(category_name)\n notepack_path = category_path.joinpath(notepack_name)\n\n # Confirm if Paths are new and need to be created.\n category_path = confirm_path_console(category_path)\n notepack_path = confirm_path_console(notepack_path)\n\n # Create any missing files and paths for the notepack.\n confirm_files_and_directories(category_path, \n config.CATEGORY_CONFIG)\n confirm_files_and_directories(notepack_path,\n config.NOTEPACK_CONFIG)\n return",
"def __ui_choose_search_criteria_for_activities(self):\n print(\"By which criteria do you want to search activities?\\n\"\n \" 1. By date\\n\"\n \" 2. By description\\n\")\n user_choice = input(\"Type your option: \").strip()\n if user_choice == \"1\":\n self.__ui_search_activities_by_date()\n elif user_choice == \"2\":\n self.__ui_search_activities_by_description()\n else:\n print(\"Invalid option!\\n\")\n return"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns string that best matches query out of a list of choices. Prompts user if unsure about best match. | def get_interactive_match(self, choices, query):
if query in self.SKIP_KEYWORDS:
return None
results = process.extract(query, choices, limit=10) # fuzzy string matching
best_match = results[0]
second_best_match = results[1]
if best_match[1] == second_best_match[1] or best_match[1] < 50: # if inconclusive or low score
self.print("Couldn't find a conclusive match for '%s'. Best matches:" % (query))
i = 0
for result in results:
i += 1
print(" [%i] %s" % (i, result[0]))
answer = input("Choose one or specify a less ambiguous query: ")
self.clear_lines(2 + len(results))
if answer.isdigit() and int(answer) <= len(results):
return results[int(answer) - 1][0]
else:
return self.get_interactive_match(choices, answer)
else:
return best_match[0] | [
"def interactive_search(self, choices, query=None):\n if query:\n match = self.get_interactive_match(choices, query)\n if match:\n self.print(\"Matched query to '%s'.\" % (match))\n answer = input(\"Is that correct? (Y/n) \")\n self.clear_lines(1)\n if answer.lower() == 'y' or answer == '':\n self.clear_lines(1)\n return match\n else:\n self.clear_lines(1)\n return self.interactive_search(choices)\n else:\n return None\n else:\n query = input(\"Please type a query: \")\n self.clear_lines(1)\n return self.interactive_search(choices, query)",
"def get_best_fuzzy(value: str, choices: list, min_score: int = None, \n scorer: Callable = fuzz_fuzz.WRatio, return_score: bool = False) -> Union[None, str]:\n\n assert isinstance(value, six.string_types), 'Invalid value. Must be a string.'\n\n min_score = min_score or config.get('fuzzy_score_cutoff', 75)\n minfuzz = config.get('minimum_fuzzy_characters', 3)\n assert len(value) >= minfuzz, f'Your fuzzy search value must be at least {minfuzz} characters long.'\n\n # returns a tuple of (best choice, score, index of choice in list or key of choice in dict)\n bests = fuzz_proc.extract(value, choices, scorer=scorer, score_cutoff=min_score)\n\n if len(bests) == 0:\n best = None\n elif len(bests) == 1:\n best = bests[0]\n else:\n # compare the two scores of top two choices\n # or take the top choice\n if bests[0][1] == bests[1][1]:\n best = None\n else:\n best = bests[0]\n\n if best is None:\n raise ValueError(f\"Cannot find a good match for '{value}'. \"\n 'Your input value is too ambiguous.')\n\n return best if return_score else best[0]",
"def select_option(options, choice):\n choices = []\n txt = \"\"\n last = len(options) - 1\n for opt in options:\n if options.index(opt) == 0:\n txt += \"'\" + str(opt) + \"'\"\n elif options.index(opt) == last:\n txt += \" and '\" + str(opt) + \"'\"\n else:\n txt += \", '\" + str(opt) + \"'\"\n choices.append({'name': opt})\n\n question = [\n {\n 'type': 'list',\n 'message': 'The similarities between \\'' + choice + '\\' with ' + txt + ' are equal. Choose the one to consider.',\n 'name': 'option',\n 'choices': choices\n }\n ]\n\n answer = prompt(question, style=style)\n return answer.get(\"option\")",
"def extractOne(query, choices, match_type=default_algorithim, score_cutoff=0):\n\n try:\n best_list = extract(query, choices, match_type, score_cutoff)\n\n best = max(best_list, key=lambda i: i[1])\n\n return best\n\n except:\n return None",
"async def choose(self, ctx, *args):\n query = \" \".join(args)\n choices = query.split(\" or \")\n if len(choices) < 2:\n await ctx.send(\"Give me at least 2 options to choose from! (separate options with `or`)\")\n self.logger.warning(misolog.format_log(ctx, f\"1 option\"))\n return\n choice = rd.choice(choices).strip()\n await ctx.send(f\"I choose **{choice}**\")\n self.logger.info(misolog.format_log(ctx, f\"{choice}\"))",
"async def randomChoice(self, ctx: commands.Context, *choices: str):\n if not choices:\n await ctx.reply(f\"Command failed - no arguments given.\\nEnter a sequence of arguments to choose from (you can use quotes for grouping).\", mention_author=False)\n elif len(choices)==1:\n await ctx.reply(f\"After some extremely randomized choosing from the one singular option that was given to choose from, the surprising result is:\\n{choices[0]}\", mention_author=False)\n else:\n await ctx.reply(f\"Randomly chosen result:\\n{random.choice(choices)}\", mention_author=False)",
"def choices(choice):\n if choice == '1':\n return ('Refilled Tanks')\n elif choice == '2':\n return ('Checked Revenue')\n elif choice == '3':\n return ('Opened Log')\n elif choice == '4':\n return ('Looked At Tank')",
"def ask(question, options, default):\n assert default in options\n\n question += \" ({})? \".format(\"/\".join(o.upper() if o == default else o for o in options))\n selected = None\n while selected not in options:\n selected = input(question).strip().lower()\n if selected == \"\":\n selected = default\n else:\n if selected not in options:\n question = \"Please type '{}'{comma} or '{}': \".format(\n \"', '\".join(options[:-1]), options[-1],\n comma=',' if len(options) > 2 else '',\n )\n return selected",
"def prompt_choices(choices, prompt='Please choose an option:',\n input_prompt='Selection: ',\n stream=sys.stderr):\n\n # loop until we get a valid selection\n while True:\n stream.write(prompt + '\\n')\n for i, choice in enumerate(choices):\n stream.write('[{0}]: {1}\\n'.format(i, choice))\n\n chosen = raw_input(input_prompt)\n\n try:\n index = int(chosen)\n if index >= 0:\n return choices[index]\n except (ValueError, IndexError):\n pass",
"def best_choice_of_letter(found: list, unfound: list) -> (chr, str, int):\n\n best_letter = \"\"\n max_val = float(\"-inf\")\n best_dia = \"\"\n # Loop over unfound letters finding the best choice\n for letter in unfound:\n (val,dia) = min_distance_of_letter(letter, found, unfound)\n if val > max_val:\n max_val = val\n best_letter = letter\n best_dia = dia\n return best_letter, best_dia, max_val",
"def find_better_question(self) -> str:\n ...",
"def get_best_match(indicator: Indicator, items: Iterable[str]) -> str:\n best_match = process.extractOne(indicator.name, items)[0]\n return best_match",
"def pick(self):\n if not self.choices_dict:\n raise self.NoChoices()\n template = \" {key: >\" + str(self.width) + \"s}) {text:s}\"\n\n print()\n for choice in self.choices:\n if \"key\" in choice:\n print(template.format(key=choice[\"key\"], text=choice[\"text\"]))\n else:\n print(choice[\"text\"])\n print()\n\n response = \"\"\n while response not in self.choices_dict:\n response = input(\"?: \")\n\n return self.choices_dict[response]",
"def get_search_scores(query, choices, ignore_case=True, template='{}',\n valid_only=False, sort=False):\n # First remove spaces from query\n query = query.replace(' ', '')\n pattern = get_search_regex(query, ignore_case)\n results = []\n\n for choice in choices:\n r = re.search(pattern, choice)\n if query and r:\n result = get_search_score(query, choice, ignore_case=ignore_case,\n apply_regex=False, template=template)\n else:\n if query:\n result = (choice, choice, NOT_FOUND_SCORE)\n else:\n result = (choice, choice, NO_SCORE)\n\n if valid_only:\n if result[-1] != NOT_FOUND_SCORE:\n results.append(result)\n else:\n results.append(result)\n\n if sort:\n results = sorted(results, key=lambda row: row[-1])\n\n return results",
"def get_suggested_text(): \n # for now, just print the sentences\n pass",
"def find_best_candidate(s_array):\n best_string = ''\n max_val = 0\n for s in s_array:\n score = compare(s)\n if score > max_val:\n max_val = score\n best_string = s\n return best_string",
"def choose_candidate(candidates, singleton, rec, cur_artist=None,\n cur_album=None, item=None, itemcount=None,\n choices=[]):\n # Sanity check.\n if singleton:\n assert item is not None\n else:\n assert cur_artist is not None\n assert cur_album is not None\n\n # Build helper variables for the prompt choices.\n choice_opts = tuple(c.long for c in choices)\n choice_actions = {c.short: c for c in choices}\n\n # Zero candidates.\n if not candidates:\n if singleton:\n print_(\"No matching recordings found.\")\n else:\n print_(\"No matching release found for {} tracks.\"\n .format(itemcount))\n print_('For help, see: '\n 'https://beets.readthedocs.org/en/latest/faq.html#nomatch')\n sel = ui.input_options(choice_opts)\n if sel in choice_actions:\n return choice_actions[sel]\n else:\n assert False\n\n # Is the change good enough?\n bypass_candidates = False\n if rec != Recommendation.none:\n match = candidates[0]\n bypass_candidates = True\n\n while True:\n # Display and choose from candidates.\n require = rec <= Recommendation.low\n\n if not bypass_candidates:\n # Display list of candidates.\n print_('Finding tags for {} \"{} - {}\".'.format(\n 'track' if singleton else 'album',\n item.artist if singleton else cur_artist,\n item.title if singleton else cur_album,\n ))\n\n print_('Candidates:')\n for i, match in enumerate(candidates):\n # Index, metadata, and distance.\n line = [\n '{}.'.format(i + 1),\n '{} - {}'.format(\n match.info.artist,\n match.info.title if singleton else match.info.album,\n ),\n '({})'.format(dist_string(match.distance)),\n ]\n\n # Penalties.\n penalties = penalty_string(match.distance, 3)\n if penalties:\n line.append(penalties)\n\n # Disambiguation\n disambig = disambig_string(match.info)\n if disambig:\n line.append(ui.colorize('text_highlight_minor',\n '(%s)' % disambig))\n\n print_(' '.join(line))\n\n # Ask the user for a choice.\n sel = ui.input_options(choice_opts,\n numrange=(1, len(candidates)))\n if sel == 'm':\n pass\n elif sel in choice_actions:\n return choice_actions[sel]\n else: # Numerical selection.\n match = candidates[sel - 1]\n if sel != 1:\n # When choosing anything but the first match,\n # disable the default action.\n require = True\n bypass_candidates = False\n\n # Show what we're about to do.\n if singleton:\n show_item_change(item, match)\n else:\n show_change(cur_artist, cur_album, match)\n\n # Exact match => tag automatically if we're not in timid mode.\n if rec == Recommendation.strong and not config['import']['timid']:\n return match\n\n # Ask for confirmation.\n default = config['import']['default_action'].as_choice({\n 'apply': 'a',\n 'skip': 's',\n 'asis': 'u',\n 'none': None,\n })\n if default is None:\n require = True\n # Bell ring when user interaction is needed.\n if config['import']['bell']:\n ui.print_('\\a', end='')\n sel = ui.input_options(('Apply', 'More candidates') + choice_opts,\n require=require, default=default)\n if sel == 'a':\n return match\n elif sel in choice_actions:\n return choice_actions[sel]",
"def _jobchecker(self, choice: str):\n\n if choice == 'engineering and architecture':\n return 'egr'\n elif choice == 'software':\n return 'sof'\n elif choice == \"accounting\" or \"finance\":\n return \"acc\"\n elif choice == \"administration\" or \"admin\" or \"office\":\n return \"ofc\"\n elif choice == \"art\" or \"media\" or \"design\":\n return \"med\"\n elif choice == \"biotech\" or \"science\":\n return \"sci\"\n elif choice == \"business\" or \"management\":\n return \"bus\"\n elif choice == \"customer service\" or \"hr\":\n return \"csr\"\n elif choice == \"education\" or \"teaching\":\n return \"edu\"\n elif choice == \"miscellaneous\":\n return \"etc\"\n elif choice == \"food\" or \"hospitality\":\n return \"fbh\"\n elif choice == \"general\" or \"labour\" or \"general labour\":\n return \"lab\"\n elif choice == \"government\":\n return \"gov\"\n else:\n raise Exception(\"invalid job choice entered.\")",
"def extract(query, choices, match_type=default_algorithim, score_cutoff=0):\n try:\n try:\n if choices is None or len(choices) == 0:\n return\n except TypeError:\n pass\n\n results = []\n \n for i in choices:\n score = (match_type(query, i))\n data = (i, score)\n if score >= score_cutoff:\n results.append(data)\n\n return results\n\n except:\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Asks a user how many days to go back. Returns int. | def get_interactive_days(self):
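        # Empty or non-numeric input falls back to a single day.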
answer = input("Press return to get entries of past day or input number of days to go back in time: ")
if answer == '':
days = 1
else:
try:
days = int(answer)
except:
print("You didn't enter a number, assuming 1 day.")
days = 1
return days | [
"def guessDown(self):\n self.guesses = self.guesses - 1",
"def days_back(i):\n yesterday = (datetime.now() - timedelta(i))\n return yesterday.strftime('%Y-%m-%d')",
"def remaining_days(requested_on: datetime.datetime, slo_limit: int) -> int:\n # Positive: There are days remaining.\n # Zero: The review is due today.\n # Negative: The review is overdue.\n return slo_limit - weekdays_between(requested_on, now_utc())",
"def input_number_days():\n print('How many days for your very accurate weather report?')\n return int(input())",
"def remaining_days_in_cycle(self) -> int:\n if not self.expiration:\n return 0\n delta = self.expiration - _today()\n return int(delta.days)",
"def decays(self):\n return self._base.decays",
"def days_until_positive(cls, value):\n assert value > 0, f'days until booking == {value}, must be > 0'\n return value",
"def get_day_delta():\n try:\n day_delta = int(input('Please enter the number of days of inactivity you would like to search for (i.e. 1, 2, 3, etc.) '))\n return (day_delta * 24)\n except ValueError:\n print(\"Uh-oh that wasn't a number please enter a whole number.\")\n return get_day_delta()",
"def Daysleftverification():\n pass",
"def calc_two_days_back():\n\n # Multiply the result of timestamp() from the standard library by 1000 and\n # round it to have no decimal places to match the timestamp format required\n # by the discovergy API\n return round((datetime.utcnow() - timedelta(hours=48)).timestamp() * 1000)",
"def now_minus(days: int):\n return NOW - datetime.timedelta(days=days)",
"def countdown(self, amt=1):\n pass",
"def calc_date():\n today = datetime.date.today()\n target = datetime.date.fromisoformat(\"2025-07-04\")\n days = target - today\n\n # Output the number of days\n print(\"\\n\", days.days, \" days until July 4th, 2025.\")\n input(\"\\nPress ENTER to proceed.\")",
"def getBugsToday(myDay):\r\n #set bugs_today as neg one to accept zero as an input\r\n bugs_today = -1\r\n while bugs_today < 0 :\r\n myBugs_Validation = (input(u'Enter the number of bugs collected on day ' + str(myDay) + ' : '))\r\n #call my getValidation to check values entered\r\n bugs_today = getValidation(myBugs_Validation)\r\n #check if user entered a valid number\r\n if bugs_today == -1:\r\n print('\\nPlease enter the number of bugs collected. \\nEnter a whole integer number >= 0')\r\n \r\n return bugs_today",
"def remaining_trial_days(self):\n try:\n return self.count_days_from_now(self.trial_ended_at)\n except AttributeError:\n return 0",
"def days_until(self, date_obj):\n if date_obj < date.today(): \n return\n return str((date_obj - date.today()).days)",
"def ex8() :\r\n print(\" - Date Calculator - \")\r\n import datetime\r\n today = datetime.date.today()\r\n print(today)\r\n try : #try catch method, in case user enters non-date, or 31st Feb etc.\r\n userDate = input(\"Please enter the date to check in a dd/mm/yy format: \") #userDate is string\r\n userDate = datetime.datetime.strptime(userDate, '%d/%m/%Y').date() #userDate is date_object\r\n if userDate < today : print(\"Invalid input, date is in the past\")\r\n elif userDate == today: print(\"That's today you dum-dum, answer is 0 days.\")\r\n else:\r\n delta = userDate - today #calculate difference\r\n delta = str(delta) #date_object don't work with split only str\r\n delta = delta.split(\",\") #unorthodox method to delete time (0:00:0) from the days\r\n print(\"The number of days between today (\",today,\") and entered date (\",userDate,\") are \",delta[0],\".\")\r\n except ValueError as e :\r\n print(\"Not a valid date.\")",
"def days_since_last_checkin(self):\n # TODO use local timezone\n checkin_date = (self.last_checkin - datetime.timedelta(hours=5)).date()\n today = datetime.date.today()\n return (today - checkin_date).days",
"def days_existed(self):\n td = dt.date.today()\n days_lived = (td-self.birthday).days\n print(f'You have lived {days_lived} days!')\n return days_lived"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Hacky way to check if this function already made a Toggl project based on a Zendesk ticket ID. | def already_created(self, ticket_id, toggl_projects):
project_prepends = [p['name'].split()[0][1:] for p in toggl_projects]
if str(ticket_id) in project_prepends:
return True
return False | [
"async def check_if_is_ticket(ctx):\n channel : TextChannel = ctx.channel\n return 'ticket-' in channel.name",
"def has_project(cls, win_id):\r\n\r\n project = cls.get_project(win_id)\r\n return True if project is not None else False",
"def project_exists(response, project):\n if response is None:\n try:\n warn(f\"No project with the name or UUID {project} was found.\")\n UUID(project)\n raise SystemError(\n \"The provided UUID does not match any projects found in SAS Model \"\n \"Manager. Please enter a valid UUID or a new name for a project to be \"\n \"created.\"\n )\n except ValueError:\n repo = mr.default_repository().get(\"id\")\n response = mr.create_project(project, repo)\n print(f\"A new project named {response.name} was created.\")\n return response\n else:\n return response",
"def is_project_tracked(cls, obj, win_id):\r\n\r\n return True if win_id is not None and win_id in obj.projects else False",
"def sync(self, no_of_days=1):\n zd = Zendesk()\n tg = Toggl()\n try:\n self.print(\"Syncing...\")\n self.print_divider(30)\n tickets = zd.get_tickets(no_of_days)\n for ticket in tickets:\n project_title = self.format_title(ticket.id, ticket.subject)\n if ticket.organization:\n client_id = tg.get_client_id(name=ticket.organization.name)\n if not client_id:\n new_client = tg.create_client(ticket.organization.name)\n client_id = new_client['id']\n else:\n client_id = False\n self.print(\"Ticket '%s' has no associated organization!\" % (project_title))\n all_projects = tg.get_projects()\n if not self.already_created(ticket.id, all_projects):\n self.print(\"Creating project '%s'...\" % (project_title))\n result = tg.create_project(project_title, client_id, is_private=False)\n self.print(\"Toggl response:\")\n self.log(result, silent=False)\n else:\n self.print(\"There is already a Toggl project for Zendesk ticket #%s!\" % ticket.id)\n pass\n # TODO: edit Toggl project\n # tg.edit_project(project_id, name=ticket.subject)\n self.print_divider(30)\n self.print(\"Done!\")\n except:\n self.log(traceback.format_exc(), silent=False)",
"def _check_geni_federation_status(request):\n\n geni_project_key = '%s|%s' % (settings.GENI_FEDERATION_PROJECTS['geni']['id'],\n settings.GENI_FEDERATION_PROJECTS['geni']['name'])\n\n on_geni_project = geni_project_key in request.session['openid']['ax']['projects']\n\n if on_geni_project:\n try:\n fed_proj = Project(settings.GENI_FEDERATION_PROJECTS['chameleon']['id'])\n on_chameleon_project = any(u.username == request.user.username \\\n for u in fed_proj.get_users())\n except:\n logger.warn('Could not locate Chameleon federation project: %s' % \\\n settings.GENI_FEDERATION_PROJECTS['chameleon'])\n on_chameleon_project = False\n else:\n on_chameleon_project = False\n\n return on_geni_project, on_chameleon_project",
"def checkifprojectexistonGitlab(self,pathnamelower):\n projects = self.projects.list(membership=True,all=True)\n for p in projects:\n #print(p.attributes['path_with_namespace'])\n #print(newpathnamelower)\n #if the project exists in Gltlab return the project and update the push rules\n if (p.attributes['path_with_namespace']==pathnamelower):\n return p \n #if the project does not exist on Gitlab return empty and create a new project \n return None",
"def project_exists(self, project_name):\r\n\t\treturn self.projects.find_one({\"project_name\": project_name})",
"def is_project_created(path):\n project_id = None\n try:\n with open(\"%s%sproject\"\n % (path, os.sep)) as project_file:\n project_id = project_file.readline().strip()\n try:\n project_id = bigml.api.get_project_id(\n project_id)\n return True, project_id\n except ValueError:\n return False, None\n except IOError:\n return False, None",
"def test_get_pending(self):\n pd_project = self._make_project(\n title='TestProject2',\n type=PROJECT_TYPE_PROJECT,\n parent=None,\n submit_status=SUBMIT_STATUS_PENDING_TASKFLOW,\n )\n\n request = self.req_factory.post(\n reverse('projectroles:taskflow_project_get'),\n data={\n 'project_uuid': str(pd_project.sodar_uuid),\n 'sodar_secret': settings.TASKFLOW_SODAR_SECRET,\n },\n )\n response = views.TaskflowProjectGetAPIView.as_view()(request)\n self.assertEqual(response.status_code, 404)",
"def _test_get_my_project(api, project_id):\n # https://xyz.here.com/studio/project/5c54716d-f900-4b89-80ac-b21518e94b30\n project = api.get_project(project_id=\"5c54716d-f900-4b89-80ac-b21518e94b30\")\n print(project)",
"def check_project_exists(cls, owner, project_name):\n project_exists = Project.objects.filter(\n owner=owner, project_name=project_name)\n if project_exists:\n return True\n return False",
"def check_project_exists(project):\n sql = 'SELECT * FROM \"projects\" WHERE lower(name) = \"%s\" ' % (project.lower())\n c.execute(sql)\n all_rows = c.fetchone()\n return len(all_rows) > 0",
"def is_project_exists(self, project, *args, **kwargs):\n jira_conn = kwargs.get('jira_conn')\n try:\n jira_conn.project(project.upper())\n except jira.JIRAError as e:\n if e.status_code == status_codes.NOT_FOUND:\n message = f\"Project key '{project.upper()}' does not exist\"\n raise JiraInfoException(message)\n else:\n message = e.text\n raise JiraReceivingDataException(f\"checking existence of project {project}\", message)",
"def create_project_if_necessary(ctx, org_name, project_name, ):\n org = cmd.get_one_organization_by_name(\n client=ctx.obj, organization_name=org_name)\n pprint(cmd.ensure_project(\n client=ctx.obj, project_name=project_name, organization_id=org.id))",
"def single_ticket(ticket_id):\n res = requests.get(\n f'https://{subdomain}.zendesk.com/api/v2/tickets/{ticket_id}',\n headers={'Authorization': auth_header})\n if res.status_code == requests.codes.ok:\n res_json = res.json()\n return render_template('single_ticket.html', res_json=res_json)\n else:\n return 'Uh oh! Looks like a classic Dinosaur Ate My Ticket situation!'",
"def is_pull_request(issue):\r\n return 'pull_request_url' in issue",
"def can_access_project(self, project: str):\n return any(int(project) == item[\"id\"] for item in self.response.json())",
"def create_project_if_not_exists(\n client: prefect.Client, project_name: str\n) -> None:\n r = client.graphql(\n 'query{project(where: {name: {_eq : \"Monitorenv\"}}){name}}'\n )\n projects = r[\"data\"][\"project\"]\n if len(projects) == 0:\n print(\"Monitorenv project does not exists, it will be created.\")\n client.create_project(project_name)\n elif len(projects) == 1:\n print(\"Monitorenv project already exists. Skipping project creation.\")\n else:\n raise ValueError(\n \"Several projects with the name 'Monitorenv' were found.\"\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Formats id and subject into a suitable (Freshbooks) title. | def format_title(self, ticket_id, subject):
# TODO: strip block tags?
title = "#%i %s" % (ticket_id, subject)
return title.strip() | [
"def get_subject_name(id):\n if id < 10:\n return 'sub-00{}'.format(id)\n elif id < 100:\n return 'sub-0{}'.format(id)\n else:\n return 'sub-{}'.format(id)",
"def BuildSubjectTitle(self):\n return u\"hunt %s\" % self.args.subject_urn.Basename()",
"def get_title_by_id(id):\n\n return get_title_by_id_from_table(data_manager.get_table_from_file(\"sales/sales.csv\"), id)",
"def _get_title(self, book_data: Dict[Any, Any]) -> str:\n title = book_data.get('title')\n subtitle = book_data.get('subtitle')\n if subtitle:\n title = f\"{title}: {subtitle}\"\n return title",
"def _get_id_title(self):\n\n # find first dot\n i_dot = self.row[0].find(\".\")\n\n # take title word out and strip whitespaces\n RV = self.row[0][:i_dot].replace(u'T\\xcdTULO', \"\").strip()\n\n return RV",
"def get_title(self):\n return \"{id}@{hn}\".format(id=self.model.identity, hn=self.model.hostname)",
"def get_title(self) -> str:\n user = self.get_object()\n return \"{friendly_name} #{id}\".format(\n friendly_name=user.friendly_name,\n id=self.get_object().id\n )",
"def get_full_title(self):\n title = '%s #%s' % (self.volume_name, self.issue_number)\n if self.name:\n title += ': %s' % self.name\n return title",
"def getMITItemTitle(self,xc,item,id):\n \n titles = xc.xpathEval(\"mitcp:title\")\n title = ''\n if titles:\n title = titles[0].getContent()\n else:\n title = id\n\n return title",
"def make_title(words):",
"def __display_title(book):\n\n out = book[\"title\"]\n if book.get(\"subtitle\"):\n out += f\": {book['subtitle']}\"\n if book.get(\"volume_number\") and book.get(\"fascicle_number\"):\n out += f\" (vol. {book['volume_number']['raw']}; fas. {book['fascicle_number']['raw']})\"\n elif book.get(\"volume_number\"):\n out += f\" (vol. {book['volume_number']['raw']})\"\n elif book.get(\"fascicle_number\"):\n out += f\" (fas. {book['fascicle_number']['raw']})\"\n if book.get(\"volume_title\"):\n out += f\" / {book['volume_title']}\"\n return out",
"def soup_committee_id(date: datetime, title: str) -> str:\n s = unidecode(title) # remove accents\n s = re.sub(r\"\\b'\\b\", \"\", s) # remove apostrophes\n s = s.lower().replace(\"&\", \"and\") # keep the \"&\" sign\n s = re.sub(r\"\\W+\", \" \", s) # remove punctuation, shrink whitespace\n s = s.strip().replace(\" \", \"-\") # replace spaces with \"-\"\n return date.strftime(\"%Y-%m-%d\") + \"-\" + s # prepend ISO date",
"def numbered_title(self):\n return f\"{self.title}\"",
"def get_book_title(self, book_id):\n return self.execute(queries.BOOK_ID_TO_TITLE, (book_id,)).fetchone()",
"def title(self):\n name = self.concept_spec.name\n name = name[0].upper() + name[1:]\n return name.replace('_', ' ').replace('-', ' ')",
"def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title",
"def _update_title(self, title, tag, lid):\n return title",
"def short_title(self):\n string = self.num_str() + ' '\n if self.not_empty('Authors'):\n name = self['Authors'][0].last\n elif self.not_empty('Editors'):\n name = self['Editors'][0].last\n else:\n name = 'noAuthor'\n\n string = string + name + ', ' + self['Title'][:20] + '\\n'\n return string",
"def _generate_title_description(psap_id, title, description):\n if description is None:\n description = PersistentFields.get_description(psap_id)\n else:\n PersistentFields.set_description(psap_id, description)\n if title is None:\n title = PersistentFields.get_title(psap_id)\n else:\n PersistentFields.set_title(psap_id, title)\n\n return title, description"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Formats Toggl project name and description into (Freshbooks) description. | def format_description(self, project_name, description):
description = description if description else ''
return "%s %s" % (project_name, '- ' + description) | [
"def parse_project_file(self, project_file):\n project_name = None\n lines = []\n with open(project_file, \"rt\") as f:\n for line in f:\n # Finding the project name\n if \"Short Project Name\" in line:\n project_name = line.split(\":\")[-1].strip(\" \").rstrip(\"\\n\")\n # Making things HTML compliant - Replace \\n with <br>\n line = line.replace(\"\\n\", \"<br>\")\n lines.append(line)\n description = (\"\").join(lines)\n if not project_name:\n log.fatal(('No \"Short Project Name\" provided in project file. '\n 'Cannot generate a Globus Group without one'))\n raise RuntimeError()\n return project_name, description",
"def project_description(self):\n return self._project_description",
"def skaff_description_get(short: bool=True) -> str:\n short_description = \"An Extensible Project Scaffolding Tool\"\n long_description = (\"Skaff is a Python library for building programming \"\n \"language dependent scaffolding of software projects, \"\n \"and a command-line tool that uses this library with \"\n \"built-in (CMake-based) C/C++ support.\")\n if short:\n return short_description\n else:\n return long_description",
"def __str__(self):\r\n proj_string = \" Project Name: \" + self.__name\r\n proj_string += \"\\n Cover Photo: \" + self.__cover_photo\r\n proj_string += \"\\n Links: \" + self.__links\r\n proj_string += \" Note: \" + self.__note\r\n proj_string += \" Photos: \" + list_str(self.__photos)\r\n\r\n return proj_string",
"def full_description(self):\n des = describe_dut(self.dut) if self.dut else ''\n if self.build:\n des += ' with ' + self.build\n if self.result_id:\n des += ' BVT result ID ' + str(self.result_id)\n return (self.description if self.description \n else 'unknown test') + ' on ' + des",
"def unique_project_description():\n return ''.join([str(uuid.uuid4())[:6] for num in range(30)])",
"def __str__(self):\n return_string = \"Project: {}-{}\".\\\n format(self.public_information[\"project_id\"],\n self.public_information[\"title\"])\n\n return return_string",
"def getProjectName():",
"def displaySummary(self):\r\n print('Project Name:' + self.project['name'])\r\n print('Project chip:' + self.project['chip'])\r\n print('Project includes: ' + ' '.join(self.project['incs']))\r\n print('Project defines: ' + ' '.join(self.project['defs']))\r\n print('Project srcs: ' + ' '.join(self.project['srcs']))",
"def Description(self) -> str:",
"def description(context, gene, entry):\n\n species = entry.species\n if entry.common_name:\n species += \" (%s)\" % entry.common_name\n\n gene_name = notes(gene)\n if gene_name and len(gene_name) == 1:\n gene_name = gene_name[0]\n if gene_name.endswith(\"]\"):\n gene_name = re.sub(r\"\\s*\\[.+\\]$\", \"\", gene_name)\n gene_name.strip()\n return \"{species} {gene_name}\".format(species=species, gene_name=gene_name)\n\n locus_tag = context.rfam_name(entry.locus_tag, entry.locus_tag or \"\")\n if locus_tag.startswith(\"RF\") and entry.optional_id:\n locus_tag = entry.optional_id.split(\".\")[0]\n\n assert entry.rna_type, \"Cannot build description without rna_type\"\n rna_type = entry.human_rna_type()\n if rna_type == locus_tag:\n locus_tag = \"\"\n return \"{species} {rna_type} {locus_tag}\".format(\n species=species,\n rna_type=rna_type,\n locus_tag=locus_tag,\n ).strip()",
"def get_descriptive_name(self):\n long_name = f\"{self.make} {self.model} {self.year}\"\n \n return long_name.title()",
"def describe(self) -> str:\n return (\n \"{name} {surname} è nata/o a {birth_municipality} ({birth_province_code}) il {birthdate}.\"\n \" Ora vive a {municipality} ({province_code}) in {address} {house_number}.\"\n ).format(**self._data)",
"def _create_title_from_description(product: Dict[str, Any]) -> str:\n if 'description' in product:\n title = f'{product[\"description\"][:_CHARS_TO_USE_WHEN_CREATING_TITLE].strip()}…'\n else:\n title = ''\n\n logging.info('Modified item %s: Created title: %s',\n product.get('offerId', ''), title)\n\n return title",
"def _postage_title(self, cube, label_mems, label_ref_dates):\n title = ''\n if label_mems:\n try:\n title += '%s: %s' % (self.realization.title(),\n cube.coord(self.realization).points[0])\n except:\n pass\n if label_ref_dates:\n try:\n time_unit = cube.coord(self.time_coord).units\n fcast_ref = cube_time_converter(\n cube.coord(self.forecast_ref_time).points[0],\n time_unit)\n title += '\\nInit. date: %s' % fcast_ref.isoformat()[:10]\n except:\n pass\n return title",
"def get_descriptive_name(self):\n long_name = f\"{self.year} {self.make} {self.model}\"\n return long_name.title()",
"def combined_description(desc1, desc2):\n description = desc1\n if desc2:\n description = '{0}_{1}'.format(desc1, desc2)\n\n return description",
"def\t__str__(self):\n\t\ttxt = f\"Recipe for {self.name}:\\n\" + f\"- cooking_lvl: {self.cooking_lvl}\\n\" +\\\n\t\tf\"- cooking_time: {self.cooking_time:.1f}\\n\" +\\\n\t\t\"- ingredients: {}\\n\".format(\" \".join(ingredient for ingredient in self.ingredients)) +\\\n\t\tf\"- recipe_type: {self.recipe_type}\"\n\t\tif self.description != \"\":\n\t\t\ttxt += \"\\n\" + self.description\n\t\treturn (txt)",
"def make_description(description):\n\n docbook_desc = \"<para>\\n\"\n first_line = 1\n\n for line in StringIO(description):\n if first_line:\n refpurpose = line\n first_line = 0\n if len(line.strip()) == 0:\n docbook_desc = docbook_desc + \"</para>\\n<para>\\n\"\n else:\n docbook_desc = docbook_desc + line\n\n docbook_desc = docbook_desc + \"\\n</para>\"\n return refpurpose, docbook_desc"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Merges Toggl time entries with the same project name. Sums duration if billable. | def merge_toggl_time_entries(self, time_entries):
tg = Toggl()
d = {}
for entry in time_entries:
if entry.get('billable'):
if entry.get('tags') and tg.BOOKED_TAG in entry['tags']:
status = 'booked'
else:
status = 'not-booked'
date = parser.parse(entry['start']).date()
if not entry.get('pid'):
self.log("Couldn't find associated project for entry: %s" % (str(entry)))
continue
unique_id = str(entry['pid']) + str(date) + status
if not entry.get('description'):
entry['description'] = ""
if d.get(unique_id):
d[unique_id]['duration'] += entry['duration']
d[unique_id]['merged_ids'].append(entry['id'])
if d[unique_id].get('description'):
if entry['description'].strip() not in d[unique_id]['description']:
d[unique_id]['description'] += ' / ' + entry['description']
else:
d[unique_id]['description'] = entry['description']
else:
entry['merged_ids'] = [entry['id']]
d[unique_id] = entry
return d.values() | [
"def work_timing_merge(self):\n work = self.work_timing()\n work[-2].merge(work[-1])\n work.pop()\n self.set_work_timing(work)",
"def _task_data(self):\n output = {\n 'all': [],\n 'open': [],\n 'open_hours': 0,\n 'done': [],\n 'done_hours': 0,\n 'week_done': [],\n 'week_done_hours': 0,\n 'week_due': [],\n 'week_due_hours': 0,\n 'velocity': [],\n 'velocity_hours': 0,\n 'velocity_count': 0,\n }\n\n last_sunday = SUNDAY - timedelta(weeks=1)\n three_weeks_ago = MONDAY - timedelta(weeks=4)\n\n tasks = Task.originals.owner_id(self.pk).order_by('due_dt')\n for t in tasks:\n output['all'].append(t)\n # process open tasks\n if not t.completed:\n output['open'].append(t)\n output['open_hours'] += t.task_time\n\n # Process done tasks\n else:\n output['done'].append(t)\n output['done_hours'] += t.task_time\n if t.completed_dt >= three_weeks_ago and t.completed_dt <= last_sunday:\n output['velocity'].append(t)\n output['velocity_hours'] += t.task_time\n\n if t.due_dt >= MONDAY and t.due_dt <= SUNDAY:\n output['week_due'].append(t)\n output['week_due_hours'] += t.task_time\n\n if t.completed and t.completed_dt >= MONDAY and t.completed_dt <= SUNDAY:\n output['week_done'].append(t)\n output['week_done_hours'] += t.task_time\n\n output['all_hours'] = output['open_hours'] + output['done_hours']\n\n # Extra calcs for the velocity\n output['velocity_count'] = len(output['velocity'])\n\n if output['velocity_hours'] > 0:\n output['velocity_hours'] = round(output['velocity_hours']/3,2)\n if output['velocity_count'] > 0:\n output['velocity_count'] = round(Decimal(output['velocity_count'])/3,2)\n\n return output",
"def _task_data(self):\n output = {\n 'all': [],\n 'all_hours': 0,\n 'open': [],\n 'open_hours': 0,\n 'done': [],\n 'done_hours': 0,\n }\n\n tasks = Task.originals.project_id(self.pk).order_by('due_dt')\n for t in tasks:\n # process open tasks\n if not t.completed:\n output['open'].append(t)\n output['open_hours'] += t.task_time\n\n # Process done tasks\n else:\n output['done'].append(t)\n output['done_hours'] += t.task_time\n\n # Included in the loop to keep the ordering\n output['all'].append(t)\n\n output['all_hours'] = output['open_hours'] + output['done_hours']\n\n return output",
"def merge(time_cards):\n merged_time_card = TimeCard(time_cards[0].id)\n keys = time_cards[0].timings.keys()\n num_parent_timings = time_cards[0].num_parent_timings\n for time_card in time_cards[1:]:\n if keys != time_card.timings.keys():\n raise Exception('Trying to merge TimeCards with different timing keys.'\n ' %s != %s' % (str(keys), str(time_card.timings.keys())))\n if num_parent_timings != time_card.num_parent_timings:\n raise Exception('Trying to merge TimeCards that were not forked '\n 'together. %d != %d' % (num_parent_timings,\n time_card.num_parent_timings))\n\n # sort the TimeCards based on their sub_ids, for later use\n time_cards = sorted(time_cards, key=lambda time_card: time_card.sub_id)\n\n for key_idx, key in enumerate(keys):\n if key_idx < num_parent_timings:\n # This is before the fork happened. All TimeCards will have the same\n # timings anyway, so just copy the entries from any TimeCard.\n merged_time_card.timings[key] = time_cards[0].timings[key]\n\n else:\n # This is after the fork happened. For each key afterwards, append them\n # with the sub_id of the TimeCards and add them as separate entries.\n for time_card in time_cards:\n sub_key = '%s-%s' % (key, time_card.sub_id)\n merged_time_card.timings[sub_key] = time_card.timings[key]\n\n # merge the gpu logs\n for gpu_per_time_card in zip(*[time_card.gpus for time_card in time_cards]):\n gpu_per_time_card = tuple(gpu for tpl in gpu_per_time_card for gpu in tpl)\n # gpu_per_time_card is a tuple of gpu indices, denoting the gpus that\n # were used for a single step, e.g.,\n # (1, 1, 1) --> all TimeCards passed through gpu 1 for this step\n # (1, 2, 3) --> TimeCard 1 used gpu 1, TimeCard 2 used gpu 2, ..\n if len(set(gpu_per_time_card)) == 1:\n # all TimeCards passed through the same gpu\n # simply store that single gpu index\n merged_time_card.gpus.append((gpu_per_time_card[0],))\n else:\n # TimeCards passed through different gpus\n # store the whole tuple\n merged_time_card.gpus.append(gpu_per_time_card)\n\n return merged_time_card",
"def summary(user, date, end_date):\n projects = utils.get_setting('TIMEPIECE_PAID_LEAVE_PROJECTS')\n entries = user.timepiece_entries.filter(\n end_time__gt=date, end_time__lt=end_date)\n data = {\n 'billable': Decimal('0'), 'non_billable': Decimal('0'),\n 'invoiced': Decimal('0'), 'uninvoiced': Decimal('0'),\n 'total': Decimal('0')\n }\n invoiced = entries.filter(\n status=Entry.INVOICED).aggregate(i=Sum('hours'))['i']\n uninvoiced = entries.exclude(\n status=Entry.INVOICED).aggregate(uninv=Sum('hours'))['uninv']\n total = entries.aggregate(s=Sum('hours'))['s']\n if invoiced:\n data['invoiced'] = invoiced\n if uninvoiced:\n data['uninvoiced'] = uninvoiced\n if total:\n data['total'] = total\n billable = entries.exclude(project__in=projects.values())\n billable = billable.values(\n 'billable',\n ).annotate(s=Sum('hours'))\n for row in billable:\n if row['billable']:\n data['billable'] += row['s']\n else:\n data['non_billable'] += row['s']\n data['total_worked'] = data['billable'] + data['non_billable']\n data['paid_leave'] = {}\n for name, pk in projects.iteritems():\n qs = entries.filter(project=projects[name])\n data['paid_leave'][name] = qs.aggregate(s=Sum('hours'))['s']\n return data",
"def addTimeWashed(df): \n # Calculate time washed of food (start of no food)\n time_washed = pd.DataFrame(df.groupby(['date_yyyymmdd'])['wormsorter_start_time'].min())\n time_washed = time_washed.reset_index(drop=False)\n time_washed.columns = ['date_yyyymmdd','time_washed']\n \n df = pd.merge(left=df, right=time_washed, on='date_yyyymmdd')\n \n return df",
"def switch_proj(self, proj_name, switch_time):\n switch_datetime = datetime.strptime(switch_time, \"%H:%M\")\n\n for proj in self.proj_list:\n if proj.end_time != None:\n proj.end_time = switch_datetime\n break\n\n proj_already_used = any(proj.proj_name == proj_name for\n proj in self.proj_list)\n \n if not proj_already_used:\n new_proj = Project(proj_name)\n new_proj.start_time = switch_datetime\n self.proj_list.append(new_proj)\n \n elif proj_already_used:\n for proj in self.proj_list:\n if proj.proj_name == proj_name:\n proj.start_time = switch_datetime\n break",
"def _get_open_projects_info():\n projects = Project.objects.filter(project_open=True).order_by(\"created_at\")\n projects_sum_hours = []\n for project in projects:\n time_entries_pro_project = TimeEntry.objects.filter(project=project)\n used_hours = _sum_hours(time_entries_pro_project)\n hours_percent = _calculate_hours_percent(used_hours, project.stimated_hours)\n projects_sum_hours.append(\n {\n \"hours_percent_number\": hours_percent,\n \"hours_percent\": f\"{hours_percent}%\",\n \"worked_hours\": used_hours,\n \"project\": project,\n }\n )\n return projects_sum_hours",
"def merge_arrival_and_completion_time(tests_dataframe):\r\n arrival_time_df = tests_dataframe[['time_test_arrives_lab', 'server_size']]\r\n completion_time_df = tests_dataframe[['completion_time', 'server_size']]\r\n arrival_time_df['add'] = 1\r\n completion_time_df['add'] = -1\r\n arrival_time_df = arrival_time_df.rename(columns={\"time_test_arrives_lab\":\"time\"})\r\n completion_time_df = completion_time_df.rename(columns={\"completion_time\":\"time\"})\r\n union = pd.concat([arrival_time_df, completion_time_df])\r\n union = union.sort_values(by=\"time\")\r\n prev_server_size = 0\r\n for index, row in union.iterrows():\r\n if index == 0:\r\n current_server_size= row['server_size'] + row['add']\r\n prev_server_size = current_server_size\r\n #union['server_size'] = union['server_size'] + union['add']\r\n else:\r\n current_server_size = prev_server_size + row['add'] \r\n prev_server_size = current_server_size\r\n union.at[index,'server_size'] = current_server_size\r\n #union.to_csv('union.csv')\r\n return union",
"def update_kippotaskstatus_hours_worked(\n projects: KippoProject, start_date: datetime.date = None, date_delta: timezone.timedelta = DEFAULT_HOURSWORKED_DATERANGE\n) -> List[KippoTaskStatus]:\n period_start_date = start_date - date_delta\n projects_map = {p.id: p for p in projects}\n # get KippoTaskStatus for KippoProjects given which are not yet updated\n statuses = KippoTaskStatus.objects.filter(task__project__in=projects, effort_date__gte=period_start_date).order_by(\"task\", \"effort_date\")\n\n task_taskstatuses = defaultdict(list)\n for status in statuses:\n task_taskstatuses[status.task.id].append(status) # expect to be in order\n\n updated_statuses = []\n for task_id, task_statuses in task_taskstatuses.items():\n for earlier_status, later_status in window(task_statuses, n=2):\n if earlier_status.estimate_days and later_status.estimate_days:\n if later_status.hours_spent is None:\n # update\n change_in_days = earlier_status.estimate_days - later_status.estimate_days\n logger.debug(f\"change_in_days: {change_in_days}\")\n if change_in_days >= 0: # ignore increases in estimates\n # calculate based on project work days\n project = projects_map[later_status.task.project.id]\n day_workhours = project.organization.day_workhours\n calculated_work_hours = change_in_days * day_workhours\n later_status.hours_spent = calculated_work_hours\n later_status.save()\n updated_statuses.append(later_status)\n logger.info(\n f\"({later_status.task.title} [{later_status.effort_date}]) \"\n f\"Updated KippoTaskStatus.hours_spent={calculated_work_hours}\"\n )\n else:\n logger.warning(\n f\"Estimate increased, KippoTaskStatus NOT updated: \"\n f\"{earlier_status.estimate_days} - {later_status.estimate_days} = {change_in_days}\"\n )\n return updated_statuses",
"def cal_group_actions(df,option):\r\n\r\n if option == 'precovid':\r\n print('This is the pre-pandemic period:')\r\n elif option == 'postcovid':\r\n print('This is the post-pandemic period:')\r\n\r\n A = df[df['mod_numEdits'] == 1]\r\n B = df[(df['mod_numEdits'] > 1) & (df['mod_numEdits'] <= 10)]\r\n C = df[(df['mod_numEdits'] > 10) & (df['mod_numEdits'] <= 100)]\r\n D = df[(df['mod_numEdits'] >= 100)]\r\n \r\n A.insert(11,'group','A')\r\n B.insert(11,'group','B')\r\n C.insert(11,'group','C')\r\n D.insert(11,'group','D')\r\n\r\n li_add_A = []\r\n li_upd_A = []\r\n li_rem_A = []\r\n\r\n li_add_B = []\r\n li_upd_B = []\r\n li_rem_B = []\r\n\r\n li_add_C = []\r\n li_upd_C = []\r\n li_rem_C = []\r\n\r\n li_add_D = []\r\n li_upd_D = []\r\n li_rem_D = []\r\n\r\n for userid in set(A.userId):\r\n \r\n li_add_A.append(len(A[(A['action'] == 'add') & (A['userId'] == userid)]))\r\n li_upd_A.append(len(A[(A['action'] == 'update') & (A['userId'] == userid)]))\r\n li_rem_A.append(len(A[(A['action'] == 'remove') & (A['userId'] == userid)]))\r\n \r\n for userid in set(B.userId):\r\n \r\n li_add_B.append(len(B[(B['action'] == 'add') & (B['userId'] == userid)]))\r\n li_upd_B.append(len(B[(B['action'] == 'update') & (B['userId'] == userid)]))\r\n li_rem_B.append(len(B[(B['action'] == 'remove') & (B['userId'] == userid)]))\r\n \r\n for userid in set(C.userId):\r\n \r\n li_add_C.append(len(C[(C['action'] == 'add') & (C['userId'] == userid)]))\r\n li_upd_C.append(len(C[(C['action'] == 'update') & (C['userId'] == userid)]))\r\n li_rem_C.append(len(C[(C['action'] == 'remove') & (C['userId'] == userid)]))\r\n\r\n for userid in set(D.userId):\r\n \r\n li_add_D.append(len(D[(D['action'] == 'add') & (D['userId'] == userid)]))\r\n li_upd_D.append(len(D[(D['action'] == 'update') & (D['userId'] == userid)]))\r\n li_rem_D.append(len(D[(D['action'] == 'remove') & (D['userId'] == userid)]))\r\n \r\n li_add = [li_add_A, li_add_B, li_add_C, li_add_D]\r\n li_upd = [li_upd_A, li_upd_B, li_upd_C, li_upd_D]\r\n li_rem = [li_rem_A, li_rem_B, li_rem_C, li_rem_D]\r\n\r\n print(f'the mean of li_add_A is:{round(np.mean(li_add_A, dtype=np.float64),2)}')\r\n print(f'the mean of li_add_B is:{round(np.mean(li_add_B, dtype=np.float64),2)}')\r\n print(f'the mean of li_add_C is:{round(np.mean(li_add_C, dtype=np.float64),2)}')\r\n print(f'the mean of li_add_D is:{round(np.mean(li_add_D, dtype=np.float64),2)}')\r\n\r\n print(f'the mean of li_upd_A is:{round(np.mean(li_upd_A, dtype=np.float64),2)}')\r\n print(f'the mean of li_upd_B is:{round(np.mean(li_upd_B, dtype=np.float64),2)}')\r\n print(f'the mean of li_upd_C is:{round(np.mean(li_upd_C, dtype=np.float64),2)}')\r\n print(f'the mean of li_upd_D is:{round(np.mean(li_upd_D, dtype=np.float64),2)}')\r\n\r\n print(f'the mean of li_rem_A is:{round(np.mean(li_rem_A, dtype=np.float64),2)}')\r\n print(f'the mean of li_rem_B is:{round(np.mean(li_rem_B, dtype=np.float64),2)}')\r\n print(f'the mean of li_rem_C is:{round(np.mean(li_rem_C, dtype=np.float64),2)}')\r\n print(f'the mean of li_rem_D is:{round(np.mean(li_rem_D, dtype=np.float64),2)}')\r\n\r\n return li_add, li_upd, li_rem",
"def collapse_using_timeStr(self):\n if self.modified == True:\n raise Exception('Probabilities already modified.\\nCollapsing after modification will lead to incorrect results.')\n timeUnits = np.array(process_time_string(self.timeStr))\n if len(self.timeslices) + 1 == np.sum(timeUnits):\n if timeUnits[-1] == 1:\n timeUnits = timeUnits[:-1]\n else:\n timeUnits[-1] -= 1\n if len(self.timeslices) != np.sum(timeUnits):\n raise Exception('Total number of timeslices is different.')\n ind = 0\n cnt = 0\n curr_rates = np.matrix(np.zeros((np.shape(self.obsRates)[0], len(timeUnits))))\n curr_times = []\n for i in timeUnits:\n curr_rates[:, cnt] = np.sum(self.obsRates[:, ind:ind + i], axis=1)\n curr_times.append(np.sum(self.timeslices[ind:ind + i]))\n ind += i\n cnt += 1\n\n self.obsRates = curr_rates\n self.timeslices = curr_times",
"def switch_project(project):\n # Get the data\n project = project.lower()\n lines, finished, last_project = parse_file(project=None)\n line1, i1, last1, _, times1 = parse_line(lines, last_project, finished)\n line2, i2, _, new2, times2 = parse_line(lines, project, True)\n now = datetime.now()\n\n # Format the data\n if not finished:\n punch1 = now - last1\n times1.append(punch1)\n punch1 = punch1.total_seconds()\n total1 = sum(t.total_seconds() for t in times1)\n total2 = sum(t.total_seconds() for t in times2)\n now = now.strftime(TIMEF)\n\n # Modifying the lines for the file\n lines[1] = HEADER1 + project\n if not finished:\n\n # Clock-Out\n line1[-1] += IN_OUT_SEP + now\n line1[1] = fnum(total1)\n line1 = PUNCH_SEP.join(line1)\n lines[i1] = line1\n\n # Clock-In\n line2.append(now)\n line2 = PUNCH_SEP.join(line2)\n if new2:\n lines.append(line2)\n else:\n lines[i2] = line2\n\n # Write to file\n with open(PUNCHES_PATH, 'w+') as f:\n f.write('\\n'.join(lines))\n\n # Report\n if new2:\n print(f\"Created Project: '{project}'\")\n if finished:\n print(f\"CURRENTLY CLOCKED OUT, Project Switched From: '{last_project}', To: '{project}'\")\n print(f\"NOW: {now}\")\n print(f\"'{last_project}' Total Hrs: {fnum(total1)}\")\n print(f\"'{project}' Total Hrs: {fnum(total2)}\")\n else:\n print(f\"CLOCK OUT, Project: '{last_project}'\")\n print(f\"CLOCK IN, Project: '{project}'\")\n print(f\"'{last_project}' IN: {last1.strftime(TIMEF)}, NOW: {now}\")\n print(f\"'{last_project}' Total Hrs: {fnum(total1)}, Current Punch: {fnum(punch1)}\")\n print(f\"'{project}' Total Hrs: {fnum(total2)}\")",
"def get_weekly_project_durations(self, week=0):\n\n # get the start and end of the desired week\n now = dt.datetime.now()\n monday = now.date() - dt.timedelta(days=now.weekday() + 7*week)\n nextmonday = monday + dt.timedelta(days=7)\n\n # get all jobs and associated projects for the selected week\n # there will be one row per job and associated project such that a job\n # which is assigned to two projects will also have two rows\n self.alog.dbcur.execute(\n 'WITH ja (id, start, dur, act) AS ('\n ' SELECT jobs.id, jobs.start, jobs.duration, activities.label '\n ' FROM jobs JOIN activities ON jobs.activity = activities.id '\n ' WHERE jobs.start >= ? AND jobs.start < ?) '\n 'SELECT ja.id, ja.start, ja.dur, ja.act, projects.label '\n 'FROM ja LEFT OUTER JOIN job_pj ON ja.id = job_pj.job '\n ' LEFT OUTER JOIN projects ON job_pj.project = projects.id',\n (monday, nextmonday))\n\n jobs = pd.DataFrame(self.alog.dbcur.fetchall(),\n columns=('id', 'start', 'duration', 'act',\n 'project'))\n\n # do the same thing for people, but do not select jobs here that have a\n # project associated with them\n # note that it's not necessary to outer join here, because I have already\n # got all the necessary information about jobs above\n self.alog.dbcur.execute(\n 'SELECT jobs.id, people.label '\n 'FROM jobs JOIN job_p, people '\n ' ON jobs.id = job_p.job AND job_p.person = people.id '\n 'WHERE jobs.start >= ? '\n ' AND jobs.start < ?'\n ' AND jobs.id NOT IN (SELECT job FROM job_pj)',\n (monday, nextmonday))\n\n j_p = pd.DataFrame(self.alog.dbcur.fetchall(),\n columns=('id', 'person'))\n\n # sort the people as projects into the job list\n ids = j_p.id.unique()\n for jid in ids:\n people = j_p[j_p.id == jid].person\n\n row = jobs[jobs.id == jid].copy()\n row.project = people.iloc[0]\n\n # add first person to the corresponding job\n jobs[jobs.id == jid] = row\n\n # if several people are associated with the job, add more rows to the\n # job list\n for person in people.values[1:]:\n row.project = person\n jobs = jobs.append(row, ignore_index=True)\n\n projects = pd.DataFrame(jobs.groupby('project').duration.sum(\n ).sort_values(ascending=False))\n acts = jobs.act.unique()\n\n for act in acts:\n projects[act] = 0\n\n for pj in projects.index:\n actdurs = jobs[jobs.project == pj].groupby('act').duration.sum()\n\n projects.loc[pj, actdurs.index] = actdurs\n\n # remove activities which did not occur in any of the projects\n # (these are project-independent activities)\n projects = projects.T[projects.sum() > 0].T\n\n return projects",
"def consolidate_timeline_action(temp_timd_timelines, action_type, sprking):\n\n # The dictionary of three timelines with only the types specified\n # in the function.\n simplified_timelines = {scout: [] for scout in temp_timd_timelines.keys()}\n\n # Takes the three different timelines and cuts out any types of\n # data points which are not the specified types.\n for scout, timeline in temp_timd_timelines.items():\n for action in timeline:\n if action.get('type') == action_type:\n simplified_timelines[scout].append(action)\n\n # For each action in each scouts list of actions, the time is\n # converted from a string to a float.\n for scout, simplified_timeline in simplified_timelines.items():\n for action in simplified_timeline:\n action['time'] = convert_float_time(action['time'])\n\n # Scouts to the amount of actions of the specified type are in the\n # timeline.\n count_timelines = {scout: len(timeline) for\n scout, timeline in simplified_timelines.items()}\n\n # Finds the majority amount of actions in the timeline to see\n # which amount of actions is the correct amount.\n majority_length = max_occurrences(count_timelines, sprking)\n\n # Creates a dictionary of scouts to their timelines which follow the\n # majority length of timeline.\n correct_length_timelines = {}\n for scout, timeline_length in count_timelines.items():\n if timeline_length == majority_length:\n correct_length_timelines[scout] = simplified_timelines[scout]\n\n # If there are scouts that don't agree with the majority timeline,\n # creates a time_reference to line up against.\n time_reference = {}\n if sprking in correct_length_timelines.keys():\n correct_scout = sprking\n else:\n correct_scout = list(correct_length_timelines.keys())[-1]\n reference_timeline = correct_length_timelines[correct_scout]\n time_reference[correct_scout] = [action['time'] for action in\n reference_timeline]\n\n # If there are scouts that do not agree with the correct timeline\n # length, find out which of their action times agree with the time\n # reference the best, and line it up against the reference in the\n # correct_length_timelines dictionary.\n for scout in simplified_timelines.keys():\n if scout not in correct_length_timelines.keys():\n correct_length_timelines[scout] = [{} for action in\n range(majority_length)]\n # In order to find the best option for timings, it sets\n # up a matrix of time differences between each action in\n # each tempTIMD.\n timings = np.zeros((len(simplified_timelines[scout]),\n majority_length))\n for false_index, false_action in \\\n enumerate(simplified_timelines[scout]):\n for comparison_index, comparison_action in \\\n enumerate(list(time_reference.values())[0]):\n timings[false_index][comparison_index] = \\\n abs(float(comparison_action) -\n float(false_action['time']))\n\n # Once the matrix of timing differences has been\n # created, the lowest difference is used to line up the\n # incorrect length timeline with the correct length\n # timeline. To avoid one action being compared with multiple\n # other actions, all other instances of the action (The row\n # and column) are set to 200 to signify that it has been\n # used. 
200 is used because it is higher than any possible\n # time difference.\n if timings.size > 0:\n # The loop runs until there are no more time differences\n # in the matrix less than 200.\n while timings.min() < 200:\n # lowest_index is in the format of ([y coordinate],\n # [x coordinate]), which requires lowest_index[1][0]\n # to get the x coordinate, and lowest_index[0][0]\n # for the y coordinate.\n lowest_index = np.where(timings == timings.min())\n correct_length_timelines[scout][lowest_index[1][0]] = \\\n simplified_timelines[scout][lowest_index[0][0]]\n timings[int(lowest_index[0][0])] = \\\n np.full([1, len(timings[0])], 200)\n for row in range(len(timings)):\n timings[row][int(lowest_index[1][0])] = 200\n\n final_simplified_timd = [{} for action in range(majority_length)]\n # Iterates through the sprking's timeline to compare all the actions.\n # If the majority 'type' for the action is None, the majority of\n # scouts did not record this action, and this action should not\n # appear in the consolidated TIMD.\n for action_index, action in enumerate(correct_length_timelines[sprking]):\n comparison_dict = {scout: timeline[action_index] for scout,\n timeline in correct_length_timelines.items()}\n types = {scout: action.get('type') for scout, action in\n comparison_dict.items()}\n if max_occurrences(types, sprking) is None:\n # Skips current iteration\n continue\n\n # Deletes scouts that did not record this action.\n for scout in list(comparison_dict):\n if comparison_dict[scout] == {}:\n comparison_dict.pop(scout)\n\n # All of the possible keys for a tempTIMD for this action.\n keys = set()\n for action in comparison_dict.values():\n for key in action.keys():\n keys.add(key)\n\n for key in keys:\n # For every key that isn't time, which can't realistically\n # have a majority, the majority opinion is set to the final\n # timd.\n scout_to_keys = {scout: action.get(key) for scout,\n action in comparison_dict.items()}\n\n if key == 'time':\n # If the key is time, finds the correct time using the\n # consolidate_times algorithm.\n final_simplified_timd[action_index]['time'] = \\\n consolidate_times(scout_to_keys)\n else:\n # For every key in the dictionary other than time, it just\n # takes the majority value for the key.\n final_simplified_timd[action_index][key] = \\\n max_occurrences(scout_to_keys, sprking)\n\n # Returns the final created timeline\n return final_simplified_timd",
"def mergetime(small, big):\n big['slots'].extend(small['slots'])\n big['times'].append(small)\n return big",
"def mergeTurnstileData(self):\n\t\t[ana_turnstiles_df, mauricio_turnstiles_df] = ReadTurnstilesDataBase.readTurnstileData()\n\t\tana_turnstiles_df = ReadTurnstilesDataBase.processAnaTurnstiles(ana_turnstiles_df)\n\n\t\tself.etapas_df = self.etapas_df.merge(ana_turnstiles_df, left_on = 'sitio_subida', right_on = 'sitio_subida', how='left')\n\t\tself.etapas_df = self.etapas_df.merge(mauricio_turnstiles_df, left_on = 'sitio_subida', right_on = 'sitio_subida' , suffixes=('_ana', '_mauricio'), how='left')\n\n#\t\tdel self.etapas_df['sitio_subida_ana']\n#\t\tdel self.etapas_df['sitio_subida_mauricio']\n\n\t\ttorniquetes_mariposa_conditions = (self.etapas_df.loc[:,'fecha_instalacion_ana'].dt.date<self.etapas_df.loc[:,'t_subida'].dt.date)\n\t\t\n\t\tself.etapas_df['min_fecha'] = pd.concat([self.etapas_df['fecha_instalacion_ana'], self.etapas_df['fecha_instalacion_mauricio']], axis=1).min(axis=1)\n\t\tno_torniquetes_conditions = (((self.etapas_df.loc[:,'fecha_instalacion_ana'].isnull()) & (self.etapas_df.loc[:,'fecha_instalacion_mauricio'].isnull())) | (self.etapas_df.loc[:,'t_subida'].dt.date<=self.etapas_df['min_fecha'].dt.date))\n\n\t\tself.etapas_df.loc[:,'torniquete_mariposa'] = np.where(torniquetes_mariposa_conditions,1,0)\n\t\tself.etapas_df.loc[:,'no_torniquete'] = np.where(no_torniquetes_conditions,1,0)",
"def consolidate_temp_timds(temp_timds):\n\n # 'sprking' is the scout with the best (lowest) SPR\n #TODO: Implement spr system\n sprking = list(temp_timds.keys())[0]\n\n final_timd = {}\n # Iterates through the keys of the best scout's tempTIMD and\n # consolidates each data_field one at a time.\n for data_field in list(temp_timds[sprking]):\n if data_field == 'timeline':\n # In order to compute the timeline properly, it is split\n # into a list of the timelines.\n timelines = {}\n for scout, temp_timd in temp_timds.items():\n temp_timeline = temp_timd.get('timeline', [])\n timelines[scout] = temp_timeline\n\n # If the list of timelines only includes one timeline, that\n # timeline is taken as the correct one and put into the\n # final TIMD.\n if len(timelines.values()) == 1:\n # Converts all times to floats and removes asterisk to\n # put it into the format of a timd.\n final_timeline = []\n for action in timelines[sprking]:\n action_time = action.get('time')\n # Takes the time before the asterisk, if there is no\n # asterisk, .split() still returns a list, a list of\n # only the time, meaning [0] works in both\n # instances.\n action['time'] = float(action_time.split('*')[0])\n final_timd['timeline'] = timelines[sprking]\n\n # If the list has more than one tempTIMD, the process for\n # computation has to be split up by each of the types of\n # actions in the timeline.\n else:\n # Creates the final timeline which is passed as the\n # timeline for the final timd at the end of\n # consolidation.\n final_timeline = []\n\n # Separates all the basic actions out and consolidates\n # them one at a time. All the actions are consolidated\n # separately so that the timings on each action are\n # split apart, making it more easy to line up, identify,\n # and consolidate the timeline.\n for action_type in ['pinningFoul', 'incap', 'unincap', \\\n 'drop', 'startDefense', 'endDefense', \\\n 'placement', 'intake']:\n final_timeline += consolidate_timeline_action(\n timelines, action_type, sprking)\n\n # Also consolidates climb separately in order to\n # separate it from intakes and placements. Climb needs a\n # separate function because of its relatively strange\n # structure.\n climb = climb_consolidation(timelines, sprking)\n if climb is not None:\n final_timeline.append(climb)\n\n # Deletes any blank actions.\n final_timeline = [action for action in final_timeline if\n action != {}]\n\n # Once the timeline is finally completed, it is sorted\n # by time, and added to the final timd.\n final_timd['timeline'] = sorted(final_timeline, \\\n key=lambda action: action['time'], reverse=True)\n\n # When consolidating non-timed keys, it is easy to consolidate\n # them, as you can simply find which value is the most common in\n # the set of three possibilities. The other data_fields that\n # are not included in this set, such as timerStarted, are scout\n # diagnostics, and not included in the final TIMD.\n elif data_field not in ['timeline', 'timerStarted',\n 'currentCycle', 'scoutID', 'scoutName',\n 'appVersion', 'assignmentMode',\n 'assignmentFileTimestamp',\n 'matchesNotScouted']:\n\n # Creates a dictionary of each scout to the key from their\n # tempTIMD to compare against each other. (Code note - This\n # code is using .get and not simply referencing the key out\n # of the dictionary because .get doesn't error out when the\n # key doesn't exist. 
Instead, .get returns NoneType).\n data_field_comparison_list = {}\n for scout, temp_timd in temp_timds.items():\n temp_data_field = temp_timd.get(data_field)\n if temp_data_field is not None:\n data_field_comparison_list[scout] = temp_data_field\n\n # Uses the max_occurrences function to find the correct value\n # for the data field.\n data_occurence_max = max_occurrences(\n data_field_comparison_list, sprking)\n\n final_timd[data_field] = data_occurence_max\n\n return final_timd",
"def set_work_timing(self, entries):\n self['Work'] = '\\n'.join(map(lambda x: x.to_ticket(), entries))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |