Dataset columns:
code: string (lengths 13 to 1.2M)
order_type: string (1 value)
original_example: dict
step_ids: list (lengths 1 to 5)
print("gggg")
print("gggg")
print("gggg")
normal
{ "blob_id": "b53294330a908f8a50d8fbb50b9c88e2bc6135a1", "index": 4124, "step-1": "<mask token>\n", "step-2": "print('gggg')\nprint('gggg')\nprint('gggg')\n", "step-3": "print(\"gggg\")\nprint(\"gggg\")\nprint(\"gggg\")\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
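The record above shows the columns in practice: code holds the plain source, order_type is always "normal" in this preview, original_example keeps the step-1 through step-5 variants of that source (earlier steps hide more of it behind <mask token>, unused steps are null) together with their step-ids, and step_ids repeats that list at the top level. As a minimal sketch of how such a record could be consumed (the record literal below is copied from the example above; the helper last_unmasked_step is a hypothetical name, not something the dataset defines), one way to pick out the most complete unmasked step:

# Sketch only: the record mirrors the first example above; the helper name is an assumption.
record = {
    "code": 'print("gggg")\nprint("gggg")\nprint("gggg")\n',
    "order_type": "normal",
    "original_example": {
        "blob_id": "b53294330a908f8a50d8fbb50b9c88e2bc6135a1",
        "index": 4124,
        "step-1": "<mask token>\n",
        "step-2": "print('gggg')\nprint('gggg')\nprint('gggg')\n",
        "step-3": 'print("gggg")\nprint("gggg")\nprint("gggg")\n',
        "step-4": None,
        "step-5": None,
        "step-ids": [0, 1, 2],
    },
    "step_ids": [0, 1, 2],
}


def last_unmasked_step(example):
    # Walk step-1 .. step-5 in order and keep the last value that is not null.
    steps = [example.get(f"step-{i}") for i in range(1, 6)]
    present = [s for s in steps if s is not None]
    return present[-1] if present else ""


print(last_unmasked_step(record["original_example"]))  # prints the step-3 code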
# Generated by Django 3.2.4 on 2021-06-18 01:20

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('eCom', '0014_auto_20210617_1503'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='order',
            name='items',
        ),
        migrations.AddField(
            model_name='order',
            name='items',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='eCom.orderitem'),
        ),
    ]
normal
{ "blob_id": "ef57f0dfea261f022ced36ef9e27a07d63c21026", "index": 2156, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('eCom', '0014_auto_20210617_1503')]\n operations = [migrations.RemoveField(model_name='order', name='items'),\n migrations.AddField(model_name='order', name='items', field=models.\n ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='eCom.orderitem'))]\n", "step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('eCom', '0014_auto_20210617_1503')]\n operations = [migrations.RemoveField(model_name='order', name='items'),\n migrations.AddField(model_name='order', name='items', field=models.\n ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='eCom.orderitem'))]\n", "step-5": "# Generated by Django 3.2.4 on 2021-06-18 01:20\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('eCom', '0014_auto_20210617_1503'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='order',\n name='items',\n ),\n migrations.AddField(\n model_name='order',\n name='items',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='eCom.orderitem'),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import abc


class Connector:

    """@abc.abstractmethod
    def connect(self):
        pass
    """

    @abc.abstractmethod
    def save(self, item):
        pass

    @abc.abstractmethod
    def load_all(self):
        pass

    @abc.abstractmethod
    def load_by_id(self, id):
        pass

    @abc.abstractmethod
    def update_item(self, item):
        pass

    @abc.abstractmethod
    def get_last_id(self):
        pass

    @abc.abstractmethod
    def get_done_items(self):
        pass

    """@abc.abstractmethod
    def close(self):
        pass"""
normal
{ "blob_id": "ac46aa6f8f4f01b6f3c48532533b9dd41a8a1c1c", "index": 7007, "step-1": "<mask token>\n\n\nclass Connector:\n <mask token>\n\n @abc.abstractmethod\n def save(self, item):\n pass\n\n @abc.abstractmethod\n def load_all(self):\n pass\n\n @abc.abstractmethod\n def load_by_id(self, id):\n pass\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Connector:\n <mask token>\n\n @abc.abstractmethod\n def save(self, item):\n pass\n\n @abc.abstractmethod\n def load_all(self):\n pass\n\n @abc.abstractmethod\n def load_by_id(self, id):\n pass\n <mask token>\n\n @abc.abstractmethod\n def get_last_id(self):\n pass\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Connector:\n <mask token>\n\n @abc.abstractmethod\n def save(self, item):\n pass\n\n @abc.abstractmethod\n def load_all(self):\n pass\n\n @abc.abstractmethod\n def load_by_id(self, id):\n pass\n\n @abc.abstractmethod\n def update_item(self, item):\n pass\n\n @abc.abstractmethod\n def get_last_id(self):\n pass\n\n @abc.abstractmethod\n def get_done_items(self):\n pass\n <mask token>\n", "step-4": "<mask token>\n\n\nclass Connector:\n \"\"\"@abc.abstractmethod\n def connect(self):\n pass\n \"\"\"\n\n @abc.abstractmethod\n def save(self, item):\n pass\n\n @abc.abstractmethod\n def load_all(self):\n pass\n\n @abc.abstractmethod\n def load_by_id(self, id):\n pass\n\n @abc.abstractmethod\n def update_item(self, item):\n pass\n\n @abc.abstractmethod\n def get_last_id(self):\n pass\n\n @abc.abstractmethod\n def get_done_items(self):\n pass\n \"\"\"@abc.abstractmethod\n def close(self):\n pass\"\"\"\n", "step-5": "import abc\r\n\r\n\r\nclass Connector:\r\n\r\n\r\n \"\"\"@abc.abstractmethod\r\n def connect(self):\r\n pass\r\n \"\"\"\r\n \r\n @abc.abstractmethod\r\n def save(self, item):\r\n pass\r\n \r\n\r\n @abc.abstractmethod\r\n def load_all(self):\r\n pass\r\n \r\n \r\n @abc.abstractmethod\r\n def load_by_id(self, id):\r\n pass\r\n\r\n \r\n @abc.abstractmethod\r\n def update_item(self, item):\r\n pass\r\n\r\n\r\n @abc.abstractmethod\r\n def get_last_id(self):\r\n pass\r\n\r\n\r\n @abc.abstractmethod\r\n def get_done_items(self):\r\n pass \r\n \r\n \r\n \"\"\"@abc.abstractmethod\r\n def close(self):\r\n pass\"\"\"\r\n\r\n \r\n\r\n", "step-ids": [ 4, 5, 7, 8, 10 ] }
[ 4, 5, 7, 8, 10 ]
import numpy as np import sys class NeuralNetworkClassifier(): def __init__(self, hidden_units, learning_rate, batch_size, epochs, l_1_beta_1, l_1_beta_2, l_2_alpha_1, l_2_alpha_2): self._hidden_units = hidden_units self._learning_rate = learning_rate self._batch_size = batch_size self._epochs = epochs self._l_1_beta_1 = l_1_beta_1 self._l_1_beta_2 = l_1_beta_2 self._l_2_alpha_1 = l_2_alpha_1 self._l_2_alpha_2 = l_2_alpha_2 def fit(self, X_train, Y_train): num_input_dimensions = X_train.shape[1] self._num_classes = Y_train.shape[1] training_set_size = X_train.shape[0] self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self._hidden_units, num_input_dimensions) self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self._num_classes, self._hidden_units) self._b_1 = 0.01 * np.ones((self._hidden_units, 1)) self._b_2 = 0.01 * np.ones((self._num_classes, 1)) for epoch in range(self._epochs): for batch_start in range(0, training_set_size, self._batch_size): batch_end = batch_start + self._batch_size X_batch = X_train[batch_start:batch_end] Y_batch = Y_train[batch_start:batch_end] num_examples = X_batch.shape[0] W_1_prime_total = 0 W_2_prime_total = 0 b_1_prime_total = 0 b_2_prime_total = 0 for i in range(num_examples): x = np.vstack(X_batch[i, :]) y = np.vstack(Y_batch[i, :]) z_1, h_1, y_hat = self._forward_propagation(x) W_1_prime, W_2_prime, b_1_prime, b_2_prime = self._backward_propagation(x, y, z_1, h_1, y_hat) W_1_prime_total += W_1_prime W_2_prime_total += W_2_prime b_1_prime_total += b_1_prime b_2_prime_total += b_2_prime self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total Y_hats = self.predict(X_batch) y_hat = self.predict(X_train) print("Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f" % (epoch + 1, self._epochs,self._cross_entropy_loss(Y_batch, Y_hats), self.score(Y_train, y_hat))) def _forward_propagation(self, x): z_1 = self._W_1.dot(x) + self._b_1 # print("_forward_propagation W_1=", self._W_1.shape) # print("_forward_propagation b_1=", self._b_1.shape) # print("_forward_propagation x=", x.shape) # print("_forward_propagation z=", z_1.shape) h_1 = self._relu(z_1) # print("_forward_propagation h_1=", h_1.shape) z_2 = self._W_2.dot(h_1) + self._b_2 # print("_forward_propagation z_2=", z_2.shape) y_hat = self._softmax(z_2) # print("_forward_propagation y_hat=", y_hat.shape) return z_1, h_1, y_hat def _backward_propagation(self, x, y, z_1, h_1, y_hat): df_dy = y_hat - y g = self._g(df_dy, self._W_2, z_1) W_1_prime = self._W_1_prime(x, g, self._W_1, self._l_2_alpha_1, self._l_1_beta_1) W_2_prime = self._W_2_prime(df_dy, h_1, self._W_2, self._l_2_alpha_2, self._l_1_beta_2) b_1_prime = self._learning_rate * self._b_1_prime(g) b_2_prime = self._learning_rate * self._b_2_prime(df_dy) return W_1_prime, W_2_prime, b_1_prime, b_2_prime def predict(self, X): num_examples = X.shape[0] Y_hat = np.zeros((num_examples, self._num_classes)) for i in range(num_examples): x = np.vstack(X[i, :]) _, _, y_hat = self._forward_propagation(x) Y_hat[i, :] = y_hat[:, 0] return Y_hat def _relu(self, x): return np.maximum(x, 0) def _relu_prime(self, x): y = np.zeros((x.shape[0], x.shape[1])) y[x > 0] = 1.0 return y def _softmax(self, Z): exp = np.exp(Z) total = np.sum(exp, axis=0) return exp / total def _g(self, df_dy, W_2, z_1): return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T def 
_W_2_prime(self, df_dy, h_1, W_2, alpha_2, beta_2): return df_dy.dot(h_1.T) + alpha_2 * W_2 + beta_2 * np.sign(W_2) def _b_2_prime(self, df_dy): return df_dy def _W_1_prime(self, x, g, W_1, alpha_1, beta_1): return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1) def _b_1_prime(self, g): return g def _l_1_loss(self, W): return np.sum(np.absolute(W)) def _l_2_loss(self, W): return 0.5 * np.linalg.norm(W) def _cross_entropy_loss(self, y, yhat): loss = 0 yhat_log = np.log(yhat.T) for i in range(len(y)): loss -= y[i, :].dot(yhat_log[:, i]) l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1) + self._l_1_beta_2 * self._l_1_loss(self._W_2) l_2_regularization = self._l_2_alpha_1 * self._l_2_loss(self._W_1) + self._l_2_alpha_2 * self._l_2_loss(self._W_2) return loss + l_1_regularization + l_2_regularization def _toClassIndices(self, probabilities): return np.argmax(probabilities, axis=1) def loss(self, testing_labels, predicted_labels): return 0 def score(self, expected_labels, predicted_labels): return np.mean(self._toClassIndices(expected_labels) == self._toClassIndices(predicted_labels)) def describe_hyperparameters(hyperparameters): return "\nHidden Units: {0} Learning Rate: {1} Minibatch Size: {2} Epochs: {3} L1 Strength: {4} L2 Strength: {5}".format( hyperparameters[0], hyperparameters[1], hyperparameters[2], hyperparameters[3], hyperparameters[4], hyperparameters[5]) def findBestHyperparameters(training_images, training_labels, validation_images, validation_labels): print("Start training...") print() all_hidden_units = [20, 20, 30, 30, 40, 40, 50, 50, 60, 30] all_learning_rates = [0.0001, 0.001, 0.01, 0.01, 0.01, 0.02, 0.02, 0.1, 0.2, 0.007] all_minibatch_sizes = [2, 5, 10, 10, 20, 20, 100, 50, 50, 25] all_num_epochs = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3] all_l1_strengths = [0.0, 0.0, 0, 0.01, 0.0, 0.001, 0.01, 0.02, 0.01, 0.001] all_l2_strengths = [0.0, 0.01, 0.001, 0.0, 0.01, 0.001, 0.01, 0.02, 0.01, 0.001] best_accuracy = 0 best_hyperparamters = [] for i in range(10): hyperparameters = (all_hidden_units[slice_start+i], all_learning_rates[slice_start+i], all_minibatch_sizes[slice_start+i], all_num_epochs[slice_start+i], all_l1_strengths[slice_start+i], all_l2_strengths[slice_start+i]) print(describe_hyperparameters(hyperparameters)) clf = NeuralNetworkClassifier( hidden_units = hyperparameters[0], learning_rate = hyperparameters[1], batch_size = hyperparameters[2], epochs = hyperparameters[3], l_1_beta_1 = hyperparameters[4], l_1_beta_2 = hyperparameters[4], l_2_alpha_1 = hyperparameters[5], l_2_alpha_2 = hyperparameters[5]) clf.fit(training_images, training_labels) predicted_labels = clf.predict(validation_images) accuracy = clf.score(validation_labels, predicted_labels) print("Accuracy: %f" % accuracy) print("Cross Entropy Loss = %.2f" % (clf.loss(validation_labels, predicted_labels))) if(accuracy > best_accuracy): best_accuracy = accuracy best_hyperparamters = hyperparameters print("Found new best hyperparameters.") print("\n") print(describe_hyperparameters(best_hyperparamters)) return best_hyperparamters def main(): training_images = np.load("mnist_train_images.npy") training_labels = np.load("mnist_train_labels.npy") testing_images = np.load("mnist_test_images.npy") testing_labels = np.load("mnist_test_labels.npy") validation_images = np.load("mnist_validation_images.npy") validation_labels = np.load("mnist_validation_labels.npy") parameters = findBestHyperparameters(training_images[0:16000, :], training_labels[0:16000, :], validation_images, validation_labels) clf = 
NeuralNetworkClassifier(hidden_units=parameters[0], learning_rate=parameters[1], batch_size=parameters[2], epochs=parameters[3], l_1_beta_1=parameters[4], l_1_beta_2=parameters[4], l_2_alpha_1=parameters[5], l_2_alpha_2=parameters[5]) clf.fit(training_images, training_labels) predicted_labels = clf.predict(testing_images) if __name__ == "__main__": if len(sys.argv) != 1: print("Usage: python3 digit_recognizer.py") exit() main()
normal
{ "blob_id": "6199a2ac12e80395f4a7a54877c5b639315e64aa", "index": 7702, "step-1": "<mask token>\n\n\nclass NeuralNetworkClassifier:\n <mask token>\n\n def fit(self, X_train, Y_train):\n num_input_dimensions = X_train.shape[1]\n self._num_classes = Y_train.shape[1]\n training_set_size = X_train.shape[0]\n self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self.\n _hidden_units, num_input_dimensions)\n self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self.\n _num_classes, self._hidden_units)\n self._b_1 = 0.01 * np.ones((self._hidden_units, 1))\n self._b_2 = 0.01 * np.ones((self._num_classes, 1))\n for epoch in range(self._epochs):\n for batch_start in range(0, training_set_size, self._batch_size):\n batch_end = batch_start + self._batch_size\n X_batch = X_train[batch_start:batch_end]\n Y_batch = Y_train[batch_start:batch_end]\n num_examples = X_batch.shape[0]\n W_1_prime_total = 0\n W_2_prime_total = 0\n b_1_prime_total = 0\n b_2_prime_total = 0\n for i in range(num_examples):\n x = np.vstack(X_batch[i, :])\n y = np.vstack(Y_batch[i, :])\n z_1, h_1, y_hat = self._forward_propagation(x)\n W_1_prime, W_2_prime, b_1_prime, b_2_prime = (self.\n _backward_propagation(x, y, z_1, h_1, y_hat))\n W_1_prime_total += W_1_prime\n W_2_prime_total += W_2_prime\n b_1_prime_total += b_1_prime\n b_2_prime_total += b_2_prime\n self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total\n self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total\n self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total\n self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total\n Y_hats = self.predict(X_batch)\n y_hat = self.predict(X_train)\n print('Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f' % (\n epoch + 1, self._epochs, self._cross_entropy_loss(Y_batch,\n Y_hats), self.score(Y_train, y_hat)))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _relu_prime(self, x):\n y = np.zeros((x.shape[0], x.shape[1]))\n y[x > 0] = 1.0\n return y\n\n def _softmax(self, Z):\n exp = np.exp(Z)\n total = np.sum(exp, axis=0)\n return exp / total\n\n def _g(self, df_dy, W_2, z_1):\n return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T\n <mask token>\n <mask token>\n\n def _W_1_prime(self, x, g, W_1, alpha_1, beta_1):\n return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1)\n\n def _b_1_prime(self, g):\n return g\n <mask token>\n\n def _l_2_loss(self, W):\n return 0.5 * np.linalg.norm(W)\n\n def _cross_entropy_loss(self, y, yhat):\n loss = 0\n yhat_log = np.log(yhat.T)\n for i in range(len(y)):\n loss -= y[i, :].dot(yhat_log[:, i])\n l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1\n ) + self._l_1_beta_2 * self._l_1_loss(self._W_2)\n l_2_regularization = self._l_2_alpha_1 * self._l_2_loss(self._W_1\n ) + self._l_2_alpha_2 * self._l_2_loss(self._W_2)\n return loss + l_1_regularization + l_2_regularization\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass NeuralNetworkClassifier:\n <mask token>\n\n def fit(self, X_train, Y_train):\n num_input_dimensions = X_train.shape[1]\n self._num_classes = Y_train.shape[1]\n training_set_size = X_train.shape[0]\n self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self.\n _hidden_units, num_input_dimensions)\n self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self.\n _num_classes, self._hidden_units)\n self._b_1 = 0.01 * np.ones((self._hidden_units, 1))\n self._b_2 = 0.01 * np.ones((self._num_classes, 1))\n for epoch in range(self._epochs):\n for 
batch_start in range(0, training_set_size, self._batch_size):\n batch_end = batch_start + self._batch_size\n X_batch = X_train[batch_start:batch_end]\n Y_batch = Y_train[batch_start:batch_end]\n num_examples = X_batch.shape[0]\n W_1_prime_total = 0\n W_2_prime_total = 0\n b_1_prime_total = 0\n b_2_prime_total = 0\n for i in range(num_examples):\n x = np.vstack(X_batch[i, :])\n y = np.vstack(Y_batch[i, :])\n z_1, h_1, y_hat = self._forward_propagation(x)\n W_1_prime, W_2_prime, b_1_prime, b_2_prime = (self.\n _backward_propagation(x, y, z_1, h_1, y_hat))\n W_1_prime_total += W_1_prime\n W_2_prime_total += W_2_prime\n b_1_prime_total += b_1_prime\n b_2_prime_total += b_2_prime\n self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total\n self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total\n self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total\n self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total\n Y_hats = self.predict(X_batch)\n y_hat = self.predict(X_train)\n print('Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f' % (\n epoch + 1, self._epochs, self._cross_entropy_loss(Y_batch,\n Y_hats), self.score(Y_train, y_hat)))\n\n def _forward_propagation(self, x):\n z_1 = self._W_1.dot(x) + self._b_1\n h_1 = self._relu(z_1)\n z_2 = self._W_2.dot(h_1) + self._b_2\n y_hat = self._softmax(z_2)\n return z_1, h_1, y_hat\n\n def _backward_propagation(self, x, y, z_1, h_1, y_hat):\n df_dy = y_hat - y\n g = self._g(df_dy, self._W_2, z_1)\n W_1_prime = self._W_1_prime(x, g, self._W_1, self._l_2_alpha_1,\n self._l_1_beta_1)\n W_2_prime = self._W_2_prime(df_dy, h_1, self._W_2, self.\n _l_2_alpha_2, self._l_1_beta_2)\n b_1_prime = self._learning_rate * self._b_1_prime(g)\n b_2_prime = self._learning_rate * self._b_2_prime(df_dy)\n return W_1_prime, W_2_prime, b_1_prime, b_2_prime\n\n def predict(self, X):\n num_examples = X.shape[0]\n Y_hat = np.zeros((num_examples, self._num_classes))\n for i in range(num_examples):\n x = np.vstack(X[i, :])\n _, _, y_hat = self._forward_propagation(x)\n Y_hat[i, :] = y_hat[:, 0]\n return Y_hat\n <mask token>\n\n def _relu_prime(self, x):\n y = np.zeros((x.shape[0], x.shape[1]))\n y[x > 0] = 1.0\n return y\n\n def _softmax(self, Z):\n exp = np.exp(Z)\n total = np.sum(exp, axis=0)\n return exp / total\n\n def _g(self, df_dy, W_2, z_1):\n return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T\n <mask token>\n <mask token>\n\n def _W_1_prime(self, x, g, W_1, alpha_1, beta_1):\n return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1)\n\n def _b_1_prime(self, g):\n return g\n\n def _l_1_loss(self, W):\n return np.sum(np.absolute(W))\n\n def _l_2_loss(self, W):\n return 0.5 * np.linalg.norm(W)\n\n def _cross_entropy_loss(self, y, yhat):\n loss = 0\n yhat_log = np.log(yhat.T)\n for i in range(len(y)):\n loss -= y[i, :].dot(yhat_log[:, i])\n l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1\n ) + self._l_1_beta_2 * self._l_1_loss(self._W_2)\n l_2_regularization = self._l_2_alpha_1 * self._l_2_loss(self._W_1\n ) + self._l_2_alpha_2 * self._l_2_loss(self._W_2)\n return loss + l_1_regularization + l_2_regularization\n\n def _toClassIndices(self, probabilities):\n return np.argmax(probabilities, axis=1)\n\n def loss(self, testing_labels, predicted_labels):\n return 0\n <mask token>\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass NeuralNetworkClassifier:\n <mask token>\n\n def fit(self, X_train, Y_train):\n num_input_dimensions = X_train.shape[1]\n self._num_classes = Y_train.shape[1]\n training_set_size = X_train.shape[0]\n 
self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self.\n _hidden_units, num_input_dimensions)\n self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self.\n _num_classes, self._hidden_units)\n self._b_1 = 0.01 * np.ones((self._hidden_units, 1))\n self._b_2 = 0.01 * np.ones((self._num_classes, 1))\n for epoch in range(self._epochs):\n for batch_start in range(0, training_set_size, self._batch_size):\n batch_end = batch_start + self._batch_size\n X_batch = X_train[batch_start:batch_end]\n Y_batch = Y_train[batch_start:batch_end]\n num_examples = X_batch.shape[0]\n W_1_prime_total = 0\n W_2_prime_total = 0\n b_1_prime_total = 0\n b_2_prime_total = 0\n for i in range(num_examples):\n x = np.vstack(X_batch[i, :])\n y = np.vstack(Y_batch[i, :])\n z_1, h_1, y_hat = self._forward_propagation(x)\n W_1_prime, W_2_prime, b_1_prime, b_2_prime = (self.\n _backward_propagation(x, y, z_1, h_1, y_hat))\n W_1_prime_total += W_1_prime\n W_2_prime_total += W_2_prime\n b_1_prime_total += b_1_prime\n b_2_prime_total += b_2_prime\n self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total\n self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total\n self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total\n self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total\n Y_hats = self.predict(X_batch)\n y_hat = self.predict(X_train)\n print('Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f' % (\n epoch + 1, self._epochs, self._cross_entropy_loss(Y_batch,\n Y_hats), self.score(Y_train, y_hat)))\n\n def _forward_propagation(self, x):\n z_1 = self._W_1.dot(x) + self._b_1\n h_1 = self._relu(z_1)\n z_2 = self._W_2.dot(h_1) + self._b_2\n y_hat = self._softmax(z_2)\n return z_1, h_1, y_hat\n\n def _backward_propagation(self, x, y, z_1, h_1, y_hat):\n df_dy = y_hat - y\n g = self._g(df_dy, self._W_2, z_1)\n W_1_prime = self._W_1_prime(x, g, self._W_1, self._l_2_alpha_1,\n self._l_1_beta_1)\n W_2_prime = self._W_2_prime(df_dy, h_1, self._W_2, self.\n _l_2_alpha_2, self._l_1_beta_2)\n b_1_prime = self._learning_rate * self._b_1_prime(g)\n b_2_prime = self._learning_rate * self._b_2_prime(df_dy)\n return W_1_prime, W_2_prime, b_1_prime, b_2_prime\n\n def predict(self, X):\n num_examples = X.shape[0]\n Y_hat = np.zeros((num_examples, self._num_classes))\n for i in range(num_examples):\n x = np.vstack(X[i, :])\n _, _, y_hat = self._forward_propagation(x)\n Y_hat[i, :] = y_hat[:, 0]\n return Y_hat\n\n def _relu(self, x):\n return np.maximum(x, 0)\n\n def _relu_prime(self, x):\n y = np.zeros((x.shape[0], x.shape[1]))\n y[x > 0] = 1.0\n return y\n\n def _softmax(self, Z):\n exp = np.exp(Z)\n total = np.sum(exp, axis=0)\n return exp / total\n\n def _g(self, df_dy, W_2, z_1):\n return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T\n\n def _W_2_prime(self, df_dy, h_1, W_2, alpha_2, beta_2):\n return df_dy.dot(h_1.T) + alpha_2 * W_2 + beta_2 * np.sign(W_2)\n\n def _b_2_prime(self, df_dy):\n return df_dy\n\n def _W_1_prime(self, x, g, W_1, alpha_1, beta_1):\n return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1)\n\n def _b_1_prime(self, g):\n return g\n\n def _l_1_loss(self, W):\n return np.sum(np.absolute(W))\n\n def _l_2_loss(self, W):\n return 0.5 * np.linalg.norm(W)\n\n def _cross_entropy_loss(self, y, yhat):\n loss = 0\n yhat_log = np.log(yhat.T)\n for i in range(len(y)):\n loss -= y[i, :].dot(yhat_log[:, i])\n l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1\n ) + self._l_1_beta_2 * self._l_1_loss(self._W_2)\n l_2_regularization = self._l_2_alpha_1 * 
self._l_2_loss(self._W_1\n ) + self._l_2_alpha_2 * self._l_2_loss(self._W_2)\n return loss + l_1_regularization + l_2_regularization\n\n def _toClassIndices(self, probabilities):\n return np.argmax(probabilities, axis=1)\n\n def loss(self, testing_labels, predicted_labels):\n return 0\n\n def score(self, expected_labels, predicted_labels):\n return np.mean(self._toClassIndices(expected_labels) == self.\n _toClassIndices(predicted_labels))\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass NeuralNetworkClassifier:\n\n def __init__(self, hidden_units, learning_rate, batch_size, epochs,\n l_1_beta_1, l_1_beta_2, l_2_alpha_1, l_2_alpha_2):\n self._hidden_units = hidden_units\n self._learning_rate = learning_rate\n self._batch_size = batch_size\n self._epochs = epochs\n self._l_1_beta_1 = l_1_beta_1\n self._l_1_beta_2 = l_1_beta_2\n self._l_2_alpha_1 = l_2_alpha_1\n self._l_2_alpha_2 = l_2_alpha_2\n\n def fit(self, X_train, Y_train):\n num_input_dimensions = X_train.shape[1]\n self._num_classes = Y_train.shape[1]\n training_set_size = X_train.shape[0]\n self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self.\n _hidden_units, num_input_dimensions)\n self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self.\n _num_classes, self._hidden_units)\n self._b_1 = 0.01 * np.ones((self._hidden_units, 1))\n self._b_2 = 0.01 * np.ones((self._num_classes, 1))\n for epoch in range(self._epochs):\n for batch_start in range(0, training_set_size, self._batch_size):\n batch_end = batch_start + self._batch_size\n X_batch = X_train[batch_start:batch_end]\n Y_batch = Y_train[batch_start:batch_end]\n num_examples = X_batch.shape[0]\n W_1_prime_total = 0\n W_2_prime_total = 0\n b_1_prime_total = 0\n b_2_prime_total = 0\n for i in range(num_examples):\n x = np.vstack(X_batch[i, :])\n y = np.vstack(Y_batch[i, :])\n z_1, h_1, y_hat = self._forward_propagation(x)\n W_1_prime, W_2_prime, b_1_prime, b_2_prime = (self.\n _backward_propagation(x, y, z_1, h_1, y_hat))\n W_1_prime_total += W_1_prime\n W_2_prime_total += W_2_prime\n b_1_prime_total += b_1_prime\n b_2_prime_total += b_2_prime\n self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total\n self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total\n self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total\n self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total\n Y_hats = self.predict(X_batch)\n y_hat = self.predict(X_train)\n print('Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f' % (\n epoch + 1, self._epochs, self._cross_entropy_loss(Y_batch,\n Y_hats), self.score(Y_train, y_hat)))\n\n def _forward_propagation(self, x):\n z_1 = self._W_1.dot(x) + self._b_1\n h_1 = self._relu(z_1)\n z_2 = self._W_2.dot(h_1) + self._b_2\n y_hat = self._softmax(z_2)\n return z_1, h_1, y_hat\n\n def _backward_propagation(self, x, y, z_1, h_1, y_hat):\n df_dy = y_hat - y\n g = self._g(df_dy, self._W_2, z_1)\n W_1_prime = self._W_1_prime(x, g, self._W_1, self._l_2_alpha_1,\n self._l_1_beta_1)\n W_2_prime = self._W_2_prime(df_dy, h_1, self._W_2, self.\n _l_2_alpha_2, self._l_1_beta_2)\n b_1_prime = self._learning_rate * self._b_1_prime(g)\n b_2_prime = self._learning_rate * self._b_2_prime(df_dy)\n return W_1_prime, W_2_prime, b_1_prime, b_2_prime\n\n def predict(self, X):\n num_examples = X.shape[0]\n Y_hat = np.zeros((num_examples, self._num_classes))\n for i in range(num_examples):\n x = np.vstack(X[i, :])\n _, _, y_hat = self._forward_propagation(x)\n Y_hat[i, :] = y_hat[:, 0]\n return Y_hat\n\n def _relu(self, x):\n return 
np.maximum(x, 0)\n\n def _relu_prime(self, x):\n y = np.zeros((x.shape[0], x.shape[1]))\n y[x > 0] = 1.0\n return y\n\n def _softmax(self, Z):\n exp = np.exp(Z)\n total = np.sum(exp, axis=0)\n return exp / total\n\n def _g(self, df_dy, W_2, z_1):\n return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T\n\n def _W_2_prime(self, df_dy, h_1, W_2, alpha_2, beta_2):\n return df_dy.dot(h_1.T) + alpha_2 * W_2 + beta_2 * np.sign(W_2)\n\n def _b_2_prime(self, df_dy):\n return df_dy\n\n def _W_1_prime(self, x, g, W_1, alpha_1, beta_1):\n return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1)\n\n def _b_1_prime(self, g):\n return g\n\n def _l_1_loss(self, W):\n return np.sum(np.absolute(W))\n\n def _l_2_loss(self, W):\n return 0.5 * np.linalg.norm(W)\n\n def _cross_entropy_loss(self, y, yhat):\n loss = 0\n yhat_log = np.log(yhat.T)\n for i in range(len(y)):\n loss -= y[i, :].dot(yhat_log[:, i])\n l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1\n ) + self._l_1_beta_2 * self._l_1_loss(self._W_2)\n l_2_regularization = self._l_2_alpha_1 * self._l_2_loss(self._W_1\n ) + self._l_2_alpha_2 * self._l_2_loss(self._W_2)\n return loss + l_1_regularization + l_2_regularization\n\n def _toClassIndices(self, probabilities):\n return np.argmax(probabilities, axis=1)\n\n def loss(self, testing_labels, predicted_labels):\n return 0\n\n def score(self, expected_labels, predicted_labels):\n return np.mean(self._toClassIndices(expected_labels) == self.\n _toClassIndices(predicted_labels))\n\n\n<mask token>\n\n\ndef main():\n training_images = np.load('mnist_train_images.npy')\n training_labels = np.load('mnist_train_labels.npy')\n testing_images = np.load('mnist_test_images.npy')\n testing_labels = np.load('mnist_test_labels.npy')\n validation_images = np.load('mnist_validation_images.npy')\n validation_labels = np.load('mnist_validation_labels.npy')\n parameters = findBestHyperparameters(training_images[0:16000, :],\n training_labels[0:16000, :], validation_images, validation_labels)\n clf = NeuralNetworkClassifier(hidden_units=parameters[0], learning_rate\n =parameters[1], batch_size=parameters[2], epochs=parameters[3],\n l_1_beta_1=parameters[4], l_1_beta_2=parameters[4], l_2_alpha_1=\n parameters[5], l_2_alpha_2=parameters[5])\n clf.fit(training_images, training_labels)\n predicted_labels = clf.predict(testing_images)\n\n\n<mask token>\n", "step-5": "import numpy as np\nimport sys\n\n\nclass NeuralNetworkClassifier():\n def __init__(self, hidden_units, learning_rate, batch_size, epochs, l_1_beta_1, l_1_beta_2, l_2_alpha_1, l_2_alpha_2):\n self._hidden_units = hidden_units\n self._learning_rate = learning_rate\n self._batch_size = batch_size\n self._epochs = epochs\n self._l_1_beta_1 = l_1_beta_1\n self._l_1_beta_2 = l_1_beta_2\n self._l_2_alpha_1 = l_2_alpha_1\n self._l_2_alpha_2 = l_2_alpha_2\n\n def fit(self, X_train, Y_train):\n num_input_dimensions = X_train.shape[1]\n self._num_classes = Y_train.shape[1]\n training_set_size = X_train.shape[0]\n\n self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self._hidden_units, num_input_dimensions)\n self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self._num_classes, self._hidden_units)\n self._b_1 = 0.01 * np.ones((self._hidden_units, 1))\n self._b_2 = 0.01 * np.ones((self._num_classes, 1))\n\n for epoch in range(self._epochs):\n for batch_start in range(0, training_set_size, self._batch_size):\n batch_end = batch_start + self._batch_size\n X_batch = X_train[batch_start:batch_end]\n Y_batch = Y_train[batch_start:batch_end]\n\n 
num_examples = X_batch.shape[0]\n\n W_1_prime_total = 0\n W_2_prime_total = 0\n b_1_prime_total = 0\n b_2_prime_total = 0\n\n for i in range(num_examples):\n x = np.vstack(X_batch[i, :])\n y = np.vstack(Y_batch[i, :])\n\n z_1, h_1, y_hat = self._forward_propagation(x)\n W_1_prime, W_2_prime, b_1_prime, b_2_prime = self._backward_propagation(x, y, z_1, h_1, y_hat)\n\n W_1_prime_total += W_1_prime\n W_2_prime_total += W_2_prime\n b_1_prime_total += b_1_prime\n b_2_prime_total += b_2_prime\n \n self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total\n self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total\n self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total\n self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total\n \n Y_hats = self.predict(X_batch)\n y_hat = self.predict(X_train)\n print(\"Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f\" % (epoch + 1, self._epochs,self._cross_entropy_loss(Y_batch, Y_hats), self.score(Y_train, y_hat)))\n\n def _forward_propagation(self, x):\n z_1 = self._W_1.dot(x) + self._b_1\n\n # print(\"_forward_propagation W_1=\", self._W_1.shape)\n # print(\"_forward_propagation b_1=\", self._b_1.shape)\n # print(\"_forward_propagation x=\", x.shape)\n # print(\"_forward_propagation z=\", z_1.shape)\n h_1 = self._relu(z_1)\n\n # print(\"_forward_propagation h_1=\", h_1.shape)\n z_2 = self._W_2.dot(h_1) + self._b_2\n\n # print(\"_forward_propagation z_2=\", z_2.shape)\n y_hat = self._softmax(z_2)\n\n # print(\"_forward_propagation y_hat=\", y_hat.shape)\n return z_1, h_1, y_hat\n \n def _backward_propagation(self, x, y, z_1, h_1, y_hat):\n df_dy = y_hat - y\n g = self._g(df_dy, self._W_2, z_1)\n\n W_1_prime = self._W_1_prime(x, g, self._W_1, self._l_2_alpha_1, self._l_1_beta_1)\n W_2_prime = self._W_2_prime(df_dy, h_1, self._W_2, self._l_2_alpha_2, self._l_1_beta_2)\n b_1_prime = self._learning_rate * self._b_1_prime(g)\n b_2_prime = self._learning_rate * self._b_2_prime(df_dy)\n\n return W_1_prime, W_2_prime, b_1_prime, b_2_prime\n\n def predict(self, X):\n num_examples = X.shape[0]\n Y_hat = np.zeros((num_examples, self._num_classes))\n for i in range(num_examples):\n x = np.vstack(X[i, :])\n _, _, y_hat = self._forward_propagation(x)\n Y_hat[i, :] = y_hat[:, 0]\n return Y_hat\n \n def _relu(self, x):\n return np.maximum(x, 0)\n\n def _relu_prime(self, x):\n y = np.zeros((x.shape[0], x.shape[1]))\n y[x > 0] = 1.0\n return y\n\n def _softmax(self, Z):\n exp = np.exp(Z)\n total = np.sum(exp, axis=0)\n return exp / total\n \n def _g(self, df_dy, W_2, z_1):\n return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T\n\n def _W_2_prime(self, df_dy, h_1, W_2, alpha_2, beta_2):\n return df_dy.dot(h_1.T) + alpha_2 * W_2 + beta_2 * np.sign(W_2) \n \n def _b_2_prime(self, df_dy):\n return df_dy\n\n def _W_1_prime(self, x, g, W_1, alpha_1, beta_1):\n return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1) \n\n def _b_1_prime(self, g):\n return g\n\n def _l_1_loss(self, W):\n return np.sum(np.absolute(W))\n \n def _l_2_loss(self, W):\n return 0.5 * np.linalg.norm(W)\n\n def _cross_entropy_loss(self, y, yhat):\n loss = 0\n yhat_log = np.log(yhat.T)\n for i in range(len(y)):\n loss -= y[i, :].dot(yhat_log[:, i])\n\n l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1) + self._l_1_beta_2 * self._l_1_loss(self._W_2)\n l_2_regularization = self._l_2_alpha_1 * self._l_2_loss(self._W_1) + self._l_2_alpha_2 * self._l_2_loss(self._W_2)\n return loss + l_1_regularization + l_2_regularization\n\n def _toClassIndices(self, probabilities):\n return 
np.argmax(probabilities, axis=1)\n\n def loss(self, testing_labels, predicted_labels):\n return 0\n\n def score(self, expected_labels, predicted_labels):\n return np.mean(self._toClassIndices(expected_labels) == self._toClassIndices(predicted_labels))\n\ndef describe_hyperparameters(hyperparameters):\n return \"\\nHidden Units: {0} Learning Rate: {1} Minibatch Size: {2} Epochs: {3} L1 Strength: {4} L2 Strength: {5}\".format(\n hyperparameters[0], hyperparameters[1], hyperparameters[2], hyperparameters[3], hyperparameters[4], hyperparameters[5])\n\n\ndef findBestHyperparameters(training_images, training_labels, validation_images, validation_labels):\n print(\"Start training...\")\n print()\n\n all_hidden_units = [20, 20, 30, 30, 40, 40, 50, 50, 60, 30]\n all_learning_rates = [0.0001, 0.001, 0.01, 0.01, 0.01, 0.02, 0.02, 0.1, 0.2, 0.007]\n all_minibatch_sizes = [2, 5, 10, 10, 20, 20, 100, 50, 50, 25]\n all_num_epochs = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3]\n all_l1_strengths = [0.0, 0.0, 0, 0.01, 0.0, 0.001, 0.01, 0.02, 0.01, 0.001]\n all_l2_strengths = [0.0, 0.01, 0.001, 0.0, 0.01, 0.001, 0.01, 0.02, 0.01, 0.001]\n\n best_accuracy = 0\n best_hyperparamters = [] \n\n for i in range(10):\n hyperparameters = (all_hidden_units[slice_start+i], \n all_learning_rates[slice_start+i],\n all_minibatch_sizes[slice_start+i],\n all_num_epochs[slice_start+i],\n all_l1_strengths[slice_start+i],\n all_l2_strengths[slice_start+i])\n\n print(describe_hyperparameters(hyperparameters))\n\n clf = NeuralNetworkClassifier(\n hidden_units = hyperparameters[0],\n learning_rate = hyperparameters[1], \n batch_size = hyperparameters[2], \n epochs = hyperparameters[3], \n l_1_beta_1 = hyperparameters[4], \n l_1_beta_2 = hyperparameters[4], \n l_2_alpha_1 = hyperparameters[5], \n l_2_alpha_2 = hyperparameters[5])\n\n clf.fit(training_images, training_labels)\n\n predicted_labels = clf.predict(validation_images)\n\n accuracy = clf.score(validation_labels, predicted_labels)\n\n print(\"Accuracy: %f\" % accuracy)\n print(\"Cross Entropy Loss = %.2f\" % (clf.loss(validation_labels, predicted_labels)))\n\n if(accuracy > best_accuracy):\n best_accuracy = accuracy\n best_hyperparamters = hyperparameters\n print(\"Found new best hyperparameters.\")\n \n print(\"\\n\")\n \n print(describe_hyperparameters(best_hyperparamters))\n return best_hyperparamters\n\ndef main():\n training_images = np.load(\"mnist_train_images.npy\")\n training_labels = np.load(\"mnist_train_labels.npy\")\n testing_images = np.load(\"mnist_test_images.npy\")\n testing_labels = np.load(\"mnist_test_labels.npy\")\n validation_images = np.load(\"mnist_validation_images.npy\")\n validation_labels = np.load(\"mnist_validation_labels.npy\")\n \n parameters = findBestHyperparameters(training_images[0:16000, :], training_labels[0:16000, :], \n validation_images, validation_labels)\n\n clf = NeuralNetworkClassifier(hidden_units=parameters[0], \n learning_rate=parameters[1], \n batch_size=parameters[2], \n epochs=parameters[3], l_1_beta_1=parameters[4], l_1_beta_2=parameters[4], l_2_alpha_1=parameters[5], l_2_alpha_2=parameters[5])\n\n clf.fit(training_images, training_labels)\n predicted_labels = clf.predict(testing_images)\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 1:\n print(\"Usage: python3 digit_recognizer.py\")\n exit()\n main()", "step-ids": [ 9, 15, 19, 21, 26 ] }
[ 9, 15, 19, 21, 26 ]
from flask import (Flask,
    render_template,
    request,
    url_for,
    redirect,
    flash,
    jsonify)

app = Flask(__name__)


@app.route('/', methods=['GET'])
def showHomepage():
    return render_template('home.html')


if __name__ == '__main__':
    print('app started')
    app.secret_key = 'secretkey'
    app.run(debug=True)
normal
{ "blob_id": "3001534be3364be1148cd51a4a943fd8c975d87e", "index": 8384, "step-1": "<mask token>\n\n\n@app.route('/', methods=['GET'])\ndef showHomepage():\n return render_template('home.html')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\n@app.route('/', methods=['GET'])\ndef showHomepage():\n return render_template('home.html')\n\n\nif __name__ == '__main__':\n print('app started')\n app.secret_key = 'secretkey'\n app.run(debug=True)\n", "step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET'])\ndef showHomepage():\n return render_template('home.html')\n\n\nif __name__ == '__main__':\n print('app started')\n app.secret_key = 'secretkey'\n app.run(debug=True)\n", "step-4": "from flask import Flask, render_template, request, url_for, redirect, flash, jsonify\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET'])\ndef showHomepage():\n return render_template('home.html')\n\n\nif __name__ == '__main__':\n print('app started')\n app.secret_key = 'secretkey'\n app.run(debug=True)\n", "step-5": "from flask import (Flask,\n\trender_template,\n\trequest,\n\turl_for,\n\tredirect,\n\tflash,\n\tjsonify)\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef showHomepage():\n\treturn render_template('home.html')\n\n\nif __name__ == '__main__':\n\tprint('app started')\n\tapp.secret_key = 'secretkey'\n\tapp.run(debug=True)\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import time

from junk.keyboard_non_blocking import NonBlockingKeyboard

TICK_DURATION = 0.05

INITIAL_FOOD_LEVEL = 100
FOOD_PER_TICK = -1
FOOD_PER_FEED = 10
MAX_FOOD_LEVEL = 100

INITIAL_ENERGY_LEVEL = 50
ENERGY_PER_TICK_AWAKE = -1
ENERGY_PER_TICK_ASLEEP = 5
MAX_ENERGY_LEVEL = 100

INITIAL_IS_AWAKE = False

INITIAL_POOP_LEVEL = 0
TICKS_PER_POOP = 25
MAX_POOP_LEVEL = 10


class UnknownCommand(Exception):
    pass


def _add_and_clip(x, dx, x_min, x_max):
    return max(x_min, min(x_max, x + dx))


class Tamagotchi:
    def __init__(self) -> None:
        self._age = 0
        self._food_level = INITIAL_FOOD_LEVEL
        self._energy_level = INITIAL_ENERGY_LEVEL
        self._poop_level = INITIAL_POOP_LEVEL
        self._is_awake = INITIAL_IS_AWAKE
        self._commands = {
            "f": self._feed,
            "c": self._clean,
            "s": self._sleep,
        }

    def __repr__(self) -> str:
        return f"Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})"

    def process_command(self, command: str) -> None:
        try:
            self._commands[command]()
        except KeyError:
            raise UnknownCommand(command)

    def _feed(self) -> None:
        if self._is_awake:
            self._food_level = _add_and_clip(
                self._food_level, FOOD_PER_FEED, 0, MAX_FOOD_LEVEL
            )

    def _clean(self) -> None:
        self._poop_level = 0

    def _sleep(self) -> None:
        self._is_awake = False

    def is_alive(self) -> bool:
        return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL

    def update(self) -> None:
        self._age += 1
        # Food
        self._food_level = _add_and_clip(
            self._food_level, FOOD_PER_TICK, 0, MAX_FOOD_LEVEL
        )
        # Energy
        if self._energy_level >= MAX_ENERGY_LEVEL:
            self._is_awake = True
        if self._energy_level <= 0:
            self._is_awake = False
        energy_delta = (
            ENERGY_PER_TICK_AWAKE if self._is_awake else ENERGY_PER_TICK_ASLEEP
        )
        self._energy_level = _add_and_clip(
            self._energy_level, energy_delta, 0, MAX_ENERGY_LEVEL
        )
        # Poop
        if self._age % TICKS_PER_POOP == 0:
            self._poop_level += 1


def main():
    tamagotchi = Tamagotchi()
    with NonBlockingKeyboard() as kb:
        while True:
            inpt = kb.getstr()

            should_quit = False
            for c in inpt:
                try:
                    tamagotchi.process_command(c)
                except UnknownCommand:
                    if c == "q":
                        should_quit = True
                        break
                    else:
                        raise

            if should_quit:
                break

            tamagotchi.update()
            print(tamagotchi)
            if not tamagotchi.is_alive():
                print("tamagotchi died")
                break
            time.sleep(TICK_DURATION)


if __name__ == "__main__":
    main()
normal
{ "blob_id": "1dd09a09f542099091d94d466ebd7cc149884eb4", "index": 7385, "step-1": "<mask token>\n\n\nclass UnknownCommand(Exception):\n pass\n\n\n<mask token>\n\n\nclass Tamagotchi:\n\n def __init__(self) ->None:\n self._age = 0\n self._food_level = INITIAL_FOOD_LEVEL\n self._energy_level = INITIAL_ENERGY_LEVEL\n self._poop_level = INITIAL_POOP_LEVEL\n self._is_awake = INITIAL_IS_AWAKE\n self._commands = {'f': self._feed, 'c': self._clean, 's': self._sleep}\n\n def __repr__(self) ->str:\n return (\n f'Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})'\n )\n\n def process_command(self, command: str) ->None:\n try:\n self._commands[command]()\n except KeyError:\n raise UnknownCommand(command)\n\n def _feed(self) ->None:\n if self._is_awake:\n self._food_level = _add_and_clip(self._food_level,\n FOOD_PER_FEED, 0, MAX_FOOD_LEVEL)\n\n def _clean(self) ->None:\n self._poop_level = 0\n\n def _sleep(self) ->None:\n self._is_awake = False\n\n def is_alive(self) ->bool:\n return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL\n\n def update(self) ->None:\n self._age += 1\n self._food_level = _add_and_clip(self._food_level, FOOD_PER_TICK, 0,\n MAX_FOOD_LEVEL)\n if self._energy_level >= MAX_ENERGY_LEVEL:\n self._is_awake = True\n if self._energy_level <= 0:\n self._is_awake = False\n energy_delta = (ENERGY_PER_TICK_AWAKE if self._is_awake else\n ENERGY_PER_TICK_ASLEEP)\n self._energy_level = _add_and_clip(self._energy_level, energy_delta,\n 0, MAX_ENERGY_LEVEL)\n if self._age % TICKS_PER_POOP == 0:\n self._poop_level += 1\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass UnknownCommand(Exception):\n pass\n\n\n<mask token>\n\n\nclass Tamagotchi:\n\n def __init__(self) ->None:\n self._age = 0\n self._food_level = INITIAL_FOOD_LEVEL\n self._energy_level = INITIAL_ENERGY_LEVEL\n self._poop_level = INITIAL_POOP_LEVEL\n self._is_awake = INITIAL_IS_AWAKE\n self._commands = {'f': self._feed, 'c': self._clean, 's': self._sleep}\n\n def __repr__(self) ->str:\n return (\n f'Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})'\n )\n\n def process_command(self, command: str) ->None:\n try:\n self._commands[command]()\n except KeyError:\n raise UnknownCommand(command)\n\n def _feed(self) ->None:\n if self._is_awake:\n self._food_level = _add_and_clip(self._food_level,\n FOOD_PER_FEED, 0, MAX_FOOD_LEVEL)\n\n def _clean(self) ->None:\n self._poop_level = 0\n\n def _sleep(self) ->None:\n self._is_awake = False\n\n def is_alive(self) ->bool:\n return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL\n\n def update(self) ->None:\n self._age += 1\n self._food_level = _add_and_clip(self._food_level, FOOD_PER_TICK, 0,\n MAX_FOOD_LEVEL)\n if self._energy_level >= MAX_ENERGY_LEVEL:\n self._is_awake = True\n if self._energy_level <= 0:\n self._is_awake = False\n energy_delta = (ENERGY_PER_TICK_AWAKE if self._is_awake else\n ENERGY_PER_TICK_ASLEEP)\n self._energy_level = _add_and_clip(self._energy_level, energy_delta,\n 0, MAX_ENERGY_LEVEL)\n if self._age % TICKS_PER_POOP == 0:\n self._poop_level += 1\n\n\ndef main():\n tamagotchi = Tamagotchi()\n with NonBlockingKeyboard() as kb:\n while True:\n inpt = kb.getstr()\n should_quit = False\n for c in inpt:\n try:\n tamagotchi.process_command(c)\n except UnknownCommand:\n if c == 'q':\n should_quit = True\n break\n else:\n raise\n if should_quit:\n break\n 
tamagotchi.update()\n print(tamagotchi)\n if not tamagotchi.is_alive():\n print('tamagotchi died')\n break\n time.sleep(TICK_DURATION)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass UnknownCommand(Exception):\n pass\n\n\ndef _add_and_clip(x, dx, x_min, x_max):\n return max(x_min, min(x_max, x + dx))\n\n\nclass Tamagotchi:\n\n def __init__(self) ->None:\n self._age = 0\n self._food_level = INITIAL_FOOD_LEVEL\n self._energy_level = INITIAL_ENERGY_LEVEL\n self._poop_level = INITIAL_POOP_LEVEL\n self._is_awake = INITIAL_IS_AWAKE\n self._commands = {'f': self._feed, 'c': self._clean, 's': self._sleep}\n\n def __repr__(self) ->str:\n return (\n f'Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})'\n )\n\n def process_command(self, command: str) ->None:\n try:\n self._commands[command]()\n except KeyError:\n raise UnknownCommand(command)\n\n def _feed(self) ->None:\n if self._is_awake:\n self._food_level = _add_and_clip(self._food_level,\n FOOD_PER_FEED, 0, MAX_FOOD_LEVEL)\n\n def _clean(self) ->None:\n self._poop_level = 0\n\n def _sleep(self) ->None:\n self._is_awake = False\n\n def is_alive(self) ->bool:\n return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL\n\n def update(self) ->None:\n self._age += 1\n self._food_level = _add_and_clip(self._food_level, FOOD_PER_TICK, 0,\n MAX_FOOD_LEVEL)\n if self._energy_level >= MAX_ENERGY_LEVEL:\n self._is_awake = True\n if self._energy_level <= 0:\n self._is_awake = False\n energy_delta = (ENERGY_PER_TICK_AWAKE if self._is_awake else\n ENERGY_PER_TICK_ASLEEP)\n self._energy_level = _add_and_clip(self._energy_level, energy_delta,\n 0, MAX_ENERGY_LEVEL)\n if self._age % TICKS_PER_POOP == 0:\n self._poop_level += 1\n\n\ndef main():\n tamagotchi = Tamagotchi()\n with NonBlockingKeyboard() as kb:\n while True:\n inpt = kb.getstr()\n should_quit = False\n for c in inpt:\n try:\n tamagotchi.process_command(c)\n except UnknownCommand:\n if c == 'q':\n should_quit = True\n break\n else:\n raise\n if should_quit:\n break\n tamagotchi.update()\n print(tamagotchi)\n if not tamagotchi.is_alive():\n print('tamagotchi died')\n break\n time.sleep(TICK_DURATION)\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass UnknownCommand(Exception):\n pass\n\n\ndef _add_and_clip(x, dx, x_min, x_max):\n return max(x_min, min(x_max, x + dx))\n\n\nclass Tamagotchi:\n\n def __init__(self) ->None:\n self._age = 0\n self._food_level = INITIAL_FOOD_LEVEL\n self._energy_level = INITIAL_ENERGY_LEVEL\n self._poop_level = INITIAL_POOP_LEVEL\n self._is_awake = INITIAL_IS_AWAKE\n self._commands = {'f': self._feed, 'c': self._clean, 's': self._sleep}\n\n def __repr__(self) ->str:\n return (\n f'Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})'\n )\n\n def process_command(self, command: str) ->None:\n try:\n self._commands[command]()\n except KeyError:\n raise UnknownCommand(command)\n\n def _feed(self) ->None:\n if self._is_awake:\n self._food_level = _add_and_clip(self._food_level,\n FOOD_PER_FEED, 0, MAX_FOOD_LEVEL)\n\n def _clean(self) ->None:\n self._poop_level = 0\n\n def _sleep(self) ->None:\n self._is_awake = False\n\n def is_alive(self) ->bool:\n return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL\n\n def update(self) ->None:\n self._age += 1\n self._food_level = _add_and_clip(self._food_level, FOOD_PER_TICK, 0,\n MAX_FOOD_LEVEL)\n if 
self._energy_level >= MAX_ENERGY_LEVEL:\n self._is_awake = True\n if self._energy_level <= 0:\n self._is_awake = False\n energy_delta = (ENERGY_PER_TICK_AWAKE if self._is_awake else\n ENERGY_PER_TICK_ASLEEP)\n self._energy_level = _add_and_clip(self._energy_level, energy_delta,\n 0, MAX_ENERGY_LEVEL)\n if self._age % TICKS_PER_POOP == 0:\n self._poop_level += 1\n\n\ndef main():\n tamagotchi = Tamagotchi()\n with NonBlockingKeyboard() as kb:\n while True:\n inpt = kb.getstr()\n should_quit = False\n for c in inpt:\n try:\n tamagotchi.process_command(c)\n except UnknownCommand:\n if c == 'q':\n should_quit = True\n break\n else:\n raise\n if should_quit:\n break\n tamagotchi.update()\n print(tamagotchi)\n if not tamagotchi.is_alive():\n print('tamagotchi died')\n break\n time.sleep(TICK_DURATION)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "import time\n\nfrom junk.keyboard_non_blocking import NonBlockingKeyboard\n\nTICK_DURATION = 0.05\n\nINITIAL_FOOD_LEVEL = 100\nFOOD_PER_TICK = -1\nFOOD_PER_FEED = 10\nMAX_FOOD_LEVEL = 100\n\nINITIAL_ENERGY_LEVEL = 50\nENERGY_PER_TICK_AWAKE = -1\nENERGY_PER_TICK_ASLEEP = 5\nMAX_ENERGY_LEVEL = 100\n\nINITIAL_IS_AWAKE = False\n\nINITIAL_POOP_LEVEL = 0\nTICKS_PER_POOP = 25\nMAX_POOP_LEVEL = 10\n\n\nclass UnknownCommand(Exception):\n pass\n\n\ndef _add_and_clip(x, dx, x_min, x_max):\n return max(x_min, min(x_max, x + dx))\n\n\nclass Tamagotchi:\n def __init__(self) -> None:\n self._age = 0\n self._food_level = INITIAL_FOOD_LEVEL\n self._energy_level = INITIAL_ENERGY_LEVEL\n self._poop_level = INITIAL_POOP_LEVEL\n self._is_awake = INITIAL_IS_AWAKE\n self._commands = {\n \"f\": self._feed,\n \"c\": self._clean,\n \"s\": self._sleep,\n }\n\n def __repr__(self) -> str:\n return f\"Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})\"\n\n def process_command(self, command: str) -> None:\n try:\n self._commands[command]()\n except KeyError:\n raise UnknownCommand(command)\n\n def _feed(self) -> None:\n if self._is_awake:\n self._food_level = _add_and_clip(\n self._food_level, FOOD_PER_FEED, 0, MAX_FOOD_LEVEL\n )\n\n def _clean(self) -> None:\n self._poop_level = 0\n\n def _sleep(self) -> None:\n self._is_awake = False\n\n def is_alive(self) -> bool:\n return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL\n\n def update(self) -> None:\n self._age += 1\n # Food\n self._food_level = _add_and_clip(\n self._food_level, FOOD_PER_TICK, 0, MAX_FOOD_LEVEL\n )\n # Energy\n if self._energy_level >= MAX_ENERGY_LEVEL:\n self._is_awake = True\n if self._energy_level <= 0:\n self._is_awake = False\n energy_delta = (\n ENERGY_PER_TICK_AWAKE if self._is_awake else ENERGY_PER_TICK_ASLEEP\n )\n self._energy_level = _add_and_clip(\n self._energy_level, energy_delta, 0, MAX_ENERGY_LEVEL\n )\n # Poop\n if self._age % TICKS_PER_POOP == 0:\n self._poop_level += 1\n\n\ndef main():\n tamagotchi = Tamagotchi()\n with NonBlockingKeyboard() as kb:\n while True:\n inpt = kb.getstr()\n\n should_quit = False\n for c in inpt:\n try:\n tamagotchi.process_command(c)\n except UnknownCommand:\n if c == \"q\":\n should_quit = True\n break\n else:\n raise\n\n if should_quit:\n break\n\n tamagotchi.update()\n print(tamagotchi)\n if not tamagotchi.is_alive():\n print(\"tamagotchi died\")\n break\n time.sleep(TICK_DURATION)\n\n\nif __name__ == \"__main__\":\n main()\n", "step-ids": [ 10, 11, 12, 13, 16 ] }
[ 10, 11, 12, 13, 16 ]
my_list = [1, 2, 4, 0, 4, 0, 10, 20, 0, 1]
new_list = list(filter(lambda x: x != 0, my_list))

try:
    new = list(map(lambda x: 2 / x, new_list))
except ZeroDivisionError:
    pass

print(new)


# def devis(n, list):
#     new_list = []
#     for i, m_list in enumerate(list):
#         try:
#             new_list.append(n/m_list)
#         except ZeroDivisionError:
#             new_list.append(None)
#     return new_list
# print(devis(2, my_list))
normal
{ "blob_id": "46f3d3681343d96889ddb073f17ff7f225486f35", "index": 8005, "step-1": "<mask token>\n", "step-2": "<mask token>\ntry:\n new = list(map(lambda x: 2 / x, new_list))\nexcept ZeroDivisionError:\n pass\nprint(new)\n", "step-3": "my_list = [1, 2, 4, 0, 4, 0, 10, 20, 0, 1]\nnew_list = list(filter(lambda x: x != 0, my_list))\ntry:\n new = list(map(lambda x: 2 / x, new_list))\nexcept ZeroDivisionError:\n pass\nprint(new)\n", "step-4": "my_list = [1, 2, 4, 0, 4, 0, 10, 20, 0, 1]\nnew_list = list(filter(lambda x: x != 0, my_list))\n\ntry:\n new = list(map(lambda x: 2 / x, new_list))\nexcept ZeroDivisionError:\n pass\n\nprint(new)\n\n\n\n\n\n# def devis(n, list):\n# new_list = []\n# for i, m_list in enumerate(list):\n# try:\n# new_list.append(n/m_list)\n# except ZeroDivisionError:\n# new_list.append(None)\n# return new_list\n# print(devis(2, my_list))\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
############################## Import Modules ################################## import pandas as pd import numpy as np import re from scipy import stats import matplotlib.pyplot as plt ############################## Define Functions ################################ # generate list containing data of standard curve def process_std(standard_input_file): try: with open(standard_input_file, 'r') as in_handle: lin_reg_lst = [] for line in in_handle: line = line.strip('\n') lin_reg_lst.append(line) except IOError: print("Could not open " + standard_input_file + " for reading.") quit(1) return lin_reg_lst # generate info_dict containing information about the samples def process_info(info_file): try: info_dict = {} with open(info_file, 'r') as in_handle: for line in in_handle: line = line.strip() items = re.split(' ', line) well_lst = re.split(',', items[1]) info_dict[items[0]] = {'wells': well_lst, 'conc': float(items[2]), 'dil': float(items[3])} except IOError: print("Could not open " + args.info + " for reading.") quit(1) return info_dict # calculate substrate concentration from absorption values def abs_to_subconc(meas_df, info_dict, m, c): # find data series belonging to a sample for sample in info_dict.keys(): for well in info_dict[sample]['wells']: i = np.where(meas_df == well) # convert absorption values to substrate concentration for row in meas_df[i[0]]: count = 1 for el in row: if type(el) != str: conc = (el - c)/m meas_df[i[0], count] = conc count += 1 return meas_df # process blank to get slope def process_blank(blank_file, std_m, std_c): blank_df = pd.read_csv(blank_file) blank_df = blank_df.to_numpy() # define x values i = np.where(blank_df == 'Time [s]') # fall-back for case that time per well is measured if len(i[0]) == 0: b_arr = [] i = np.where(blank_df == 'Time [ms]') # convert ms to s for row in blank_df[i[0]]: count = 1 arr = [] for el in row: if type(el) != str: sec = el*0.001 arr.append(sec) count += 1 b_arr.append(arr) blank_x = np.vstack(b_arr) # make average for time av_lst = [] for row in np.transpose(blank_x): av = sum(row) / len(row) av_lst.append(av) blank_x = np.transpose(np.array(av_lst)) else: blank_x = np.array(blank_df[i[0]][0, 1:]) # define y values arr = [] for row in blank_df: if re.search(r'^[A-Z]\d\d?$', row[0]): arr.append(row[1:]) if len(arr) < 2: blank_arr = np.array(arr) else: blank_arr = np.vstack(arr) count_r = 0 for row in blank_arr: count_c = 0 for el in row: if type(el) != str: conc = (el - std_c)/std_m blank_arr[count_r, count_c] = conc count_c += 1 count_r += 1 av_lst = [] for row in np.transpose(blank_arr): av = sum(row) / len(row) av_lst.append(av) if len(av_lst) < 2: blank_y = np.transpose(np.array(av_lst)) else: blank_y = np.transpose(np.vstack(av_lst)) b_m, b_c, b_r, b_p, stderr = stats.linregress(blank_x.astype(float), blank_y.astype(float)) return b_m # calculate average activity and standard deviation of each sample def act_calc(meas_df, info_dict, b_m, std_m, std_c): act_dict = {} # m_lin defines most linear part from first point while True: print("How many time intervals you want to take for the " + "analysis? (most linear part from first to x)") m_lin = input() if m_lin.isnumeric() == True and int(m_lin) > 1: break m_lin = int(m_lin) # define volume per well while True: print("What is the volume per well? 
(in µL)") well_v = input() print("\n") if well_v.isnumeric() == True: break # define x values time = np.where(meas_df == 'Time [s]') # fall-back for case that time per well is measured if len(time[0]) == 0: m_arr = [] time = np.where(meas_df == 'Time [ms]') # convert ms to s for row in meas_df[time[0]]: arr = [] count = 1 for el in row: if type(el) != str: sec = el*0.001 arr.append(sec) count += 1 m_arr.append(arr) x = np.vstack(m_arr) # make average for time values av_lst = [] for row in np.transpose(x): av = sum(row) / len(row) av_lst.append(av) x = np.transpose(np.array(av_lst[0:m_lin])) else: x = meas_df[time[0]] x = np.array(x[0, 1:m_lin + 1]) # process sample data for sample in info_dict.keys(): e_conc = info_dict[sample]['conc'] e_dil = info_dict[sample]['dil'] e_conc = float(e_conc)/ (float(e_dil)*1000) for well in info_dict[sample]['wells']: i = np.where(meas_df == well) y = meas_df[i[0]] y = np.array(y[0, 1:m_lin + 1]) m, c, r, p, stderr = stats.linregress(x.astype(float), y.astype(float)) print(sample + ' >R²' + str(r)) # plot substrate decrease plt.figure(1, figsize=[10,5], frameon=False) plt.plot(x, y, 'x', markersize=2, label=sample) plt.plot(x, m*x + c, 'r', linestyle='--', color='gray') plt.savefig('activity_plot.png') # calculate specific activity m = abs(m - b_m) sact = (m*60*int(well_v)) / (10*1000000*float(e_conc)) act_dict.setdefault(sample, []) act_dict[sample].append(sact) # calculate average specific activity per sample summery_dict = {} summery_dict['interval'] = m_lin for sample in act_dict.keys(): av_sact = sum(act_dict[sample]) / len(act_dict[sample]) print("average specific activity of " + sample + " = " + str(av_sact) + " U/mg") # calculate standard deviation per sample std = np.std(act_dict[sample]) print("standard deviation for " + sample + ": +/-" + str(std)) # generate summery_dict for output file summery_dict[sample] = {'av_sact': av_sact, 'std': std} return summery_dict # process summery_dict to generate output file def gen_output(summery_dict, name): try: with open(name + '_activity.out', 'w') as out_handle: out_handle.write('time interval from 1. to ' + str(summery_dict['interval']) + '. was used for calculations.\n') for sample in summery_dict.keys(): if sample == 'interval': continue else: out_handle.write(str(sample) + ': s = ' + str(summery_dict[sample]['av_sact']) + ' +/- ' + str(summery_dict[sample]['std']) + '\n') except IOError: print("Could not open activity.out for writing.") quit(1)
normal
{ "blob_id": "19949b07c866d66b3ef00b6a386bf89f03e06294", "index": 7984, "step-1": "<mask token>\n\n\ndef process_std(standard_input_file):\n try:\n with open(standard_input_file, 'r') as in_handle:\n lin_reg_lst = []\n for line in in_handle:\n line = line.strip('\\n')\n lin_reg_lst.append(line)\n except IOError:\n print('Could not open ' + standard_input_file + ' for reading.')\n quit(1)\n return lin_reg_lst\n\n\n<mask token>\n\n\ndef abs_to_subconc(meas_df, info_dict, m, c):\n for sample in info_dict.keys():\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n for row in meas_df[i[0]]:\n count = 1\n for el in row:\n if type(el) != str:\n conc = (el - c) / m\n meas_df[i[0], count] = conc\n count += 1\n return meas_df\n\n\n<mask token>\n\n\ndef act_calc(meas_df, info_dict, b_m, std_m, std_c):\n act_dict = {}\n while True:\n print('How many time intervals you want to take for the ' +\n 'analysis? (most linear part from first to x)')\n m_lin = input()\n if m_lin.isnumeric() == True and int(m_lin) > 1:\n break\n m_lin = int(m_lin)\n while True:\n print('What is the volume per well? (in µL)')\n well_v = input()\n print('\\n')\n if well_v.isnumeric() == True:\n break\n time = np.where(meas_df == 'Time [s]')\n if len(time[0]) == 0:\n m_arr = []\n time = np.where(meas_df == 'Time [ms]')\n for row in meas_df[time[0]]:\n arr = []\n count = 1\n for el in row:\n if type(el) != str:\n sec = el * 0.001\n arr.append(sec)\n count += 1\n m_arr.append(arr)\n x = np.vstack(m_arr)\n av_lst = []\n for row in np.transpose(x):\n av = sum(row) / len(row)\n av_lst.append(av)\n x = np.transpose(np.array(av_lst[0:m_lin]))\n else:\n x = meas_df[time[0]]\n x = np.array(x[0, 1:m_lin + 1])\n for sample in info_dict.keys():\n e_conc = info_dict[sample]['conc']\n e_dil = info_dict[sample]['dil']\n e_conc = float(e_conc) / (float(e_dil) * 1000)\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n y = meas_df[i[0]]\n y = np.array(y[0, 1:m_lin + 1])\n m, c, r, p, stderr = stats.linregress(x.astype(float), y.astype\n (float))\n print(sample + ' >R²' + str(r))\n plt.figure(1, figsize=[10, 5], frameon=False)\n plt.plot(x, y, 'x', markersize=2, label=sample)\n plt.plot(x, m * x + c, 'r', linestyle='--', color='gray')\n plt.savefig('activity_plot.png')\n m = abs(m - b_m)\n sact = m * 60 * int(well_v) / (10 * 1000000 * float(e_conc))\n act_dict.setdefault(sample, [])\n act_dict[sample].append(sact)\n summery_dict = {}\n summery_dict['interval'] = m_lin\n for sample in act_dict.keys():\n av_sact = sum(act_dict[sample]) / len(act_dict[sample])\n print('average specific activity of ' + sample + ' = ' + str(\n av_sact) + ' U/mg')\n std = np.std(act_dict[sample])\n print('standard deviation for ' + sample + ': +/-' + str(std))\n summery_dict[sample] = {'av_sact': av_sact, 'std': std}\n return summery_dict\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef process_std(standard_input_file):\n try:\n with open(standard_input_file, 'r') as in_handle:\n lin_reg_lst = []\n for line in in_handle:\n line = line.strip('\\n')\n lin_reg_lst.append(line)\n except IOError:\n print('Could not open ' + standard_input_file + ' for reading.')\n quit(1)\n return lin_reg_lst\n\n\ndef process_info(info_file):\n try:\n info_dict = {}\n with open(info_file, 'r') as in_handle:\n for line in in_handle:\n line = line.strip()\n items = re.split(' ', line)\n well_lst = re.split(',', items[1])\n info_dict[items[0]] = {'wells': well_lst, 'conc': float(\n items[2]), 'dil': float(items[3])}\n except IOError:\n 
print('Could not open ' + args.info + ' for reading.')\n quit(1)\n return info_dict\n\n\ndef abs_to_subconc(meas_df, info_dict, m, c):\n for sample in info_dict.keys():\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n for row in meas_df[i[0]]:\n count = 1\n for el in row:\n if type(el) != str:\n conc = (el - c) / m\n meas_df[i[0], count] = conc\n count += 1\n return meas_df\n\n\n<mask token>\n\n\ndef act_calc(meas_df, info_dict, b_m, std_m, std_c):\n act_dict = {}\n while True:\n print('How many time intervals you want to take for the ' +\n 'analysis? (most linear part from first to x)')\n m_lin = input()\n if m_lin.isnumeric() == True and int(m_lin) > 1:\n break\n m_lin = int(m_lin)\n while True:\n print('What is the volume per well? (in µL)')\n well_v = input()\n print('\\n')\n if well_v.isnumeric() == True:\n break\n time = np.where(meas_df == 'Time [s]')\n if len(time[0]) == 0:\n m_arr = []\n time = np.where(meas_df == 'Time [ms]')\n for row in meas_df[time[0]]:\n arr = []\n count = 1\n for el in row:\n if type(el) != str:\n sec = el * 0.001\n arr.append(sec)\n count += 1\n m_arr.append(arr)\n x = np.vstack(m_arr)\n av_lst = []\n for row in np.transpose(x):\n av = sum(row) / len(row)\n av_lst.append(av)\n x = np.transpose(np.array(av_lst[0:m_lin]))\n else:\n x = meas_df[time[0]]\n x = np.array(x[0, 1:m_lin + 1])\n for sample in info_dict.keys():\n e_conc = info_dict[sample]['conc']\n e_dil = info_dict[sample]['dil']\n e_conc = float(e_conc) / (float(e_dil) * 1000)\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n y = meas_df[i[0]]\n y = np.array(y[0, 1:m_lin + 1])\n m, c, r, p, stderr = stats.linregress(x.astype(float), y.astype\n (float))\n print(sample + ' >R²' + str(r))\n plt.figure(1, figsize=[10, 5], frameon=False)\n plt.plot(x, y, 'x', markersize=2, label=sample)\n plt.plot(x, m * x + c, 'r', linestyle='--', color='gray')\n plt.savefig('activity_plot.png')\n m = abs(m - b_m)\n sact = m * 60 * int(well_v) / (10 * 1000000 * float(e_conc))\n act_dict.setdefault(sample, [])\n act_dict[sample].append(sact)\n summery_dict = {}\n summery_dict['interval'] = m_lin\n for sample in act_dict.keys():\n av_sact = sum(act_dict[sample]) / len(act_dict[sample])\n print('average specific activity of ' + sample + ' = ' + str(\n av_sact) + ' U/mg')\n std = np.std(act_dict[sample])\n print('standard deviation for ' + sample + ': +/-' + str(std))\n summery_dict[sample] = {'av_sact': av_sact, 'std': std}\n return summery_dict\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef process_std(standard_input_file):\n try:\n with open(standard_input_file, 'r') as in_handle:\n lin_reg_lst = []\n for line in in_handle:\n line = line.strip('\\n')\n lin_reg_lst.append(line)\n except IOError:\n print('Could not open ' + standard_input_file + ' for reading.')\n quit(1)\n return lin_reg_lst\n\n\ndef process_info(info_file):\n try:\n info_dict = {}\n with open(info_file, 'r') as in_handle:\n for line in in_handle:\n line = line.strip()\n items = re.split(' ', line)\n well_lst = re.split(',', items[1])\n info_dict[items[0]] = {'wells': well_lst, 'conc': float(\n items[2]), 'dil': float(items[3])}\n except IOError:\n print('Could not open ' + args.info + ' for reading.')\n quit(1)\n return info_dict\n\n\ndef abs_to_subconc(meas_df, info_dict, m, c):\n for sample in info_dict.keys():\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n for row in meas_df[i[0]]:\n count = 1\n for el in row:\n if type(el) != str:\n conc = (el - c) / m\n 
meas_df[i[0], count] = conc\n count += 1\n return meas_df\n\n\ndef process_blank(blank_file, std_m, std_c):\n blank_df = pd.read_csv(blank_file)\n blank_df = blank_df.to_numpy()\n i = np.where(blank_df == 'Time [s]')\n if len(i[0]) == 0:\n b_arr = []\n i = np.where(blank_df == 'Time [ms]')\n for row in blank_df[i[0]]:\n count = 1\n arr = []\n for el in row:\n if type(el) != str:\n sec = el * 0.001\n arr.append(sec)\n count += 1\n b_arr.append(arr)\n blank_x = np.vstack(b_arr)\n av_lst = []\n for row in np.transpose(blank_x):\n av = sum(row) / len(row)\n av_lst.append(av)\n blank_x = np.transpose(np.array(av_lst))\n else:\n blank_x = np.array(blank_df[i[0]][0, 1:])\n arr = []\n for row in blank_df:\n if re.search('^[A-Z]\\\\d\\\\d?$', row[0]):\n arr.append(row[1:])\n if len(arr) < 2:\n blank_arr = np.array(arr)\n else:\n blank_arr = np.vstack(arr)\n count_r = 0\n for row in blank_arr:\n count_c = 0\n for el in row:\n if type(el) != str:\n conc = (el - std_c) / std_m\n blank_arr[count_r, count_c] = conc\n count_c += 1\n count_r += 1\n av_lst = []\n for row in np.transpose(blank_arr):\n av = sum(row) / len(row)\n av_lst.append(av)\n if len(av_lst) < 2:\n blank_y = np.transpose(np.array(av_lst))\n else:\n blank_y = np.transpose(np.vstack(av_lst))\n b_m, b_c, b_r, b_p, stderr = stats.linregress(blank_x.astype(float),\n blank_y.astype(float))\n return b_m\n\n\ndef act_calc(meas_df, info_dict, b_m, std_m, std_c):\n act_dict = {}\n while True:\n print('How many time intervals you want to take for the ' +\n 'analysis? (most linear part from first to x)')\n m_lin = input()\n if m_lin.isnumeric() == True and int(m_lin) > 1:\n break\n m_lin = int(m_lin)\n while True:\n print('What is the volume per well? (in µL)')\n well_v = input()\n print('\\n')\n if well_v.isnumeric() == True:\n break\n time = np.where(meas_df == 'Time [s]')\n if len(time[0]) == 0:\n m_arr = []\n time = np.where(meas_df == 'Time [ms]')\n for row in meas_df[time[0]]:\n arr = []\n count = 1\n for el in row:\n if type(el) != str:\n sec = el * 0.001\n arr.append(sec)\n count += 1\n m_arr.append(arr)\n x = np.vstack(m_arr)\n av_lst = []\n for row in np.transpose(x):\n av = sum(row) / len(row)\n av_lst.append(av)\n x = np.transpose(np.array(av_lst[0:m_lin]))\n else:\n x = meas_df[time[0]]\n x = np.array(x[0, 1:m_lin + 1])\n for sample in info_dict.keys():\n e_conc = info_dict[sample]['conc']\n e_dil = info_dict[sample]['dil']\n e_conc = float(e_conc) / (float(e_dil) * 1000)\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n y = meas_df[i[0]]\n y = np.array(y[0, 1:m_lin + 1])\n m, c, r, p, stderr = stats.linregress(x.astype(float), y.astype\n (float))\n print(sample + ' >R²' + str(r))\n plt.figure(1, figsize=[10, 5], frameon=False)\n plt.plot(x, y, 'x', markersize=2, label=sample)\n plt.plot(x, m * x + c, 'r', linestyle='--', color='gray')\n plt.savefig('activity_plot.png')\n m = abs(m - b_m)\n sact = m * 60 * int(well_v) / (10 * 1000000 * float(e_conc))\n act_dict.setdefault(sample, [])\n act_dict[sample].append(sact)\n summery_dict = {}\n summery_dict['interval'] = m_lin\n for sample in act_dict.keys():\n av_sact = sum(act_dict[sample]) / len(act_dict[sample])\n print('average specific activity of ' + sample + ' = ' + str(\n av_sact) + ' U/mg')\n std = np.std(act_dict[sample])\n print('standard deviation for ' + sample + ': +/-' + str(std))\n summery_dict[sample] = {'av_sact': av_sact, 'std': std}\n return summery_dict\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef process_std(standard_input_file):\n 
try:\n with open(standard_input_file, 'r') as in_handle:\n lin_reg_lst = []\n for line in in_handle:\n line = line.strip('\\n')\n lin_reg_lst.append(line)\n except IOError:\n print('Could not open ' + standard_input_file + ' for reading.')\n quit(1)\n return lin_reg_lst\n\n\ndef process_info(info_file):\n try:\n info_dict = {}\n with open(info_file, 'r') as in_handle:\n for line in in_handle:\n line = line.strip()\n items = re.split(' ', line)\n well_lst = re.split(',', items[1])\n info_dict[items[0]] = {'wells': well_lst, 'conc': float(\n items[2]), 'dil': float(items[3])}\n except IOError:\n print('Could not open ' + args.info + ' for reading.')\n quit(1)\n return info_dict\n\n\ndef abs_to_subconc(meas_df, info_dict, m, c):\n for sample in info_dict.keys():\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n for row in meas_df[i[0]]:\n count = 1\n for el in row:\n if type(el) != str:\n conc = (el - c) / m\n meas_df[i[0], count] = conc\n count += 1\n return meas_df\n\n\ndef process_blank(blank_file, std_m, std_c):\n blank_df = pd.read_csv(blank_file)\n blank_df = blank_df.to_numpy()\n i = np.where(blank_df == 'Time [s]')\n if len(i[0]) == 0:\n b_arr = []\n i = np.where(blank_df == 'Time [ms]')\n for row in blank_df[i[0]]:\n count = 1\n arr = []\n for el in row:\n if type(el) != str:\n sec = el * 0.001\n arr.append(sec)\n count += 1\n b_arr.append(arr)\n blank_x = np.vstack(b_arr)\n av_lst = []\n for row in np.transpose(blank_x):\n av = sum(row) / len(row)\n av_lst.append(av)\n blank_x = np.transpose(np.array(av_lst))\n else:\n blank_x = np.array(blank_df[i[0]][0, 1:])\n arr = []\n for row in blank_df:\n if re.search('^[A-Z]\\\\d\\\\d?$', row[0]):\n arr.append(row[1:])\n if len(arr) < 2:\n blank_arr = np.array(arr)\n else:\n blank_arr = np.vstack(arr)\n count_r = 0\n for row in blank_arr:\n count_c = 0\n for el in row:\n if type(el) != str:\n conc = (el - std_c) / std_m\n blank_arr[count_r, count_c] = conc\n count_c += 1\n count_r += 1\n av_lst = []\n for row in np.transpose(blank_arr):\n av = sum(row) / len(row)\n av_lst.append(av)\n if len(av_lst) < 2:\n blank_y = np.transpose(np.array(av_lst))\n else:\n blank_y = np.transpose(np.vstack(av_lst))\n b_m, b_c, b_r, b_p, stderr = stats.linregress(blank_x.astype(float),\n blank_y.astype(float))\n return b_m\n\n\ndef act_calc(meas_df, info_dict, b_m, std_m, std_c):\n act_dict = {}\n while True:\n print('How many time intervals you want to take for the ' +\n 'analysis? (most linear part from first to x)')\n m_lin = input()\n if m_lin.isnumeric() == True and int(m_lin) > 1:\n break\n m_lin = int(m_lin)\n while True:\n print('What is the volume per well? 
(in µL)')\n well_v = input()\n print('\\n')\n if well_v.isnumeric() == True:\n break\n time = np.where(meas_df == 'Time [s]')\n if len(time[0]) == 0:\n m_arr = []\n time = np.where(meas_df == 'Time [ms]')\n for row in meas_df[time[0]]:\n arr = []\n count = 1\n for el in row:\n if type(el) != str:\n sec = el * 0.001\n arr.append(sec)\n count += 1\n m_arr.append(arr)\n x = np.vstack(m_arr)\n av_lst = []\n for row in np.transpose(x):\n av = sum(row) / len(row)\n av_lst.append(av)\n x = np.transpose(np.array(av_lst[0:m_lin]))\n else:\n x = meas_df[time[0]]\n x = np.array(x[0, 1:m_lin + 1])\n for sample in info_dict.keys():\n e_conc = info_dict[sample]['conc']\n e_dil = info_dict[sample]['dil']\n e_conc = float(e_conc) / (float(e_dil) * 1000)\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n y = meas_df[i[0]]\n y = np.array(y[0, 1:m_lin + 1])\n m, c, r, p, stderr = stats.linregress(x.astype(float), y.astype\n (float))\n print(sample + ' >R²' + str(r))\n plt.figure(1, figsize=[10, 5], frameon=False)\n plt.plot(x, y, 'x', markersize=2, label=sample)\n plt.plot(x, m * x + c, 'r', linestyle='--', color='gray')\n plt.savefig('activity_plot.png')\n m = abs(m - b_m)\n sact = m * 60 * int(well_v) / (10 * 1000000 * float(e_conc))\n act_dict.setdefault(sample, [])\n act_dict[sample].append(sact)\n summery_dict = {}\n summery_dict['interval'] = m_lin\n for sample in act_dict.keys():\n av_sact = sum(act_dict[sample]) / len(act_dict[sample])\n print('average specific activity of ' + sample + ' = ' + str(\n av_sact) + ' U/mg')\n std = np.std(act_dict[sample])\n print('standard deviation for ' + sample + ': +/-' + str(std))\n summery_dict[sample] = {'av_sact': av_sact, 'std': std}\n return summery_dict\n\n\ndef gen_output(summery_dict, name):\n try:\n with open(name + '_activity.out', 'w') as out_handle:\n out_handle.write('time interval from 1. to ' + str(summery_dict\n ['interval']) + \"\"\". 
was used for calculations.\n\"\"\")\n for sample in summery_dict.keys():\n if sample == 'interval':\n continue\n else:\n out_handle.write(str(sample) + ': s = ' + str(\n summery_dict[sample]['av_sact']) + ' +/- ' + str(\n summery_dict[sample]['std']) + '\\n')\n except IOError:\n print('Could not open activity.out for writing.')\n quit(1)\n", "step-5": "############################## Import Modules ##################################\nimport pandas as pd\nimport numpy as np\nimport re\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\n############################## Define Functions ################################\n# generate list containing data of standard curve\ndef process_std(standard_input_file):\n try:\n with open(standard_input_file, 'r') as in_handle:\n lin_reg_lst = []\n for line in in_handle:\n line = line.strip('\\n')\n lin_reg_lst.append(line)\n except IOError:\n print(\"Could not open \" + standard_input_file + \" for reading.\")\n quit(1)\n return lin_reg_lst\n\n# generate info_dict containing information about the samples\ndef process_info(info_file):\n try:\n info_dict = {}\n with open(info_file, 'r') as in_handle:\n for line in in_handle:\n line = line.strip()\n items = re.split(' ', line)\n well_lst = re.split(',', items[1])\n info_dict[items[0]] = {'wells': well_lst,\n 'conc': float(items[2]),\n 'dil': float(items[3])}\n except IOError:\n print(\"Could not open \" + args.info + \" for reading.\")\n quit(1)\n return info_dict\n\n# calculate substrate concentration from absorption values\ndef abs_to_subconc(meas_df, info_dict, m, c):\n # find data series belonging to a sample\n for sample in info_dict.keys():\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n # convert absorption values to substrate concentration\n for row in meas_df[i[0]]:\n count = 1\n for el in row:\n if type(el) != str:\n conc = (el - c)/m\n meas_df[i[0], count] = conc\n count += 1\n return meas_df\n\n# process blank to get slope\ndef process_blank(blank_file, std_m, std_c):\n blank_df = pd.read_csv(blank_file)\n blank_df = blank_df.to_numpy()\n # define x values\n i = np.where(blank_df == 'Time [s]')\n # fall-back for case that time per well is measured \n if len(i[0]) == 0:\n b_arr = []\n i = np.where(blank_df == 'Time [ms]')\n # convert ms to s\n for row in blank_df[i[0]]:\n count = 1\n arr = []\n for el in row:\n if type(el) != str:\n sec = el*0.001\n arr.append(sec)\n count += 1\n b_arr.append(arr)\n blank_x = np.vstack(b_arr)\n # make average for time\n av_lst = []\n for row in np.transpose(blank_x):\n av = sum(row) / len(row)\n av_lst.append(av)\n blank_x = np.transpose(np.array(av_lst))\n else:\n blank_x = np.array(blank_df[i[0]][0, 1:])\n # define y values\n arr = []\n for row in blank_df:\n if re.search(r'^[A-Z]\\d\\d?$', row[0]):\n arr.append(row[1:])\n if len(arr) < 2:\n blank_arr = np.array(arr)\n else:\n blank_arr = np.vstack(arr)\n count_r = 0\n for row in blank_arr:\n count_c = 0\n for el in row:\n if type(el) != str:\n conc = (el - std_c)/std_m\n blank_arr[count_r, count_c] = conc\n count_c += 1\n count_r += 1\n av_lst = []\n for row in np.transpose(blank_arr):\n av = sum(row) / len(row)\n av_lst.append(av)\n if len(av_lst) < 2:\n blank_y = np.transpose(np.array(av_lst))\n else:\n blank_y = np.transpose(np.vstack(av_lst))\n b_m, b_c, b_r, b_p, stderr = stats.linregress(blank_x.astype(float),\n blank_y.astype(float))\n return b_m\n\n# calculate average activity and standard deviation of each sample\ndef act_calc(meas_df, info_dict, b_m, std_m, std_c):\n 
act_dict = {}\n # m_lin defines most linear part from first point\n while True:\n print(\"How many time intervals you want to take for the \"\n + \"analysis? (most linear part from first to x)\")\n m_lin = input()\n if m_lin.isnumeric() == True and int(m_lin) > 1:\n break\n m_lin = int(m_lin)\n # define volume per well\n while True:\n print(\"What is the volume per well? (in µL)\")\n well_v = input()\n print(\"\\n\")\n if well_v.isnumeric() == True:\n break\n # define x values\n time = np.where(meas_df == 'Time [s]')\n # fall-back for case that time per well is measured \n if len(time[0]) == 0:\n m_arr = []\n time = np.where(meas_df == 'Time [ms]')\n # convert ms to s\n for row in meas_df[time[0]]:\n arr = []\n count = 1\n for el in row:\n if type(el) != str:\n sec = el*0.001\n arr.append(sec)\n count += 1\n m_arr.append(arr)\n x = np.vstack(m_arr)\n # make average for time values\n av_lst = []\n for row in np.transpose(x):\n av = sum(row) / len(row)\n av_lst.append(av)\n x = np.transpose(np.array(av_lst[0:m_lin]))\n else:\n x = meas_df[time[0]]\n x = np.array(x[0, 1:m_lin + 1])\n # process sample data\n for sample in info_dict.keys():\n e_conc = info_dict[sample]['conc']\n e_dil = info_dict[sample]['dil']\n e_conc = float(e_conc)/ (float(e_dil)*1000)\n for well in info_dict[sample]['wells']:\n i = np.where(meas_df == well)\n y = meas_df[i[0]]\n y = np.array(y[0, 1:m_lin + 1])\n m, c, r, p, stderr = stats.linregress(x.astype(float),\n y.astype(float))\n print(sample + ' >R²' + str(r))\n # plot substrate decrease\n plt.figure(1, figsize=[10,5], frameon=False)\n plt.plot(x, y, 'x', markersize=2, label=sample)\n plt.plot(x, m*x + c, 'r', linestyle='--', color='gray')\n plt.savefig('activity_plot.png')\n # calculate specific activity\n m = abs(m - b_m)\n sact = (m*60*int(well_v)) / (10*1000000*float(e_conc))\n act_dict.setdefault(sample, [])\n act_dict[sample].append(sact)\n # calculate average specific activity per sample\n summery_dict = {}\n summery_dict['interval'] = m_lin\n for sample in act_dict.keys():\n av_sact = sum(act_dict[sample]) / len(act_dict[sample])\n print(\"average specific activity of \" + sample + \" = \"\n + str(av_sact) + \" U/mg\")\n # calculate standard deviation per sample\n std = np.std(act_dict[sample])\n print(\"standard deviation for \" + sample + \": +/-\" + str(std))\n # generate summery_dict for output file\n summery_dict[sample] = {'av_sact': av_sact, 'std': std}\n return summery_dict\n\n# process summery_dict to generate output file\ndef gen_output(summery_dict, name):\n try:\n with open(name + '_activity.out', 'w') as out_handle:\n out_handle.write('time interval from 1. to '\n + str(summery_dict['interval'])\n + '. was used for calculations.\\n')\n for sample in summery_dict.keys():\n if sample == 'interval':\n continue\n else:\n out_handle.write(str(sample) + ': s = '\n + str(summery_dict[sample]['av_sact'])\n + ' +/- '\n + str(summery_dict[sample]['std']) + '\\n')\n except IOError:\n print(\"Could not open activity.out for writing.\")\n quit(1)\n", "step-ids": [ 3, 4, 5, 6, 8 ] }
[ 3, 4, 5, 6, 8 ]
a = ['a', 'b', 'c', 'd', 'e']
print(';'.join(a))
normal
{ "blob_id": "a10403d7809b97c1bcdfa73224b8c365519cc456", "index": 7275, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(';'.join(a))\n", "step-3": "a = ['a', 'b', 'c', 'd', 'e']\nprint(';'.join(a))\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from django import forms from .models import Appointment, Prescription from account.models import User class AppointmentForm(forms.ModelForm): class Meta: model = Appointment fields = '__all__' widgets = { 'date': forms.DateInput(attrs={'type': 'date'}), 'time': forms.TimeInput(attrs={'type': 'time'}) } def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['patient'].queryset = User.objects.filter(usertype='P') # self.fields['patient'].empty_label = 'select patient' self.fields['doctor'].queryset = User.objects.filter(usertype='D') # self.fields['doctor'].empty_label = 'select doctor' class PrescriptionForm(forms.ModelForm): class Meta: model = Prescription exclude = ['doctor'] widgets = { 'prescription': forms.Textarea(attrs={'rows': 4}), } def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['patient'].queryset = User.objects.filter(usertype='P')
normal
{ "blob_id": "d3425017d4e604a8940997afd0c35a4f7eac1170", "index": 6944, "step-1": "<mask token>\n\n\nclass PrescriptionForm(forms.ModelForm):\n\n\n class Meta:\n model = Prescription\n exclude = ['doctor']\n widgets = {'prescription': forms.Textarea(attrs={'rows': 4})}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n", "step-2": "<mask token>\n\n\nclass AppointmentForm(forms.ModelForm):\n\n\n class Meta:\n model = Appointment\n fields = '__all__'\n widgets = {'date': forms.DateInput(attrs={'type': 'date'}), 'time':\n forms.TimeInput(attrs={'type': 'time'})}\n <mask token>\n\n\nclass PrescriptionForm(forms.ModelForm):\n\n\n class Meta:\n model = Prescription\n exclude = ['doctor']\n widgets = {'prescription': forms.Textarea(attrs={'rows': 4})}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n", "step-3": "<mask token>\n\n\nclass AppointmentForm(forms.ModelForm):\n\n\n class Meta:\n model = Appointment\n fields = '__all__'\n widgets = {'date': forms.DateInput(attrs={'type': 'date'}), 'time':\n forms.TimeInput(attrs={'type': 'time'})}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n self.fields['doctor'].queryset = User.objects.filter(usertype='D')\n\n\nclass PrescriptionForm(forms.ModelForm):\n\n\n class Meta:\n model = Prescription\n exclude = ['doctor']\n widgets = {'prescription': forms.Textarea(attrs={'rows': 4})}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n", "step-4": "from django import forms\nfrom .models import Appointment, Prescription\nfrom account.models import User\n\n\nclass AppointmentForm(forms.ModelForm):\n\n\n class Meta:\n model = Appointment\n fields = '__all__'\n widgets = {'date': forms.DateInput(attrs={'type': 'date'}), 'time':\n forms.TimeInput(attrs={'type': 'time'})}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n self.fields['doctor'].queryset = User.objects.filter(usertype='D')\n\n\nclass PrescriptionForm(forms.ModelForm):\n\n\n class Meta:\n model = Prescription\n exclude = ['doctor']\n widgets = {'prescription': forms.Textarea(attrs={'rows': 4})}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n", "step-5": "from django import forms\nfrom .models import Appointment, Prescription\nfrom account.models import User\n\n\nclass AppointmentForm(forms.ModelForm):\n class Meta:\n model = Appointment\n fields = '__all__'\n widgets = {\n 'date': forms.DateInput(attrs={'type': 'date'}),\n 'time': forms.TimeInput(attrs={'type': 'time'})\n\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n\n # self.fields['patient'].empty_label = 'select patient'\n self.fields['doctor'].queryset = User.objects.filter(usertype='D')\n\n # self.fields['doctor'].empty_label = 'select doctor'\n\n\nclass PrescriptionForm(forms.ModelForm):\n class Meta:\n model = Prescription\n exclude = ['doctor']\n widgets = {\n 'prescription': forms.Textarea(attrs={'rows': 4}),\n }\n\n def __init__(self, *args, 
**kwargs):\n super().__init__(*args, **kwargs)\n self.fields['patient'].queryset = User.objects.filter(usertype='P')\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
class TestRawJob: def __init__(self, parsedRow): values = [string.strip().lower() for string in parsedRow] keys = ["Id", "Title", "Description", "Raw Location", "Normalized Location", "Contract Type", "Contract Time", "Company", "Category", "Source"] self.data = dict(zip(keys, values))
normal
{ "blob_id": "4eac468db955ca5ef5d2ec6ba67bd6c7f4d865f4", "index": 2050, "step-1": "<mask token>\n", "step-2": "class TestRawJob:\n <mask token>\n", "step-3": "class TestRawJob:\n\n def __init__(self, parsedRow):\n values = [string.strip().lower() for string in parsedRow]\n keys = ['Id', 'Title', 'Description', 'Raw Location',\n 'Normalized Location', 'Contract Type', 'Contract Time',\n 'Company', 'Category', 'Source']\n self.data = dict(zip(keys, values))\n", "step-4": "class TestRawJob:\n\t\n\tdef __init__(self, parsedRow):\n\t\tvalues = [string.strip().lower() for string in parsedRow]\n\t\tkeys = [\"Id\", \"Title\", \"Description\", \"Raw Location\", \"Normalized Location\",\n\t\t\t\t\"Contract Type\", \"Contract Time\", \"Company\", \"Category\", \"Source\"]\n\t\tself.data = dict(zip(keys, values))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import numpy as np import math import os if os.getcwd().rfind('share') > 0: topsy = True import matplotlib as mpl mpl.use('Agg') else: topsy = False from matplotlib import rc import matplotlib.pyplot as plt from matplotlib import rc from matplotlib import cm from scipy.optimize import curve_fit import sys import h5py from glob import glob pwd = os.getcwd() k = int(pwd[pwd.rfind('pred')+4:]) number_of_lines = len(glob('group*[0-9]*')) cm_subsection = np.linspace(0., 1., number_of_lines) colors = [ cm.magma(x) for x in cm_subsection] Z = [[0,0],[0,0]] levels = range(5,500+5,5) CS3 = plt.contourf(Z, levels, cmap='magma') plt.clf() area = [] def movingaverage(interval, window_size): window= np.ones(int(window_size))/float(window_size) return np.convolve(interval, window, 'same') j = 0 for group in sorted(glob('group*[0-9]*')): files = glob(group + '/data*.h5') print group alive = [] time = [] plotPeriod = 0.1 for dFile in files: value = dFile[dFile.rfind('-')+1:dFile.rfind('.')] data = dict() h5f = h5py.File(dFile,'r') itime = np.copy(h5f['itime'])[0] data['alive'] = np.copy(h5f['alive']) data['t'] = np.copy(h5f['t']) lastPlot = 0 for i in range(itime): if data['t'][i] - lastPlot > plotPeriod: time.append(data['t'][i]) alive.append(data['alive'][i].sum()) lastPlot = data['t'][i] alive = np.array(alive).reshape(len(alive), 1) time = np.array(time).reshape(len(time), 1) data = np.append(time, alive, axis = 1) data = data.tolist() data2 = sorted(data, key=lambda x : x[0]) data2 = np.array(data2) if np.shape(data2)[0] > 0: y_av = movingaverage(data2[:,1], 75) plt.plot(data2[:,0][100:-50], y_av[100:-50], label = group, color = colors[j]) trap = np.trapz(y_av[100:-50], x = data2[:,0][100:-50]) area += [[int(group[5:]), trap]] j +=1 plt.colorbar(CS3) plt.xlabel('Time', fontsize = 18) plt.ylabel('$N(t)$', fontsize = 18) plt.savefig('./groupPredation') np.save('./area.npy', area) area = np.array(area) plt.plot(area[:,0], area[:,1], lw = 2) y_av = movingaverage(area[:,1], 3) plt.plot(area[:,0][5:-5], y_av[5:-5], lw = 2) plt.xlabel('Group Size', fontsize = 18) plt.ylabel('Area', fontsize = 18) plt.savefig('./groupPredationArea.png')
normal
{ "blob_id": "2539411c7b348662dbe9ebf87e26faacc20f4c5e", "index": 3837, "step-1": "import numpy as np\nimport math\nimport os\nif os.getcwd().rfind('share') > 0:\n\ttopsy = True\n\timport matplotlib as mpl\n\tmpl.use('Agg')\nelse:\n\ttopsy = False\n\tfrom matplotlib import rc\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nfrom matplotlib import cm\nfrom scipy.optimize import curve_fit\nimport sys\nimport h5py\nfrom glob import glob\n\npwd = os.getcwd()\nk = int(pwd[pwd.rfind('pred')+4:])\n\nnumber_of_lines = len(glob('group*[0-9]*'))\ncm_subsection = np.linspace(0., 1., number_of_lines)\ncolors = [ cm.magma(x) for x in cm_subsection]\n\nZ = [[0,0],[0,0]]\nlevels = range(5,500+5,5)\nCS3 = plt.contourf(Z, levels, cmap='magma')\nplt.clf()\n\narea = []\n\ndef movingaverage(interval, window_size):\n window= np.ones(int(window_size))/float(window_size)\n return np.convolve(interval, window, 'same')\n\nj = 0\nfor group in sorted(glob('group*[0-9]*')):\n\tfiles = glob(group + '/data*.h5')\n\tprint group\n\n\talive = []\n\ttime = []\n\tplotPeriod = 0.1\n\n\tfor dFile in files:\n\t\tvalue = dFile[dFile.rfind('-')+1:dFile.rfind('.')]\n\n\t\tdata = dict()\n\t\th5f = h5py.File(dFile,'r')\n\t\titime = np.copy(h5f['itime'])[0]\n\t\tdata['alive'] = np.copy(h5f['alive'])\n\t\tdata['t'] = np.copy(h5f['t'])\n\n\t\tlastPlot = 0\n\t\tfor i in range(itime):\n\t\t\tif data['t'][i] - lastPlot > plotPeriod:\n\t\t\t\ttime.append(data['t'][i])\n\t\t\t\talive.append(data['alive'][i].sum())\n\t\t\t\tlastPlot = data['t'][i]\n\n\n\talive = np.array(alive).reshape(len(alive), 1)\n\ttime = np.array(time).reshape(len(time), 1)\n\n\tdata = np.append(time, alive, axis = 1)\n\tdata = data.tolist()\n\tdata2 = sorted(data, key=lambda x : x[0])\n\tdata2 = np.array(data2)\n\n\tif np.shape(data2)[0] > 0:\n\t\ty_av = movingaverage(data2[:,1], 75)\n\t\tplt.plot(data2[:,0][100:-50], y_av[100:-50], label = group, color = colors[j])\n\n\ttrap = np.trapz(y_av[100:-50], x = data2[:,0][100:-50])\n\tarea += [[int(group[5:]), trap]]\n\tj +=1\nplt.colorbar(CS3)\nplt.xlabel('Time', fontsize = 18)\nplt.ylabel('$N(t)$', fontsize = 18)\nplt.savefig('./groupPredation')\n\n\nnp.save('./area.npy', area)\narea = np.array(area)\nplt.plot(area[:,0], area[:,1], lw = 2)\ny_av = movingaverage(area[:,1], 3)\nplt.plot(area[:,0][5:-5], y_av[5:-5], lw = 2)\nplt.xlabel('Group Size', fontsize = 18)\nplt.ylabel('Area', fontsize = 18)\nplt.savefig('./groupPredationArea.png')\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
""" Escreva um programa que leia as coordenadas x e y de um ponto R² e calcule sua distância da origem(0,0). """ import math print("Origem = 0") x = int(input("X: ")) y = int(input("Y: ")) aux = (x*x)+(y*y) dist = math.sqrt(aux) print("Distância da origem {:.2f}".format(dist))
normal
{ "blob_id": "69d48bc9ecd0f003d7b22c6fbaa532d28137b38e", "index": 7713, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint('Origem = 0')\n<mask token>\nprint('Distância da origem {:.2f}'.format(dist))\n", "step-3": "<mask token>\nprint('Origem = 0')\nx = int(input('X: '))\ny = int(input('Y: '))\naux = x * x + y * y\ndist = math.sqrt(aux)\nprint('Distância da origem {:.2f}'.format(dist))\n", "step-4": "<mask token>\nimport math\nprint('Origem = 0')\nx = int(input('X: '))\ny = int(input('Y: '))\naux = x * x + y * y\ndist = math.sqrt(aux)\nprint('Distância da origem {:.2f}'.format(dist))\n", "step-5": "\"\"\"\nEscreva um programa que leia as coordenadas x e y de um ponto R² e calcule\nsua distância da origem(0,0).\n\"\"\"\nimport math\n\nprint(\"Origem = 0\")\n\nx = int(input(\"X: \"))\ny = int(input(\"Y: \"))\n\naux = (x*x)+(y*y)\ndist = math.sqrt(aux)\n\n\nprint(\"Distância da origem {:.2f}\".format(dist))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import torch.utils.data import torch import math from util.helpers import * from collections import defaultdict as ddict class _Collate: def __init__(self, ): pass def collate(self, batch): return torch.squeeze(torch.from_numpy(np.array(batch))) class PR: dataset = None eval_data = None model = None device = None most_frequent_rels = None test_data = None train_data = None valid_data = None eval_test_data = None topk = None def init(self, data): self.model = self.model.to(self.device) collate_fn = _Collate() self.eval_loader = torch.utils.data.DataLoader( data, Config.eval_batch_size, shuffle=False, pin_memory=Config.pin_memory, num_workers=Config.loader_num_workers, collate_fn=collate_fn.collate) def count_e1_e2_by_relation(self, data): rel_map = ddict(int) for r in data.keys(): rel_map[r] = len(data[r]) count_pairs_by_relation = rel_map.items() count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda x: -x[1]) return count_pairs_by_relation # computes the position of a tuple for the flattened 1d score matrix def convert_idx_to_1d(self, tuples_r, n=None): if n is None: n = self.model.num_entities pos_1d = [] row_idx, column_idx = tuples_r for i in range(len(row_idx)): pos_1d.append(row_idx[i] * n + column_idx[i]) return pos_1d def evaluate(self, epoch, logger): #prepare data idx_train = ddict(list) for e1, r, e2 in self.train_data: idx_train[r].append((e1, e2)) if self.eval_test_data: idx_valid = ddict(list) for e1, r, e2 in self.valid_data: idx_valid[r].append((e1, e2)) idx_test = ddict(list) for e1, r, e2 in self.test_data: idx_test[r].append((e1, e2)) tuples_by_relation = self.count_e1_e2_by_relation(idx_test) relations = np.array([x[0] for x in tuples_by_relation]) #tuples_count = np.array([x[1] for x in tuples_by_relation]) # speedup grid search if self.most_frequent_rels > 0: print("Evaluating on {} most frequent relations...".format(self.most_frequent_rels)) relations = relations[:self.most_frequent_rels] prepare_test = ddict(list) for e1, r, e2 in self.test_data: prepare_test[r].append([e1, r, e2]) # sorted data prepare_test_sorted = ddict(list) for r in relations: prepare_test_sorted[r].append(prepare_test[r]) eval_data_prepared = [triple_list for r, triple_list in prepare_test_sorted.items()] ranks_by_r = ddict(list) num_true_triples = ddict(list) self.init(eval_data_prepared) for i, batch in enumerate(self.eval_loader): batch = batch.to(self.device) r = None if len(batch.shape) >= 2: r_tensor = batch[0][1] r = batch[0][1].item() else: # only one test triple for a given relation r_tensor = batch[1] r = batch[1].item() print("Evaluating: {} Progress: {}%".format(r, round(i/len(self.eval_loader) * 100, 2))) scores = ddict(list) score_matrix = self.model.score_matrix_r(r_tensor) scores[r].append(score_matrix) # ----- FILTERING ----- # all e1, e2 for a given relation in test, validation data tuples_r_test = np.array(prepare_test_sorted[r][0]) tuples_r_test = [tuples_r_test[:,0], tuples_r_test[:,2]] tuples_r_train = np.array(idx_train[r]) tuples_r_train = [tuples_r_train[:,0], tuples_r_train[:,1]] score_matrix[tuples_r_train] = -math.inf # Filter training set out # Filter validation set out if self.eval_test_data: tuples_r_valid = np.array(idx_valid[r]) if (len(tuples_r_valid) > 0): tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[:, 1]] score_matrix[tuples_r_valid] = -math.inf # ---- /FILTERING ----- test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test) num_true_triples[r] = len(test_tuples_r_1d) test_tuples_r_1d_tensor = 
torch.squeeze(torch.LongTensor([test_tuples_r_1d])) topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor) ranks = topk.cpu().data.numpy() if len(ranks.shape) > 0: ranks = np.sort(ranks) print(ranks) ranks_by_r[r].append(ranks) print("-----------------------") avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples) print("TOTAL MAP: {} ".format(avg_map)) print("TOTAL HITS: {}".format(avg_hits)) # save results if logger is not None: avg_map = round(avg_map, 4) avg_hits = round(avg_hits, 4) logger.log_result(avg_map, avg_hits, epoch, "a") logger.compare_best(avg_map, avg_hits, epoch, "_best", self.model) return avg_map, avg_hits def compute_topk(self, score_matrix, tuples_r_1d): score_matrix = score_matrix.reshape((1, -1)).flatten() if len(score_matrix) > self.topk+1: sorted_k_values, sorted_k_indexs = torch.topk(score_matrix, self.topk, largest=True, sorted=True) other = torch.zeros(len(sorted_k_indexs)).long().to(self.device) tuples_r_1d = tuples_r_1d.to(self.device) if len(tuples_r_1d.size()) > 0: check = [torch.where(sorted_k_indexs == t, sorted_k_indexs, other) for t in tuples_r_1d if len(torch.nonzero(sorted_k_indexs == t)) > 0] else: check = [torch.where(sorted_k_indexs == tuples_r_1d, sorted_k_indexs, other)] ranks = [torch.nonzero(t)+1 for t in check] if len(ranks) == 1: # one or zero elements in ranks ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0] else: ranks = torch.LongTensor(ranks).to(self.device) return ranks def metrics(self, ranks_by_relation, num_true_triples): total_precision = 0 normalization = 0 total_hits = 0 for r, ranks in ranks_by_relation.items(): total_hits += len(ranks[0]) normalization += min(num_true_triples[r], self.topk) for idx, rank in enumerate(ranks[0]): total_precision += (idx + 1) / rank avg_map = (total_precision / normalization) * 100 avg_hits = (total_hits / normalization) * 100 return avg_map, avg_hits @staticmethod def fromConfig(model, dataset): evaluator = PR() if dataset is None: evaluator.dataset = dataset.load() else: evaluator.dataset = dataset evaluator.device = torch.device(Config.eval_device) torch.set_num_threads(Config.num_threads) evaluator.model = model coder = Coder() data_dir = Config.data_dir dataset = Config.dataset train_triples = read_triplets(data_dir + Config.dataset + "/" + Config.raw_split_files['train'], None) train_triples = coder.construct_encoder(train_triples) test_triples = read_triplets(data_dir + dataset + "/" + Config.raw_split_files['test'], coder) test_triples = coder.construct_encoder(test_triples) valid_triples = read_triplets(data_dir + dataset + "/" + Config.raw_split_files['valid'], coder) valid_triples = coder.construct_encoder(valid_triples) evaluator.train_data = train_triples evaluator.eval_test_data = Config.eval_test_data if Config.eval_test_data: # use test set for evaluation, training and validation split for filtering evaluator.test_data = test_triples evaluator.valid_data = valid_triples else: # use validation set for evaluation and training set for filtering evaluator.test_data = valid_triples evaluator.most_frequent_rels = Config.most_frequent_rels evaluator.topk = Config.topk return evaluator
normal
{ "blob_id": "606a6e7ecc58ecbb11aa53602599e671514bc537", "index": 3890, "step-1": "<mask token>\n\n\nclass PR:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def init(self, data):\n self.model = self.model.to(self.device)\n collate_fn = _Collate()\n self.eval_loader = torch.utils.data.DataLoader(data, Config.\n eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,\n num_workers=Config.loader_num_workers, collate_fn=collate_fn.\n collate)\n\n def count_e1_e2_by_relation(self, data):\n rel_map = ddict(int)\n for r in data.keys():\n rel_map[r] = len(data[r])\n count_pairs_by_relation = rel_map.items()\n count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda\n x: -x[1])\n return count_pairs_by_relation\n\n def convert_idx_to_1d(self, tuples_r, n=None):\n if n is None:\n n = self.model.num_entities\n pos_1d = []\n row_idx, column_idx = tuples_r\n for i in range(len(row_idx)):\n pos_1d.append(row_idx[i] * n + column_idx[i])\n return pos_1d\n\n def evaluate(self, epoch, logger):\n idx_train = ddict(list)\n for e1, r, e2 in self.train_data:\n idx_train[r].append((e1, e2))\n if self.eval_test_data:\n idx_valid = ddict(list)\n for e1, r, e2 in self.valid_data:\n idx_valid[r].append((e1, e2))\n idx_test = ddict(list)\n for e1, r, e2 in self.test_data:\n idx_test[r].append((e1, e2))\n tuples_by_relation = self.count_e1_e2_by_relation(idx_test)\n relations = np.array([x[0] for x in tuples_by_relation])\n if self.most_frequent_rels > 0:\n print('Evaluating on {} most frequent relations...'.format(self\n .most_frequent_rels))\n relations = relations[:self.most_frequent_rels]\n prepare_test = ddict(list)\n for e1, r, e2 in self.test_data:\n prepare_test[r].append([e1, r, e2])\n prepare_test_sorted = ddict(list)\n for r in relations:\n prepare_test_sorted[r].append(prepare_test[r])\n eval_data_prepared = [triple_list for r, triple_list in\n prepare_test_sorted.items()]\n ranks_by_r = ddict(list)\n num_true_triples = ddict(list)\n self.init(eval_data_prepared)\n for i, batch in enumerate(self.eval_loader):\n batch = batch.to(self.device)\n r = None\n if len(batch.shape) >= 2:\n r_tensor = batch[0][1]\n r = batch[0][1].item()\n else:\n r_tensor = batch[1]\n r = batch[1].item()\n print('Evaluating: {} Progress: {}%'.format(r, round(i / len(\n self.eval_loader) * 100, 2)))\n scores = ddict(list)\n score_matrix = self.model.score_matrix_r(r_tensor)\n scores[r].append(score_matrix)\n tuples_r_test = np.array(prepare_test_sorted[r][0])\n tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]\n tuples_r_train = np.array(idx_train[r])\n tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]\n score_matrix[tuples_r_train] = -math.inf\n if self.eval_test_data:\n tuples_r_valid = np.array(idx_valid[r])\n if len(tuples_r_valid) > 0:\n tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[\n :, 1]]\n score_matrix[tuples_r_valid] = -math.inf\n test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)\n num_true_triples[r] = len(test_tuples_r_1d)\n test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([\n test_tuples_r_1d]))\n topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)\n ranks = topk.cpu().data.numpy()\n if len(ranks.shape) > 0:\n ranks = np.sort(ranks)\n print(ranks)\n ranks_by_r[r].append(ranks)\n print('-----------------------')\n avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)\n print('TOTAL MAP: {} '.format(avg_map))\n print('TOTAL HITS: 
{}'.format(avg_hits))\n if logger is not None:\n avg_map = round(avg_map, 4)\n avg_hits = round(avg_hits, 4)\n logger.log_result(avg_map, avg_hits, epoch, 'a')\n logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)\n return avg_map, avg_hits\n <mask token>\n\n def metrics(self, ranks_by_relation, num_true_triples):\n total_precision = 0\n normalization = 0\n total_hits = 0\n for r, ranks in ranks_by_relation.items():\n total_hits += len(ranks[0])\n normalization += min(num_true_triples[r], self.topk)\n for idx, rank in enumerate(ranks[0]):\n total_precision += (idx + 1) / rank\n avg_map = total_precision / normalization * 100\n avg_hits = total_hits / normalization * 100\n return avg_map, avg_hits\n\n @staticmethod\n def fromConfig(model, dataset):\n evaluator = PR()\n if dataset is None:\n evaluator.dataset = dataset.load()\n else:\n evaluator.dataset = dataset\n evaluator.device = torch.device(Config.eval_device)\n torch.set_num_threads(Config.num_threads)\n evaluator.model = model\n coder = Coder()\n data_dir = Config.data_dir\n dataset = Config.dataset\n train_triples = read_triplets(data_dir + Config.dataset + '/' +\n Config.raw_split_files['train'], None)\n train_triples = coder.construct_encoder(train_triples)\n test_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['test'], coder)\n test_triples = coder.construct_encoder(test_triples)\n valid_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['valid'], coder)\n valid_triples = coder.construct_encoder(valid_triples)\n evaluator.train_data = train_triples\n evaluator.eval_test_data = Config.eval_test_data\n if Config.eval_test_data:\n evaluator.test_data = test_triples\n evaluator.valid_data = valid_triples\n else:\n evaluator.test_data = valid_triples\n evaluator.most_frequent_rels = Config.most_frequent_rels\n evaluator.topk = Config.topk\n return evaluator\n", "step-2": "<mask token>\n\n\nclass PR:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def init(self, data):\n self.model = self.model.to(self.device)\n collate_fn = _Collate()\n self.eval_loader = torch.utils.data.DataLoader(data, Config.\n eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,\n num_workers=Config.loader_num_workers, collate_fn=collate_fn.\n collate)\n\n def count_e1_e2_by_relation(self, data):\n rel_map = ddict(int)\n for r in data.keys():\n rel_map[r] = len(data[r])\n count_pairs_by_relation = rel_map.items()\n count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda\n x: -x[1])\n return count_pairs_by_relation\n\n def convert_idx_to_1d(self, tuples_r, n=None):\n if n is None:\n n = self.model.num_entities\n pos_1d = []\n row_idx, column_idx = tuples_r\n for i in range(len(row_idx)):\n pos_1d.append(row_idx[i] * n + column_idx[i])\n return pos_1d\n\n def evaluate(self, epoch, logger):\n idx_train = ddict(list)\n for e1, r, e2 in self.train_data:\n idx_train[r].append((e1, e2))\n if self.eval_test_data:\n idx_valid = ddict(list)\n for e1, r, e2 in self.valid_data:\n idx_valid[r].append((e1, e2))\n idx_test = ddict(list)\n for e1, r, e2 in self.test_data:\n idx_test[r].append((e1, e2))\n tuples_by_relation = self.count_e1_e2_by_relation(idx_test)\n relations = np.array([x[0] for x in tuples_by_relation])\n if self.most_frequent_rels > 0:\n print('Evaluating on {} most frequent relations...'.format(self\n .most_frequent_rels))\n relations = 
relations[:self.most_frequent_rels]\n prepare_test = ddict(list)\n for e1, r, e2 in self.test_data:\n prepare_test[r].append([e1, r, e2])\n prepare_test_sorted = ddict(list)\n for r in relations:\n prepare_test_sorted[r].append(prepare_test[r])\n eval_data_prepared = [triple_list for r, triple_list in\n prepare_test_sorted.items()]\n ranks_by_r = ddict(list)\n num_true_triples = ddict(list)\n self.init(eval_data_prepared)\n for i, batch in enumerate(self.eval_loader):\n batch = batch.to(self.device)\n r = None\n if len(batch.shape) >= 2:\n r_tensor = batch[0][1]\n r = batch[0][1].item()\n else:\n r_tensor = batch[1]\n r = batch[1].item()\n print('Evaluating: {} Progress: {}%'.format(r, round(i / len(\n self.eval_loader) * 100, 2)))\n scores = ddict(list)\n score_matrix = self.model.score_matrix_r(r_tensor)\n scores[r].append(score_matrix)\n tuples_r_test = np.array(prepare_test_sorted[r][0])\n tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]\n tuples_r_train = np.array(idx_train[r])\n tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]\n score_matrix[tuples_r_train] = -math.inf\n if self.eval_test_data:\n tuples_r_valid = np.array(idx_valid[r])\n if len(tuples_r_valid) > 0:\n tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[\n :, 1]]\n score_matrix[tuples_r_valid] = -math.inf\n test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)\n num_true_triples[r] = len(test_tuples_r_1d)\n test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([\n test_tuples_r_1d]))\n topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)\n ranks = topk.cpu().data.numpy()\n if len(ranks.shape) > 0:\n ranks = np.sort(ranks)\n print(ranks)\n ranks_by_r[r].append(ranks)\n print('-----------------------')\n avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)\n print('TOTAL MAP: {} '.format(avg_map))\n print('TOTAL HITS: {}'.format(avg_hits))\n if logger is not None:\n avg_map = round(avg_map, 4)\n avg_hits = round(avg_hits, 4)\n logger.log_result(avg_map, avg_hits, epoch, 'a')\n logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)\n return avg_map, avg_hits\n\n def compute_topk(self, score_matrix, tuples_r_1d):\n score_matrix = score_matrix.reshape((1, -1)).flatten()\n if len(score_matrix) > self.topk + 1:\n sorted_k_values, sorted_k_indexs = torch.topk(score_matrix,\n self.topk, largest=True, sorted=True)\n other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)\n tuples_r_1d = tuples_r_1d.to(self.device)\n if len(tuples_r_1d.size()) > 0:\n check = [torch.where(sorted_k_indexs == t, sorted_k_indexs,\n other) for t in tuples_r_1d if len(torch.nonzero(\n sorted_k_indexs == t)) > 0]\n else:\n check = [torch.where(sorted_k_indexs == tuples_r_1d,\n sorted_k_indexs, other)]\n ranks = [(torch.nonzero(t) + 1) for t in check]\n if len(ranks) == 1:\n ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]\n else:\n ranks = torch.LongTensor(ranks).to(self.device)\n return ranks\n\n def metrics(self, ranks_by_relation, num_true_triples):\n total_precision = 0\n normalization = 0\n total_hits = 0\n for r, ranks in ranks_by_relation.items():\n total_hits += len(ranks[0])\n normalization += min(num_true_triples[r], self.topk)\n for idx, rank in enumerate(ranks[0]):\n total_precision += (idx + 1) / rank\n avg_map = total_precision / normalization * 100\n avg_hits = total_hits / normalization * 100\n return avg_map, avg_hits\n\n @staticmethod\n def fromConfig(model, dataset):\n evaluator = PR()\n if dataset is None:\n evaluator.dataset = dataset.load()\n 
else:\n evaluator.dataset = dataset\n evaluator.device = torch.device(Config.eval_device)\n torch.set_num_threads(Config.num_threads)\n evaluator.model = model\n coder = Coder()\n data_dir = Config.data_dir\n dataset = Config.dataset\n train_triples = read_triplets(data_dir + Config.dataset + '/' +\n Config.raw_split_files['train'], None)\n train_triples = coder.construct_encoder(train_triples)\n test_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['test'], coder)\n test_triples = coder.construct_encoder(test_triples)\n valid_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['valid'], coder)\n valid_triples = coder.construct_encoder(valid_triples)\n evaluator.train_data = train_triples\n evaluator.eval_test_data = Config.eval_test_data\n if Config.eval_test_data:\n evaluator.test_data = test_triples\n evaluator.valid_data = valid_triples\n else:\n evaluator.test_data = valid_triples\n evaluator.most_frequent_rels = Config.most_frequent_rels\n evaluator.topk = Config.topk\n return evaluator\n", "step-3": "<mask token>\n\n\nclass PR:\n dataset = None\n eval_data = None\n model = None\n device = None\n most_frequent_rels = None\n test_data = None\n train_data = None\n valid_data = None\n eval_test_data = None\n topk = None\n\n def init(self, data):\n self.model = self.model.to(self.device)\n collate_fn = _Collate()\n self.eval_loader = torch.utils.data.DataLoader(data, Config.\n eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,\n num_workers=Config.loader_num_workers, collate_fn=collate_fn.\n collate)\n\n def count_e1_e2_by_relation(self, data):\n rel_map = ddict(int)\n for r in data.keys():\n rel_map[r] = len(data[r])\n count_pairs_by_relation = rel_map.items()\n count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda\n x: -x[1])\n return count_pairs_by_relation\n\n def convert_idx_to_1d(self, tuples_r, n=None):\n if n is None:\n n = self.model.num_entities\n pos_1d = []\n row_idx, column_idx = tuples_r\n for i in range(len(row_idx)):\n pos_1d.append(row_idx[i] * n + column_idx[i])\n return pos_1d\n\n def evaluate(self, epoch, logger):\n idx_train = ddict(list)\n for e1, r, e2 in self.train_data:\n idx_train[r].append((e1, e2))\n if self.eval_test_data:\n idx_valid = ddict(list)\n for e1, r, e2 in self.valid_data:\n idx_valid[r].append((e1, e2))\n idx_test = ddict(list)\n for e1, r, e2 in self.test_data:\n idx_test[r].append((e1, e2))\n tuples_by_relation = self.count_e1_e2_by_relation(idx_test)\n relations = np.array([x[0] for x in tuples_by_relation])\n if self.most_frequent_rels > 0:\n print('Evaluating on {} most frequent relations...'.format(self\n .most_frequent_rels))\n relations = relations[:self.most_frequent_rels]\n prepare_test = ddict(list)\n for e1, r, e2 in self.test_data:\n prepare_test[r].append([e1, r, e2])\n prepare_test_sorted = ddict(list)\n for r in relations:\n prepare_test_sorted[r].append(prepare_test[r])\n eval_data_prepared = [triple_list for r, triple_list in\n prepare_test_sorted.items()]\n ranks_by_r = ddict(list)\n num_true_triples = ddict(list)\n self.init(eval_data_prepared)\n for i, batch in enumerate(self.eval_loader):\n batch = batch.to(self.device)\n r = None\n if len(batch.shape) >= 2:\n r_tensor = batch[0][1]\n r = batch[0][1].item()\n else:\n r_tensor = batch[1]\n r = batch[1].item()\n print('Evaluating: {} Progress: {}%'.format(r, round(i / len(\n self.eval_loader) * 100, 2)))\n scores = ddict(list)\n score_matrix = self.model.score_matrix_r(r_tensor)\n 
scores[r].append(score_matrix)\n tuples_r_test = np.array(prepare_test_sorted[r][0])\n tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]\n tuples_r_train = np.array(idx_train[r])\n tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]\n score_matrix[tuples_r_train] = -math.inf\n if self.eval_test_data:\n tuples_r_valid = np.array(idx_valid[r])\n if len(tuples_r_valid) > 0:\n tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[\n :, 1]]\n score_matrix[tuples_r_valid] = -math.inf\n test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)\n num_true_triples[r] = len(test_tuples_r_1d)\n test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([\n test_tuples_r_1d]))\n topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)\n ranks = topk.cpu().data.numpy()\n if len(ranks.shape) > 0:\n ranks = np.sort(ranks)\n print(ranks)\n ranks_by_r[r].append(ranks)\n print('-----------------------')\n avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)\n print('TOTAL MAP: {} '.format(avg_map))\n print('TOTAL HITS: {}'.format(avg_hits))\n if logger is not None:\n avg_map = round(avg_map, 4)\n avg_hits = round(avg_hits, 4)\n logger.log_result(avg_map, avg_hits, epoch, 'a')\n logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)\n return avg_map, avg_hits\n\n def compute_topk(self, score_matrix, tuples_r_1d):\n score_matrix = score_matrix.reshape((1, -1)).flatten()\n if len(score_matrix) > self.topk + 1:\n sorted_k_values, sorted_k_indexs = torch.topk(score_matrix,\n self.topk, largest=True, sorted=True)\n other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)\n tuples_r_1d = tuples_r_1d.to(self.device)\n if len(tuples_r_1d.size()) > 0:\n check = [torch.where(sorted_k_indexs == t, sorted_k_indexs,\n other) for t in tuples_r_1d if len(torch.nonzero(\n sorted_k_indexs == t)) > 0]\n else:\n check = [torch.where(sorted_k_indexs == tuples_r_1d,\n sorted_k_indexs, other)]\n ranks = [(torch.nonzero(t) + 1) for t in check]\n if len(ranks) == 1:\n ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]\n else:\n ranks = torch.LongTensor(ranks).to(self.device)\n return ranks\n\n def metrics(self, ranks_by_relation, num_true_triples):\n total_precision = 0\n normalization = 0\n total_hits = 0\n for r, ranks in ranks_by_relation.items():\n total_hits += len(ranks[0])\n normalization += min(num_true_triples[r], self.topk)\n for idx, rank in enumerate(ranks[0]):\n total_precision += (idx + 1) / rank\n avg_map = total_precision / normalization * 100\n avg_hits = total_hits / normalization * 100\n return avg_map, avg_hits\n\n @staticmethod\n def fromConfig(model, dataset):\n evaluator = PR()\n if dataset is None:\n evaluator.dataset = dataset.load()\n else:\n evaluator.dataset = dataset\n evaluator.device = torch.device(Config.eval_device)\n torch.set_num_threads(Config.num_threads)\n evaluator.model = model\n coder = Coder()\n data_dir = Config.data_dir\n dataset = Config.dataset\n train_triples = read_triplets(data_dir + Config.dataset + '/' +\n Config.raw_split_files['train'], None)\n train_triples = coder.construct_encoder(train_triples)\n test_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['test'], coder)\n test_triples = coder.construct_encoder(test_triples)\n valid_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['valid'], coder)\n valid_triples = coder.construct_encoder(valid_triples)\n evaluator.train_data = train_triples\n evaluator.eval_test_data = Config.eval_test_data\n if 
Config.eval_test_data:\n evaluator.test_data = test_triples\n evaluator.valid_data = valid_triples\n else:\n evaluator.test_data = valid_triples\n evaluator.most_frequent_rels = Config.most_frequent_rels\n evaluator.topk = Config.topk\n return evaluator\n", "step-4": "<mask token>\n\n\nclass _Collate:\n <mask token>\n\n def collate(self, batch):\n return torch.squeeze(torch.from_numpy(np.array(batch)))\n\n\nclass PR:\n dataset = None\n eval_data = None\n model = None\n device = None\n most_frequent_rels = None\n test_data = None\n train_data = None\n valid_data = None\n eval_test_data = None\n topk = None\n\n def init(self, data):\n self.model = self.model.to(self.device)\n collate_fn = _Collate()\n self.eval_loader = torch.utils.data.DataLoader(data, Config.\n eval_batch_size, shuffle=False, pin_memory=Config.pin_memory,\n num_workers=Config.loader_num_workers, collate_fn=collate_fn.\n collate)\n\n def count_e1_e2_by_relation(self, data):\n rel_map = ddict(int)\n for r in data.keys():\n rel_map[r] = len(data[r])\n count_pairs_by_relation = rel_map.items()\n count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda\n x: -x[1])\n return count_pairs_by_relation\n\n def convert_idx_to_1d(self, tuples_r, n=None):\n if n is None:\n n = self.model.num_entities\n pos_1d = []\n row_idx, column_idx = tuples_r\n for i in range(len(row_idx)):\n pos_1d.append(row_idx[i] * n + column_idx[i])\n return pos_1d\n\n def evaluate(self, epoch, logger):\n idx_train = ddict(list)\n for e1, r, e2 in self.train_data:\n idx_train[r].append((e1, e2))\n if self.eval_test_data:\n idx_valid = ddict(list)\n for e1, r, e2 in self.valid_data:\n idx_valid[r].append((e1, e2))\n idx_test = ddict(list)\n for e1, r, e2 in self.test_data:\n idx_test[r].append((e1, e2))\n tuples_by_relation = self.count_e1_e2_by_relation(idx_test)\n relations = np.array([x[0] for x in tuples_by_relation])\n if self.most_frequent_rels > 0:\n print('Evaluating on {} most frequent relations...'.format(self\n .most_frequent_rels))\n relations = relations[:self.most_frequent_rels]\n prepare_test = ddict(list)\n for e1, r, e2 in self.test_data:\n prepare_test[r].append([e1, r, e2])\n prepare_test_sorted = ddict(list)\n for r in relations:\n prepare_test_sorted[r].append(prepare_test[r])\n eval_data_prepared = [triple_list for r, triple_list in\n prepare_test_sorted.items()]\n ranks_by_r = ddict(list)\n num_true_triples = ddict(list)\n self.init(eval_data_prepared)\n for i, batch in enumerate(self.eval_loader):\n batch = batch.to(self.device)\n r = None\n if len(batch.shape) >= 2:\n r_tensor = batch[0][1]\n r = batch[0][1].item()\n else:\n r_tensor = batch[1]\n r = batch[1].item()\n print('Evaluating: {} Progress: {}%'.format(r, round(i / len(\n self.eval_loader) * 100, 2)))\n scores = ddict(list)\n score_matrix = self.model.score_matrix_r(r_tensor)\n scores[r].append(score_matrix)\n tuples_r_test = np.array(prepare_test_sorted[r][0])\n tuples_r_test = [tuples_r_test[:, 0], tuples_r_test[:, 2]]\n tuples_r_train = np.array(idx_train[r])\n tuples_r_train = [tuples_r_train[:, 0], tuples_r_train[:, 1]]\n score_matrix[tuples_r_train] = -math.inf\n if self.eval_test_data:\n tuples_r_valid = np.array(idx_valid[r])\n if len(tuples_r_valid) > 0:\n tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[\n :, 1]]\n score_matrix[tuples_r_valid] = -math.inf\n test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)\n num_true_triples[r] = len(test_tuples_r_1d)\n test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([\n test_tuples_r_1d]))\n topk = 
self.compute_topk(score_matrix, test_tuples_r_1d_tensor)\n ranks = topk.cpu().data.numpy()\n if len(ranks.shape) > 0:\n ranks = np.sort(ranks)\n print(ranks)\n ranks_by_r[r].append(ranks)\n print('-----------------------')\n avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)\n print('TOTAL MAP: {} '.format(avg_map))\n print('TOTAL HITS: {}'.format(avg_hits))\n if logger is not None:\n avg_map = round(avg_map, 4)\n avg_hits = round(avg_hits, 4)\n logger.log_result(avg_map, avg_hits, epoch, 'a')\n logger.compare_best(avg_map, avg_hits, epoch, '_best', self.model)\n return avg_map, avg_hits\n\n def compute_topk(self, score_matrix, tuples_r_1d):\n score_matrix = score_matrix.reshape((1, -1)).flatten()\n if len(score_matrix) > self.topk + 1:\n sorted_k_values, sorted_k_indexs = torch.topk(score_matrix,\n self.topk, largest=True, sorted=True)\n other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)\n tuples_r_1d = tuples_r_1d.to(self.device)\n if len(tuples_r_1d.size()) > 0:\n check = [torch.where(sorted_k_indexs == t, sorted_k_indexs,\n other) for t in tuples_r_1d if len(torch.nonzero(\n sorted_k_indexs == t)) > 0]\n else:\n check = [torch.where(sorted_k_indexs == tuples_r_1d,\n sorted_k_indexs, other)]\n ranks = [(torch.nonzero(t) + 1) for t in check]\n if len(ranks) == 1:\n ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]\n else:\n ranks = torch.LongTensor(ranks).to(self.device)\n return ranks\n\n def metrics(self, ranks_by_relation, num_true_triples):\n total_precision = 0\n normalization = 0\n total_hits = 0\n for r, ranks in ranks_by_relation.items():\n total_hits += len(ranks[0])\n normalization += min(num_true_triples[r], self.topk)\n for idx, rank in enumerate(ranks[0]):\n total_precision += (idx + 1) / rank\n avg_map = total_precision / normalization * 100\n avg_hits = total_hits / normalization * 100\n return avg_map, avg_hits\n\n @staticmethod\n def fromConfig(model, dataset):\n evaluator = PR()\n if dataset is None:\n evaluator.dataset = dataset.load()\n else:\n evaluator.dataset = dataset\n evaluator.device = torch.device(Config.eval_device)\n torch.set_num_threads(Config.num_threads)\n evaluator.model = model\n coder = Coder()\n data_dir = Config.data_dir\n dataset = Config.dataset\n train_triples = read_triplets(data_dir + Config.dataset + '/' +\n Config.raw_split_files['train'], None)\n train_triples = coder.construct_encoder(train_triples)\n test_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['test'], coder)\n test_triples = coder.construct_encoder(test_triples)\n valid_triples = read_triplets(data_dir + dataset + '/' + Config.\n raw_split_files['valid'], coder)\n valid_triples = coder.construct_encoder(valid_triples)\n evaluator.train_data = train_triples\n evaluator.eval_test_data = Config.eval_test_data\n if Config.eval_test_data:\n evaluator.test_data = test_triples\n evaluator.valid_data = valid_triples\n else:\n evaluator.test_data = valid_triples\n evaluator.most_frequent_rels = Config.most_frequent_rels\n evaluator.topk = Config.topk\n return evaluator\n", "step-5": "import torch.utils.data\nimport torch\nimport math\nfrom util.helpers import *\nfrom collections import defaultdict as ddict\n\nclass _Collate:\n def __init__(self, ):\n pass\n\n def collate(self, batch):\n return torch.squeeze(torch.from_numpy(np.array(batch)))\n\n\nclass PR:\n dataset = None\n eval_data = None\n model = None\n device = None\n most_frequent_rels = None\n\n test_data = None\n train_data = None\n valid_data = None\n 
eval_test_data = None\n\n topk = None\n\n def init(self, data):\n self.model = self.model.to(self.device)\n collate_fn = _Collate()\n self.eval_loader = torch.utils.data.DataLoader(\n data,\n Config.eval_batch_size, shuffle=False,\n pin_memory=Config.pin_memory, num_workers=Config.loader_num_workers,\n collate_fn=collate_fn.collate)\n\n def count_e1_e2_by_relation(self, data):\n rel_map = ddict(int)\n for r in data.keys():\n rel_map[r] = len(data[r])\n count_pairs_by_relation = rel_map.items()\n count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda x: -x[1])\n return count_pairs_by_relation\n\n # computes the position of a tuple for the flattened 1d score matrix\n def convert_idx_to_1d(self, tuples_r, n=None):\n if n is None:\n n = self.model.num_entities\n pos_1d = []\n row_idx, column_idx = tuples_r\n for i in range(len(row_idx)):\n pos_1d.append(row_idx[i] * n + column_idx[i])\n return pos_1d\n\n\n def evaluate(self, epoch, logger):\n #prepare data\n idx_train = ddict(list)\n for e1, r, e2 in self.train_data:\n idx_train[r].append((e1, e2))\n\n if self.eval_test_data:\n idx_valid = ddict(list)\n for e1, r, e2 in self.valid_data:\n idx_valid[r].append((e1, e2))\n\n idx_test = ddict(list)\n for e1, r, e2 in self.test_data:\n idx_test[r].append((e1, e2))\n\n tuples_by_relation = self.count_e1_e2_by_relation(idx_test)\n\n relations = np.array([x[0] for x in tuples_by_relation])\n #tuples_count = np.array([x[1] for x in tuples_by_relation])\n\n # speedup grid search\n if self.most_frequent_rels > 0:\n print(\"Evaluating on {} most frequent relations...\".format(self.most_frequent_rels))\n relations = relations[:self.most_frequent_rels]\n\n prepare_test = ddict(list)\n for e1, r, e2 in self.test_data:\n prepare_test[r].append([e1, r, e2])\n\n # sorted data\n prepare_test_sorted = ddict(list)\n for r in relations:\n prepare_test_sorted[r].append(prepare_test[r])\n\n eval_data_prepared = [triple_list for r, triple_list in prepare_test_sorted.items()]\n\n ranks_by_r = ddict(list)\n num_true_triples = ddict(list)\n\n\n self.init(eval_data_prepared)\n for i, batch in enumerate(self.eval_loader):\n\n batch = batch.to(self.device)\n r = None\n\n if len(batch.shape) >= 2:\n r_tensor = batch[0][1]\n r = batch[0][1].item()\n\n else:\n # only one test triple for a given relation\n r_tensor = batch[1]\n r = batch[1].item()\n print(\"Evaluating: {} Progress: {}%\".format(r, round(i/len(self.eval_loader) * 100, 2)))\n scores = ddict(list)\n\n score_matrix = self.model.score_matrix_r(r_tensor)\n scores[r].append(score_matrix)\n\n # ----- FILTERING -----\n # all e1, e2 for a given relation in test, validation data\n tuples_r_test = np.array(prepare_test_sorted[r][0])\n tuples_r_test = [tuples_r_test[:,0], tuples_r_test[:,2]]\n\n tuples_r_train = np.array(idx_train[r])\n tuples_r_train = [tuples_r_train[:,0], tuples_r_train[:,1]]\n\n score_matrix[tuples_r_train] = -math.inf # Filter training set out\n\n # Filter validation set out\n if self.eval_test_data:\n tuples_r_valid = np.array(idx_valid[r])\n if (len(tuples_r_valid) > 0):\n tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[:, 1]]\n score_matrix[tuples_r_valid] = -math.inf\n\n # ---- /FILTERING -----\n\n test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)\n num_true_triples[r] = len(test_tuples_r_1d)\n test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([test_tuples_r_1d]))\n topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)\n ranks = topk.cpu().data.numpy()\n if len(ranks.shape) > 0:\n ranks = 
np.sort(ranks)\n print(ranks)\n ranks_by_r[r].append(ranks)\n\n print(\"-----------------------\")\n avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)\n\n print(\"TOTAL MAP: {} \".format(avg_map))\n print(\"TOTAL HITS: {}\".format(avg_hits))\n\n # save results\n if logger is not None:\n avg_map = round(avg_map, 4)\n avg_hits = round(avg_hits, 4)\n logger.log_result(avg_map, avg_hits, epoch, \"a\")\n logger.compare_best(avg_map, avg_hits, epoch, \"_best\", self.model)\n\n return avg_map, avg_hits\n\n\n\n def compute_topk(self, score_matrix, tuples_r_1d):\n score_matrix = score_matrix.reshape((1, -1)).flatten()\n\n if len(score_matrix) > self.topk+1:\n sorted_k_values, sorted_k_indexs = torch.topk(score_matrix, self.topk, largest=True, sorted=True)\n\n other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)\n\n tuples_r_1d = tuples_r_1d.to(self.device)\n\n if len(tuples_r_1d.size()) > 0:\n check = [torch.where(sorted_k_indexs == t, sorted_k_indexs, other) for t in tuples_r_1d if len(torch.nonzero(sorted_k_indexs == t)) > 0]\n else:\n check = [torch.where(sorted_k_indexs == tuples_r_1d, sorted_k_indexs, other)]\n\n ranks = [torch.nonzero(t)+1 for t in check]\n if len(ranks) == 1: # one or zero elements in ranks\n ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]\n else:\n ranks = torch.LongTensor(ranks).to(self.device)\n\n return ranks\n\n\n def metrics(self, ranks_by_relation, num_true_triples):\n total_precision = 0\n normalization = 0\n total_hits = 0\n for r, ranks in ranks_by_relation.items():\n total_hits += len(ranks[0])\n normalization += min(num_true_triples[r], self.topk)\n for idx, rank in enumerate(ranks[0]):\n total_precision += (idx + 1) / rank\n\n avg_map = (total_precision / normalization) * 100\n avg_hits = (total_hits / normalization) * 100\n return avg_map, avg_hits\n\n\n @staticmethod\n def fromConfig(model, dataset):\n evaluator = PR()\n if dataset is None:\n evaluator.dataset = dataset.load()\n else:\n evaluator.dataset = dataset\n\n evaluator.device = torch.device(Config.eval_device)\n\n torch.set_num_threads(Config.num_threads)\n evaluator.model = model\n\n coder = Coder()\n data_dir = Config.data_dir\n dataset = Config.dataset\n train_triples = read_triplets(data_dir + Config.dataset + \"/\" + Config.raw_split_files['train'], None)\n train_triples = coder.construct_encoder(train_triples)\n\n test_triples = read_triplets(data_dir + dataset + \"/\" + Config.raw_split_files['test'], coder)\n test_triples = coder.construct_encoder(test_triples)\n\n valid_triples = read_triplets(data_dir + dataset + \"/\" + Config.raw_split_files['valid'], coder)\n valid_triples = coder.construct_encoder(valid_triples)\n\n\n evaluator.train_data = train_triples\n evaluator.eval_test_data = Config.eval_test_data\n\n if Config.eval_test_data: # use test set for evaluation, training and validation split for filtering\n evaluator.test_data = test_triples\n evaluator.valid_data = valid_triples\n else: # use validation set for evaluation and training set for filtering\n evaluator.test_data = valid_triples\n\n evaluator.most_frequent_rels = Config.most_frequent_rels\n evaluator.topk = Config.topk\n\n return evaluator\n\n\n\n\n \n", "step-ids": [ 7, 8, 9, 11, 14 ] }
[ 7, 8, 9, 11, 14 ]
print("This program calculates whether the year is a leap year or not") year = input("Please enter the Year: ") if year.isdecimal(): year=int(year) if year%4==0 and year%100!=0 or year%400==0: print("{0} is a leap year".format(year)) else: print("{0} is not a leap year".format(year)) else: print("Invalid input")
normal
{ "blob_id": "fdea48b6012b67327aea90e40eacbea5a1930d07", "index": 9688, "step-1": "<mask token>\n", "step-2": "print('This program calculates whether the year is a leap year or not')\n<mask token>\nif year.isdecimal():\n year = int(year)\n if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:\n print('{0} is a leap year'.format(year))\n else:\n print('{0} is not a leap year'.format(year))\nelse:\n print('Invalid input')\n", "step-3": "print('This program calculates whether the year is a leap year or not')\nyear = input('Please enter the Year: ')\nif year.isdecimal():\n year = int(year)\n if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:\n print('{0} is a leap year'.format(year))\n else:\n print('{0} is not a leap year'.format(year))\nelse:\n print('Invalid input')\n", "step-4": "print(\"This program calculates whether the year is a leap year or not\")\nyear = input(\"Please enter the Year: \")\n\nif year.isdecimal():\n year=int(year)\n if year%4==0 and year%100!=0 or year%400==0:\n print(\"{0} is a leap year\".format(year))\n else:\n print(\"{0} is not a leap year\".format(year))\nelse:\n print(\"Invalid input\")", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/python3
print("content-type: text/html")
print()
import subprocess
import cgi
form = cgi.FieldStorage()
osname = form.getvalue("x")
# note: osname comes straight from the request; validate it before interpolating into a shell command in real use
command = "sudo docker stop {}".format(osname)
output = subprocess.getstatusoutput(command)
status = output[0]
info = output[1]
if status == 0:
    print("{} OS is stopped successfully....".format(osname))
else:
    print("some error: {}".format(info))
normal
{ "blob_id": "1d2dae7f1d937bdd9a6044b23f8f1897e61dac23", "index": 6330, "step-1": "<mask token>\n", "step-2": "print('content-type: text/html')\nprint()\n<mask token>\nif status == 0:\n print('{} OS is stopped succesfully....'.format(osname))\nelse:\n print('some error: {}'.format(info))\n", "step-3": "print('content-type: text/html')\nprint()\n<mask token>\nform = cgi.FieldStorage()\nosname = form.getvalue('x')\ncommand = 'sudo docker stop {}'.format(osname)\noutput = subprocess.getstatusoutput(command)\nstatus = output[0]\ninfo = output[1]\nif status == 0:\n print('{} OS is stopped succesfully....'.format(osname))\nelse:\n print('some error: {}'.format(info))\n", "step-4": "print('content-type: text/html')\nprint()\nimport subprocess\nimport cgi\nform = cgi.FieldStorage()\nosname = form.getvalue('x')\ncommand = 'sudo docker stop {}'.format(osname)\noutput = subprocess.getstatusoutput(command)\nstatus = output[0]\ninfo = output[1]\nif status == 0:\n print('{} OS is stopped succesfully....'.format(osname))\nelse:\n print('some error: {}'.format(info))\n", "step-5": "#!/usr/bin/python3\nprint(\"content-type: text/html\")\nprint()\nimport subprocess\nimport cgi\nform=cgi.FieldStorage()\nosname=form.getvalue(\"x\")\ncommand=\"sudo docker stop {}\".format(osname)\noutput=subprocess.getstatusoutput(command)\nstatus=output[0]\ninfo=output[1]\nif status==0:\n print(\"{} OS is stopped succesfully....\".format(osname))\nelse:\n print(\"some error: {}\".format(info))\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
water = 400
milk = 540
coffee = 120
cups = 9
money = 550


def buying():
    global water
    global coffee
    global cups
    global milk
    global money
    choice_coffee = input("What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino, back - to main menu:")
    if choice_coffee == "1":
        if water >= 250 and coffee >= 16 and cups >= 1:
            print("I have enough resources, making you a coffee!")
            water -= 250
            coffee -= 16
            cups -= 1
            money += 4

        coffee_machine()
    elif choice_coffee == "2":
        if water >= 350 and coffee >= 20 and cups >= 1 and milk >= 75:
            print("I have enough resources, making you a coffee!")
            water -= 350
            milk -= 75
            coffee -= 20
            cups -= 1
            money += 7
        elif water < 350:
            print("Sorry, not enough water!")

        coffee_machine()
    elif choice_coffee == "3":
        if water >= 200 and coffee >= 12 and cups >= 1 and milk >= 100:
            print("I have enough resources, making you a coffee!")
            water -= 200
            milk -= 100
            coffee -= 12
            cups -= 1
            money += 6

        coffee_machine()
    elif choice_coffee == "back":
        coffee_machine()


def filling():
    global water
    global coffee
    global cups
    global milk
    water_fill = int(input("Write how many ml of water do you want to add:"))
    milk_fill = int(input("Write how many ml of milk do you want to add:"))
    coffee_fill = int(input("Write how many grams of coffee beans do you want to add:"))
    cups_fill = int(input("Write how many disposable cups of coffee do you want to add:"))
    water += water_fill
    milk += milk_fill
    coffee += coffee_fill
    cups += cups_fill
    coffee_machine()


def taking():
    global money
    print("I gave you $" + str(money))
    money = 0
    coffee_machine()


def stats_print():
    print("The coffee machine has:")
    print(str(water) + " of water")
    print(str(milk) + " of milk")
    print(str(coffee) + " of coffee beans")
    print(str(cups) + " of disposable cups")
    print(str(money) + " of money")


def coffee_machine():
    user_action = input("Write action (buy, fill, take, remaining, exit):")
    if user_action == "buy":
        buying()
    elif user_action == "fill":
        filling()
    elif user_action == "take":
        taking()
    elif user_action == "remaining":
        stats_print()
        coffee_machine()
    elif user_action == "exit":
        return


coffee_machine()
normal
{ "blob_id": "4e98ebd040297cb9472368478452bc484e0aaa04", "index": 3255, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef stats_print():\n print('The coffee machine has:')\n print(str(water) + ' of water')\n print(str(milk) + ' of milk')\n print(str(coffee) + ' of coffee beans')\n print(str(cups) + ' of disposable cups')\n print(str(money) + ' of money')\n\n\ndef coffee_machine():\n user_action = input('Write action (buy, fill, take, remaining, exit):')\n if user_action == 'buy':\n buying()\n elif user_action == 'fill':\n filling()\n elif user_action == 'take':\n taking()\n elif user_action == 'remaining':\n stats_print()\n coffee_machine()\n elif user_action == 'exit':\n return\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef buying():\n global water\n global coffee\n global cups\n global milk\n global money\n choice_coffee = input(\n 'What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino, back - to main menu:'\n )\n if choice_coffee == '1':\n if water > 250 and coffee > 16 and cups > 1:\n print('I have enough resources, making you a coffee!')\n water -= 250\n coffee -= 16\n cups -= 1\n money += 4\n coffee_machine()\n elif choice_coffee == '2':\n if water > 350 and coffee > 16 and cups > 1 and milk > 75:\n print('I have enough resources, making you a coffee!')\n water -= 350\n milk -= 75\n coffee -= 20\n cups -= 1\n money += 7\n elif water < 350:\n print('Sorry, not enough water!')\n coffee_machine()\n elif choice_coffee == '3':\n if water > 200 and coffee > 12 and cups > 1 and milk > 100:\n print('I have enough resources, making you a coffee!')\n water -= 200\n milk -= 100\n coffee -= 12\n cups -= 1\n money += 6\n coffee_machine()\n elif choice_coffee == 'back':\n coffee_machine()\n\n\ndef filling():\n global water\n global coffee\n global cups\n global milk\n water_fill = int(input('Write how many ml of water do you want to add:'))\n milk_fill = int(input('Write how many ml of milk do you want to add:'))\n coffee_fill = int(input(\n 'Write how many grams of coffee beans do you want to add:'))\n cups_fill = int(input(\n 'Write how many disposable cups of coffee do you want to add:'))\n water += water_fill\n milk += milk_fill\n coffee += coffee_fill\n cups += cups_fill\n coffee_machine()\n\n\ndef taking():\n global money\n print('I gave you $' + str(money))\n money = 0\n coffee_machine()\n\n\ndef stats_print():\n print('The coffee machine has:')\n print(str(water) + ' of water')\n print(str(milk) + ' of milk')\n print(str(coffee) + ' of coffee beans')\n print(str(cups) + ' of disposable cups')\n print(str(money) + ' of money')\n\n\ndef coffee_machine():\n user_action = input('Write action (buy, fill, take, remaining, exit):')\n if user_action == 'buy':\n buying()\n elif user_action == 'fill':\n filling()\n elif user_action == 'take':\n taking()\n elif user_action == 'remaining':\n stats_print()\n coffee_machine()\n elif user_action == 'exit':\n return\n\n\ncoffee_machine()\n", "step-4": "water = 400\nmilk = 540\ncoffee = 120\ncups = 9\nmoney = 550\n\n\ndef buying():\n global water\n global coffee\n global cups\n global milk\n global money\n choice_coffee = input(\n 'What do you want to buy? 
1 - espresso, 2 - latte, 3 - cappuccino, back - to main menu:'\n )\n if choice_coffee == '1':\n if water > 250 and coffee > 16 and cups > 1:\n print('I have enough resources, making you a coffee!')\n water -= 250\n coffee -= 16\n cups -= 1\n money += 4\n coffee_machine()\n elif choice_coffee == '2':\n if water > 350 and coffee > 16 and cups > 1 and milk > 75:\n print('I have enough resources, making you a coffee!')\n water -= 350\n milk -= 75\n coffee -= 20\n cups -= 1\n money += 7\n elif water < 350:\n print('Sorry, not enough water!')\n coffee_machine()\n elif choice_coffee == '3':\n if water > 200 and coffee > 12 and cups > 1 and milk > 100:\n print('I have enough resources, making you a coffee!')\n water -= 200\n milk -= 100\n coffee -= 12\n cups -= 1\n money += 6\n coffee_machine()\n elif choice_coffee == 'back':\n coffee_machine()\n\n\ndef filling():\n global water\n global coffee\n global cups\n global milk\n water_fill = int(input('Write how many ml of water do you want to add:'))\n milk_fill = int(input('Write how many ml of milk do you want to add:'))\n coffee_fill = int(input(\n 'Write how many grams of coffee beans do you want to add:'))\n cups_fill = int(input(\n 'Write how many disposable cups of coffee do you want to add:'))\n water += water_fill\n milk += milk_fill\n coffee += coffee_fill\n cups += cups_fill\n coffee_machine()\n\n\ndef taking():\n global money\n print('I gave you $' + str(money))\n money = 0\n coffee_machine()\n\n\ndef stats_print():\n print('The coffee machine has:')\n print(str(water) + ' of water')\n print(str(milk) + ' of milk')\n print(str(coffee) + ' of coffee beans')\n print(str(cups) + ' of disposable cups')\n print(str(money) + ' of money')\n\n\ndef coffee_machine():\n user_action = input('Write action (buy, fill, take, remaining, exit):')\n if user_action == 'buy':\n buying()\n elif user_action == 'fill':\n filling()\n elif user_action == 'take':\n taking()\n elif user_action == 'remaining':\n stats_print()\n coffee_machine()\n elif user_action == 'exit':\n return\n\n\ncoffee_machine()\n", "step-5": "water = 400\r\nmilk = 540\r\ncoffee = 120\r\ncups = 9\r\nmoney = 550\r\n\r\n\r\ndef buying():\r\n global water\r\n global coffee\r\n global cups\r\n global milk\r\n global money\r\n choice_coffee = input(\"What do you want to buy? 
1 - espresso, 2 - latte, 3 - cappuccino, back - to main menu:\")\r\n if choice_coffee == \"1\":\r\n if water > 250 and coffee > 16 and cups > 1:\r\n print(\"I have enough resources, making you a coffee!\")\r\n water -= 250\r\n coffee -= 16\r\n cups -= 1\r\n money += 4\r\n coffee_machine()\r\n elif choice_coffee == \"2\":\r\n if water > 350 and coffee > 16 and cups > 1 and milk > 75:\r\n print(\"I have enough resources, making you a coffee!\")\r\n water -= 350\r\n milk -= 75\r\n coffee -= 20\r\n cups -= 1\r\n money += 7\r\n elif water < 350:\r\n print(\"Sorry, not enough water!\")\r\n coffee_machine()\r\n elif choice_coffee == \"3\":\r\n if water > 200 and coffee > 12 and cups > 1 and milk > 100:\r\n print(\"I have enough resources, making you a coffee!\")\r\n water -= 200\r\n milk -= 100\r\n coffee -= 12\r\n cups -= 1\r\n money += 6\r\n coffee_machine()\r\n elif choice_coffee == \"back\":\r\n coffee_machine()\r\n\r\n\r\ndef filling():\r\n global water\r\n global coffee\r\n global cups\r\n global milk\r\n water_fill = int(input(\"Write how many ml of water do you want to add:\"))\r\n milk_fill = int(input(\"Write how many ml of milk do you want to add:\"))\r\n coffee_fill = int(input(\"Write how many grams of coffee beans do you want to add:\"))\r\n cups_fill = int(input(\"Write how many disposable cups of coffee do you want to add:\"))\r\n water += water_fill\r\n milk += milk_fill\r\n coffee += coffee_fill\r\n cups += cups_fill\r\n coffee_machine()\r\n\r\n\r\ndef taking():\r\n global money\r\n print(\"I gave you $\" + str(money))\r\n money = 0\r\n coffee_machine()\r\n\r\n\r\ndef stats_print():\r\n print(\"The coffee machine has:\")\r\n print(str(water) + \" of water\")\r\n print(str(milk) + \" of milk\")\r\n print(str(coffee) + \" of coffee beans\")\r\n print(str(cups) + \" of disposable cups\")\r\n print(str(money) + \" of money\")\r\n\r\n\r\ndef coffee_machine():\r\n user_action = input(\"Write action (buy, fill, take, remaining, exit):\")\r\n if user_action == \"buy\":\r\n buying()\r\n elif user_action == \"fill\":\r\n filling()\r\n elif user_action == \"take\":\r\n taking()\r\n elif user_action == \"remaining\":\r\n stats_print()\r\n coffee_machine()\r\n elif user_action == \"exit\":\r\n return\r\n\r\n\r\ncoffee_machine()", "step-ids": [ 0, 2, 6, 7, 8 ] }
[ 0, 2, 6, 7, 8 ]
#!/usr/bin/python
import wx

class test(wx.Frame):
    def __init__(self, parent, id):
        wx.Frame.__init__(self, parent, id, "TestFrame", size=(500, 500))

if __name__ == '__main__':
    app = wx.PySimpleApp()
    frame = test(parent=None, id=-1)
    frame.Show()      # wx.Frame uses Show(), not show()
    app.MainLoop()    # the event loop is MainLoop(), not mainloop()
normal
{ "blob_id": "e204cbbf36ac180eba0e95916345088c77bca7c0", "index": 5001, "step-1": "<mask token>\n\n\nclass test(wx.Frame):\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass test(wx.Frame):\n\n def __init__(self, parent, id):\n wx.Frame.__init__(self, parent, id, 'TestFrame', size=(500, 500))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass test(wx.Frame):\n\n def __init__(self, parent, id):\n wx.Frame.__init__(self, parent, id, 'TestFrame', size=(500, 500))\n\n\nif __name__ == '__main__':\n app = wx.PySimpleApp()\n frame = test(parent=None, id=-1)\n frame.show()\n app.mainloop()\n", "step-4": "import wx\n\n\nclass test(wx.Frame):\n\n def __init__(self, parent, id):\n wx.Frame.__init__(self, parent, id, 'TestFrame', size=(500, 500))\n\n\nif __name__ == '__main__':\n app = wx.PySimpleApp()\n frame = test(parent=None, id=-1)\n frame.show()\n app.mainloop()\n", "step-5": "#!/usr/bin/python\nimport wx\n\nclass test(wx.Frame):\n def __init__(self,parent,id):\n wx.Frame.__init__(self,parent,id,\"TestFrame\",size=(500,500))\n\nif __name__ == '__main__':\n app = wx.PySimpleApp()\n frame = test(parent=None,id=-1,)\n frame.show()\n app.mainloop()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
__author__ = 'christopher' import fabio import pyFAI import matplotlib.pyplot as plt from matplotlib.colors import LogNorm from pims.tiff_stack import TiffStack_tifffile as TiffStack from skxray.io.save_powder_output import save_output from xpd_workflow.mask_tools import * geo = pyFAI.load( '/mnt/bulk-data/research_data/USC_beamtime/08-05-2015/2015-08-05/Ni_STD/Ni_PDF_60s-00000.poni') dq = geo.deltaQ((2048, 2048)) q = geo.qArray((2048, 2048)) bins = 8000 # plt.imshow(dq) # plt.show() # AAA # dq_mean = sts.binned_statistic(q.ravel(), dq.ravel(), bins=bins, # range=[0, q.max()], statistic='mean') # dq_median = sts.binned_statistic(q.ravel(), dq.ravel(), bins=bi # range=[0, q.max()], statistic='median') # plt.plot(dq_mean[1][:-1], dq_mean[0]) # plt.plot(dq_median[1][:-1], dq_median[0]) # plt.show() r = geo.qArray((2048, 2048)) nr = r / np.max(r) img = np.sin(nr * np.pi * 3) * np.exp(-10 * nr) ideal_img = dc(img) smax = np.max(img) smin = np.min(img) bad_pixels = [] ''' for i in xrange(np.random.randint(1000, 2000)): x, y = np.random.randint(0, 2048), np.random.randint(0, 2048) if np.random.random() >= .5: img[x, y] = smax * 3 else: img[x, y] = smin * 3 bad_pixels.append([x, y]) ''' plt.imshow(img, vmin=smin, vmax=smax) plt.show() # plt.imshow(idsr - dsr) # plt.show() # ideal_median = sts.binned_statistic(q.ravel(), ideal_img.ravel(), bins=bins, # range=[0, q.max()], statistic='median') # # ideal_mean = sts.binned_statistic(q.ravel(), ideal_img.ravel(), bins=bins, # range=[0, q.max()], statistic='mean') # ideal_std = sts.binned_statistic(q.ravel(), ideal_img.ravel(), bins=bins, # range=[0, q.max()], statistic=np.std) # median = sts.binned_statistic(q.ravel(), img.ravel(), bins=bins, # range=[0, q.max()], statistic='median') # # mean = sts.binned_statistic(q.ravel(), img.ravel(), bins=bins, # range=[0, q.max()], statistic='mean') # std = sts.binned_statistic(q.ravel(), img.ravel(), bins=bins, # range=[0, q.max()], statistic=np.std) # plt.plot(ideal_mean[1][:-1], ideal_mean[0], label='ideal mean') # plt.plot(ideal_median[1][:-1], ideal_median[0], label='ideal median') # plt.plot(ideal_std[1][:-1], ideal_std[0], label='ideal std') # plt.legend() # plt.show() # plt.plot(mean[1][:-1], mean[0], label='mean') # plt.plot(median[1][:-1], median[0], label='median') # # plt.plot(std[1][:-1], std[0], label='ideal std') # plt.legend() # plt.show() perfect_mask = (img - ideal_img) != 0 for i in [10, # 9, 8, 7, 6, 5, 4.5, 4 ]: rbmsk = ring_blur_mask(img, geo, i) print i print 'good mask', np.sum(perfect_mask == rbmsk) print 'under masked', np.sum(perfect_mask > rbmsk) print 'over masked', np.sum(perfect_mask < rbmsk) print # ''' plt.imshow(img, interpolation='none', origin='lower', aspect='auto') for y, x in bad_pixels: plt.plot(x, y, 'ro', mfc='r', mec='r', ms=10) for y, x in zip( np.where(rbmsk != 0)[0], np.where(rbmsk != 0)[1] ): plt.plot(x, y, 'go', mfc='g', mec='g', ms=5) plt.show() # ''' print q[1907, 173], q[173, 1907] _, hist_bins, _ = plt.hist(img[np.where((q > 313.) & (q < 314.))], bins=50) plt.axvline(np.mean(img[np.where((q > 313.) & (q < 314.))]), color='r') plt.axvline(np.mean(img[np.where((q > 313.) & (q < 314.))]) + np.std(img[np.where((q > 313.) & (q < 314.))])) plt.axvline(np.mean(img[np.where((q > 313.) & (q < 314.))]) - np.std(img[np.where((q > 313.) & (q < 314.))])) # plt.hist(img[np.where((q > 287.) & (q < 288.) 
& (rbmsk != 1))], # bins=50 # bins=hist_bins # ) plt.show() ''' mr = dc(q) mr[rbmsk.astype(bool)] = -1 msk_median = sts.binned_statistic(mr.ravel(), img.ravel(), bins=bins, range=[0, mr.max()], statistic='median') msk_mean = sts.binned_statistic(mr.ravel(), img.ravel(), bins=bins, range=[0, mr.max()], statistic='mean') msk_std = sts.binned_statistic(mr.ravel(), img.ravel(), bins=bins, range=[0, mr.max()], statistic=np.std) plt.plot(msk_mean[1][:-1], msk_mean[0], label='mean') plt.plot(msk_median[1][:-1], msk_median[0], label='median') # plt.plot(std[1][:-1], std[0], label='ideal std') plt.legend() plt.show() # '''
normal
{ "blob_id": "50f6bcb4d2223d864cca92778ab3483a2d2c3214", "index": 5283, "step-1": "__author__ = 'christopher'\nimport fabio\nimport pyFAI\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nfrom pims.tiff_stack import TiffStack_tifffile as TiffStack\nfrom skxray.io.save_powder_output import save_output\nfrom xpd_workflow.mask_tools import *\n\ngeo = pyFAI.load(\n '/mnt/bulk-data/research_data/USC_beamtime/08-05-2015/2015-08-05/Ni_STD/Ni_PDF_60s-00000.poni')\ndq = geo.deltaQ((2048, 2048))\nq = geo.qArray((2048, 2048))\nbins = 8000\n# plt.imshow(dq)\n# plt.show()\n# AAA\n# dq_mean = sts.binned_statistic(q.ravel(), dq.ravel(), bins=bins,\n# range=[0, q.max()], statistic='mean')\n# dq_median = sts.binned_statistic(q.ravel(), dq.ravel(), bins=bi\n# range=[0, q.max()], statistic='median')\n# plt.plot(dq_mean[1][:-1], dq_mean[0])\n# plt.plot(dq_median[1][:-1], dq_median[0])\n# plt.show()\nr = geo.qArray((2048, 2048))\nnr = r / np.max(r)\n\nimg = np.sin(nr * np.pi * 3) * np.exp(-10 * nr)\nideal_img = dc(img)\nsmax = np.max(img)\nsmin = np.min(img)\nbad_pixels = []\n'''\nfor i in xrange(np.random.randint(1000, 2000)):\n x, y = np.random.randint(0, 2048), np.random.randint(0, 2048)\n if np.random.random() >= .5:\n img[x, y] = smax * 3\n else:\n img[x, y] = smin * 3\n bad_pixels.append([x, y])\n'''\nplt.imshow(img, vmin=smin, vmax=smax)\nplt.show()\n\n# plt.imshow(idsr - dsr)\n# plt.show()\n# ideal_median = sts.binned_statistic(q.ravel(), ideal_img.ravel(), bins=bins,\n# range=[0, q.max()], statistic='median')\n#\n# ideal_mean = sts.binned_statistic(q.ravel(), ideal_img.ravel(), bins=bins,\n# range=[0, q.max()], statistic='mean')\n# ideal_std = sts.binned_statistic(q.ravel(), ideal_img.ravel(), bins=bins,\n# range=[0, q.max()], statistic=np.std)\n\n\n# median = sts.binned_statistic(q.ravel(), img.ravel(), bins=bins,\n# range=[0, q.max()], statistic='median')\n#\n# mean = sts.binned_statistic(q.ravel(), img.ravel(), bins=bins,\n# range=[0, q.max()], statistic='mean')\n# std = sts.binned_statistic(q.ravel(), img.ravel(), bins=bins,\n# range=[0, q.max()], statistic=np.std)\n\n# plt.plot(ideal_mean[1][:-1], ideal_mean[0], label='ideal mean')\n# plt.plot(ideal_median[1][:-1], ideal_median[0], label='ideal median')\n# plt.plot(ideal_std[1][:-1], ideal_std[0], label='ideal std')\n# plt.legend()\n# plt.show()\n\n# plt.plot(mean[1][:-1], mean[0], label='mean')\n# plt.plot(median[1][:-1], median[0], label='median')\n# # plt.plot(std[1][:-1], std[0], label='ideal std')\n# plt.legend()\n# plt.show()\n\nperfect_mask = (img - ideal_img) != 0\nfor i in [10,\n # 9, 8, 7, 6, 5, 4.5, 4\n ]:\n rbmsk = ring_blur_mask(img, geo, i)\n print i\n print 'good mask', np.sum(perfect_mask == rbmsk)\n print 'under masked', np.sum(perfect_mask > rbmsk)\n print 'over masked', np.sum(perfect_mask < rbmsk)\n print\n# '''\nplt.imshow(img, interpolation='none', origin='lower', aspect='auto')\nfor y, x in bad_pixels:\n plt.plot(x, y, 'ro', mfc='r', mec='r', ms=10)\nfor y, x in zip(\n np.where(rbmsk != 0)[0],\n np.where(rbmsk != 0)[1]\n):\n plt.plot(x, y, 'go', mfc='g', mec='g', ms=5)\nplt.show()\n# '''\nprint q[1907, 173], q[173, 1907]\n\n_, hist_bins, _ = plt.hist(img[np.where((q > 313.) & (q < 314.))], bins=50)\nplt.axvline(np.mean(img[np.where((q > 313.) & (q < 314.))]), color='r')\nplt.axvline(np.mean(img[np.where((q > 313.) & (q < 314.))]) + np.std(img[np.where((q > 313.) & (q < 314.))]))\nplt.axvline(np.mean(img[np.where((q > 313.) & (q < 314.))]) - np.std(img[np.where((q > 313.) 
& (q < 314.))]))\n# plt.hist(img[np.where((q > 287.) & (q < 288.) & (rbmsk != 1))],\n # bins=50\n # bins=hist_bins\n # )\nplt.show()\n'''\nmr = dc(q)\nmr[rbmsk.astype(bool)] = -1\n\nmsk_median = sts.binned_statistic(mr.ravel(), img.ravel(), bins=bins,\n range=[0, mr.max()], statistic='median')\nmsk_mean = sts.binned_statistic(mr.ravel(), img.ravel(), bins=bins,\n range=[0, mr.max()], statistic='mean')\nmsk_std = sts.binned_statistic(mr.ravel(), img.ravel(), bins=bins,\n range=[0, mr.max()], statistic=np.std)\n\nplt.plot(msk_mean[1][:-1], msk_mean[0], label='mean')\nplt.plot(msk_median[1][:-1], msk_median[0], label='median')\n# plt.plot(std[1][:-1], std[0], label='ideal std')\nplt.legend()\nplt.show()\n# '''\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
# coding:utf-8 import pandas as pd import numpy as np import matplotlib.pyplot as plt from multiprocessing import Pool """ 用户id,时间戳,浏览行为数据,浏览子行为编号 """ names = ['userid','time','browser_behavior','browser_behavior_number'] browse_history_train = pd.read_csv("../../pcredit/train/browse_history_train.txt",header=None) browse_history_test = pd.read_csv("../../pcredit/test/browse_history_test.txt",header=None) browse_history = pd.concat([browse_history_train,browse_history_test]) browse_history.columns = names browse_history['browse_count'] = 1 #browse_history = browse_history.head(100) users = list(browse_history.userid.unique()) # 按照时间统计 data = browse_history[['userid','time','browse_count']] t = data.groupby(['userid','time']).agg(sum) t.reset_index(inplace=True) def time_m(u): d = {'userid':u} tu = t[t.userid==u] d['browse_max'] = tu['browse_count'].max() d['browse_min'] = tu['browse_count'].min() d['browse_mean'] = tu['browse_count'].mean() d['browse_median'] = tu['browse_count'].median() d['browse_var'] = tu['browse_count'].var() d['browse_std'] = tu['browse_count'].std() d['browse_count'] = tu['browse_count'].count() d['browse_max_min'] = d['browse_max'] - d['browse_min'] print d return d def multi_time(): pool = Pool(12) rst = pool.map(time_m,users) pool.close() pool.join() Datas = pd.DataFrame(rst) #print Data.head() #Datas.fillna(-9999,inplace=True) print Datas.head() print Datas.shape Datas.to_csv('../data/train/browser_history_time.csv', index=None) # 统计 browser 类别数据 def browser_behavior_u(u): d = {"userid":u} ta = t.loc[t.userid == u, :] d['browser_data_max'] = ta['browse_count'].max() d['browser_data_min'] = ta['browse_count'].min() d['browser_data_mean'] = ta['browse_count'].mean() d['browser_data_median'] = ta['browse_count'].median() d['browser_data_var'] = ta['browse_count'].var() d['browser_data_std'] = ta['browse_count'].std() d['browser_data_count'] = ta['browse_count'].count() d['browser_data_max_min'] = d['browser_data_max'] - d['browser_data_min'] #print ta for b in browser_behavior_tp: try: tb = ta.loc[ta.browser_behavior==b,'browse_count'] d['browser_'+str(b)] = tb.iloc[0] except: d['browser_' + str(b)] = np.NAN print d return d def multi_data(): # 浏览数据统计 data = browse_history[['userid', 'browser_behavior', 'browse_count']] t = data.groupby(['userid', 'browser_behavior']).agg(sum) t.reset_index(inplace=True) browser_behavior_tp = list(data.browser_behavior.unique()) pool = Pool(12) rst = pool.map(browser_behavior_u,users) pool.close() pool.join() Data = pd.DataFrame(rst) #Datas = pd.merge(Datas,Data,on='userid') del Data,rst,t,data def browser_behavior_number_u(u): d = {"userid":u} ta = t.loc[t.userid == u, :] d['browser_behavior_max'] = ta['browse_count'].max() d['browser_behavior_min'] = ta['browse_count'].min() d['browser_behavior_mean'] = ta['browse_count'].mean() d['browser_behavior_median'] = ta['browse_count'].median() d['browser_behavior_var'] = ta['browse_count'].var() d['browser_behavior_std'] = ta['browse_count'].std() d['browser_behavior_count'] = ta['browse_count'].count() d['browser_behavior_max_min'] = d['browser_behavior_max'] - d['browser_behavior_min'] for b in [1,4,5,6,7,8,10]: try: tb = ta.loc[t.browser_behavior_number==b,'browse_count'] d['browser_behavior_number_'+str(b)] = tb.iloc[0] except: d['browser_behavior_number_' + str(b)] = np.NAN print d return d def mult_browse_behavi(): # 子行为统计 data = browse_history[['userid', 'browser_behavior_number', 'browse_count']] t = data.groupby(['userid', 'browser_behavior_number']).agg(sum) 
t.reset_index(inplace=True) pool = Pool(12) rst = pool.map(browser_behavior_number_u,users) pool.close() pool.join() Data = pd.DataFrame(rst) #Datas = pd.merge(Datas,Data,on='userid') del Data,rst,data def merge_browser(): d = pd.read_csv('../data/train/browser_history_time.csv') d1 = pd.read_csv('../data/train/browse_history_stage5.csv') d = pd.merge(d,d1,on='userid') d.fillna(-9999, inplace=True) print d.head(10) print d.shape d.to_csv('../data/train/browser_history_all.csv', index=None) if __name__=='__main__': merge_browser()
normal
{ "blob_id": "e6bd9391a5364e798dfb6d2e9b7b2b98c7b701ac", "index": 6559, "step-1": "# coding:utf-8\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom multiprocessing import Pool\n\n\"\"\"\n 用户id,时间戳,浏览行为数据,浏览子行为编号\n\"\"\"\nnames = ['userid','time','browser_behavior','browser_behavior_number']\nbrowse_history_train = pd.read_csv(\"../../pcredit/train/browse_history_train.txt\",header=None)\nbrowse_history_test = pd.read_csv(\"../../pcredit/test/browse_history_test.txt\",header=None)\n\nbrowse_history = pd.concat([browse_history_train,browse_history_test])\nbrowse_history.columns = names\n\nbrowse_history['browse_count'] = 1\n#browse_history = browse_history.head(100)\nusers = list(browse_history.userid.unique())\n\n\n# 按照时间统计\ndata = browse_history[['userid','time','browse_count']]\n\nt = data.groupby(['userid','time']).agg(sum)\nt.reset_index(inplace=True)\n\ndef time_m(u):\n d = {'userid':u}\n tu = t[t.userid==u]\n d['browse_max'] = tu['browse_count'].max()\n d['browse_min'] = tu['browse_count'].min()\n d['browse_mean'] = tu['browse_count'].mean()\n d['browse_median'] = tu['browse_count'].median()\n d['browse_var'] = tu['browse_count'].var()\n d['browse_std'] = tu['browse_count'].std()\n d['browse_count'] = tu['browse_count'].count()\n d['browse_max_min'] = d['browse_max'] - d['browse_min']\n print d\n return d\n\n\ndef multi_time():\n pool = Pool(12)\n rst = pool.map(time_m,users)\n pool.close()\n pool.join()\n Datas = pd.DataFrame(rst)\n #print Data.head()\n #Datas.fillna(-9999,inplace=True)\n print Datas.head()\n print Datas.shape\n\n Datas.to_csv('../data/train/browser_history_time.csv', index=None)\n\n# 统计 browser 类别数据\ndef browser_behavior_u(u):\n d = {\"userid\":u}\n ta = t.loc[t.userid == u, :]\n d['browser_data_max'] = ta['browse_count'].max()\n d['browser_data_min'] = ta['browse_count'].min()\n d['browser_data_mean'] = ta['browse_count'].mean()\n d['browser_data_median'] = ta['browse_count'].median()\n d['browser_data_var'] = ta['browse_count'].var()\n d['browser_data_std'] = ta['browse_count'].std()\n d['browser_data_count'] = ta['browse_count'].count()\n d['browser_data_max_min'] = d['browser_data_max'] - d['browser_data_min']\n #print ta\n for b in browser_behavior_tp:\n try:\n tb = ta.loc[ta.browser_behavior==b,'browse_count']\n d['browser_'+str(b)] = tb.iloc[0]\n except:\n d['browser_' + str(b)] = np.NAN\n print d\n return d\n\ndef multi_data():\n # 浏览数据统计\n data = browse_history[['userid', 'browser_behavior', 'browse_count']]\n\n t = data.groupby(['userid', 'browser_behavior']).agg(sum)\n t.reset_index(inplace=True)\n\n browser_behavior_tp = list(data.browser_behavior.unique())\n\n pool = Pool(12)\n\n rst = pool.map(browser_behavior_u,users)\n pool.close()\n pool.join()\n Data = pd.DataFrame(rst)\n #Datas = pd.merge(Datas,Data,on='userid')\n del Data,rst,t,data\n\n\ndef browser_behavior_number_u(u):\n d = {\"userid\":u}\n ta = t.loc[t.userid == u, :]\n d['browser_behavior_max'] = ta['browse_count'].max()\n d['browser_behavior_min'] = ta['browse_count'].min()\n d['browser_behavior_mean'] = ta['browse_count'].mean()\n d['browser_behavior_median'] = ta['browse_count'].median()\n d['browser_behavior_var'] = ta['browse_count'].var()\n d['browser_behavior_std'] = ta['browse_count'].std()\n d['browser_behavior_count'] = ta['browse_count'].count()\n d['browser_behavior_max_min'] = d['browser_behavior_max'] - d['browser_behavior_min']\n for b in [1,4,5,6,7,8,10]:\n try:\n tb = ta.loc[t.browser_behavior_number==b,'browse_count']\n 
d['browser_behavior_number_'+str(b)] = tb.iloc[0]\n except:\n d['browser_behavior_number_' + str(b)] = np.NAN\n print d\n return d\n\ndef mult_browse_behavi():\n # 子行为统计\n data = browse_history[['userid', 'browser_behavior_number', 'browse_count']]\n t = data.groupby(['userid', 'browser_behavior_number']).agg(sum)\n t.reset_index(inplace=True)\n\n pool = Pool(12)\n rst = pool.map(browser_behavior_number_u,users)\n pool.close()\n pool.join()\n Data = pd.DataFrame(rst)\n #Datas = pd.merge(Datas,Data,on='userid')\n del Data,rst,data\n\ndef merge_browser():\n d = pd.read_csv('../data/train/browser_history_time.csv')\n d1 = pd.read_csv('../data/train/browse_history_stage5.csv')\n d = pd.merge(d,d1,on='userid')\n d.fillna(-9999, inplace=True)\n print d.head(10)\n print d.shape\n d.to_csv('../data/train/browser_history_all.csv', index=None)\n\nif __name__=='__main__':\n merge_browser()\n\n\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
"""Plugin setup.""" import importlib from qiime2.plugin import ( Plugin, Str, Choices, Int, Bool, Range, Float, Metadata, MetadataColumn, Categorical, Numeric, Citations, ) import q2_micom from q2_micom._formats_and_types import ( SBML, JSON, Pickle, SBMLFormat, SBMLDirectory, JSONFormat, JSONDirectory, CommunityModelFormat, CommunityModelManifest, CommunityModelDirectory, GrowthRates, Fluxes, MicomResultsDirectory, MicomMediumFile, MicomMediumDirectory, MetabolicModels, CommunityModels, MicomResults, MicomMedium, Global, PerSample, TradeoffResults, TradeoffResultsDirectory, REQ_FIELDS, ) from q2_types.feature_data import FeatureData, Taxonomy from q2_types.feature_table import FeatureTable, RelativeFrequency, Frequency citations = Citations.load("citations.bib", package="q2_micom") plugin = Plugin( name="micom", version=q2_micom.__version__, website="https://github.com/micom-dev/q2-micom", package="q2_micom", description=(""), short_description="Plugin for metabolic modeling of microbial communities.", citations=[citations["micom"]], ) plugin.register_formats( SBMLFormat, SBMLDirectory, JSONFormat, JSONDirectory, CommunityModelFormat, CommunityModelManifest, CommunityModelDirectory, GrowthRates, Fluxes, MicomResultsDirectory, MicomMediumFile, MicomMediumDirectory, TradeoffResultsDirectory, ) plugin.register_semantic_types( MetabolicModels, CommunityModels, MicomResults, MicomMedium ) plugin.register_semantic_type_to_format(MetabolicModels[SBML], SBMLDirectory) plugin.register_semantic_type_to_format(MetabolicModels[JSON], JSONDirectory) plugin.register_semantic_type_to_format( CommunityModels[Pickle], CommunityModelDirectory ) plugin.register_semantic_type_to_format(MicomResults, MicomResultsDirectory) plugin.register_semantic_type_to_format(TradeoffResults, TradeoffResultsDirectory) plugin.register_semantic_type_to_format(MicomMedium[Global], MicomMediumDirectory) plugin.register_semantic_type_to_format(MicomMedium[PerSample], MicomMediumDirectory) plugin.methods.register_function( function=q2_micom.db, inputs={}, parameters={ "meta": Metadata, "rank": Str % Choices(q2_micom._build.RANKS), "threads": Int % Range(1, None), }, outputs=[("metabolic_models", MetabolicModels[JSON])], input_descriptions={}, parameter_descriptions={ "meta": ( "Metadata for the individual metabolic models in `folder`. " "Must contain the the following columns: %s." % ", ".join(REQ_FIELDS) ), "rank": "The phylogenetic rank at which to summarize taxa.", "threads": "The number of threads to use when constructing models.", }, output_descriptions={"metabolic_models": "The metabolic model DB."}, name="Build a metabolic model database.", description=( "Constructs pan-genome models summarized to the specified rank " "and bundles the models to be used by MICOM. " "The chosen rank has to be the same you want as when building your " "community models. " "So you may not build genus-level community models with a species " "level database. " "You will only need to run this function if you want to build a " "custom DB. For many use cases downloading the prebuilt AGORA DB " "with the the preferred rank should be sufficient." 
), citations=[ citations["agora"], citations["agora_reply"], citations["micom"], ], ) plugin.methods.register_function( function=q2_micom.build, inputs={ "abundance": FeatureTable[Frequency | RelativeFrequency], "taxonomy": FeatureData[Taxonomy], "models": MetabolicModels[JSON], }, parameters={ "threads": Int % Range(1, None), "cutoff": Float % Range(0.0, 1.0), "strict": Bool, "solver": Str % Choices("auto", "cplex", "osqp", "gurobi"), }, outputs=[("community_models", CommunityModels[Pickle])], input_descriptions={ "abundance": ( "The feature table containing the samples over which beta " "diversity should be computed." ), "taxonomy": "The taxonomy assignments for the ASVs in the table.", "models": "The single taxon model database to use.", }, parameter_descriptions={ "threads": "The number of threads to use when constructing models.", "cutoff": "Taxa with a relative abundance smaller than this will " "be dropped.", "strict": ( "If true will collapse and match on all taxa ranks up to the " "specified rank (so on all higher ranks as well). If false " "(default) will match only on single taxa rank specified before. " "If using the strict option make sure ranks are named the same as in " "the used database." ), "solver": ( "The quadratic and linear programming solver that will be used " "in the models. Will pick an appropriate one by default. " "`cplex` and `gurobi` are commercial solvers with free academic " "licenses and have to be installed manually. See the docs for more info." ), }, output_descriptions={"community_models": "The community models."}, name="Build community models.", description=("Builds the metabolic community models for a set of samples."), citations=[citations["micom"]], ) plugin.methods.register_function( function=q2_micom.minimal_medium, inputs={"models": CommunityModels[Pickle]}, parameters={ "min_growth": Float % Range(0.0, None, inclusive_start=False), "threads": Int % Range(1, None), }, outputs=[("medium", MicomMedium[Global])], input_descriptions={ "models": ( "A collection of metabolic community models. " "This should contain on model for each sample." ), }, parameter_descriptions={ "min_growth": ( "The minimum achievable growth rate for each taxon. " "The returned growth medium enables all taxa to growth " "simultaneously with at least this rate." ), "threads": "The number of threads to use when simulating.", }, output_descriptions={"medium": "The resulting growth medium."}, name="Obtain a minimal growth medium for models.", description=( "Obtains a minimal growth medium for the community models. " "Please note that this medium does not have any biological " "feasibility. If you have any knowledge about metabolites present " "in the environment we recommend you construct the medium by hand." ), citations=[citations["micom"]], ) plugin.methods.register_function( function=q2_micom.grow, inputs={ "models": CommunityModels[Pickle], "medium": MicomMedium[Global | PerSample], }, parameters={ "tradeoff": Float % Range(0.0, 1.0, inclusive_start=False, inclusive_end=True), "strategy": Str % Choices("pFBA", "minimal uptake", "none"), "threads": Int % Range(1, None), }, outputs=[("results", MicomResults)], input_descriptions={ "models": ( "A collection of metabolic community models. " "This should contain on model for each sample." ), "medium": "The growth medium to use.", }, parameter_descriptions={ "tradeoff": ( "The tradeoff parameter. 
This describes the balance " "between maximizing biomass production of the entire " "community and biomass production of individual taxa " '(ergo "egoistic" growth). A value of 1.0 would yield ' "the best biomass production across the community but " "will only allow a few taxa to grow. Smaller values will " "allow more taxa to grow but will sacrifice overall " "biomass. A value of 0.5 (the default) has been shown to " "best reproduce growth rates in the human gut." ), "strategy": ( "The strategy used when choosing the solution in the " "optimal flux space. `minimal uptake` uses the fluxes " "that result in the smallest total uptake from the environment." "`pFBA` uses parsimonious Flux Balance Analysis and thus will choose " "the fluxes with the lowest enzyme requirement for each taxon. " "`none` will return an arbitrary solution from the optimal flux space." ), "threads": "The number of threads to use when simulating.", }, output_descriptions={ "results": "The resulting taxa-level growth rates and metabolic " "exchange fluxes." }, name="Simulate growth for community models.", description=( "Simulates growth for a set of samples. Note that those are " 'sample-specific or "personalized" simulations, so each taxon ' "may have different growth rates and metabolite usage in each sample." ), citations=[citations["micom"]], ) plugin.methods.register_function( function=q2_micom.tradeoff, inputs={ "models": CommunityModels[Pickle], "medium": MicomMedium[Global | PerSample], }, parameters={ "tradeoff_min": Float % Range(0.0, 1.0, inclusive_start=False), "tradeoff_max": Float % Range(0.0, 1.0, inclusive_end=True), "step": Float % Range(0.0, 1.0), "threads": Int, }, outputs=[("results", TradeoffResults)], input_descriptions={ "models": ( "A collection of metabolic community models. " "This should contain on model for each sample." ), "medium": "The growth medium to use.", }, parameter_descriptions={ "tradeoff_min": "The minimum tradeoff parameter to test. This should " "be larger than 0.0 and smaller than 1.0.", "tradeoff_max": "The maximum tradeoff parameter to test. This should " "be larger than 0.0 and smaller than 1.0 and also be" "larger than `tradeoff_min`.", "step": "The tradeoff value step size to use.", "threads": "The number of threads to use when simulating.", }, output_descriptions={ "results": "The resulting taxa-level growth rates for varying " "tradeoff values." }, name="Test a variety of tradeoff values.", description=( "Simulates growth for a set of samples while varying the tradeoff " "between community and taxon biomass production. " "This can be used to characterize a good tradeoff value for a " "specific set of samples. Our study suggested that a good tradeoff " "value is the largest value that allows the majority of taxa in the " "sample to grow." ), citations=[citations["micom"]], ) plugin.methods.register_function( function=q2_micom.filter_models, inputs={"models": CommunityModels[Pickle]}, parameters={"metadata": Metadata, "query": Str, "exclude": Bool}, outputs=[("filtered_models", CommunityModels[Pickle])], input_descriptions={ "models": ( "A collection of metabolic community models. " "This should contain on model for each sample." ) }, parameter_descriptions={ "metadata": "The metadata for the samples to keep or to query.", "query": ( "A pandas query expression to select samples from the metadata. " "This will call `query` on the metadata DataFrame, so you can test " "your query by loading our metadata into a pandas DataFrame." 
), "exclude": ( "If true will use all samples *except* the ones selected " "by metadata and query." ), }, output_descriptions={"filtered_models": "The filtered community models."}, name="Filters models for a chosen set of samples.", description=( "Select a subset of samples and their community models using a list " "of samples or a pandas query expression." ), citations=[citations["micom"]], ) plugin.methods.register_function( function=q2_micom.filter_results, inputs={"results": MicomResults}, parameters={"metadata": Metadata, "query": Str, "exclude": Bool}, outputs=[("filtered_results", MicomResults)], input_descriptions={ "results": ( "A set of MICOM analysis results. " "Contains predicted groath rates and exchange fluxes." ) }, parameter_descriptions={ "metadata": "The metadata for the samples to keep or to query.", "query": ( "A pandas query expression to select samples from the metadata. " "This will call `query` on the metadata DataFrame, so you can test " "your query by loading our metadata into a pandas DataFrame." ), "exclude": ( "If true will use all samples *except* the ones selected " "by metadata and query." ), }, output_descriptions={"filtered_results": "The filtered simulation models."}, name="Filters results for a chosen set of samples.", description=( "Select a subset of samples and their simulation results using a list " "of samples or a pandas query expression." ), citations=[citations["micom"]], ) plugin.visualizers.register_function( function=q2_micom.plot_growth, inputs={"results": MicomResults}, parameters={}, input_descriptions={ "results": ( "A set of MICOM analysis results. " "Contains predicted groath rates and exchange fluxes." ) }, parameter_descriptions={}, name="Plot taxa growth rates.", description=( "Plot predicted growth rates for each taxon in each sample. " "Only points with growing taxa are shown (growth rate sufficiently " "larger than zero)." ), citations=[citations["micom"]], ) plugin.visualizers.register_function( function=q2_micom.exchanges_per_sample, inputs={"results": MicomResults}, parameters={ "direction": Str % Choices("import", "export"), "cluster": Bool, }, input_descriptions={ "results": ( "A set of MICOM analysis results. " "Contains predicted groath rates and exchange fluxes." ) }, parameter_descriptions={ "direction": "The direction of the flux.", "cluster": "Whether to perform clutering on samples and reactions.", }, name="Plot gloabl exchange rates.", description=( "Plot predicted global exchange fluxes for each sample. " "When plotting imports this corresponds to the consumption " "fluxes for each metabolite that is available to the community. " "When plotting export this corresponds to the production fluxes " "for each metabolite." ), citations=[citations["micom"]], ) plugin.visualizers.register_function( function=q2_micom.exchanges_per_taxon, inputs={"results": MicomResults}, parameters={ "direction": Str % Choices("import", "export"), "perplexity": Int % Range(2, None), }, input_descriptions={ "results": ( "A set of MICOM analysis results. " "Contains predicted growth rates and exchange fluxes." ) }, parameter_descriptions={ "direction": "The direction of the flux.", "perplexity": "TSNE parameter. Relates to the number of neighbors used to " "calculate distances. Smaller values preserve more local " "structure and larger values preserve more global structure.", }, name="Plot niche overlap.", description=( "Plot growth or production niches. 
" "The entire set of import or export fluxes for each taxon in each " "sample is reduced onto a single point on a 2D plane." "Taxa that are close to each other either consume similar metabolites " " (imports) or produce similar metabolites (exports)." ), citations=[citations["micom"]], ) plugin.visualizers.register_function( function=q2_micom.plot_tradeoff, inputs={"results": TradeoffResults}, parameters={}, input_descriptions={ "results": ( "A set of MICOM tradeoff analysis results. " "Contains predicted growth rates for each tested tradeoff." ) }, parameter_descriptions={}, name="Plot tradeoff results.", description=( "Plot predicted growth rate distributions for each tradeoff as " "well as the fraction of growing taxa in each sample and tradeoff " "value. For a good tradeoff value one usually tries to find the " "largest tradeoff value that still aloows most taxa to grow." ), citations=[citations["micom"]], ) plugin.visualizers.register_function( function=q2_micom.fit_phenotype, inputs={"results": MicomResults}, parameters={ "metadata": MetadataColumn[Categorical | Numeric], "variable_type": Str % Choices("binary", "continuous"), "flux_type": Str % Choices("import", "production"), "min_coef": Float % Range(0, None), }, input_descriptions={ "results": ( "A set of MICOM analysis results. " "Contains predicted growth rates and exchange fluxes." ), }, parameter_descriptions={ "metadata": "The metadata variable to use.", "variable_type": "The type of the phenotype variable.", "flux_type": "Which fluxes to use.", "min_coef": ( "Only coefficient with absolute values larger than this " "will be shown." ), }, name="Test for differential production", description=( "Test for overall metabolite production differences " "between two groups." ), citations=[citations["micom"]], ) importlib.import_module("q2_micom._transform")
normal
{ "blob_id": "9a6f159d9208ee9e337de7b717e2e25c7e7f9f06", "index": 4277, "step-1": "<mask token>\n", "step-2": "<mask token>\nplugin.register_formats(SBMLFormat, SBMLDirectory, JSONFormat,\n JSONDirectory, CommunityModelFormat, CommunityModelManifest,\n CommunityModelDirectory, GrowthRates, Fluxes, MicomResultsDirectory,\n MicomMediumFile, MicomMediumDirectory, TradeoffResultsDirectory)\nplugin.register_semantic_types(MetabolicModels, CommunityModels,\n MicomResults, MicomMedium)\nplugin.register_semantic_type_to_format(MetabolicModels[SBML], SBMLDirectory)\nplugin.register_semantic_type_to_format(MetabolicModels[JSON], JSONDirectory)\nplugin.register_semantic_type_to_format(CommunityModels[Pickle],\n CommunityModelDirectory)\nplugin.register_semantic_type_to_format(MicomResults, MicomResultsDirectory)\nplugin.register_semantic_type_to_format(TradeoffResults,\n TradeoffResultsDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[Global],\n MicomMediumDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[PerSample],\n MicomMediumDirectory)\nplugin.methods.register_function(function=q2_micom.db, inputs={},\n parameters={'meta': Metadata, 'rank': Str % Choices(q2_micom._build.\n RANKS), 'threads': Int % Range(1, None)}, outputs=[('metabolic_models',\n MetabolicModels[JSON])], input_descriptions={}, parameter_descriptions=\n {'meta': \n 'Metadata for the individual metabolic models in `folder`. Must contain the the following columns: %s.'\n % ', '.join(REQ_FIELDS), 'rank':\n 'The phylogenetic rank at which to summarize taxa.', 'threads':\n 'The number of threads to use when constructing models.'},\n output_descriptions={'metabolic_models': 'The metabolic model DB.'},\n name='Build a metabolic model database.', description=\n 'Constructs pan-genome models summarized to the specified rank and bundles the models to be used by MICOM. The chosen rank has to be the same you want as when building your community models. So you may not build genus-level community models with a species level database. You will only need to run this function if you want to build a custom DB. For many use cases downloading the prebuilt AGORA DB with the the preferred rank should be sufficient.'\n , citations=[citations['agora'], citations['agora_reply'], citations[\n 'micom']])\nplugin.methods.register_function(function=q2_micom.build, inputs={\n 'abundance': FeatureTable[Frequency | RelativeFrequency], 'taxonomy':\n FeatureData[Taxonomy], 'models': MetabolicModels[JSON]}, parameters={\n 'threads': Int % Range(1, None), 'cutoff': Float % Range(0.0, 1.0),\n 'strict': Bool, 'solver': Str % Choices('auto', 'cplex', 'osqp',\n 'gurobi')}, outputs=[('community_models', CommunityModels[Pickle])],\n input_descriptions={'abundance':\n 'The feature table containing the samples over which beta diversity should be computed.'\n , 'taxonomy': 'The taxonomy assignments for the ASVs in the table.',\n 'models': 'The single taxon model database to use.'},\n parameter_descriptions={'threads':\n 'The number of threads to use when constructing models.', 'cutoff':\n 'Taxa with a relative abundance smaller than this will be dropped.',\n 'strict':\n 'If true will collapse and match on all taxa ranks up to the specified rank (so on all higher ranks as well). If false (default) will match only on single taxa rank specified before. If using the strict option make sure ranks are named the same as in the used database.'\n , 'solver':\n 'The quadratic and linear programming solver that will be used in the models. 
Will pick an appropriate one by default. `cplex` and `gurobi` are commercial solvers with free academic licenses and have to be installed manually. See the docs for more info.'\n }, output_descriptions={'community_models': 'The community models.'},\n name='Build community models.', description=\n 'Builds the metabolic community models for a set of samples.',\n citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.minimal_medium, inputs={\n 'models': CommunityModels[Pickle]}, parameters={'min_growth': Float %\n Range(0.0, None, inclusive_start=False), 'threads': Int % Range(1, None\n )}, outputs=[('medium', MicomMedium[Global])], input_descriptions={\n 'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n }, parameter_descriptions={'min_growth':\n 'The minimum achievable growth rate for each taxon. The returned growth medium enables all taxa to growth simultaneously with at least this rate.'\n , 'threads': 'The number of threads to use when simulating.'},\n output_descriptions={'medium': 'The resulting growth medium.'}, name=\n 'Obtain a minimal growth medium for models.', description=\n 'Obtains a minimal growth medium for the community models. Please note that this medium does not have any biological feasibility. If you have any knowledge about metabolites present in the environment we recommend you construct the medium by hand.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.grow, inputs={'models':\n CommunityModels[Pickle], 'medium': MicomMedium[Global | PerSample]},\n parameters={'tradeoff': Float % Range(0.0, 1.0, inclusive_start=False,\n inclusive_end=True), 'strategy': Str % Choices('pFBA', 'minimal uptake',\n 'none'), 'threads': Int % Range(1, None)}, outputs=[('results',\n MicomResults)], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n , 'medium': 'The growth medium to use.'}, parameter_descriptions={\n 'tradeoff':\n 'The tradeoff parameter. This describes the balance between maximizing biomass production of the entire community and biomass production of individual taxa (ergo \"egoistic\" growth). A value of 1.0 would yield the best biomass production across the community but will only allow a few taxa to grow. Smaller values will allow more taxa to grow but will sacrifice overall biomass. A value of 0.5 (the default) has been shown to best reproduce growth rates in the human gut.'\n , 'strategy':\n 'The strategy used when choosing the solution in the optimal flux space. `minimal uptake` uses the fluxes that result in the smallest total uptake from the environment.`pFBA` uses parsimonious Flux Balance Analysis and thus will choose the fluxes with the lowest enzyme requirement for each taxon. `none` will return an arbitrary solution from the optimal flux space.'\n , 'threads': 'The number of threads to use when simulating.'},\n output_descriptions={'results':\n 'The resulting taxa-level growth rates and metabolic exchange fluxes.'},\n name='Simulate growth for community models.', description=\n 'Simulates growth for a set of samples. 
Note that those are sample-specific or \"personalized\" simulations, so each taxon may have different growth rates and metabolite usage in each sample.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.tradeoff, inputs={\n 'models': CommunityModels[Pickle], 'medium': MicomMedium[Global |\n PerSample]}, parameters={'tradeoff_min': Float % Range(0.0, 1.0,\n inclusive_start=False), 'tradeoff_max': Float % Range(0.0, 1.0,\n inclusive_end=True), 'step': Float % Range(0.0, 1.0), 'threads': Int},\n outputs=[('results', TradeoffResults)], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n , 'medium': 'The growth medium to use.'}, parameter_descriptions={\n 'tradeoff_min':\n 'The minimum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0.'\n , 'tradeoff_max':\n 'The maximum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0 and also belarger than `tradeoff_min`.'\n , 'step': 'The tradeoff value step size to use.', 'threads':\n 'The number of threads to use when simulating.'}, output_descriptions={\n 'results':\n 'The resulting taxa-level growth rates for varying tradeoff values.'},\n name='Test a variety of tradeoff values.', description=\n 'Simulates growth for a set of samples while varying the tradeoff between community and taxon biomass production. This can be used to characterize a good tradeoff value for a specific set of samples. Our study suggested that a good tradeoff value is the largest value that allows the majority of taxa in the sample to grow.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.filter_models, inputs={\n 'models': CommunityModels[Pickle]}, parameters={'metadata': Metadata,\n 'query': Str, 'exclude': Bool}, outputs=[('filtered_models',\n CommunityModels[Pickle])], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n }, parameter_descriptions={'metadata':\n 'The metadata for the samples to keep or to query.', 'query':\n 'A pandas query expression to select samples from the metadata. This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'\n , 'exclude':\n 'If true will use all samples *except* the ones selected by metadata and query.'\n }, output_descriptions={'filtered_models':\n 'The filtered community models.'}, name=\n 'Filters models for a chosen set of samples.', description=\n 'Select a subset of samples and their community models using a list of samples or a pandas query expression.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.filter_results, inputs={\n 'results': MicomResults}, parameters={'metadata': Metadata, 'query':\n Str, 'exclude': Bool}, outputs=[('filtered_results', MicomResults)],\n input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={'metadata':\n 'The metadata for the samples to keep or to query.', 'query':\n 'A pandas query expression to select samples from the metadata. 
This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'\n , 'exclude':\n 'If true will use all samples *except* the ones selected by metadata and query.'\n }, output_descriptions={'filtered_results':\n 'The filtered simulation models.'}, name=\n 'Filters results for a chosen set of samples.', description=\n 'Select a subset of samples and their simulation results using a list of samples or a pandas query expression.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.plot_growth, inputs=\n {'results': MicomResults}, parameters={}, input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={}, name='Plot taxa growth rates.',\n description=\n 'Plot predicted growth rates for each taxon in each sample. Only points with growing taxa are shown (growth rate sufficiently larger than zero).'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.exchanges_per_sample,\n inputs={'results': MicomResults}, parameters={'direction': Str %\n Choices('import', 'export'), 'cluster': Bool}, input_descriptions={\n 'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={'direction': 'The direction of the flux.',\n 'cluster': 'Whether to perform clutering on samples and reactions.'},\n name='Plot gloabl exchange rates.', description=\n 'Plot predicted global exchange fluxes for each sample. When plotting imports this corresponds to the consumption fluxes for each metabolite that is available to the community. When plotting export this corresponds to the production fluxes for each metabolite.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.exchanges_per_taxon,\n inputs={'results': MicomResults}, parameters={'direction': Str %\n Choices('import', 'export'), 'perplexity': Int % Range(2, None)},\n input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'\n }, parameter_descriptions={'direction': 'The direction of the flux.',\n 'perplexity':\n 'TSNE parameter. Relates to the number of neighbors used to calculate distances. Smaller values preserve more local structure and larger values preserve more global structure.'\n }, name='Plot niche overlap.', description=\n 'Plot growth or production niches. The entire set of import or export fluxes for each taxon in each sample is reduced onto a single point on a 2D plane.Taxa that are close to each other either consume similar metabolites (imports) or produce similar metabolites (exports).'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.plot_tradeoff,\n inputs={'results': TradeoffResults}, parameters={}, input_descriptions=\n {'results':\n 'A set of MICOM tradeoff analysis results. Contains predicted growth rates for each tested tradeoff.'\n }, parameter_descriptions={}, name='Plot tradeoff results.',\n description=\n 'Plot predicted growth rate distributions for each tradeoff as well as the fraction of growing taxa in each sample and tradeoff value. 
For a good tradeoff value one usually tries to find the largest tradeoff value that still aloows most taxa to grow.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.fit_phenotype,\n inputs={'results': MicomResults}, parameters={'metadata':\n MetadataColumn[Categorical | Numeric], 'variable_type': Str % Choices(\n 'binary', 'continuous'), 'flux_type': Str % Choices('import',\n 'production'), 'min_coef': Float % Range(0, None)}, input_descriptions=\n {'results':\n 'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'\n }, parameter_descriptions={'metadata': 'The metadata variable to use.',\n 'variable_type': 'The type of the phenotype variable.', 'flux_type':\n 'Which fluxes to use.', 'min_coef':\n 'Only coefficient with absolute values larger than this will be shown.'\n }, name='Test for differential production', description=\n 'Test for overall metabolite production differences between two groups.',\n citations=[citations['micom']])\nimportlib.import_module('q2_micom._transform')\n", "step-3": "<mask token>\ncitations = Citations.load('citations.bib', package='q2_micom')\nplugin = Plugin(name='micom', version=q2_micom.__version__, website=\n 'https://github.com/micom-dev/q2-micom', package='q2_micom',\n description='', short_description=\n 'Plugin for metabolic modeling of microbial communities.', citations=[\n citations['micom']])\nplugin.register_formats(SBMLFormat, SBMLDirectory, JSONFormat,\n JSONDirectory, CommunityModelFormat, CommunityModelManifest,\n CommunityModelDirectory, GrowthRates, Fluxes, MicomResultsDirectory,\n MicomMediumFile, MicomMediumDirectory, TradeoffResultsDirectory)\nplugin.register_semantic_types(MetabolicModels, CommunityModels,\n MicomResults, MicomMedium)\nplugin.register_semantic_type_to_format(MetabolicModels[SBML], SBMLDirectory)\nplugin.register_semantic_type_to_format(MetabolicModels[JSON], JSONDirectory)\nplugin.register_semantic_type_to_format(CommunityModels[Pickle],\n CommunityModelDirectory)\nplugin.register_semantic_type_to_format(MicomResults, MicomResultsDirectory)\nplugin.register_semantic_type_to_format(TradeoffResults,\n TradeoffResultsDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[Global],\n MicomMediumDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[PerSample],\n MicomMediumDirectory)\nplugin.methods.register_function(function=q2_micom.db, inputs={},\n parameters={'meta': Metadata, 'rank': Str % Choices(q2_micom._build.\n RANKS), 'threads': Int % Range(1, None)}, outputs=[('metabolic_models',\n MetabolicModels[JSON])], input_descriptions={}, parameter_descriptions=\n {'meta': \n 'Metadata for the individual metabolic models in `folder`. Must contain the the following columns: %s.'\n % ', '.join(REQ_FIELDS), 'rank':\n 'The phylogenetic rank at which to summarize taxa.', 'threads':\n 'The number of threads to use when constructing models.'},\n output_descriptions={'metabolic_models': 'The metabolic model DB.'},\n name='Build a metabolic model database.', description=\n 'Constructs pan-genome models summarized to the specified rank and bundles the models to be used by MICOM. The chosen rank has to be the same you want as when building your community models. So you may not build genus-level community models with a species level database. You will only need to run this function if you want to build a custom DB. 
For many use cases downloading the prebuilt AGORA DB with the the preferred rank should be sufficient.'\n , citations=[citations['agora'], citations['agora_reply'], citations[\n 'micom']])\nplugin.methods.register_function(function=q2_micom.build, inputs={\n 'abundance': FeatureTable[Frequency | RelativeFrequency], 'taxonomy':\n FeatureData[Taxonomy], 'models': MetabolicModels[JSON]}, parameters={\n 'threads': Int % Range(1, None), 'cutoff': Float % Range(0.0, 1.0),\n 'strict': Bool, 'solver': Str % Choices('auto', 'cplex', 'osqp',\n 'gurobi')}, outputs=[('community_models', CommunityModels[Pickle])],\n input_descriptions={'abundance':\n 'The feature table containing the samples over which beta diversity should be computed.'\n , 'taxonomy': 'The taxonomy assignments for the ASVs in the table.',\n 'models': 'The single taxon model database to use.'},\n parameter_descriptions={'threads':\n 'The number of threads to use when constructing models.', 'cutoff':\n 'Taxa with a relative abundance smaller than this will be dropped.',\n 'strict':\n 'If true will collapse and match on all taxa ranks up to the specified rank (so on all higher ranks as well). If false (default) will match only on single taxa rank specified before. If using the strict option make sure ranks are named the same as in the used database.'\n , 'solver':\n 'The quadratic and linear programming solver that will be used in the models. Will pick an appropriate one by default. `cplex` and `gurobi` are commercial solvers with free academic licenses and have to be installed manually. See the docs for more info.'\n }, output_descriptions={'community_models': 'The community models.'},\n name='Build community models.', description=\n 'Builds the metabolic community models for a set of samples.',\n citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.minimal_medium, inputs={\n 'models': CommunityModels[Pickle]}, parameters={'min_growth': Float %\n Range(0.0, None, inclusive_start=False), 'threads': Int % Range(1, None\n )}, outputs=[('medium', MicomMedium[Global])], input_descriptions={\n 'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n }, parameter_descriptions={'min_growth':\n 'The minimum achievable growth rate for each taxon. The returned growth medium enables all taxa to growth simultaneously with at least this rate.'\n , 'threads': 'The number of threads to use when simulating.'},\n output_descriptions={'medium': 'The resulting growth medium.'}, name=\n 'Obtain a minimal growth medium for models.', description=\n 'Obtains a minimal growth medium for the community models. Please note that this medium does not have any biological feasibility. If you have any knowledge about metabolites present in the environment we recommend you construct the medium by hand.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.grow, inputs={'models':\n CommunityModels[Pickle], 'medium': MicomMedium[Global | PerSample]},\n parameters={'tradeoff': Float % Range(0.0, 1.0, inclusive_start=False,\n inclusive_end=True), 'strategy': Str % Choices('pFBA', 'minimal uptake',\n 'none'), 'threads': Int % Range(1, None)}, outputs=[('results',\n MicomResults)], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n , 'medium': 'The growth medium to use.'}, parameter_descriptions={\n 'tradeoff':\n 'The tradeoff parameter. 
This describes the balance between maximizing biomass production of the entire community and biomass production of individual taxa (ergo \"egoistic\" growth). A value of 1.0 would yield the best biomass production across the community but will only allow a few taxa to grow. Smaller values will allow more taxa to grow but will sacrifice overall biomass. A value of 0.5 (the default) has been shown to best reproduce growth rates in the human gut.'\n , 'strategy':\n 'The strategy used when choosing the solution in the optimal flux space. `minimal uptake` uses the fluxes that result in the smallest total uptake from the environment.`pFBA` uses parsimonious Flux Balance Analysis and thus will choose the fluxes with the lowest enzyme requirement for each taxon. `none` will return an arbitrary solution from the optimal flux space.'\n , 'threads': 'The number of threads to use when simulating.'},\n output_descriptions={'results':\n 'The resulting taxa-level growth rates and metabolic exchange fluxes.'},\n name='Simulate growth for community models.', description=\n 'Simulates growth for a set of samples. Note that those are sample-specific or \"personalized\" simulations, so each taxon may have different growth rates and metabolite usage in each sample.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.tradeoff, inputs={\n 'models': CommunityModels[Pickle], 'medium': MicomMedium[Global |\n PerSample]}, parameters={'tradeoff_min': Float % Range(0.0, 1.0,\n inclusive_start=False), 'tradeoff_max': Float % Range(0.0, 1.0,\n inclusive_end=True), 'step': Float % Range(0.0, 1.0), 'threads': Int},\n outputs=[('results', TradeoffResults)], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n , 'medium': 'The growth medium to use.'}, parameter_descriptions={\n 'tradeoff_min':\n 'The minimum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0.'\n , 'tradeoff_max':\n 'The maximum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0 and also belarger than `tradeoff_min`.'\n , 'step': 'The tradeoff value step size to use.', 'threads':\n 'The number of threads to use when simulating.'}, output_descriptions={\n 'results':\n 'The resulting taxa-level growth rates for varying tradeoff values.'},\n name='Test a variety of tradeoff values.', description=\n 'Simulates growth for a set of samples while varying the tradeoff between community and taxon biomass production. This can be used to characterize a good tradeoff value for a specific set of samples. Our study suggested that a good tradeoff value is the largest value that allows the majority of taxa in the sample to grow.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.filter_models, inputs={\n 'models': CommunityModels[Pickle]}, parameters={'metadata': Metadata,\n 'query': Str, 'exclude': Bool}, outputs=[('filtered_models',\n CommunityModels[Pickle])], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n }, parameter_descriptions={'metadata':\n 'The metadata for the samples to keep or to query.', 'query':\n 'A pandas query expression to select samples from the metadata. 
This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'\n , 'exclude':\n 'If true will use all samples *except* the ones selected by metadata and query.'\n }, output_descriptions={'filtered_models':\n 'The filtered community models.'}, name=\n 'Filters models for a chosen set of samples.', description=\n 'Select a subset of samples and their community models using a list of samples or a pandas query expression.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.filter_results, inputs={\n 'results': MicomResults}, parameters={'metadata': Metadata, 'query':\n Str, 'exclude': Bool}, outputs=[('filtered_results', MicomResults)],\n input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={'metadata':\n 'The metadata for the samples to keep or to query.', 'query':\n 'A pandas query expression to select samples from the metadata. This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'\n , 'exclude':\n 'If true will use all samples *except* the ones selected by metadata and query.'\n }, output_descriptions={'filtered_results':\n 'The filtered simulation models.'}, name=\n 'Filters results for a chosen set of samples.', description=\n 'Select a subset of samples and their simulation results using a list of samples or a pandas query expression.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.plot_growth, inputs=\n {'results': MicomResults}, parameters={}, input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={}, name='Plot taxa growth rates.',\n description=\n 'Plot predicted growth rates for each taxon in each sample. Only points with growing taxa are shown (growth rate sufficiently larger than zero).'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.exchanges_per_sample,\n inputs={'results': MicomResults}, parameters={'direction': Str %\n Choices('import', 'export'), 'cluster': Bool}, input_descriptions={\n 'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={'direction': 'The direction of the flux.',\n 'cluster': 'Whether to perform clutering on samples and reactions.'},\n name='Plot gloabl exchange rates.', description=\n 'Plot predicted global exchange fluxes for each sample. When plotting imports this corresponds to the consumption fluxes for each metabolite that is available to the community. When plotting export this corresponds to the production fluxes for each metabolite.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.exchanges_per_taxon,\n inputs={'results': MicomResults}, parameters={'direction': Str %\n Choices('import', 'export'), 'perplexity': Int % Range(2, None)},\n input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'\n }, parameter_descriptions={'direction': 'The direction of the flux.',\n 'perplexity':\n 'TSNE parameter. Relates to the number of neighbors used to calculate distances. 
Smaller values preserve more local structure and larger values preserve more global structure.'\n }, name='Plot niche overlap.', description=\n 'Plot growth or production niches. The entire set of import or export fluxes for each taxon in each sample is reduced onto a single point on a 2D plane.Taxa that are close to each other either consume similar metabolites (imports) or produce similar metabolites (exports).'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.plot_tradeoff,\n inputs={'results': TradeoffResults}, parameters={}, input_descriptions=\n {'results':\n 'A set of MICOM tradeoff analysis results. Contains predicted growth rates for each tested tradeoff.'\n }, parameter_descriptions={}, name='Plot tradeoff results.',\n description=\n 'Plot predicted growth rate distributions for each tradeoff as well as the fraction of growing taxa in each sample and tradeoff value. For a good tradeoff value one usually tries to find the largest tradeoff value that still aloows most taxa to grow.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.fit_phenotype,\n inputs={'results': MicomResults}, parameters={'metadata':\n MetadataColumn[Categorical | Numeric], 'variable_type': Str % Choices(\n 'binary', 'continuous'), 'flux_type': Str % Choices('import',\n 'production'), 'min_coef': Float % Range(0, None)}, input_descriptions=\n {'results':\n 'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'\n }, parameter_descriptions={'metadata': 'The metadata variable to use.',\n 'variable_type': 'The type of the phenotype variable.', 'flux_type':\n 'Which fluxes to use.', 'min_coef':\n 'Only coefficient with absolute values larger than this will be shown.'\n }, name='Test for differential production', description=\n 'Test for overall metabolite production differences between two groups.',\n citations=[citations['micom']])\nimportlib.import_module('q2_micom._transform')\n", "step-4": "<mask token>\nimport importlib\nfrom qiime2.plugin import Plugin, Str, Choices, Int, Bool, Range, Float, Metadata, MetadataColumn, Categorical, Numeric, Citations\nimport q2_micom\nfrom q2_micom._formats_and_types import SBML, JSON, Pickle, SBMLFormat, SBMLDirectory, JSONFormat, JSONDirectory, CommunityModelFormat, CommunityModelManifest, CommunityModelDirectory, GrowthRates, Fluxes, MicomResultsDirectory, MicomMediumFile, MicomMediumDirectory, MetabolicModels, CommunityModels, MicomResults, MicomMedium, Global, PerSample, TradeoffResults, TradeoffResultsDirectory, REQ_FIELDS\nfrom q2_types.feature_data import FeatureData, Taxonomy\nfrom q2_types.feature_table import FeatureTable, RelativeFrequency, Frequency\ncitations = Citations.load('citations.bib', package='q2_micom')\nplugin = Plugin(name='micom', version=q2_micom.__version__, website=\n 'https://github.com/micom-dev/q2-micom', package='q2_micom',\n description='', short_description=\n 'Plugin for metabolic modeling of microbial communities.', citations=[\n citations['micom']])\nplugin.register_formats(SBMLFormat, SBMLDirectory, JSONFormat,\n JSONDirectory, CommunityModelFormat, CommunityModelManifest,\n CommunityModelDirectory, GrowthRates, Fluxes, MicomResultsDirectory,\n MicomMediumFile, MicomMediumDirectory, TradeoffResultsDirectory)\nplugin.register_semantic_types(MetabolicModels, CommunityModels,\n MicomResults, MicomMedium)\nplugin.register_semantic_type_to_format(MetabolicModels[SBML], 
SBMLDirectory)\nplugin.register_semantic_type_to_format(MetabolicModels[JSON], JSONDirectory)\nplugin.register_semantic_type_to_format(CommunityModels[Pickle],\n CommunityModelDirectory)\nplugin.register_semantic_type_to_format(MicomResults, MicomResultsDirectory)\nplugin.register_semantic_type_to_format(TradeoffResults,\n TradeoffResultsDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[Global],\n MicomMediumDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[PerSample],\n MicomMediumDirectory)\nplugin.methods.register_function(function=q2_micom.db, inputs={},\n parameters={'meta': Metadata, 'rank': Str % Choices(q2_micom._build.\n RANKS), 'threads': Int % Range(1, None)}, outputs=[('metabolic_models',\n MetabolicModels[JSON])], input_descriptions={}, parameter_descriptions=\n {'meta': \n 'Metadata for the individual metabolic models in `folder`. Must contain the the following columns: %s.'\n % ', '.join(REQ_FIELDS), 'rank':\n 'The phylogenetic rank at which to summarize taxa.', 'threads':\n 'The number of threads to use when constructing models.'},\n output_descriptions={'metabolic_models': 'The metabolic model DB.'},\n name='Build a metabolic model database.', description=\n 'Constructs pan-genome models summarized to the specified rank and bundles the models to be used by MICOM. The chosen rank has to be the same you want as when building your community models. So you may not build genus-level community models with a species level database. You will only need to run this function if you want to build a custom DB. For many use cases downloading the prebuilt AGORA DB with the the preferred rank should be sufficient.'\n , citations=[citations['agora'], citations['agora_reply'], citations[\n 'micom']])\nplugin.methods.register_function(function=q2_micom.build, inputs={\n 'abundance': FeatureTable[Frequency | RelativeFrequency], 'taxonomy':\n FeatureData[Taxonomy], 'models': MetabolicModels[JSON]}, parameters={\n 'threads': Int % Range(1, None), 'cutoff': Float % Range(0.0, 1.0),\n 'strict': Bool, 'solver': Str % Choices('auto', 'cplex', 'osqp',\n 'gurobi')}, outputs=[('community_models', CommunityModels[Pickle])],\n input_descriptions={'abundance':\n 'The feature table containing the samples over which beta diversity should be computed.'\n , 'taxonomy': 'The taxonomy assignments for the ASVs in the table.',\n 'models': 'The single taxon model database to use.'},\n parameter_descriptions={'threads':\n 'The number of threads to use when constructing models.', 'cutoff':\n 'Taxa with a relative abundance smaller than this will be dropped.',\n 'strict':\n 'If true will collapse and match on all taxa ranks up to the specified rank (so on all higher ranks as well). If false (default) will match only on single taxa rank specified before. If using the strict option make sure ranks are named the same as in the used database.'\n , 'solver':\n 'The quadratic and linear programming solver that will be used in the models. Will pick an appropriate one by default. `cplex` and `gurobi` are commercial solvers with free academic licenses and have to be installed manually. 
See the docs for more info.'\n }, output_descriptions={'community_models': 'The community models.'},\n name='Build community models.', description=\n 'Builds the metabolic community models for a set of samples.',\n citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.minimal_medium, inputs={\n 'models': CommunityModels[Pickle]}, parameters={'min_growth': Float %\n Range(0.0, None, inclusive_start=False), 'threads': Int % Range(1, None\n )}, outputs=[('medium', MicomMedium[Global])], input_descriptions={\n 'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n }, parameter_descriptions={'min_growth':\n 'The minimum achievable growth rate for each taxon. The returned growth medium enables all taxa to growth simultaneously with at least this rate.'\n , 'threads': 'The number of threads to use when simulating.'},\n output_descriptions={'medium': 'The resulting growth medium.'}, name=\n 'Obtain a minimal growth medium for models.', description=\n 'Obtains a minimal growth medium for the community models. Please note that this medium does not have any biological feasibility. If you have any knowledge about metabolites present in the environment we recommend you construct the medium by hand.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.grow, inputs={'models':\n CommunityModels[Pickle], 'medium': MicomMedium[Global | PerSample]},\n parameters={'tradeoff': Float % Range(0.0, 1.0, inclusive_start=False,\n inclusive_end=True), 'strategy': Str % Choices('pFBA', 'minimal uptake',\n 'none'), 'threads': Int % Range(1, None)}, outputs=[('results',\n MicomResults)], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n , 'medium': 'The growth medium to use.'}, parameter_descriptions={\n 'tradeoff':\n 'The tradeoff parameter. This describes the balance between maximizing biomass production of the entire community and biomass production of individual taxa (ergo \"egoistic\" growth). A value of 1.0 would yield the best biomass production across the community but will only allow a few taxa to grow. Smaller values will allow more taxa to grow but will sacrifice overall biomass. A value of 0.5 (the default) has been shown to best reproduce growth rates in the human gut.'\n , 'strategy':\n 'The strategy used when choosing the solution in the optimal flux space. `minimal uptake` uses the fluxes that result in the smallest total uptake from the environment.`pFBA` uses parsimonious Flux Balance Analysis and thus will choose the fluxes with the lowest enzyme requirement for each taxon. `none` will return an arbitrary solution from the optimal flux space.'\n , 'threads': 'The number of threads to use when simulating.'},\n output_descriptions={'results':\n 'The resulting taxa-level growth rates and metabolic exchange fluxes.'},\n name='Simulate growth for community models.', description=\n 'Simulates growth for a set of samples. 
Note that those are sample-specific or \"personalized\" simulations, so each taxon may have different growth rates and metabolite usage in each sample.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.tradeoff, inputs={\n 'models': CommunityModels[Pickle], 'medium': MicomMedium[Global |\n PerSample]}, parameters={'tradeoff_min': Float % Range(0.0, 1.0,\n inclusive_start=False), 'tradeoff_max': Float % Range(0.0, 1.0,\n inclusive_end=True), 'step': Float % Range(0.0, 1.0), 'threads': Int},\n outputs=[('results', TradeoffResults)], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n , 'medium': 'The growth medium to use.'}, parameter_descriptions={\n 'tradeoff_min':\n 'The minimum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0.'\n , 'tradeoff_max':\n 'The maximum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0 and also belarger than `tradeoff_min`.'\n , 'step': 'The tradeoff value step size to use.', 'threads':\n 'The number of threads to use when simulating.'}, output_descriptions={\n 'results':\n 'The resulting taxa-level growth rates for varying tradeoff values.'},\n name='Test a variety of tradeoff values.', description=\n 'Simulates growth for a set of samples while varying the tradeoff between community and taxon biomass production. This can be used to characterize a good tradeoff value for a specific set of samples. Our study suggested that a good tradeoff value is the largest value that allows the majority of taxa in the sample to grow.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.filter_models, inputs={\n 'models': CommunityModels[Pickle]}, parameters={'metadata': Metadata,\n 'query': Str, 'exclude': Bool}, outputs=[('filtered_models',\n CommunityModels[Pickle])], input_descriptions={'models':\n 'A collection of metabolic community models. This should contain on model for each sample.'\n }, parameter_descriptions={'metadata':\n 'The metadata for the samples to keep or to query.', 'query':\n 'A pandas query expression to select samples from the metadata. This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'\n , 'exclude':\n 'If true will use all samples *except* the ones selected by metadata and query.'\n }, output_descriptions={'filtered_models':\n 'The filtered community models.'}, name=\n 'Filters models for a chosen set of samples.', description=\n 'Select a subset of samples and their community models using a list of samples or a pandas query expression.'\n , citations=[citations['micom']])\nplugin.methods.register_function(function=q2_micom.filter_results, inputs={\n 'results': MicomResults}, parameters={'metadata': Metadata, 'query':\n Str, 'exclude': Bool}, outputs=[('filtered_results', MicomResults)],\n input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={'metadata':\n 'The metadata for the samples to keep or to query.', 'query':\n 'A pandas query expression to select samples from the metadata. 
This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'\n , 'exclude':\n 'If true will use all samples *except* the ones selected by metadata and query.'\n }, output_descriptions={'filtered_results':\n 'The filtered simulation models.'}, name=\n 'Filters results for a chosen set of samples.', description=\n 'Select a subset of samples and their simulation results using a list of samples or a pandas query expression.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.plot_growth, inputs=\n {'results': MicomResults}, parameters={}, input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={}, name='Plot taxa growth rates.',\n description=\n 'Plot predicted growth rates for each taxon in each sample. Only points with growing taxa are shown (growth rate sufficiently larger than zero).'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.exchanges_per_sample,\n inputs={'results': MicomResults}, parameters={'direction': Str %\n Choices('import', 'export'), 'cluster': Bool}, input_descriptions={\n 'results':\n 'A set of MICOM analysis results. Contains predicted groath rates and exchange fluxes.'\n }, parameter_descriptions={'direction': 'The direction of the flux.',\n 'cluster': 'Whether to perform clutering on samples and reactions.'},\n name='Plot gloabl exchange rates.', description=\n 'Plot predicted global exchange fluxes for each sample. When plotting imports this corresponds to the consumption fluxes for each metabolite that is available to the community. When plotting export this corresponds to the production fluxes for each metabolite.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.exchanges_per_taxon,\n inputs={'results': MicomResults}, parameters={'direction': Str %\n Choices('import', 'export'), 'perplexity': Int % Range(2, None)},\n input_descriptions={'results':\n 'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'\n }, parameter_descriptions={'direction': 'The direction of the flux.',\n 'perplexity':\n 'TSNE parameter. Relates to the number of neighbors used to calculate distances. Smaller values preserve more local structure and larger values preserve more global structure.'\n }, name='Plot niche overlap.', description=\n 'Plot growth or production niches. The entire set of import or export fluxes for each taxon in each sample is reduced onto a single point on a 2D plane.Taxa that are close to each other either consume similar metabolites (imports) or produce similar metabolites (exports).'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.plot_tradeoff,\n inputs={'results': TradeoffResults}, parameters={}, input_descriptions=\n {'results':\n 'A set of MICOM tradeoff analysis results. Contains predicted growth rates for each tested tradeoff.'\n }, parameter_descriptions={}, name='Plot tradeoff results.',\n description=\n 'Plot predicted growth rate distributions for each tradeoff as well as the fraction of growing taxa in each sample and tradeoff value. 
For a good tradeoff value one usually tries to find the largest tradeoff value that still aloows most taxa to grow.'\n , citations=[citations['micom']])\nplugin.visualizers.register_function(function=q2_micom.fit_phenotype,\n inputs={'results': MicomResults}, parameters={'metadata':\n MetadataColumn[Categorical | Numeric], 'variable_type': Str % Choices(\n 'binary', 'continuous'), 'flux_type': Str % Choices('import',\n 'production'), 'min_coef': Float % Range(0, None)}, input_descriptions=\n {'results':\n 'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'\n }, parameter_descriptions={'metadata': 'The metadata variable to use.',\n 'variable_type': 'The type of the phenotype variable.', 'flux_type':\n 'Which fluxes to use.', 'min_coef':\n 'Only coefficient with absolute values larger than this will be shown.'\n }, name='Test for differential production', description=\n 'Test for overall metabolite production differences between two groups.',\n citations=[citations['micom']])\nimportlib.import_module('q2_micom._transform')\n", "step-5": "\"\"\"Plugin setup.\"\"\"\n\nimport importlib\nfrom qiime2.plugin import (\n Plugin,\n Str,\n Choices,\n Int,\n Bool,\n Range,\n Float,\n Metadata,\n MetadataColumn,\n Categorical,\n Numeric,\n Citations,\n)\n\nimport q2_micom\nfrom q2_micom._formats_and_types import (\n SBML,\n JSON,\n Pickle,\n SBMLFormat,\n SBMLDirectory,\n JSONFormat,\n JSONDirectory,\n CommunityModelFormat,\n CommunityModelManifest,\n CommunityModelDirectory,\n GrowthRates,\n Fluxes,\n MicomResultsDirectory,\n MicomMediumFile,\n MicomMediumDirectory,\n MetabolicModels,\n CommunityModels,\n MicomResults,\n MicomMedium,\n Global,\n PerSample,\n TradeoffResults,\n TradeoffResultsDirectory,\n REQ_FIELDS,\n)\nfrom q2_types.feature_data import FeatureData, Taxonomy\nfrom q2_types.feature_table import FeatureTable, RelativeFrequency, Frequency\n\ncitations = Citations.load(\"citations.bib\", package=\"q2_micom\")\n\n\nplugin = Plugin(\n name=\"micom\",\n version=q2_micom.__version__,\n website=\"https://github.com/micom-dev/q2-micom\",\n package=\"q2_micom\",\n description=(\"\"),\n short_description=\"Plugin for metabolic modeling of microbial communities.\",\n citations=[citations[\"micom\"]],\n)\n\nplugin.register_formats(\n SBMLFormat,\n SBMLDirectory,\n JSONFormat,\n JSONDirectory,\n CommunityModelFormat,\n CommunityModelManifest,\n CommunityModelDirectory,\n GrowthRates,\n Fluxes,\n MicomResultsDirectory,\n MicomMediumFile,\n MicomMediumDirectory,\n TradeoffResultsDirectory,\n)\nplugin.register_semantic_types(\n MetabolicModels, CommunityModels, MicomResults, MicomMedium\n)\nplugin.register_semantic_type_to_format(MetabolicModels[SBML], SBMLDirectory)\nplugin.register_semantic_type_to_format(MetabolicModels[JSON], JSONDirectory)\nplugin.register_semantic_type_to_format(\n CommunityModels[Pickle], CommunityModelDirectory\n)\nplugin.register_semantic_type_to_format(MicomResults, MicomResultsDirectory)\nplugin.register_semantic_type_to_format(TradeoffResults, TradeoffResultsDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[Global], MicomMediumDirectory)\nplugin.register_semantic_type_to_format(MicomMedium[PerSample], MicomMediumDirectory)\n\nplugin.methods.register_function(\n function=q2_micom.db,\n inputs={},\n parameters={\n \"meta\": Metadata,\n \"rank\": Str % Choices(q2_micom._build.RANKS),\n \"threads\": Int % Range(1, None),\n },\n outputs=[(\"metabolic_models\", MetabolicModels[JSON])],\n input_descriptions={},\n 
parameter_descriptions={\n \"meta\": (\n \"Metadata for the individual metabolic models in `folder`. \"\n \"Must contain the the following columns: %s.\" % \", \".join(REQ_FIELDS)\n ),\n \"rank\": \"The phylogenetic rank at which to summarize taxa.\",\n \"threads\": \"The number of threads to use when constructing models.\",\n },\n output_descriptions={\"metabolic_models\": \"The metabolic model DB.\"},\n name=\"Build a metabolic model database.\",\n description=(\n \"Constructs pan-genome models summarized to the specified rank \"\n \"and bundles the models to be used by MICOM. \"\n \"The chosen rank has to be the same you want as when building your \"\n \"community models. \"\n \"So you may not build genus-level community models with a species \"\n \"level database. \"\n \"You will only need to run this function if you want to build a \"\n \"custom DB. For many use cases downloading the prebuilt AGORA DB \"\n \"with the the preferred rank should be sufficient.\"\n ),\n citations=[\n citations[\"agora\"],\n citations[\"agora_reply\"],\n citations[\"micom\"],\n ],\n)\n\nplugin.methods.register_function(\n function=q2_micom.build,\n inputs={\n \"abundance\": FeatureTable[Frequency | RelativeFrequency],\n \"taxonomy\": FeatureData[Taxonomy],\n \"models\": MetabolicModels[JSON],\n },\n parameters={\n \"threads\": Int % Range(1, None),\n \"cutoff\": Float % Range(0.0, 1.0),\n \"strict\": Bool,\n \"solver\": Str % Choices(\"auto\", \"cplex\", \"osqp\", \"gurobi\"),\n },\n outputs=[(\"community_models\", CommunityModels[Pickle])],\n input_descriptions={\n \"abundance\": (\n \"The feature table containing the samples over which beta \"\n \"diversity should be computed.\"\n ),\n \"taxonomy\": \"The taxonomy assignments for the ASVs in the table.\",\n \"models\": \"The single taxon model database to use.\",\n },\n parameter_descriptions={\n \"threads\": \"The number of threads to use when constructing models.\",\n \"cutoff\": \"Taxa with a relative abundance smaller than this will \"\n \"be dropped.\",\n \"strict\": (\n \"If true will collapse and match on all taxa ranks up to the \"\n \"specified rank (so on all higher ranks as well). If false \"\n \"(default) will match only on single taxa rank specified before. \"\n \"If using the strict option make sure ranks are named the same as in \"\n \"the used database.\"\n ),\n \"solver\": (\n \"The quadratic and linear programming solver that will be used \"\n \"in the models. Will pick an appropriate one by default. \"\n \"`cplex` and `gurobi` are commercial solvers with free academic \"\n \"licenses and have to be installed manually. See the docs for more info.\"\n ),\n },\n output_descriptions={\"community_models\": \"The community models.\"},\n name=\"Build community models.\",\n description=(\"Builds the metabolic community models for a set of samples.\"),\n citations=[citations[\"micom\"]],\n)\n\nplugin.methods.register_function(\n function=q2_micom.minimal_medium,\n inputs={\"models\": CommunityModels[Pickle]},\n parameters={\n \"min_growth\": Float % Range(0.0, None, inclusive_start=False),\n \"threads\": Int % Range(1, None),\n },\n outputs=[(\"medium\", MicomMedium[Global])],\n input_descriptions={\n \"models\": (\n \"A collection of metabolic community models. \"\n \"This should contain on model for each sample.\"\n ),\n },\n parameter_descriptions={\n \"min_growth\": (\n \"The minimum achievable growth rate for each taxon. 
\"\n \"The returned growth medium enables all taxa to growth \"\n \"simultaneously with at least this rate.\"\n ),\n \"threads\": \"The number of threads to use when simulating.\",\n },\n output_descriptions={\"medium\": \"The resulting growth medium.\"},\n name=\"Obtain a minimal growth medium for models.\",\n description=(\n \"Obtains a minimal growth medium for the community models. \"\n \"Please note that this medium does not have any biological \"\n \"feasibility. If you have any knowledge about metabolites present \"\n \"in the environment we recommend you construct the medium by hand.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.methods.register_function(\n function=q2_micom.grow,\n inputs={\n \"models\": CommunityModels[Pickle],\n \"medium\": MicomMedium[Global | PerSample],\n },\n parameters={\n \"tradeoff\": Float % Range(0.0, 1.0, inclusive_start=False, inclusive_end=True),\n \"strategy\": Str % Choices(\"pFBA\", \"minimal uptake\", \"none\"),\n \"threads\": Int % Range(1, None),\n },\n outputs=[(\"results\", MicomResults)],\n input_descriptions={\n \"models\": (\n \"A collection of metabolic community models. \"\n \"This should contain on model for each sample.\"\n ),\n \"medium\": \"The growth medium to use.\",\n },\n parameter_descriptions={\n \"tradeoff\": (\n \"The tradeoff parameter. This describes the balance \"\n \"between maximizing biomass production of the entire \"\n \"community and biomass production of individual taxa \"\n '(ergo \"egoistic\" growth). A value of 1.0 would yield '\n \"the best biomass production across the community but \"\n \"will only allow a few taxa to grow. Smaller values will \"\n \"allow more taxa to grow but will sacrifice overall \"\n \"biomass. A value of 0.5 (the default) has been shown to \"\n \"best reproduce growth rates in the human gut.\"\n ),\n \"strategy\": (\n \"The strategy used when choosing the solution in the \"\n \"optimal flux space. `minimal uptake` uses the fluxes \"\n \"that result in the smallest total uptake from the environment.\"\n \"`pFBA` uses parsimonious Flux Balance Analysis and thus will choose \"\n \"the fluxes with the lowest enzyme requirement for each taxon. \"\n \"`none` will return an arbitrary solution from the optimal flux space.\"\n ),\n \"threads\": \"The number of threads to use when simulating.\",\n },\n output_descriptions={\n \"results\": \"The resulting taxa-level growth rates and metabolic \"\n \"exchange fluxes.\"\n },\n name=\"Simulate growth for community models.\",\n description=(\n \"Simulates growth for a set of samples. Note that those are \"\n 'sample-specific or \"personalized\" simulations, so each taxon '\n \"may have different growth rates and metabolite usage in each sample.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.methods.register_function(\n function=q2_micom.tradeoff,\n inputs={\n \"models\": CommunityModels[Pickle],\n \"medium\": MicomMedium[Global | PerSample],\n },\n parameters={\n \"tradeoff_min\": Float % Range(0.0, 1.0, inclusive_start=False),\n \"tradeoff_max\": Float % Range(0.0, 1.0, inclusive_end=True),\n \"step\": Float % Range(0.0, 1.0),\n \"threads\": Int,\n },\n outputs=[(\"results\", TradeoffResults)],\n input_descriptions={\n \"models\": (\n \"A collection of metabolic community models. \"\n \"This should contain on model for each sample.\"\n ),\n \"medium\": \"The growth medium to use.\",\n },\n parameter_descriptions={\n \"tradeoff_min\": \"The minimum tradeoff parameter to test. 
This should \"\n \"be larger than 0.0 and smaller than 1.0.\",\n \"tradeoff_max\": \"The maximum tradeoff parameter to test. This should \"\n \"be larger than 0.0 and smaller than 1.0 and also be\"\n \"larger than `tradeoff_min`.\",\n \"step\": \"The tradeoff value step size to use.\",\n \"threads\": \"The number of threads to use when simulating.\",\n },\n output_descriptions={\n \"results\": \"The resulting taxa-level growth rates for varying \"\n \"tradeoff values.\"\n },\n name=\"Test a variety of tradeoff values.\",\n description=(\n \"Simulates growth for a set of samples while varying the tradeoff \"\n \"between community and taxon biomass production. \"\n \"This can be used to characterize a good tradeoff value for a \"\n \"specific set of samples. Our study suggested that a good tradeoff \"\n \"value is the largest value that allows the majority of taxa in the \"\n \"sample to grow.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.methods.register_function(\n function=q2_micom.filter_models,\n inputs={\"models\": CommunityModels[Pickle]},\n parameters={\"metadata\": Metadata, \"query\": Str, \"exclude\": Bool},\n outputs=[(\"filtered_models\", CommunityModels[Pickle])],\n input_descriptions={\n \"models\": (\n \"A collection of metabolic community models. \"\n \"This should contain on model for each sample.\"\n )\n },\n parameter_descriptions={\n \"metadata\": \"The metadata for the samples to keep or to query.\",\n \"query\": (\n \"A pandas query expression to select samples from the metadata. \"\n \"This will call `query` on the metadata DataFrame, so you can test \"\n \"your query by loading our metadata into a pandas DataFrame.\"\n ),\n \"exclude\": (\n \"If true will use all samples *except* the ones selected \"\n \"by metadata and query.\"\n ),\n },\n output_descriptions={\"filtered_models\": \"The filtered community models.\"},\n name=\"Filters models for a chosen set of samples.\",\n description=(\n \"Select a subset of samples and their community models using a list \"\n \"of samples or a pandas query expression.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.methods.register_function(\n function=q2_micom.filter_results,\n inputs={\"results\": MicomResults},\n parameters={\"metadata\": Metadata, \"query\": Str, \"exclude\": Bool},\n outputs=[(\"filtered_results\", MicomResults)],\n input_descriptions={\n \"results\": (\n \"A set of MICOM analysis results. \"\n \"Contains predicted groath rates and exchange fluxes.\"\n )\n },\n parameter_descriptions={\n \"metadata\": \"The metadata for the samples to keep or to query.\",\n \"query\": (\n \"A pandas query expression to select samples from the metadata. \"\n \"This will call `query` on the metadata DataFrame, so you can test \"\n \"your query by loading our metadata into a pandas DataFrame.\"\n ),\n \"exclude\": (\n \"If true will use all samples *except* the ones selected \"\n \"by metadata and query.\"\n ),\n },\n output_descriptions={\"filtered_results\": \"The filtered simulation models.\"},\n name=\"Filters results for a chosen set of samples.\",\n description=(\n \"Select a subset of samples and their simulation results using a list \"\n \"of samples or a pandas query expression.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.visualizers.register_function(\n function=q2_micom.plot_growth,\n inputs={\"results\": MicomResults},\n parameters={},\n input_descriptions={\n \"results\": (\n \"A set of MICOM analysis results. 
\"\n \"Contains predicted groath rates and exchange fluxes.\"\n )\n },\n parameter_descriptions={},\n name=\"Plot taxa growth rates.\",\n description=(\n \"Plot predicted growth rates for each taxon in each sample. \"\n \"Only points with growing taxa are shown (growth rate sufficiently \"\n \"larger than zero).\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.visualizers.register_function(\n function=q2_micom.exchanges_per_sample,\n inputs={\"results\": MicomResults},\n parameters={\n \"direction\": Str % Choices(\"import\", \"export\"),\n \"cluster\": Bool,\n },\n input_descriptions={\n \"results\": (\n \"A set of MICOM analysis results. \"\n \"Contains predicted groath rates and exchange fluxes.\"\n )\n },\n parameter_descriptions={\n \"direction\": \"The direction of the flux.\",\n \"cluster\": \"Whether to perform clutering on samples and reactions.\",\n },\n name=\"Plot gloabl exchange rates.\",\n description=(\n \"Plot predicted global exchange fluxes for each sample. \"\n \"When plotting imports this corresponds to the consumption \"\n \"fluxes for each metabolite that is available to the community. \"\n \"When plotting export this corresponds to the production fluxes \"\n \"for each metabolite.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\n\nplugin.visualizers.register_function(\n function=q2_micom.exchanges_per_taxon,\n inputs={\"results\": MicomResults},\n parameters={\n \"direction\": Str % Choices(\"import\", \"export\"),\n \"perplexity\": Int % Range(2, None),\n },\n input_descriptions={\n \"results\": (\n \"A set of MICOM analysis results. \"\n \"Contains predicted growth rates and exchange fluxes.\"\n )\n },\n parameter_descriptions={\n \"direction\": \"The direction of the flux.\",\n \"perplexity\": \"TSNE parameter. Relates to the number of neighbors used to \"\n \"calculate distances. Smaller values preserve more local \"\n \"structure and larger values preserve more global structure.\",\n },\n name=\"Plot niche overlap.\",\n description=(\n \"Plot growth or production niches. \"\n \"The entire set of import or export fluxes for each taxon in each \"\n \"sample is reduced onto a single point on a 2D plane.\"\n \"Taxa that are close to each other either consume similar metabolites \"\n \" (imports) or produce similar metabolites (exports).\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.visualizers.register_function(\n function=q2_micom.plot_tradeoff,\n inputs={\"results\": TradeoffResults},\n parameters={},\n input_descriptions={\n \"results\": (\n \"A set of MICOM tradeoff analysis results. \"\n \"Contains predicted growth rates for each tested tradeoff.\"\n )\n },\n parameter_descriptions={},\n name=\"Plot tradeoff results.\",\n description=(\n \"Plot predicted growth rate distributions for each tradeoff as \"\n \"well as the fraction of growing taxa in each sample and tradeoff \"\n \"value. For a good tradeoff value one usually tries to find the \"\n \"largest tradeoff value that still aloows most taxa to grow.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.visualizers.register_function(\n function=q2_micom.fit_phenotype,\n inputs={\"results\": MicomResults},\n parameters={\n \"metadata\": MetadataColumn[Categorical | Numeric],\n \"variable_type\": Str % Choices(\"binary\", \"continuous\"),\n \"flux_type\": Str % Choices(\"import\", \"production\"),\n \"min_coef\": Float % Range(0, None),\n },\n input_descriptions={\n \"results\": (\n \"A set of MICOM analysis results. 
\"\n \"Contains predicted growth rates and exchange fluxes.\"\n ),\n },\n parameter_descriptions={\n \"metadata\": \"The metadata variable to use.\",\n \"variable_type\": \"The type of the phenotype variable.\",\n \"flux_type\": \"Which fluxes to use.\",\n \"min_coef\": (\n \"Only coefficient with absolute values larger than this \" \"will be shown.\"\n ),\n },\n name=\"Test for differential production\",\n description=(\n \"Test for overall metabolite production differences \" \"between two groups.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nimportlib.import_module(\"q2_micom._transform\")\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/env python3
import argparse
import logging

import tango


def delete_devices():
    """List the device classes and servers registered in the Tango database.

    The actual deletion calls are currently commented out, so this function
    only reports what is registered.
    """
    db = tango.Database()
    class_list = db.get_class_list('*')
    print('class list = ', class_list)
    server_list = db.get_server_list('*')
    print('server list = ', server_list)

    # for index in range(num_devices):
    #     name = 'low_sdp/elt/test_device_{:05d}'.format(index)

    # db.delete_server('TestDevice/test1')
    # db.delete_device('tango/test1/000')


def delete_server():
    """Delete a server entry from the Tango database."""
    db = tango.Database()
    db.delete_server('')


if __name__ == '__main__':
    delete_devices()
normal
{ "blob_id": "f3dad6a474d5882beaac7d98f8f60c347730ee55", "index": 8428, "step-1": "<mask token>\n\n\ndef delete_devices():\n \"\"\".\"\"\"\n db = tango.Database()\n class_list = db.get_class_list('*')\n print('class list = ', class_list)\n server_list = db.get_server_list('*')\n print('server list = ', server_list)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef delete_devices():\n \"\"\".\"\"\"\n db = tango.Database()\n class_list = db.get_class_list('*')\n print('class list = ', class_list)\n server_list = db.get_server_list('*')\n print('server list = ', server_list)\n\n\ndef delete_server():\n \"\"\".\"\"\"\n db = tango.Database()\n db.delete_server('')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef delete_devices():\n \"\"\".\"\"\"\n db = tango.Database()\n class_list = db.get_class_list('*')\n print('class list = ', class_list)\n server_list = db.get_server_list('*')\n print('server list = ', server_list)\n\n\ndef delete_server():\n \"\"\".\"\"\"\n db = tango.Database()\n db.delete_server('')\n\n\nif __name__ == '__main__':\n delete_devices()\n", "step-4": "import argparse\nimport logging\nimport tango\n\n\ndef delete_devices():\n \"\"\".\"\"\"\n db = tango.Database()\n class_list = db.get_class_list('*')\n print('class list = ', class_list)\n server_list = db.get_server_list('*')\n print('server list = ', server_list)\n\n\ndef delete_server():\n \"\"\".\"\"\"\n db = tango.Database()\n db.delete_server('')\n\n\nif __name__ == '__main__':\n delete_devices()\n", "step-5": "#!/usr/bin/env python3\nimport argparse\nimport logging\n\nimport tango\n\n\ndef delete_devices():\n \"\"\".\"\"\"\n db = tango.Database()\n class_list = db.get_class_list('*')\n print('class list = ', class_list)\n server_list = db.get_server_list('*')\n print('server list = ', server_list)\n\n # for index in range(num_devices):\n # name = 'low_sdp/elt/test_device_{:05d}'.format(index)\n\n # db.delete_server('TestDevice/test1')\n # db.delete_device('tango/test1/000')\n\n\ndef delete_server():\n \"\"\".\"\"\"\n db = tango.Database()\n db.delete_server('')\n\n\nif __name__ == '__main__':\n delete_devices()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import itertools from typing import Tuple, List, Dict, Optional, Hashable, Collection class Hypergraph: """ Represents a hypergraph, consisting of nodes, directed edges, hypernodes (each of which is a set of nodes) and hyperedges (directed edges from hypernodes to hypernodes). Contains functionality to extract motifs from hypergraphs (Fig 2 of http://www.cs.cornell.edu/~cristian/Patterns_of_participant_interactions.html) """ def __init__(self): # public self.nodes = dict() self.hypernodes = dict() # private self.adj_out = dict() # out edges for each (hyper)node self.adj_in = dict() # in edges for each (hyper)node def add_node(self, u: Hashable, info: Optional[Dict]=None) -> None: self.nodes[u] = info if info is not None else dict() self.adj_out[u] = dict() self.adj_in[u] = dict() def add_hypernode(self, name: Hashable, nodes: Collection[Hashable], info: Optional[dict]=None) -> None: self.hypernodes[name] = set(nodes) self.adj_out[name] = dict() self.adj_in[name] = dict() # edge or hyperedge def add_edge(self, u: Hashable, v: Hashable, info: Optional[dict]=None) -> None: assert u in self.nodes or u in self.hypernodes assert v in self.nodes or v in self.hypernodes if u in self.hypernodes and v in self.hypernodes: assert len(info.keys()) > 0 if v not in self.adj_out[u]: self.adj_out[u][v] = [] if u not in self.adj_in[v]: self.adj_in[v][u] = [] if info is None: info = dict() self.adj_out[u][v].append(info) self.adj_in[v][u].append(info) def edges(self) -> Dict[Tuple[Hashable, Hashable], List]: return dict(((u, v), lst) for u, d in self.adj_out.items() for v, lst in d.items()) def outgoing_nodes(self, u: Hashable) -> Dict[Hashable, List]: assert u in self.adj_out return dict((v, lst) for v, lst in self.adj_out[u].items() if v in self.nodes) def outgoing_hypernodes(self, u) -> Dict[Hashable, List]: assert u in self.adj_out return dict((v, lst) for v, lst in self.adj_out[u].items() if v in self.hypernodes) def incoming_nodes(self, v: Hashable) -> Dict[Hashable, List]: assert v in self.adj_in return dict((u, lst) for u, lst in self.adj_in[v].items() if u in self.nodes) def incoming_hypernodes(self, v: Hashable) -> Dict[Hashable, List]: assert v in self.adj_in return dict((u, lst) for u, lst in self.adj_in[v].items() if u in self.hypernodes) def outdegrees(self, from_hyper: bool=False, to_hyper: bool=False) -> List[int]: return [sum([len(l) for v, l in self.adj_out[u].items() if v in (self.hypernodes if to_hyper else self.nodes)]) for u in (self.hypernodes if from_hyper else self.nodes)] def indegrees(self, from_hyper: bool=False, to_hyper: bool=False) -> List[int]: return [sum([len(l) for u, l in self.adj_in[v].items() if u in (self.hypernodes if from_hyper else self.nodes)]) for v in (self.hypernodes if to_hyper else self.nodes)] def reciprocity_motifs(self) -> List[Tuple]: """ :return: List of tuples of form (C1, c1, c2, C1->c2, c2->c1) as in paper """ motifs = [] for C1, c1_nodes in self.hypernodes.items(): for c1 in c1_nodes: motifs += [(C1, c1, c2, e1, e2) for c2 in self.adj_in[c1] if c2 in self.nodes and c2 in self.adj_out[C1] for e1 in self.adj_out[C1][c2] for e2 in self.adj_out[c2][c1]] return motifs def external_reciprocity_motifs(self) -> List[Tuple]: """ :return: List of tuples of form (C3, c2, c1, C3->c2, c2->c1) as in paper """ motifs = [] for C3 in self.hypernodes: for c2 in self.adj_out[C3]: if c2 in self.nodes: motifs += [(C3, c2, c1, e1, e2) for c1 in set(self.adj_out[c2].keys()) - self.hypernodes[C3] if c1 in self.nodes for e1 in self.adj_out[C3][c2] for e2 in 
self.adj_out[c2][c1]] return motifs def dyadic_interaction_motifs(self) -> List[Tuple]: """ :return: List of tuples of form (C1, C2, C1->C2, C2->C1) as in paper """ motifs = [] for C1 in self.hypernodes: motifs += [(C1, C2, e1, e2) for C2 in self.adj_out[C1] if C2 in self.hypernodes and C1 in self.adj_out[C2] for e1 in self.adj_out[C1][C2] for e2 in self.adj_out[C2][C1]] return motifs def incoming_triad_motifs(self) -> List[Tuple]: """ :return: List of tuples of form (C1, C2, C3, C2->C1, C3->C1) as in paper """ motifs = [] for C1 in self.hypernodes: incoming = list(self.adj_in[C1].keys()) motifs += [(C1, C2, C3, e1, e2) for C2, C3 in itertools.combinations(incoming, 2) for e1 in self.adj_out[C2][C1] for e2 in self.adj_out[C3][C1]] return motifs def outgoing_triad_motifs(self) -> List[Tuple]: """ :return: List of tuples of form (C1, C2, C3, C1->C2, C1->C3) as in paper """ motifs = [] for C1 in self.hypernodes: outgoing = list(self.adj_out[C1].keys()) motifs += [(C1, C2, C3, e1, e2) for C2, C3 in itertools.combinations(outgoing, 2) for e1 in self.adj_out[C1][C2] for e2 in self.adj_out[C1][C3]] return motifs
normal
{ "blob_id": "4a3611ecd70d80575f9f68bf45d67532a17b9c93", "index": 7527, "step-1": "<mask token>\n\n\nclass Hypergraph:\n <mask token>\n\n def __init__(self):\n self.nodes = dict()\n self.hypernodes = dict()\n self.adj_out = dict()\n self.adj_in = dict()\n <mask token>\n\n def add_hypernode(self, name: Hashable, nodes: Collection[Hashable],\n info: Optional[dict]=None) ->None:\n self.hypernodes[name] = set(nodes)\n self.adj_out[name] = dict()\n self.adj_in[name] = dict()\n <mask token>\n <mask token>\n <mask token>\n\n def outgoing_hypernodes(self, u) ->Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) for v, lst in self.adj_out[u].items() if v in\n self.hypernodes)\n <mask token>\n\n def incoming_hypernodes(self, v: Hashable) ->Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.hypernodes)\n <mask token>\n\n def indegrees(self, from_hyper: bool=False, to_hyper: bool=False) ->List[\n int]:\n return [sum([len(l) for u, l in self.adj_in[v].items() if u in (\n self.hypernodes if from_hyper else self.nodes)]) for v in (self\n .hypernodes if to_hyper else self.nodes)]\n\n def reciprocity_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, c1, c2, C1->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C1, c1_nodes in self.hypernodes.items():\n for c1 in c1_nodes:\n motifs += [(C1, c1, c2, e1, e2) for c2 in self.adj_in[c1] if\n c2 in self.nodes and c2 in self.adj_out[C1] for e1 in\n self.adj_out[C1][c2] for e2 in self.adj_out[c2][c1]]\n return motifs\n <mask token>\n <mask token>\n <mask token>\n\n def outgoing_triad_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C1->C2, C1->C3) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n outgoing = list(self.adj_out[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in itertools.\n combinations(outgoing, 2) for e1 in self.adj_out[C1][C2] for\n e2 in self.adj_out[C1][C3]]\n return motifs\n", "step-2": "<mask token>\n\n\nclass Hypergraph:\n <mask token>\n\n def __init__(self):\n self.nodes = dict()\n self.hypernodes = dict()\n self.adj_out = dict()\n self.adj_in = dict()\n <mask token>\n\n def add_hypernode(self, name: Hashable, nodes: Collection[Hashable],\n info: Optional[dict]=None) ->None:\n self.hypernodes[name] = set(nodes)\n self.adj_out[name] = dict()\n self.adj_in[name] = dict()\n\n def add_edge(self, u: Hashable, v: Hashable, info: Optional[dict]=None\n ) ->None:\n assert u in self.nodes or u in self.hypernodes\n assert v in self.nodes or v in self.hypernodes\n if u in self.hypernodes and v in self.hypernodes:\n assert len(info.keys()) > 0\n if v not in self.adj_out[u]:\n self.adj_out[u][v] = []\n if u not in self.adj_in[v]:\n self.adj_in[v][u] = []\n if info is None:\n info = dict()\n self.adj_out[u][v].append(info)\n self.adj_in[v][u].append(info)\n <mask token>\n <mask token>\n\n def outgoing_hypernodes(self, u) ->Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) for v, lst in self.adj_out[u].items() if v in\n self.hypernodes)\n\n def incoming_nodes(self, v: Hashable) ->Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.nodes)\n\n def incoming_hypernodes(self, v: Hashable) ->Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.hypernodes)\n <mask token>\n\n def indegrees(self, from_hyper: bool=False, to_hyper: 
bool=False) ->List[\n int]:\n return [sum([len(l) for u, l in self.adj_in[v].items() if u in (\n self.hypernodes if from_hyper else self.nodes)]) for v in (self\n .hypernodes if to_hyper else self.nodes)]\n\n def reciprocity_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, c1, c2, C1->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C1, c1_nodes in self.hypernodes.items():\n for c1 in c1_nodes:\n motifs += [(C1, c1, c2, e1, e2) for c2 in self.adj_in[c1] if\n c2 in self.nodes and c2 in self.adj_out[C1] for e1 in\n self.adj_out[C1][c2] for e2 in self.adj_out[c2][c1]]\n return motifs\n <mask token>\n <mask token>\n <mask token>\n\n def outgoing_triad_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C1->C2, C1->C3) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n outgoing = list(self.adj_out[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in itertools.\n combinations(outgoing, 2) for e1 in self.adj_out[C1][C2] for\n e2 in self.adj_out[C1][C3]]\n return motifs\n", "step-3": "<mask token>\n\n\nclass Hypergraph:\n <mask token>\n\n def __init__(self):\n self.nodes = dict()\n self.hypernodes = dict()\n self.adj_out = dict()\n self.adj_in = dict()\n <mask token>\n\n def add_hypernode(self, name: Hashable, nodes: Collection[Hashable],\n info: Optional[dict]=None) ->None:\n self.hypernodes[name] = set(nodes)\n self.adj_out[name] = dict()\n self.adj_in[name] = dict()\n\n def add_edge(self, u: Hashable, v: Hashable, info: Optional[dict]=None\n ) ->None:\n assert u in self.nodes or u in self.hypernodes\n assert v in self.nodes or v in self.hypernodes\n if u in self.hypernodes and v in self.hypernodes:\n assert len(info.keys()) > 0\n if v not in self.adj_out[u]:\n self.adj_out[u][v] = []\n if u not in self.adj_in[v]:\n self.adj_in[v][u] = []\n if info is None:\n info = dict()\n self.adj_out[u][v].append(info)\n self.adj_in[v][u].append(info)\n\n def edges(self) ->Dict[Tuple[Hashable, Hashable], List]:\n return dict(((u, v), lst) for u, d in self.adj_out.items() for v,\n lst in d.items())\n\n def outgoing_nodes(self, u: Hashable) ->Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) for v, lst in self.adj_out[u].items() if v in\n self.nodes)\n\n def outgoing_hypernodes(self, u) ->Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) for v, lst in self.adj_out[u].items() if v in\n self.hypernodes)\n\n def incoming_nodes(self, v: Hashable) ->Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.nodes)\n\n def incoming_hypernodes(self, v: Hashable) ->Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.hypernodes)\n <mask token>\n\n def indegrees(self, from_hyper: bool=False, to_hyper: bool=False) ->List[\n int]:\n return [sum([len(l) for u, l in self.adj_in[v].items() if u in (\n self.hypernodes if from_hyper else self.nodes)]) for v in (self\n .hypernodes if to_hyper else self.nodes)]\n\n def reciprocity_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, c1, c2, C1->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C1, c1_nodes in self.hypernodes.items():\n for c1 in c1_nodes:\n motifs += [(C1, c1, c2, e1, e2) for c2 in self.adj_in[c1] if\n c2 in self.nodes and c2 in self.adj_out[C1] for e1 in\n self.adj_out[C1][c2] for e2 in self.adj_out[c2][c1]]\n return motifs\n\n def external_reciprocity_motifs(self) 
->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C3, c2, c1, C3->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C3 in self.hypernodes:\n for c2 in self.adj_out[C3]:\n if c2 in self.nodes:\n motifs += [(C3, c2, c1, e1, e2) for c1 in set(self.\n adj_out[c2].keys()) - self.hypernodes[C3] if c1 in\n self.nodes for e1 in self.adj_out[C3][c2] for e2 in\n self.adj_out[c2][c1]]\n return motifs\n\n def dyadic_interaction_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C1->C2, C2->C1) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n motifs += [(C1, C2, e1, e2) for C2 in self.adj_out[C1] if C2 in\n self.hypernodes and C1 in self.adj_out[C2] for e1 in self.\n adj_out[C1][C2] for e2 in self.adj_out[C2][C1]]\n return motifs\n\n def incoming_triad_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C2->C1, C3->C1) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n incoming = list(self.adj_in[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in itertools.\n combinations(incoming, 2) for e1 in self.adj_out[C2][C1] for\n e2 in self.adj_out[C3][C1]]\n return motifs\n\n def outgoing_triad_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C1->C2, C1->C3) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n outgoing = list(self.adj_out[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in itertools.\n combinations(outgoing, 2) for e1 in self.adj_out[C1][C2] for\n e2 in self.adj_out[C1][C3]]\n return motifs\n", "step-4": "<mask token>\n\n\nclass Hypergraph:\n <mask token>\n\n def __init__(self):\n self.nodes = dict()\n self.hypernodes = dict()\n self.adj_out = dict()\n self.adj_in = dict()\n\n def add_node(self, u: Hashable, info: Optional[Dict]=None) ->None:\n self.nodes[u] = info if info is not None else dict()\n self.adj_out[u] = dict()\n self.adj_in[u] = dict()\n\n def add_hypernode(self, name: Hashable, nodes: Collection[Hashable],\n info: Optional[dict]=None) ->None:\n self.hypernodes[name] = set(nodes)\n self.adj_out[name] = dict()\n self.adj_in[name] = dict()\n\n def add_edge(self, u: Hashable, v: Hashable, info: Optional[dict]=None\n ) ->None:\n assert u in self.nodes or u in self.hypernodes\n assert v in self.nodes or v in self.hypernodes\n if u in self.hypernodes and v in self.hypernodes:\n assert len(info.keys()) > 0\n if v not in self.adj_out[u]:\n self.adj_out[u][v] = []\n if u not in self.adj_in[v]:\n self.adj_in[v][u] = []\n if info is None:\n info = dict()\n self.adj_out[u][v].append(info)\n self.adj_in[v][u].append(info)\n\n def edges(self) ->Dict[Tuple[Hashable, Hashable], List]:\n return dict(((u, v), lst) for u, d in self.adj_out.items() for v,\n lst in d.items())\n\n def outgoing_nodes(self, u: Hashable) ->Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) for v, lst in self.adj_out[u].items() if v in\n self.nodes)\n\n def outgoing_hypernodes(self, u) ->Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) for v, lst in self.adj_out[u].items() if v in\n self.hypernodes)\n\n def incoming_nodes(self, v: Hashable) ->Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.nodes)\n\n def incoming_hypernodes(self, v: Hashable) ->Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.hypernodes)\n <mask token>\n\n def indegrees(self, from_hyper: bool=False, 
to_hyper: bool=False) ->List[\n int]:\n return [sum([len(l) for u, l in self.adj_in[v].items() if u in (\n self.hypernodes if from_hyper else self.nodes)]) for v in (self\n .hypernodes if to_hyper else self.nodes)]\n\n def reciprocity_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, c1, c2, C1->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C1, c1_nodes in self.hypernodes.items():\n for c1 in c1_nodes:\n motifs += [(C1, c1, c2, e1, e2) for c2 in self.adj_in[c1] if\n c2 in self.nodes and c2 in self.adj_out[C1] for e1 in\n self.adj_out[C1][c2] for e2 in self.adj_out[c2][c1]]\n return motifs\n\n def external_reciprocity_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C3, c2, c1, C3->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C3 in self.hypernodes:\n for c2 in self.adj_out[C3]:\n if c2 in self.nodes:\n motifs += [(C3, c2, c1, e1, e2) for c1 in set(self.\n adj_out[c2].keys()) - self.hypernodes[C3] if c1 in\n self.nodes for e1 in self.adj_out[C3][c2] for e2 in\n self.adj_out[c2][c1]]\n return motifs\n\n def dyadic_interaction_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C1->C2, C2->C1) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n motifs += [(C1, C2, e1, e2) for C2 in self.adj_out[C1] if C2 in\n self.hypernodes and C1 in self.adj_out[C2] for e1 in self.\n adj_out[C1][C2] for e2 in self.adj_out[C2][C1]]\n return motifs\n\n def incoming_triad_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C2->C1, C3->C1) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n incoming = list(self.adj_in[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in itertools.\n combinations(incoming, 2) for e1 in self.adj_out[C2][C1] for\n e2 in self.adj_out[C3][C1]]\n return motifs\n\n def outgoing_triad_motifs(self) ->List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C1->C2, C1->C3) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n outgoing = list(self.adj_out[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in itertools.\n combinations(outgoing, 2) for e1 in self.adj_out[C1][C2] for\n e2 in self.adj_out[C1][C3]]\n return motifs\n", "step-5": "import itertools\nfrom typing import Tuple, List, Dict, Optional, Hashable, Collection\n\nclass Hypergraph:\n \"\"\"\n Represents a hypergraph, consisting of nodes, directed edges,\n hypernodes (each of which is a set of nodes) and hyperedges (directed edges\n from hypernodes to hypernodes). 
Contains functionality to extract motifs\n from hypergraphs (Fig 2 of\n http://www.cs.cornell.edu/~cristian/Patterns_of_participant_interactions.html)\n \"\"\"\n def __init__(self):\n # public\n self.nodes = dict()\n self.hypernodes = dict()\n\n # private\n self.adj_out = dict() # out edges for each (hyper)node\n self.adj_in = dict() # in edges for each (hyper)node\n\n def add_node(self, u: Hashable, info: Optional[Dict]=None) -> None:\n self.nodes[u] = info if info is not None else dict()\n self.adj_out[u] = dict()\n self.adj_in[u] = dict()\n\n def add_hypernode(self, name: Hashable,\n nodes: Collection[Hashable],\n info: Optional[dict]=None) -> None:\n self.hypernodes[name] = set(nodes)\n self.adj_out[name] = dict()\n self.adj_in[name] = dict()\n\n # edge or hyperedge\n def add_edge(self, u: Hashable, v: Hashable, info: Optional[dict]=None) -> None:\n assert u in self.nodes or u in self.hypernodes\n assert v in self.nodes or v in self.hypernodes\n if u in self.hypernodes and v in self.hypernodes:\n assert len(info.keys()) > 0\n if v not in self.adj_out[u]:\n self.adj_out[u][v] = []\n if u not in self.adj_in[v]:\n self.adj_in[v][u] = []\n if info is None: info = dict()\n self.adj_out[u][v].append(info)\n self.adj_in[v][u].append(info)\n\n def edges(self) -> Dict[Tuple[Hashable, Hashable], List]:\n return dict(((u, v), lst) for u, d in self.adj_out.items()\n for v, lst in d.items())\n\n def outgoing_nodes(self, u: Hashable) -> Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) for v, lst in self.adj_out[u].items()\n if v in self.nodes)\n\n def outgoing_hypernodes(self, u) -> Dict[Hashable, List]:\n assert u in self.adj_out\n return dict((v, lst) for v, lst in self.adj_out[u].items()\n if v in self.hypernodes)\n\n def incoming_nodes(self, v: Hashable) -> Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.nodes)\n\n def incoming_hypernodes(self, v: Hashable) -> Dict[Hashable, List]:\n assert v in self.adj_in\n return dict((u, lst) for u, lst in self.adj_in[v].items() if u in\n self.hypernodes)\n\n def outdegrees(self, from_hyper: bool=False, to_hyper: bool=False) -> List[int]:\n return [sum([len(l) for v, l in self.adj_out[u].items() if v in\n (self.hypernodes if to_hyper else self.nodes)]) for u in\n (self.hypernodes if from_hyper else self.nodes)]\n\n def indegrees(self, from_hyper: bool=False, to_hyper: bool=False) -> List[int]:\n return [sum([len(l) for u, l in self.adj_in[v].items() if u in\n (self.hypernodes if from_hyper else self.nodes)]) for v in\n (self.hypernodes if to_hyper else self.nodes)]\n\n def reciprocity_motifs(self) -> List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, c1, c2, C1->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C1, c1_nodes in self.hypernodes.items():\n for c1 in c1_nodes:\n motifs += [(C1, c1, c2, e1, e2) for c2 in self.adj_in[c1] if\n c2 in self.nodes and c2 in self.adj_out[C1]\n for e1 in self.adj_out[C1][c2]\n for e2 in self.adj_out[c2][c1]]\n return motifs\n\n def external_reciprocity_motifs(self) -> List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C3, c2, c1, C3->c2, c2->c1) as in paper\n \"\"\"\n motifs = []\n for C3 in self.hypernodes:\n for c2 in self.adj_out[C3]:\n if c2 in self.nodes:\n motifs += [(C3, c2, c1, e1, e2) for c1 in\n set(self.adj_out[c2].keys()) - self.hypernodes[C3]\n if c1 in self.nodes\n for e1 in self.adj_out[C3][c2]\n for e2 in self.adj_out[c2][c1]]\n return motifs\n\n def dyadic_interaction_motifs(self) -> 
List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C1->C2, C2->C1) as in paper\n \"\"\"\n\n motifs = []\n for C1 in self.hypernodes:\n motifs += [(C1, C2, e1, e2) for C2 in self.adj_out[C1] if C2 in\n self.hypernodes and C1 in self.adj_out[C2]\n for e1 in self.adj_out[C1][C2]\n for e2 in self.adj_out[C2][C1]]\n return motifs\n\n def incoming_triad_motifs(self) -> List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C2->C1, C3->C1) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n incoming = list(self.adj_in[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in\n itertools.combinations(incoming, 2)\n for e1 in self.adj_out[C2][C1]\n for e2 in self.adj_out[C3][C1]]\n return motifs\n\n def outgoing_triad_motifs(self) -> List[Tuple]:\n \"\"\"\n :return: List of tuples of form (C1, C2, C3, C1->C2, C1->C3) as in paper\n \"\"\"\n motifs = []\n for C1 in self.hypernodes:\n outgoing = list(self.adj_out[C1].keys())\n motifs += [(C1, C2, C3, e1, e2) for C2, C3 in\n itertools.combinations(outgoing, 2)\n for e1 in self.adj_out[C1][C2]\n for e2 in self.adj_out[C1][C3]]\n return motifs\n", "step-ids": [ 8, 10, 15, 16, 20 ] }
[ 8, 10, 15, 16, 20 ]
from sonosscripts import common from sonosscripts.common import round_nearest def run(_): parser = common.get_argument_parser() parser.add_argument("--step", help="volume step", type=int, default=5) parsed_args = parser.parse_args() sonos = common.get_sonos(parsed_args) step = parsed_args.step current_volume = sonos.volume new_volume = current_volume + step new_volume = round_nearest(new_volume, step) new_volume = min(common.max_volume, new_volume) new_volume = max(common.min_volume, new_volume) sonos.volume = new_volume if new_volume != current_volume: common.send_notification(f"Changed volume from {current_volume} to {new_volume}", common.get_icon(new_volume), common.process_volume) else: if new_volume == common.max_volume: common.send_notification("Volume is already at maximum", common.get_icon(new_volume), common.process_volume) else: common.send_notification("Volume is already at minimum", common.get_icon(new_volume), common.process_volume)
normal
{ "blob_id": "6e78dee46276f738197ba6796fe1a027ab743354", "index": 1769, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef run(_):\n parser = common.get_argument_parser()\n parser.add_argument('--step', help='volume step', type=int, default=5)\n parsed_args = parser.parse_args()\n sonos = common.get_sonos(parsed_args)\n step = parsed_args.step\n current_volume = sonos.volume\n new_volume = current_volume + step\n new_volume = round_nearest(new_volume, step)\n new_volume = min(common.max_volume, new_volume)\n new_volume = max(common.min_volume, new_volume)\n sonos.volume = new_volume\n if new_volume != current_volume:\n common.send_notification(\n f'Changed volume from {current_volume} to {new_volume}', common\n .get_icon(new_volume), common.process_volume)\n elif new_volume == common.max_volume:\n common.send_notification('Volume is already at maximum', common.\n get_icon(new_volume), common.process_volume)\n else:\n common.send_notification('Volume is already at minimum', common.\n get_icon(new_volume), common.process_volume)\n", "step-3": "from sonosscripts import common\nfrom sonosscripts.common import round_nearest\n\n\ndef run(_):\n parser = common.get_argument_parser()\n parser.add_argument('--step', help='volume step', type=int, default=5)\n parsed_args = parser.parse_args()\n sonos = common.get_sonos(parsed_args)\n step = parsed_args.step\n current_volume = sonos.volume\n new_volume = current_volume + step\n new_volume = round_nearest(new_volume, step)\n new_volume = min(common.max_volume, new_volume)\n new_volume = max(common.min_volume, new_volume)\n sonos.volume = new_volume\n if new_volume != current_volume:\n common.send_notification(\n f'Changed volume from {current_volume} to {new_volume}', common\n .get_icon(new_volume), common.process_volume)\n elif new_volume == common.max_volume:\n common.send_notification('Volume is already at maximum', common.\n get_icon(new_volume), common.process_volume)\n else:\n common.send_notification('Volume is already at minimum', common.\n get_icon(new_volume), common.process_volume)\n", "step-4": "from sonosscripts import common\nfrom sonosscripts.common import round_nearest\n\n\ndef run(_):\n parser = common.get_argument_parser()\n parser.add_argument(\"--step\", help=\"volume step\", type=int, default=5)\n parsed_args = parser.parse_args()\n sonos = common.get_sonos(parsed_args)\n step = parsed_args.step\n current_volume = sonos.volume\n new_volume = current_volume + step\n new_volume = round_nearest(new_volume, step)\n new_volume = min(common.max_volume, new_volume)\n new_volume = max(common.min_volume, new_volume)\n sonos.volume = new_volume\n if new_volume != current_volume:\n common.send_notification(f\"Changed volume from {current_volume} to {new_volume}\", common.get_icon(new_volume),\n common.process_volume)\n else:\n if new_volume == common.max_volume:\n common.send_notification(\"Volume is already at maximum\", common.get_icon(new_volume), common.process_volume)\n else:\n common.send_notification(\"Volume is already at minimum\", common.get_icon(new_volume), common.process_volume)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# coding: utf-8


import sys, os
sys.path.append(os.pardir)
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from common.util import smooth_curve
from common.multi_layer_net import MultiLayerNet
from common.optimizer import *

# 0. Load the MNIST data
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)

train_size = x_train.shape[0]
batch_size = 128
max_iterations = 2000

# 1. Set up the experiment configuration
optimizers = {}
optimizers['SGD'] = SGD()
optimizers['Momentum'] = Momentum()
optimizers['AdaGrad'] = AdaGrad()
optimizers['Adam'] = Adam()

# Dictionaries holding each optimizer's network and its training-loss history
networks = {}
train_loss = {}

# For every optimizer, build an identical MultiLayerNet and initialize its train_loss list
for key in optimizers.keys():
    networks[key] = MultiLayerNet(input_size=784,
                                  hidden_size_list=[100, 100, 100, 100],
                                  output_size=10)
    train_loss[key] = []

# 2. Start training
for i in range(max_iterations):
    # Draw one mini-batch shared by all four optimizers
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    for key in optimizers.keys():
        grads = networks[key].gradient(x_batch, t_batch)  # compute this network's gradients on the batch
        optimizers[key].update(networks[key].params, grads)  # update the network parameters using the gradients
        loss = networks[key].loss(x_batch, t_batch)  # strictly this should be computed first; here it is recorded for logging
        train_loss[key].append(loss)  # append to this optimizer's training-loss list

    # Periodically report progress and the current loss for each optimizer
    if i % 100 == 0:
        print("===========" + "iteration:" + str(i) + "===========")
        for key in optimizers.keys():
            loss = networks[key].loss(x_batch, t_batch)
            print(key + ':' + str(loss))


# 3. Plot the results
markers = {"SGD": "o", "Momentum": "x", "AdaGrad": "s", "Adam": "D"}
x = np.arange(max_iterations)
for key in optimizers.keys():
    plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key], markevery=100, label=key)
plt.xlabel("iterations")
plt.ylabel("loss")
plt.ylim(0, 1)
plt.legend()
plt.show()
normal
{ "blob_id": "85d40a49341c7bd7af7a5dc62e4bce0253eb25e6", "index": 9944, "step-1": "<mask token>\n", "step-2": "<mask token>\nsys.path.append(os.pardir)\n<mask token>\nfor key in optimizers.keys():\n networks[key] = MultiLayerNet(input_size=784, hidden_size_list=[100, \n 100, 100, 100], output_size=10)\n train_loss[key] = []\nfor i in range(max_iterations):\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n for key in optimizers.keys():\n grads = networks[key].gradient(x_batch, t_batch)\n optimizers[key].update(networks[key].params, grads)\n loss = networks[key].loss(x_batch, t_batch)\n train_loss[key].append(loss)\n if i % 100 == 0:\n print('===========' + 'iteration:' + str(i) + '===========')\n for key in optimizers.keys():\n loss = networks[key].loss(x_batch, t_batch)\n print(key + ':' + str(loss))\n<mask token>\nfor key in optimizers.keys():\n plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key],\n markevery=100, label=key)\nplt.xlabel('iterations')\nplt.ylabel('loss')\nplt.ylim(0, 1)\nplt.legend()\nplt.show()\n", "step-3": "<mask token>\nsys.path.append(os.pardir)\n<mask token>\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)\ntrain_size = x_train.shape[0]\nbatch_size = 128\nmax_iterations = 2000\noptimizers = {}\noptimizers['SGD'] = SGD()\noptimizers['Momentum'] = Momentum()\noptimizers['AdaGrad'] = AdaGrad()\noptimizers['Adam'] = Adam()\nnetworks = {}\ntrain_loss = {}\nfor key in optimizers.keys():\n networks[key] = MultiLayerNet(input_size=784, hidden_size_list=[100, \n 100, 100, 100], output_size=10)\n train_loss[key] = []\nfor i in range(max_iterations):\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n for key in optimizers.keys():\n grads = networks[key].gradient(x_batch, t_batch)\n optimizers[key].update(networks[key].params, grads)\n loss = networks[key].loss(x_batch, t_batch)\n train_loss[key].append(loss)\n if i % 100 == 0:\n print('===========' + 'iteration:' + str(i) + '===========')\n for key in optimizers.keys():\n loss = networks[key].loss(x_batch, t_batch)\n print(key + ':' + str(loss))\nmarkers = {'SGD': 'o', 'Momentum': 'x', 'AdaGrad': 's', 'Adam': 'D'}\nx = np.arange(max_iterations)\nfor key in optimizers.keys():\n plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key],\n markevery=100, label=key)\nplt.xlabel('iterations')\nplt.ylabel('loss')\nplt.ylim(0, 1)\nplt.legend()\nplt.show()\n", "step-4": "import sys, os\nsys.path.append(os.pardir)\nimport matplotlib.pyplot as plt\nfrom dataset.mnist import load_mnist\nfrom common.util import smooth_curve\nfrom common.multi_layer_net import MultiLayerNet\nfrom common.optimizer import *\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)\ntrain_size = x_train.shape[0]\nbatch_size = 128\nmax_iterations = 2000\noptimizers = {}\noptimizers['SGD'] = SGD()\noptimizers['Momentum'] = Momentum()\noptimizers['AdaGrad'] = AdaGrad()\noptimizers['Adam'] = Adam()\nnetworks = {}\ntrain_loss = {}\nfor key in optimizers.keys():\n networks[key] = MultiLayerNet(input_size=784, hidden_size_list=[100, \n 100, 100, 100], output_size=10)\n train_loss[key] = []\nfor i in range(max_iterations):\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n for key in optimizers.keys():\n grads = networks[key].gradient(x_batch, t_batch)\n optimizers[key].update(networks[key].params, grads)\n loss = 
networks[key].loss(x_batch, t_batch)\n train_loss[key].append(loss)\n if i % 100 == 0:\n print('===========' + 'iteration:' + str(i) + '===========')\n for key in optimizers.keys():\n loss = networks[key].loss(x_batch, t_batch)\n print(key + ':' + str(loss))\nmarkers = {'SGD': 'o', 'Momentum': 'x', 'AdaGrad': 's', 'Adam': 'D'}\nx = np.arange(max_iterations)\nfor key in optimizers.keys():\n plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key],\n markevery=100, label=key)\nplt.xlabel('iterations')\nplt.ylabel('loss')\nplt.ylim(0, 1)\nplt.legend()\nplt.show()\n", "step-5": "# coding: utf-8\r\n\r\n\r\nimport sys, os\r\nsys.path.append(os.pardir)\r\nimport matplotlib.pyplot as plt\r\nfrom dataset.mnist import load_mnist\r\nfrom common.util import smooth_curve\r\nfrom common.multi_layer_net import MultiLayerNet\r\nfrom common.optimizer import *\r\n\r\n# 0. MNIST 데이터 로딩\r\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)\r\n\r\ntrain_size = x_train.shape[0]\r\nbatch_size = 128\r\nmax_iterations = 2000\r\n\r\n# 1. 실험용 설정 셋팅\r\noptimizers = {}\r\noptimizers['SGD'] = SGD()\r\noptimizers['Momentum'] = Momentum()\r\noptimizers['AdaGrad'] = AdaGrad()\r\noptimizers['Adam'] = Adam()\r\n\r\n#network, loss를 저장할 dictionary를 설정\r\nnetworks = {}\r\ntrain_loss = {}\r\n\r\n#각 optimizer마다 network를 MultiLayerNet을 이용해서 똑같은 구조로 만들고, train_loss 딕셔너리를 초기화 한다.\r\nfor key in optimizers.keys():\r\n networks[key] = MultiLayerNet(input_size=784,\r\n hidden_size_list=[100, 100, 100, 100],\r\n output_size=10)\r\n train_loss[key] = []\r\n\r\n# 2. 훈련 시작\r\nfor i in range(max_iterations):\r\n #4개의 최적화 기법에 똑같이 들어갈 batch 생성\r\n batch_mask = np.random.choice(train_size, batch_size)\r\n x_batch = x_train[batch_mask]\r\n t_batch = t_train[batch_mask]\r\n\r\n for key in optimizers.keys():\r\n grads = networks[key].gradient(x_batch, t_batch) #배치를 넣어서 각 네트워크의 기울기를 구함\r\n optimizers[key].update(networks[key].params, grads) #네트워크의 parameter를 기울기에 대해 update함\r\n loss = networks[key].loss(x_batch, t_batch) #사실 이것이 먼저 계산되어야 하지만, 이 코드에서는 기록용으로 저장\r\n train_loss[key].append(loss) #각 최적화 기법의 학습 loss 리스트에 저장\r\n\r\n #학습 진행 경과 및 각 최적화 기법에 해당하는 loss 확인\r\n if i % 100 == 0:\r\n print(\"===========\" + \"iteration:\" + str(i) + \"===========\")\r\n for key in optimizers.keys():\r\n loss = networks[key].loss(x_batch, t_batch)\r\n print(key + ':' + str(loss))\r\n\r\n\r\n# 3. 그래프 그리기\r\nmarkers = {\"SGD\": \"o\", \"Momentum\": \"x\", \"AdaGrad\": \"s\", \"Adam\": \"D\"}\r\nx = np.arange(max_iterations)\r\nfor key in optimizers.keys():\r\n plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key], markevery=100, label=key)\r\nplt.xlabel(\"iterations\")\r\nplt.ylabel(\"loss\")\r\nplt.ylim(0, 1)\r\nplt.legend()\r\nplt.show()\r\n\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import tempfile from functools import partial import numpy as np import torch from ax.benchmark.benchmark_problem import SimpleBenchmarkProblem from ax.core.metric import Metric from ax.core.runner import Runner from ax.exceptions.storage import JSONDecodeError, JSONEncodeError from ax.modelbridge.base import ModelBridge from ax.modelbridge.registry import Models from ax.storage.json_store.decoder import ( generation_strategy_from_json, object_from_json, ) from ax.storage.json_store.decoders import class_from_json from ax.storage.json_store.encoder import object_to_json from ax.storage.json_store.encoders import botorch_modular_to_dict from ax.storage.json_store.load import load_experiment from ax.storage.json_store.registry import CLASS_ENCODER_REGISTRY from ax.storage.json_store.save import save_experiment from ax.storage.metric_registry import register_metric from ax.storage.runner_registry import register_runner from ax.utils.common.testutils import TestCase from ax.utils.measurement.synthetic_functions import ackley, branin, from_botorch from ax.utils.testing.benchmark_stubs import ( get_branin_benchmark_problem, get_branin_simple_benchmark_problem, get_mult_simple_benchmark_problem, get_sum_simple_benchmark_problem, ) from ax.utils.testing.core_stubs import ( get_abandoned_arm, get_acquisition_function_type, get_acquisition_type, get_arm, get_augmented_branin_metric, get_augmented_hartmann_metric, get_batch_trial, get_botorch_model, get_botorch_model_with_default_acquisition_class, get_branin_data, get_branin_experiment, get_branin_metric, get_choice_parameter, get_experiment_with_batch_and_single_trial, get_experiment_with_data, get_experiment_with_trial_with_ttl, get_experiment_with_map_data_type, get_factorial_metric, get_fixed_parameter, get_generator_run, get_map_data, get_hartmann_metric, get_list_surrogate, get_metric, get_mll_type, get_model_type, get_multi_objective, get_multi_objective_optimization_config, get_multi_type_experiment, get_objective, get_objective_threshold, get_optimization_config, get_order_constraint, get_outcome_constraint, get_parameter_constraint, get_percentile_early_stopping_strategy, get_range_parameter, get_scalarized_objective, get_search_space, get_simple_experiment_with_batch_trial, get_sum_constraint1, get_sum_constraint2, get_surrogate, get_synthetic_runner, get_trial, ) from ax.utils.testing.modeling_stubs import ( get_generation_strategy, get_observation_features, get_transform_type, ) from botorch.test_functions.synthetic import Ackley TEST_CASES = [ ("AbandonedArm", get_abandoned_arm), ("Arm", get_arm), ("AugmentedBraninMetric", get_augmented_branin_metric), ("AugmentedHartmannMetric", get_augmented_hartmann_metric), ("BatchTrial", get_batch_trial), ("BenchmarkProblem", get_branin_benchmark_problem), ("BoTorchModel", get_botorch_model), ("BoTorchModel", get_botorch_model_with_default_acquisition_class), ("BraninMetric", get_branin_metric), ("ChoiceParameter", get_choice_parameter), ("Experiment", get_experiment_with_batch_and_single_trial), ("Experiment", get_experiment_with_trial_with_ttl), ("Experiment", get_experiment_with_data), ("Experiment", get_experiment_with_map_data_type), ("FactorialMetric", get_factorial_metric), ("FixedParameter", get_fixed_parameter), ("Hartmann6Metric", get_hartmann_metric), ("GenerationStrategy", 
partial(get_generation_strategy, with_experiment=True)), ("GeneratorRun", get_generator_run), ("ListSurrogate", get_list_surrogate), ("MapData", get_map_data), ("Metric", get_metric), ("MultiObjective", get_multi_objective), ("MultiObjectiveOptimizationConfig", get_multi_objective_optimization_config), ("MultiTypeExperiment", get_multi_type_experiment), ("ObservationFeatures", get_observation_features), ("Objective", get_objective), ("ObjectiveThreshold", get_objective_threshold), ("OptimizationConfig", get_optimization_config), ("OrderConstraint", get_order_constraint), ("OutcomeConstraint", get_outcome_constraint), ("PercentileEarlyStoppingStrategy", get_percentile_early_stopping_strategy), ("ParameterConstraint", get_parameter_constraint), ("RangeParameter", get_range_parameter), ("ScalarizedObjective", get_scalarized_objective), ("SearchSpace", get_search_space), ("SimpleBenchmarkProblem", get_mult_simple_benchmark_problem), ("SimpleBenchmarkProblem", get_branin_simple_benchmark_problem), ("SimpleBenchmarkProblem", get_sum_simple_benchmark_problem), ("SimpleExperiment", get_simple_experiment_with_batch_trial), ("SumConstraint", get_sum_constraint1), ("SumConstraint", get_sum_constraint2), ("Surrogate", get_surrogate), ("SyntheticRunner", get_synthetic_runner), ("Type[Acquisition]", get_acquisition_type), ("Type[AcquisitionFunction]", get_acquisition_function_type), ("Type[Model]", get_model_type), ("Type[MarginalLogLikelihood]", get_mll_type), ("Type[Transform]", get_transform_type), ("Trial", get_trial), ] class JSONStoreTest(TestCase): def setUp(self): self.experiment = get_experiment_with_batch_and_single_trial() def testJSONEncodeFailure(self): self.assertRaises(JSONEncodeError, object_to_json, RuntimeError("foobar")) def testJSONDecodeFailure(self): self.assertRaises(JSONDecodeError, object_from_json, RuntimeError("foobar")) self.assertRaises(JSONDecodeError, object_from_json, {"__type": "foobar"}) def testSaveAndLoad(self): with tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".json") as f: save_experiment(self.experiment, f.name) loaded_experiment = load_experiment(f.name) self.assertEqual(loaded_experiment, self.experiment) os.remove(f.name) def testSaveValidation(self): with self.assertRaises(ValueError): save_experiment(self.experiment.trials[0], "test.json") def testValidateFilename(self): bad_filename = "test" self.assertRaises(ValueError, save_experiment, self.experiment, bad_filename) def testEncodeDecode(self): for class_, fake_func in TEST_CASES: # Can't load trials from JSON, because a batch needs an experiment # in order to be initialized if class_ == "BatchTrial" or class_ == "Trial": continue # Can't load parameter constraints from JSON, because they require # a SearchSpace in order to be initialized if class_ == "OrderConstraint" or class_ == "SumConstraint": continue original_object = fake_func() json_object = object_to_json(original_object) converted_object = object_from_json(json_object) if class_ == "SimpleExperiment": # Evaluation functions will be different, so need to do # this so equality test passes with self.assertRaises(RuntimeError): converted_object.evaluation_function(parameterization={}) original_object.evaluation_function = None converted_object.evaluation_function = None self.assertEqual( original_object, converted_object, msg=f"Error encoding/decoding {class_}.", ) def testEncodeDecodeTorchTensor(self): x = torch.tensor( [[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64, device=torch.device("cpu") ) expected_json = { "__type": "Tensor", 
"value": [[1.0, 2.0], [3.0, 4.0]], "dtype": {"__type": "torch_dtype", "value": "torch.float64"}, "device": {"__type": "torch_device", "value": "cpu"}, } x_json = object_to_json(x) self.assertEqual(expected_json, x_json) x2 = object_from_json(x_json) self.assertTrue(torch.equal(x, x2)) def testDecodeGenerationStrategy(self): generation_strategy = get_generation_strategy() experiment = get_branin_experiment() gs_json = object_to_json(generation_strategy) new_generation_strategy = generation_strategy_from_json(gs_json) self.assertEqual(generation_strategy, new_generation_strategy) self.assertGreater(len(new_generation_strategy._steps), 0) self.assertIsInstance(new_generation_strategy._steps[0].model, Models) # Model has not yet been initialized on this GS since it hasn't generated # anything yet. self.assertIsNone(new_generation_strategy.model) # Check that we can encode and decode the generation strategy after # it has generated some generator runs. Since we now need to `gen`, # we remove the fake callable kwarg we added, since model does not # expect it. generation_strategy = get_generation_strategy(with_callable_model_kwarg=False) gr = generation_strategy.gen(experiment) gs_json = object_to_json(generation_strategy) new_generation_strategy = generation_strategy_from_json(gs_json) self.assertEqual(generation_strategy, new_generation_strategy) self.assertIsInstance(new_generation_strategy._steps[0].model, Models) # Since this GS has now generated one generator run, model should have # been initialized and restored when decoding from JSON. self.assertIsInstance(new_generation_strategy.model, ModelBridge) # Check that we can encode and decode the generation strategy after # it has generated some trials and been updated with some data. generation_strategy = new_generation_strategy experiment.new_trial(gr) # Add previously generated GR as trial. # Make generation strategy aware of the trial's data via `gen`. generation_strategy.gen(experiment, data=get_branin_data()) gs_json = object_to_json(generation_strategy) new_generation_strategy = generation_strategy_from_json(gs_json) self.assertEqual(generation_strategy, new_generation_strategy) self.assertIsInstance(new_generation_strategy._steps[0].model, Models) self.assertIsInstance(new_generation_strategy.model, ModelBridge) def testEncodeDecodeNumpy(self): arr = np.array([[1, 2, 3], [4, 5, 6]]) self.assertTrue(np.array_equal(arr, object_from_json(object_to_json(arr)))) def testEncodeDecodeSimpleBenchmarkProblem(self): branin_problem = get_branin_simple_benchmark_problem() sum_problem = get_sum_simple_benchmark_problem() new_branin_problem = object_from_json(object_to_json(branin_problem)) new_sum_problem = object_from_json(object_to_json(sum_problem)) self.assertEqual( branin_problem.f(1, 2), new_branin_problem.f(1, 2), branin(1, 2) ) self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3) # Test using `from_botorch`. 
ackley_problem = SimpleBenchmarkProblem( f=from_botorch(Ackley()), noise_sd=0.0, minimize=True ) new_ackley_problem = object_from_json(object_to_json(ackley_problem)) self.assertEqual( ackley_problem.f(1, 2), new_ackley_problem.f(1, 2), ackley(1, 2) ) def testRegistryAdditions(self): class MyRunner(Runner): def run(): pass def staging_required(): return False class MyMetric(Metric): pass register_metric(MyMetric) register_runner(MyRunner) experiment = get_experiment_with_batch_and_single_trial() experiment.runner = MyRunner() experiment.add_tracking_metric(MyMetric(name="my_metric")) with tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".json") as f: save_experiment(experiment, f.name) loaded_experiment = load_experiment(f.name) self.assertEqual(loaded_experiment, experiment) os.remove(f.name) def testEncodeUnknownClassToDict(self): # Cannot encode `UnknownClass` type because it is not registered in the # CLASS_ENCODER_REGISTRY. class UnknownClass: def __init__(self): pass with self.assertRaisesRegex( ValueError, "is a class. Add it to the CLASS_ENCODER_REGISTRY" ): object_to_json(UnknownClass) # `UnknownClass` type is registered in the CLASS_ENCODER_REGISTRY and uses the # `botorch_modular_to_dict` encoder, but `UnknownClass` is not registered in # the `botorch_modular_registry.py` file. CLASS_ENCODER_REGISTRY[UnknownClass] = botorch_modular_to_dict with self.assertRaisesRegex( ValueError, "does not have a corresponding parent class in CLASS_TO_REGISTRY", ): object_to_json(UnknownClass) def testDecodeUnknownClassFromJson(self): with self.assertRaisesRegex( ValueError, "does not have a corresponding entry in CLASS_TO_REVERSE_REGISTRY", ): class_from_json({"index": 0, "class": "unknown_path"})
normal
{ "blob_id": "52eec56f7f5da8356f61301994f846ef7769f73b", "index": 6189, "step-1": "<mask token>\n\n\nclass JSONStoreTest(TestCase):\n\n def setUp(self):\n self.experiment = get_experiment_with_batch_and_single_trial()\n\n def testJSONEncodeFailure(self):\n self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(\n 'foobar'))\n\n def testJSONDecodeFailure(self):\n self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(\n 'foobar'))\n self.assertRaises(JSONDecodeError, object_from_json, {'__type':\n 'foobar'})\n <mask token>\n <mask token>\n\n def testValidateFilename(self):\n bad_filename = 'test'\n self.assertRaises(ValueError, save_experiment, self.experiment,\n bad_filename)\n <mask token>\n <mask token>\n <mask token>\n\n def testEncodeDecodeNumpy(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n self.assertTrue(np.array_equal(arr, object_from_json(object_to_json\n (arr))))\n\n def testEncodeDecodeSimpleBenchmarkProblem(self):\n branin_problem = get_branin_simple_benchmark_problem()\n sum_problem = get_sum_simple_benchmark_problem()\n new_branin_problem = object_from_json(object_to_json(branin_problem))\n new_sum_problem = object_from_json(object_to_json(sum_problem))\n self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),\n branin(1, 2))\n self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)\n ackley_problem = SimpleBenchmarkProblem(f=from_botorch(Ackley()),\n noise_sd=0.0, minimize=True)\n new_ackley_problem = object_from_json(object_to_json(ackley_problem))\n self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),\n ackley(1, 2))\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass JSONStoreTest(TestCase):\n\n def setUp(self):\n self.experiment = get_experiment_with_batch_and_single_trial()\n\n def testJSONEncodeFailure(self):\n self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(\n 'foobar'))\n\n def testJSONDecodeFailure(self):\n self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(\n 'foobar'))\n self.assertRaises(JSONDecodeError, object_from_json, {'__type':\n 'foobar'})\n\n def testSaveAndLoad(self):\n with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=\n '.json') as f:\n save_experiment(self.experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, self.experiment)\n os.remove(f.name)\n <mask token>\n\n def testValidateFilename(self):\n bad_filename = 'test'\n self.assertRaises(ValueError, save_experiment, self.experiment,\n bad_filename)\n <mask token>\n\n def testEncodeDecodeTorchTensor(self):\n x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64,\n device=torch.device('cpu'))\n expected_json = {'__type': 'Tensor', 'value': [[1.0, 2.0], [3.0, \n 4.0]], 'dtype': {'__type': 'torch_dtype', 'value':\n 'torch.float64'}, 'device': {'__type': 'torch_device', 'value':\n 'cpu'}}\n x_json = object_to_json(x)\n self.assertEqual(expected_json, x_json)\n x2 = object_from_json(x_json)\n self.assertTrue(torch.equal(x, x2))\n\n def testDecodeGenerationStrategy(self):\n generation_strategy = get_generation_strategy()\n experiment = get_branin_experiment()\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertGreater(len(new_generation_strategy._steps), 0)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n 
self.assertIsNone(new_generation_strategy.model)\n generation_strategy = get_generation_strategy(with_callable_model_kwarg\n =False)\n gr = generation_strategy.gen(experiment)\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n generation_strategy = new_generation_strategy\n experiment.new_trial(gr)\n generation_strategy.gen(experiment, data=get_branin_data())\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n\n def testEncodeDecodeNumpy(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n self.assertTrue(np.array_equal(arr, object_from_json(object_to_json\n (arr))))\n\n def testEncodeDecodeSimpleBenchmarkProblem(self):\n branin_problem = get_branin_simple_benchmark_problem()\n sum_problem = get_sum_simple_benchmark_problem()\n new_branin_problem = object_from_json(object_to_json(branin_problem))\n new_sum_problem = object_from_json(object_to_json(sum_problem))\n self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),\n branin(1, 2))\n self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)\n ackley_problem = SimpleBenchmarkProblem(f=from_botorch(Ackley()),\n noise_sd=0.0, minimize=True)\n new_ackley_problem = object_from_json(object_to_json(ackley_problem))\n self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),\n ackley(1, 2))\n <mask token>\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass JSONStoreTest(TestCase):\n\n def setUp(self):\n self.experiment = get_experiment_with_batch_and_single_trial()\n\n def testJSONEncodeFailure(self):\n self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(\n 'foobar'))\n\n def testJSONDecodeFailure(self):\n self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(\n 'foobar'))\n self.assertRaises(JSONDecodeError, object_from_json, {'__type':\n 'foobar'})\n\n def testSaveAndLoad(self):\n with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=\n '.json') as f:\n save_experiment(self.experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, self.experiment)\n os.remove(f.name)\n\n def testSaveValidation(self):\n with self.assertRaises(ValueError):\n save_experiment(self.experiment.trials[0], 'test.json')\n\n def testValidateFilename(self):\n bad_filename = 'test'\n self.assertRaises(ValueError, save_experiment, self.experiment,\n bad_filename)\n <mask token>\n\n def testEncodeDecodeTorchTensor(self):\n x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64,\n device=torch.device('cpu'))\n expected_json = {'__type': 'Tensor', 'value': [[1.0, 2.0], [3.0, \n 4.0]], 'dtype': {'__type': 'torch_dtype', 'value':\n 'torch.float64'}, 'device': {'__type': 'torch_device', 'value':\n 'cpu'}}\n x_json = object_to_json(x)\n self.assertEqual(expected_json, x_json)\n x2 = object_from_json(x_json)\n self.assertTrue(torch.equal(x, x2))\n\n def testDecodeGenerationStrategy(self):\n generation_strategy = get_generation_strategy()\n experiment = get_branin_experiment()\n gs_json = 
object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertGreater(len(new_generation_strategy._steps), 0)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsNone(new_generation_strategy.model)\n generation_strategy = get_generation_strategy(with_callable_model_kwarg\n =False)\n gr = generation_strategy.gen(experiment)\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n generation_strategy = new_generation_strategy\n experiment.new_trial(gr)\n generation_strategy.gen(experiment, data=get_branin_data())\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n\n def testEncodeDecodeNumpy(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n self.assertTrue(np.array_equal(arr, object_from_json(object_to_json\n (arr))))\n\n def testEncodeDecodeSimpleBenchmarkProblem(self):\n branin_problem = get_branin_simple_benchmark_problem()\n sum_problem = get_sum_simple_benchmark_problem()\n new_branin_problem = object_from_json(object_to_json(branin_problem))\n new_sum_problem = object_from_json(object_to_json(sum_problem))\n self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),\n branin(1, 2))\n self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)\n ackley_problem = SimpleBenchmarkProblem(f=from_botorch(Ackley()),\n noise_sd=0.0, minimize=True)\n new_ackley_problem = object_from_json(object_to_json(ackley_problem))\n self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),\n ackley(1, 2))\n <mask token>\n <mask token>\n\n def testDecodeUnknownClassFromJson(self):\n with self.assertRaisesRegex(ValueError,\n 'does not have a corresponding entry in CLASS_TO_REVERSE_REGISTRY'\n ):\n class_from_json({'index': 0, 'class': 'unknown_path'})\n", "step-4": "<mask token>\n\n\nclass JSONStoreTest(TestCase):\n\n def setUp(self):\n self.experiment = get_experiment_with_batch_and_single_trial()\n\n def testJSONEncodeFailure(self):\n self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(\n 'foobar'))\n\n def testJSONDecodeFailure(self):\n self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(\n 'foobar'))\n self.assertRaises(JSONDecodeError, object_from_json, {'__type':\n 'foobar'})\n\n def testSaveAndLoad(self):\n with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=\n '.json') as f:\n save_experiment(self.experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, self.experiment)\n os.remove(f.name)\n\n def testSaveValidation(self):\n with self.assertRaises(ValueError):\n save_experiment(self.experiment.trials[0], 'test.json')\n\n def testValidateFilename(self):\n bad_filename = 'test'\n self.assertRaises(ValueError, save_experiment, self.experiment,\n bad_filename)\n <mask token>\n\n def testEncodeDecodeTorchTensor(self):\n x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64,\n 
device=torch.device('cpu'))\n expected_json = {'__type': 'Tensor', 'value': [[1.0, 2.0], [3.0, \n 4.0]], 'dtype': {'__type': 'torch_dtype', 'value':\n 'torch.float64'}, 'device': {'__type': 'torch_device', 'value':\n 'cpu'}}\n x_json = object_to_json(x)\n self.assertEqual(expected_json, x_json)\n x2 = object_from_json(x_json)\n self.assertTrue(torch.equal(x, x2))\n\n def testDecodeGenerationStrategy(self):\n generation_strategy = get_generation_strategy()\n experiment = get_branin_experiment()\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertGreater(len(new_generation_strategy._steps), 0)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsNone(new_generation_strategy.model)\n generation_strategy = get_generation_strategy(with_callable_model_kwarg\n =False)\n gr = generation_strategy.gen(experiment)\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n generation_strategy = new_generation_strategy\n experiment.new_trial(gr)\n generation_strategy.gen(experiment, data=get_branin_data())\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n\n def testEncodeDecodeNumpy(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n self.assertTrue(np.array_equal(arr, object_from_json(object_to_json\n (arr))))\n\n def testEncodeDecodeSimpleBenchmarkProblem(self):\n branin_problem = get_branin_simple_benchmark_problem()\n sum_problem = get_sum_simple_benchmark_problem()\n new_branin_problem = object_from_json(object_to_json(branin_problem))\n new_sum_problem = object_from_json(object_to_json(sum_problem))\n self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),\n branin(1, 2))\n self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)\n ackley_problem = SimpleBenchmarkProblem(f=from_botorch(Ackley()),\n noise_sd=0.0, minimize=True)\n new_ackley_problem = object_from_json(object_to_json(ackley_problem))\n self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),\n ackley(1, 2))\n\n def testRegistryAdditions(self):\n\n\n class MyRunner(Runner):\n\n def run():\n pass\n\n def staging_required():\n return False\n\n\n class MyMetric(Metric):\n pass\n register_metric(MyMetric)\n register_runner(MyRunner)\n experiment = get_experiment_with_batch_and_single_trial()\n experiment.runner = MyRunner()\n experiment.add_tracking_metric(MyMetric(name='my_metric'))\n with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=\n '.json') as f:\n save_experiment(experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, experiment)\n os.remove(f.name)\n\n def testEncodeUnknownClassToDict(self):\n\n\n class UnknownClass:\n\n def __init__(self):\n pass\n with self.assertRaisesRegex(ValueError,\n 'is a class. 
Add it to the CLASS_ENCODER_REGISTRY'):\n object_to_json(UnknownClass)\n CLASS_ENCODER_REGISTRY[UnknownClass] = botorch_modular_to_dict\n with self.assertRaisesRegex(ValueError,\n 'does not have a corresponding parent class in CLASS_TO_REGISTRY'):\n object_to_json(UnknownClass)\n\n def testDecodeUnknownClassFromJson(self):\n with self.assertRaisesRegex(ValueError,\n 'does not have a corresponding entry in CLASS_TO_REVERSE_REGISTRY'\n ):\n class_from_json({'index': 0, 'class': 'unknown_path'})\n", "step-5": "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport tempfile\nfrom functools import partial\n\nimport numpy as np\nimport torch\nfrom ax.benchmark.benchmark_problem import SimpleBenchmarkProblem\nfrom ax.core.metric import Metric\nfrom ax.core.runner import Runner\nfrom ax.exceptions.storage import JSONDecodeError, JSONEncodeError\nfrom ax.modelbridge.base import ModelBridge\nfrom ax.modelbridge.registry import Models\nfrom ax.storage.json_store.decoder import (\n generation_strategy_from_json,\n object_from_json,\n)\nfrom ax.storage.json_store.decoders import class_from_json\nfrom ax.storage.json_store.encoder import object_to_json\nfrom ax.storage.json_store.encoders import botorch_modular_to_dict\nfrom ax.storage.json_store.load import load_experiment\nfrom ax.storage.json_store.registry import CLASS_ENCODER_REGISTRY\nfrom ax.storage.json_store.save import save_experiment\nfrom ax.storage.metric_registry import register_metric\nfrom ax.storage.runner_registry import register_runner\nfrom ax.utils.common.testutils import TestCase\nfrom ax.utils.measurement.synthetic_functions import ackley, branin, from_botorch\nfrom ax.utils.testing.benchmark_stubs import (\n get_branin_benchmark_problem,\n get_branin_simple_benchmark_problem,\n get_mult_simple_benchmark_problem,\n get_sum_simple_benchmark_problem,\n)\nfrom ax.utils.testing.core_stubs import (\n get_abandoned_arm,\n get_acquisition_function_type,\n get_acquisition_type,\n get_arm,\n get_augmented_branin_metric,\n get_augmented_hartmann_metric,\n get_batch_trial,\n get_botorch_model,\n get_botorch_model_with_default_acquisition_class,\n get_branin_data,\n get_branin_experiment,\n get_branin_metric,\n get_choice_parameter,\n get_experiment_with_batch_and_single_trial,\n get_experiment_with_data,\n get_experiment_with_trial_with_ttl,\n get_experiment_with_map_data_type,\n get_factorial_metric,\n get_fixed_parameter,\n get_generator_run,\n get_map_data,\n get_hartmann_metric,\n get_list_surrogate,\n get_metric,\n get_mll_type,\n get_model_type,\n get_multi_objective,\n get_multi_objective_optimization_config,\n get_multi_type_experiment,\n get_objective,\n get_objective_threshold,\n get_optimization_config,\n get_order_constraint,\n get_outcome_constraint,\n get_parameter_constraint,\n get_percentile_early_stopping_strategy,\n get_range_parameter,\n get_scalarized_objective,\n get_search_space,\n get_simple_experiment_with_batch_trial,\n get_sum_constraint1,\n get_sum_constraint2,\n get_surrogate,\n get_synthetic_runner,\n get_trial,\n)\nfrom ax.utils.testing.modeling_stubs import (\n get_generation_strategy,\n get_observation_features,\n get_transform_type,\n)\nfrom botorch.test_functions.synthetic import Ackley\n\n\nTEST_CASES = [\n (\"AbandonedArm\", get_abandoned_arm),\n (\"Arm\", get_arm),\n (\"AugmentedBraninMetric\", get_augmented_branin_metric),\n 
(\"AugmentedHartmannMetric\", get_augmented_hartmann_metric),\n (\"BatchTrial\", get_batch_trial),\n (\"BenchmarkProblem\", get_branin_benchmark_problem),\n (\"BoTorchModel\", get_botorch_model),\n (\"BoTorchModel\", get_botorch_model_with_default_acquisition_class),\n (\"BraninMetric\", get_branin_metric),\n (\"ChoiceParameter\", get_choice_parameter),\n (\"Experiment\", get_experiment_with_batch_and_single_trial),\n (\"Experiment\", get_experiment_with_trial_with_ttl),\n (\"Experiment\", get_experiment_with_data),\n (\"Experiment\", get_experiment_with_map_data_type),\n (\"FactorialMetric\", get_factorial_metric),\n (\"FixedParameter\", get_fixed_parameter),\n (\"Hartmann6Metric\", get_hartmann_metric),\n (\"GenerationStrategy\", partial(get_generation_strategy, with_experiment=True)),\n (\"GeneratorRun\", get_generator_run),\n (\"ListSurrogate\", get_list_surrogate),\n (\"MapData\", get_map_data),\n (\"Metric\", get_metric),\n (\"MultiObjective\", get_multi_objective),\n (\"MultiObjectiveOptimizationConfig\", get_multi_objective_optimization_config),\n (\"MultiTypeExperiment\", get_multi_type_experiment),\n (\"ObservationFeatures\", get_observation_features),\n (\"Objective\", get_objective),\n (\"ObjectiveThreshold\", get_objective_threshold),\n (\"OptimizationConfig\", get_optimization_config),\n (\"OrderConstraint\", get_order_constraint),\n (\"OutcomeConstraint\", get_outcome_constraint),\n (\"PercentileEarlyStoppingStrategy\", get_percentile_early_stopping_strategy),\n (\"ParameterConstraint\", get_parameter_constraint),\n (\"RangeParameter\", get_range_parameter),\n (\"ScalarizedObjective\", get_scalarized_objective),\n (\"SearchSpace\", get_search_space),\n (\"SimpleBenchmarkProblem\", get_mult_simple_benchmark_problem),\n (\"SimpleBenchmarkProblem\", get_branin_simple_benchmark_problem),\n (\"SimpleBenchmarkProblem\", get_sum_simple_benchmark_problem),\n (\"SimpleExperiment\", get_simple_experiment_with_batch_trial),\n (\"SumConstraint\", get_sum_constraint1),\n (\"SumConstraint\", get_sum_constraint2),\n (\"Surrogate\", get_surrogate),\n (\"SyntheticRunner\", get_synthetic_runner),\n (\"Type[Acquisition]\", get_acquisition_type),\n (\"Type[AcquisitionFunction]\", get_acquisition_function_type),\n (\"Type[Model]\", get_model_type),\n (\"Type[MarginalLogLikelihood]\", get_mll_type),\n (\"Type[Transform]\", get_transform_type),\n (\"Trial\", get_trial),\n]\n\n\nclass JSONStoreTest(TestCase):\n def setUp(self):\n self.experiment = get_experiment_with_batch_and_single_trial()\n\n def testJSONEncodeFailure(self):\n self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(\"foobar\"))\n\n def testJSONDecodeFailure(self):\n self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(\"foobar\"))\n self.assertRaises(JSONDecodeError, object_from_json, {\"__type\": \"foobar\"})\n\n def testSaveAndLoad(self):\n with tempfile.NamedTemporaryFile(mode=\"w+\", delete=False, suffix=\".json\") as f:\n save_experiment(self.experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, self.experiment)\n os.remove(f.name)\n\n def testSaveValidation(self):\n with self.assertRaises(ValueError):\n save_experiment(self.experiment.trials[0], \"test.json\")\n\n def testValidateFilename(self):\n bad_filename = \"test\"\n self.assertRaises(ValueError, save_experiment, self.experiment, bad_filename)\n\n def testEncodeDecode(self):\n for class_, fake_func in TEST_CASES:\n # Can't load trials from JSON, because a batch needs an experiment\n # in order to 
be initialized\n if class_ == \"BatchTrial\" or class_ == \"Trial\":\n continue\n\n # Can't load parameter constraints from JSON, because they require\n # a SearchSpace in order to be initialized\n if class_ == \"OrderConstraint\" or class_ == \"SumConstraint\":\n continue\n\n original_object = fake_func()\n json_object = object_to_json(original_object)\n converted_object = object_from_json(json_object)\n\n if class_ == \"SimpleExperiment\":\n # Evaluation functions will be different, so need to do\n # this so equality test passes\n with self.assertRaises(RuntimeError):\n converted_object.evaluation_function(parameterization={})\n\n original_object.evaluation_function = None\n converted_object.evaluation_function = None\n\n self.assertEqual(\n original_object,\n converted_object,\n msg=f\"Error encoding/decoding {class_}.\",\n )\n\n def testEncodeDecodeTorchTensor(self):\n x = torch.tensor(\n [[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64, device=torch.device(\"cpu\")\n )\n expected_json = {\n \"__type\": \"Tensor\",\n \"value\": [[1.0, 2.0], [3.0, 4.0]],\n \"dtype\": {\"__type\": \"torch_dtype\", \"value\": \"torch.float64\"},\n \"device\": {\"__type\": \"torch_device\", \"value\": \"cpu\"},\n }\n x_json = object_to_json(x)\n self.assertEqual(expected_json, x_json)\n x2 = object_from_json(x_json)\n self.assertTrue(torch.equal(x, x2))\n\n def testDecodeGenerationStrategy(self):\n generation_strategy = get_generation_strategy()\n experiment = get_branin_experiment()\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertGreater(len(new_generation_strategy._steps), 0)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n # Model has not yet been initialized on this GS since it hasn't generated\n # anything yet.\n self.assertIsNone(new_generation_strategy.model)\n\n # Check that we can encode and decode the generation strategy after\n # it has generated some generator runs. 
Since we now need to `gen`,\n # we remove the fake callable kwarg we added, since model does not\n # expect it.\n generation_strategy = get_generation_strategy(with_callable_model_kwarg=False)\n gr = generation_strategy.gen(experiment)\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n # Since this GS has now generated one generator run, model should have\n # been initialized and restored when decoding from JSON.\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n\n # Check that we can encode and decode the generation strategy after\n # it has generated some trials and been updated with some data.\n generation_strategy = new_generation_strategy\n experiment.new_trial(gr) # Add previously generated GR as trial.\n # Make generation strategy aware of the trial's data via `gen`.\n generation_strategy.gen(experiment, data=get_branin_data())\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n\n def testEncodeDecodeNumpy(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n self.assertTrue(np.array_equal(arr, object_from_json(object_to_json(arr))))\n\n def testEncodeDecodeSimpleBenchmarkProblem(self):\n branin_problem = get_branin_simple_benchmark_problem()\n sum_problem = get_sum_simple_benchmark_problem()\n new_branin_problem = object_from_json(object_to_json(branin_problem))\n new_sum_problem = object_from_json(object_to_json(sum_problem))\n self.assertEqual(\n branin_problem.f(1, 2), new_branin_problem.f(1, 2), branin(1, 2)\n )\n self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)\n # Test using `from_botorch`.\n ackley_problem = SimpleBenchmarkProblem(\n f=from_botorch(Ackley()), noise_sd=0.0, minimize=True\n )\n new_ackley_problem = object_from_json(object_to_json(ackley_problem))\n self.assertEqual(\n ackley_problem.f(1, 2), new_ackley_problem.f(1, 2), ackley(1, 2)\n )\n\n def testRegistryAdditions(self):\n class MyRunner(Runner):\n def run():\n pass\n\n def staging_required():\n return False\n\n class MyMetric(Metric):\n pass\n\n register_metric(MyMetric)\n register_runner(MyRunner)\n\n experiment = get_experiment_with_batch_and_single_trial()\n experiment.runner = MyRunner()\n experiment.add_tracking_metric(MyMetric(name=\"my_metric\"))\n with tempfile.NamedTemporaryFile(mode=\"w+\", delete=False, suffix=\".json\") as f:\n save_experiment(experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, experiment)\n os.remove(f.name)\n\n def testEncodeUnknownClassToDict(self):\n # Cannot encode `UnknownClass` type because it is not registered in the\n # CLASS_ENCODER_REGISTRY.\n class UnknownClass:\n def __init__(self):\n pass\n\n with self.assertRaisesRegex(\n ValueError, \"is a class. 
Add it to the CLASS_ENCODER_REGISTRY\"\n ):\n object_to_json(UnknownClass)\n # `UnknownClass` type is registered in the CLASS_ENCODER_REGISTRY and uses the\n # `botorch_modular_to_dict` encoder, but `UnknownClass` is not registered in\n # the `botorch_modular_registry.py` file.\n CLASS_ENCODER_REGISTRY[UnknownClass] = botorch_modular_to_dict\n with self.assertRaisesRegex(\n ValueError,\n \"does not have a corresponding parent class in CLASS_TO_REGISTRY\",\n ):\n object_to_json(UnknownClass)\n\n def testDecodeUnknownClassFromJson(self):\n with self.assertRaisesRegex(\n ValueError,\n \"does not have a corresponding entry in CLASS_TO_REVERSE_REGISTRY\",\n ):\n class_from_json({\"index\": 0, \"class\": \"unknown_path\"})\n", "step-ids": [ 7, 10, 12, 14, 18 ] }
[ 7, 10, 12, 14, 18 ]
import os
import location
import teamList
import pandas as pd
import csv
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats

##adapted from code from this website:
## https://towardsdatascience.com/simple-little-tables-with-matplotlib-9780ef5d0bc4

year = "18-19"

team = "ARI"

seasonReportRaw = pd.read_csv("Data/" + year + " " + team + "/" + team + "_SeasonRaw.csv")
seasonReportRaw['tEPPfP'] = seasonReportRaw['tEPDHP'] + seasonReportRaw['tEPDEP'] + seasonReportRaw['tEPDOP']

homeWins = seasonReportRaw[(seasonReportRaw["Home Team"] == team) & (seasonReportRaw["Home Score"] > seasonReportRaw["Away Score"])]
awayWins = seasonReportRaw[(seasonReportRaw["Away Team"] == team) & (seasonReportRaw["Away Score"] > seasonReportRaw["Home Score"])]

homeLosses = seasonReportRaw[(seasonReportRaw["Home Team"] == team) & (seasonReportRaw["Home Score"] < seasonReportRaw["Away Score"])]
awayLosses = seasonReportRaw[(seasonReportRaw["Away Team"] == team) & (seasonReportRaw["Away Score"] < seasonReportRaw["Home Score"])]

winCount = homeWins["Home Team"].count() + awayWins["Away Team"].count()

PenaltiesSeasonTotal = seasonReportRaw["tPEN(#)"].sum()
PenaltiesSeasonAverage = PenaltiesSeasonTotal / 16
PenaltiesWinTotal = homeWins["tPEN(#)"].sum() + awayWins["tPEN(#)"].sum()
PenaltiesWinAverage = PenaltiesWinTotal / winCount
PenaltiesLossTotal = homeLosses["tPEN(#)"].sum() + awayLosses["tPEN(#)"].sum()
PenaltiesLossAverage = PenaltiesLossTotal / (16-winCount)

EPCSeasonTotal = seasonReportRaw["tEPPfP"].sum()
EPCSeasonAverage = EPCSeasonTotal / 16
EPCWinTotal = homeWins["tEPPfP"].sum() + awayWins["tEPPfP"].sum()
EPCWinAverage = EPCWinTotal / winCount
EPCLossTotal = homeLosses["tEPPfP"].sum() + awayLosses["tEPPfP"].sum()
EPCLossAverage = EPCLossTotal / (16-winCount)

EPCDHPSeasonTotal = seasonReportRaw["tEPDHP"].sum()
EPCDHPSeasonAverage = EPCDHPSeasonTotal / 16
EPCDHPWinTotal = homeWins["tEPDHP"].sum() + awayWins["tEPDHP"].sum()
EPCDHPWinAverage = EPCDHPWinTotal / winCount
EPCDHPLossTotal = homeLosses["tEPDHP"].sum() + awayLosses["tEPDHP"].sum()
EPCDHPLossAverage = EPCDHPLossTotal / (16-winCount)

EPCDEPSeasonTotal = seasonReportRaw["tEPDEP"].sum()
EPCDEPSeasonAverage = EPCDEPSeasonTotal / 16
EPCDEPWinTotal = homeWins["tEPDEP"].sum() + awayWins["tEPDEP"].sum()
EPCDEPWinAverage = EPCDEPWinTotal / winCount
EPCDEPLossTotal = homeLosses["tEPDEP"].sum() + awayLosses["tEPDEP"].sum()
EPCDEPLossAverage = EPCDEPLossTotal / (16-winCount)

EPCOPSeasonTotal = seasonReportRaw["tEPDOP"].sum()
EPCOPSeasonAverage = EPCOPSeasonTotal / 16
EPCOPWinTotal = homeWins["tEPDOP"].sum() + awayWins["tEPDOP"].sum()
EPCOPWinAverage = EPCOPWinTotal / winCount
EPCOPLossTotal = homeLosses["tEPDOP"].sum() + awayLosses["tEPDOP"].sum()
EPCOPLossAverage = EPCOPLossTotal / (16-winCount)

headerRow = ['Season Total', 'Per Game', 'Win Total', 'Per Win', 'Loss Total','Per Loss']
penaltiesRow = ['Penalties',PenaltiesSeasonTotal,PenaltiesSeasonAverage,PenaltiesWinTotal,PenaltiesWinAverage,PenaltiesLossTotal,PenaltiesLossAverage]
EPCRow = ['EPC',EPCSeasonTotal,EPCSeasonAverage,EPCWinTotal,EPCWinAverage,EPCLossTotal,EPCLossAverage]
EPCDHPRow = ['EPCDHP',EPCDHPSeasonTotal,EPCDHPSeasonAverage,EPCDHPWinTotal,EPCDHPWinAverage,EPCDHPLossTotal,EPCDHPLossAverage]
EPCDEPRow = ['EPCDEP',EPCDEPSeasonTotal,EPCDEPSeasonAverage,EPCDEPWinTotal,EPCDEPWinAverage,EPCDEPLossTotal,EPCDEPLossAverage]
EPCOPRow = ['EPCOP',EPCOPSeasonTotal,EPCOPSeasonAverage,EPCOPWinTotal,EPCOPWinAverage,EPCOPLossTotal,EPCOPLossAverage]

fig_background_color = 'white'
fig_border = 'black'
data = [headerRow,penaltiesRow,EPCRow,EPCDHPRow,EPCDEPRow,EPCOPRow]

# Pop the headers from the data array
column_headers = data.pop(0)
row_headers = [x.pop(0) for x in data]

# Table data needs to be non-numeric text. Format the data
# while I'm at it.
cell_text = []
for row in data:
    cell_text.append([f'{x:1.2f}' for x in row])

# Get some lists of color specs for row and column headers
rcolors = plt.cm.BuPu(np.full(len(row_headers), 0.1))
ccolors = plt.cm.BuPu(np.full(len(column_headers), 0.1))

# Create the figure. Setting a small pad on tight_layout
# seems to better regulate white space. Sometimes experimenting
# with an explicit figsize here can produce better outcome.
plt.figure(linewidth=2,
           edgecolor=fig_border,
           facecolor=fig_background_color,
           tight_layout={'pad':1},
           figsize=(4.5,1.75)
           )

# Add a table at the bottom of the axes
the_table = plt.table(cellText=cell_text,
                      rowLabels=row_headers,
                      rowColours=rcolors,
                      rowLoc='right',
                      colColours=ccolors,
                      colLabels=column_headers,
                      loc='center')

# Scaling is the only influence we have over top and bottom cell padding.
# Make the rows taller (i.e., make cell y scale larger).
the_table.scale(1, 1.1)
# Hide axes
ax = plt.gca()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Hide axes border
plt.box(on=None)
# Force the figure to update, so backends center objects correctly within the figure.
# Without plt.draw() here, the title will center on the axes and not the figure.
plt.draw()
# Create image. plt.savefig ignores figure edge and face colors, so map them.
fig = plt.gcf()
plt.savefig('pyplot-table-demo.png',
            edgecolor=fig.get_edgecolor(),
            facecolor=fig.get_facecolor(),
            dpi=175
            )
normal
{ "blob_id": "ba7db49ca7956fdc055702ffccba769485fd0046", "index": 8915, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor row in data:\n cell_text.append([f'{x:1.2f}' for x in row])\n<mask token>\nplt.figure(linewidth=2, edgecolor=fig_border, facecolor=\n fig_background_color, tight_layout={'pad': 1}, figsize=(4.5, 1.75))\n<mask token>\nthe_table.scale(1, 1.1)\n<mask token>\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\nplt.box(on=None)\nplt.draw()\n<mask token>\nplt.savefig('pyplot-table-demo.png', edgecolor=fig.get_edgecolor(),\n facecolor=fig.get_facecolor(), dpi=175)\n", "step-3": "<mask token>\nyear = '18-19'\nteam = 'ARI'\nseasonReportRaw = pd.read_csv('Data/' + year + ' ' + team + '/' + team +\n '_SeasonRaw.csv')\nseasonReportRaw['tEPPfP'] = seasonReportRaw['tEPDHP'] + seasonReportRaw[\n 'tEPDEP'] + seasonReportRaw['tEPDOP']\nhomeWins = seasonReportRaw[(seasonReportRaw['Home Team'] == team) & (\n seasonReportRaw['Home Score'] > seasonReportRaw['Away Score'])]\nawayWins = seasonReportRaw[(seasonReportRaw['Away Team'] == team) & (\n seasonReportRaw['Away Score'] > seasonReportRaw['Home Score'])]\nhomeLosses = seasonReportRaw[(seasonReportRaw['Home Team'] == team) & (\n seasonReportRaw['Home Score'] < seasonReportRaw['Away Score'])]\nawayLosses = seasonReportRaw[(seasonReportRaw['Away Team'] == team) & (\n seasonReportRaw['Away Score'] < seasonReportRaw['Home Score'])]\nwinCount = homeWins['Home Team'].count() + awayWins['Away Team'].count()\nPenaltiesSeasonTotal = seasonReportRaw['tPEN(#)'].sum()\nPenaltiesSeasonAverage = PenaltiesSeasonTotal / 16\nPenaltiesWinTotal = homeWins['tPEN(#)'].sum() + awayWins['tPEN(#)'].sum()\nPenaltiesWinAverage = PenaltiesWinTotal / winCount\nPenaltiesLossTotal = homeLosses['tPEN(#)'].sum() + awayLosses['tPEN(#)'].sum()\nPenaltiesLossAverage = PenaltiesLossTotal / (16 - winCount)\nEPCSeasonTotal = seasonReportRaw['tEPPfP'].sum()\nEPCSeasonAverage = EPCSeasonTotal / 16\nEPCWinTotal = homeWins['tEPPfP'].sum() + awayWins['tEPPfP'].sum()\nEPCWinAverage = EPCWinTotal / winCount\nEPCLossTotal = homeLosses['tEPPfP'].sum() + awayLosses['tEPPfP'].sum()\nEPCLossAverage = EPCLossTotal / (16 - winCount)\nEPCDHPSeasonTotal = seasonReportRaw['tEPDHP'].sum()\nEPCDHPSeasonAverage = EPCDHPSeasonTotal / 16\nEPCDHPWinTotal = homeWins['tEPDHP'].sum() + awayWins['tEPDHP'].sum()\nEPCDHPWinAverage = EPCDHPWinTotal / winCount\nEPCDHPLossTotal = homeLosses['tEPDHP'].sum() + awayLosses['tEPDHP'].sum()\nEPCDHPLossAverage = EPCDHPLossTotal / (16 - winCount)\nEPCDEPSeasonTotal = seasonReportRaw['tEPDEP'].sum()\nEPCDEPSeasonAverage = EPCDEPSeasonTotal / 16\nEPCDEPWinTotal = homeWins['tEPDEP'].sum() + awayWins['tEPDEP'].sum()\nEPCDEPWinAverage = EPCDEPWinTotal / winCount\nEPCDEPLossTotal = homeLosses['tEPDEP'].sum() + awayLosses['tEPDEP'].sum()\nEPCDEPLossAverage = EPCDEPLossTotal / (16 - winCount)\nEPCOPSeasonTotal = seasonReportRaw['tEPDOP'].sum()\nEPCOPSeasonAverage = EPCOPSeasonTotal / 16\nEPCOPWinTotal = homeWins['tEPDOP'].sum() + awayWins['tEPDOP'].sum()\nEPCOPWinAverage = EPCOPWinTotal / winCount\nEPCOPLossTotal = homeLosses['tEPDOP'].sum() + awayLosses['tEPDOP'].sum()\nEPCOPLossAverage = EPCOPLossTotal / (16 - winCount)\nheaderRow = ['Season Total', 'Per Game', 'Win Total', 'Per Win',\n 'Loss Total', 'Per Loss']\npenaltiesRow = ['Penalties', PenaltiesSeasonTotal, PenaltiesSeasonAverage,\n PenaltiesWinTotal, PenaltiesWinAverage, PenaltiesLossTotal,\n PenaltiesLossAverage]\nEPCRow = ['EPC', EPCSeasonTotal, EPCSeasonAverage, EPCWinTotal,\n 
EPCWinAverage, EPCLossTotal, EPCLossAverage]\nEPCDHPRow = ['EPCDHP', EPCDHPSeasonTotal, EPCDHPSeasonAverage,\n EPCDHPWinTotal, EPCDHPWinAverage, EPCDHPLossTotal, EPCDHPLossAverage]\nEPCDEPRow = ['EPCDEP', EPCDEPSeasonTotal, EPCDEPSeasonAverage,\n EPCDEPWinTotal, EPCDEPWinAverage, EPCDEPLossTotal, EPCDEPLossAverage]\nEPCOPRow = ['EPCOP', EPCOPSeasonTotal, EPCOPSeasonAverage, EPCOPWinTotal,\n EPCOPWinAverage, EPCOPLossTotal, EPCOPLossAverage]\nfig_background_color = 'white'\nfig_border = 'black'\ndata = [headerRow, penaltiesRow, EPCRow, EPCDHPRow, EPCDEPRow, EPCOPRow]\ncolumn_headers = data.pop(0)\nrow_headers = [x.pop(0) for x in data]\ncell_text = []\nfor row in data:\n cell_text.append([f'{x:1.2f}' for x in row])\nrcolors = plt.cm.BuPu(np.full(len(row_headers), 0.1))\nccolors = plt.cm.BuPu(np.full(len(column_headers), 0.1))\nplt.figure(linewidth=2, edgecolor=fig_border, facecolor=\n fig_background_color, tight_layout={'pad': 1}, figsize=(4.5, 1.75))\nthe_table = plt.table(cellText=cell_text, rowLabels=row_headers, rowColours\n =rcolors, rowLoc='right', colColours=ccolors, colLabels=column_headers,\n loc='center')\nthe_table.scale(1, 1.1)\nax = plt.gca()\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\nplt.box(on=None)\nplt.draw()\nfig = plt.gcf()\nplt.savefig('pyplot-table-demo.png', edgecolor=fig.get_edgecolor(),\n facecolor=fig.get_facecolor(), dpi=175)\n", "step-4": "import os\nimport location\nimport teamList\nimport pandas as pd\nimport csv\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import stats\nyear = '18-19'\nteam = 'ARI'\nseasonReportRaw = pd.read_csv('Data/' + year + ' ' + team + '/' + team +\n '_SeasonRaw.csv')\nseasonReportRaw['tEPPfP'] = seasonReportRaw['tEPDHP'] + seasonReportRaw[\n 'tEPDEP'] + seasonReportRaw['tEPDOP']\nhomeWins = seasonReportRaw[(seasonReportRaw['Home Team'] == team) & (\n seasonReportRaw['Home Score'] > seasonReportRaw['Away Score'])]\nawayWins = seasonReportRaw[(seasonReportRaw['Away Team'] == team) & (\n seasonReportRaw['Away Score'] > seasonReportRaw['Home Score'])]\nhomeLosses = seasonReportRaw[(seasonReportRaw['Home Team'] == team) & (\n seasonReportRaw['Home Score'] < seasonReportRaw['Away Score'])]\nawayLosses = seasonReportRaw[(seasonReportRaw['Away Team'] == team) & (\n seasonReportRaw['Away Score'] < seasonReportRaw['Home Score'])]\nwinCount = homeWins['Home Team'].count() + awayWins['Away Team'].count()\nPenaltiesSeasonTotal = seasonReportRaw['tPEN(#)'].sum()\nPenaltiesSeasonAverage = PenaltiesSeasonTotal / 16\nPenaltiesWinTotal = homeWins['tPEN(#)'].sum() + awayWins['tPEN(#)'].sum()\nPenaltiesWinAverage = PenaltiesWinTotal / winCount\nPenaltiesLossTotal = homeLosses['tPEN(#)'].sum() + awayLosses['tPEN(#)'].sum()\nPenaltiesLossAverage = PenaltiesLossTotal / (16 - winCount)\nEPCSeasonTotal = seasonReportRaw['tEPPfP'].sum()\nEPCSeasonAverage = EPCSeasonTotal / 16\nEPCWinTotal = homeWins['tEPPfP'].sum() + awayWins['tEPPfP'].sum()\nEPCWinAverage = EPCWinTotal / winCount\nEPCLossTotal = homeLosses['tEPPfP'].sum() + awayLosses['tEPPfP'].sum()\nEPCLossAverage = EPCLossTotal / (16 - winCount)\nEPCDHPSeasonTotal = seasonReportRaw['tEPDHP'].sum()\nEPCDHPSeasonAverage = EPCDHPSeasonTotal / 16\nEPCDHPWinTotal = homeWins['tEPDHP'].sum() + awayWins['tEPDHP'].sum()\nEPCDHPWinAverage = EPCDHPWinTotal / winCount\nEPCDHPLossTotal = homeLosses['tEPDHP'].sum() + awayLosses['tEPDHP'].sum()\nEPCDHPLossAverage = EPCDHPLossTotal / (16 - winCount)\nEPCDEPSeasonTotal = seasonReportRaw['tEPDEP'].sum()\nEPCDEPSeasonAverage = 
EPCDEPSeasonTotal / 16\nEPCDEPWinTotal = homeWins['tEPDEP'].sum() + awayWins['tEPDEP'].sum()\nEPCDEPWinAverage = EPCDEPWinTotal / winCount\nEPCDEPLossTotal = homeLosses['tEPDEP'].sum() + awayLosses['tEPDEP'].sum()\nEPCDEPLossAverage = EPCDEPLossTotal / (16 - winCount)\nEPCOPSeasonTotal = seasonReportRaw['tEPDOP'].sum()\nEPCOPSeasonAverage = EPCOPSeasonTotal / 16\nEPCOPWinTotal = homeWins['tEPDOP'].sum() + awayWins['tEPDOP'].sum()\nEPCOPWinAverage = EPCOPWinTotal / winCount\nEPCOPLossTotal = homeLosses['tEPDOP'].sum() + awayLosses['tEPDOP'].sum()\nEPCOPLossAverage = EPCOPLossTotal / (16 - winCount)\nheaderRow = ['Season Total', 'Per Game', 'Win Total', 'Per Win',\n 'Loss Total', 'Per Loss']\npenaltiesRow = ['Penalties', PenaltiesSeasonTotal, PenaltiesSeasonAverage,\n PenaltiesWinTotal, PenaltiesWinAverage, PenaltiesLossTotal,\n PenaltiesLossAverage]\nEPCRow = ['EPC', EPCSeasonTotal, EPCSeasonAverage, EPCWinTotal,\n EPCWinAverage, EPCLossTotal, EPCLossAverage]\nEPCDHPRow = ['EPCDHP', EPCDHPSeasonTotal, EPCDHPSeasonAverage,\n EPCDHPWinTotal, EPCDHPWinAverage, EPCDHPLossTotal, EPCDHPLossAverage]\nEPCDEPRow = ['EPCDEP', EPCDEPSeasonTotal, EPCDEPSeasonAverage,\n EPCDEPWinTotal, EPCDEPWinAverage, EPCDEPLossTotal, EPCDEPLossAverage]\nEPCOPRow = ['EPCOP', EPCOPSeasonTotal, EPCOPSeasonAverage, EPCOPWinTotal,\n EPCOPWinAverage, EPCOPLossTotal, EPCOPLossAverage]\nfig_background_color = 'white'\nfig_border = 'black'\ndata = [headerRow, penaltiesRow, EPCRow, EPCDHPRow, EPCDEPRow, EPCOPRow]\ncolumn_headers = data.pop(0)\nrow_headers = [x.pop(0) for x in data]\ncell_text = []\nfor row in data:\n cell_text.append([f'{x:1.2f}' for x in row])\nrcolors = plt.cm.BuPu(np.full(len(row_headers), 0.1))\nccolors = plt.cm.BuPu(np.full(len(column_headers), 0.1))\nplt.figure(linewidth=2, edgecolor=fig_border, facecolor=\n fig_background_color, tight_layout={'pad': 1}, figsize=(4.5, 1.75))\nthe_table = plt.table(cellText=cell_text, rowLabels=row_headers, rowColours\n =rcolors, rowLoc='right', colColours=ccolors, colLabels=column_headers,\n loc='center')\nthe_table.scale(1, 1.1)\nax = plt.gca()\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\nplt.box(on=None)\nplt.draw()\nfig = plt.gcf()\nplt.savefig('pyplot-table-demo.png', edgecolor=fig.get_edgecolor(),\n facecolor=fig.get_facecolor(), dpi=175)\n", "step-5": "import os\nimport location\nimport teamList\nimport pandas as pd\nimport csv\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import stats\n\n##adapted from code from this website:\n## https://towardsdatascience.com/simple-little-tables-with-matplotlib-9780ef5d0bc4\n\nyear = \"18-19\"\n\nteam = \"ARI\"\n\nseasonReportRaw = pd.read_csv(\"Data/\" + year + \" \" + team + \"/\" + team + \"_SeasonRaw.csv\")\nseasonReportRaw['tEPPfP'] = seasonReportRaw['tEPDHP'] + seasonReportRaw['tEPDEP'] + seasonReportRaw['tEPDOP']\n\nhomeWins = seasonReportRaw[(seasonReportRaw[\"Home Team\"] == team) & (seasonReportRaw[\"Home Score\"] > seasonReportRaw[\"Away Score\"])]\nawayWins = seasonReportRaw[(seasonReportRaw[\"Away Team\"] == team) & (seasonReportRaw[\"Away Score\"] > seasonReportRaw[\"Home Score\"])]\n\nhomeLosses = seasonReportRaw[(seasonReportRaw[\"Home Team\"] == team) & (seasonReportRaw[\"Home Score\"] < seasonReportRaw[\"Away Score\"])]\nawayLosses = seasonReportRaw[(seasonReportRaw[\"Away Team\"] == team) & (seasonReportRaw[\"Away Score\"] < seasonReportRaw[\"Home Score\"])]\n\nwinCount = homeWins[\"Home Team\"].count() + awayWins[\"Away Team\"].count()\n\nPenaltiesSeasonTotal = 
seasonReportRaw[\"tPEN(#)\"].sum()\nPenaltiesSeasonAverage = PenaltiesSeasonTotal / 16\nPenaltiesWinTotal = homeWins[\"tPEN(#)\"].sum() + awayWins[\"tPEN(#)\"].sum()\nPenaltiesWinAverage = PenaltiesWinTotal / winCount\nPenaltiesLossTotal = homeLosses[\"tPEN(#)\"].sum() + awayLosses[\"tPEN(#)\"].sum()\nPenaltiesLossAverage = PenaltiesLossTotal / (16-winCount)\n\nEPCSeasonTotal = seasonReportRaw[\"tEPPfP\"].sum()\nEPCSeasonAverage = EPCSeasonTotal / 16\nEPCWinTotal = homeWins[\"tEPPfP\"].sum() + awayWins[\"tEPPfP\"].sum()\nEPCWinAverage = EPCWinTotal / winCount\nEPCLossTotal = homeLosses[\"tEPPfP\"].sum() + awayLosses[\"tEPPfP\"].sum()\nEPCLossAverage = EPCLossTotal / (16-winCount)\n\nEPCDHPSeasonTotal = seasonReportRaw[\"tEPDHP\"].sum()\nEPCDHPSeasonAverage = EPCDHPSeasonTotal / 16\nEPCDHPWinTotal = homeWins[\"tEPDHP\"].sum() + awayWins[\"tEPDHP\"].sum()\nEPCDHPWinAverage = EPCDHPWinTotal / winCount\nEPCDHPLossTotal = homeLosses[\"tEPDHP\"].sum() + awayLosses[\"tEPDHP\"].sum()\nEPCDHPLossAverage = EPCDHPLossTotal / (16-winCount)\n\nEPCDEPSeasonTotal = seasonReportRaw[\"tEPDEP\"].sum()\nEPCDEPSeasonAverage = EPCDEPSeasonTotal / 16\nEPCDEPWinTotal = homeWins[\"tEPDEP\"].sum() + awayWins[\"tEPDEP\"].sum()\nEPCDEPWinAverage = EPCDEPWinTotal / winCount\nEPCDEPLossTotal = homeLosses[\"tEPDEP\"].sum() + awayLosses[\"tEPDEP\"].sum()\nEPCDEPLossAverage = EPCDEPLossTotal / (16-winCount)\n\nEPCOPSeasonTotal = seasonReportRaw[\"tEPDOP\"].sum()\nEPCOPSeasonAverage = EPCOPSeasonTotal / 16\nEPCOPWinTotal = homeWins[\"tEPDOP\"].sum() + awayWins[\"tEPDOP\"].sum()\nEPCOPWinAverage = EPCOPWinTotal / winCount\nEPCOPLossTotal = homeLosses[\"tEPDOP\"].sum() + awayLosses[\"tEPDOP\"].sum()\nEPCOPLossAverage = EPCOPLossTotal / (16-winCount)\n\nheaderRow = ['Season Total', 'Per Game', 'Win Total', 'Per Win', 'Loss Total','Per Loss']\npenaltiesRow = ['Penalties',PenaltiesSeasonTotal,PenaltiesSeasonAverage,PenaltiesWinTotal,PenaltiesWinAverage,PenaltiesLossTotal,PenaltiesLossAverage]\nEPCRow = ['EPC',EPCSeasonTotal,EPCSeasonAverage,EPCWinTotal,EPCWinAverage,EPCLossTotal,EPCLossAverage]\nEPCDHPRow = ['EPCDHP',EPCDHPSeasonTotal,EPCDHPSeasonAverage,EPCDHPWinTotal,EPCDHPWinAverage,EPCDHPLossTotal,EPCDHPLossAverage]\nEPCDEPRow = ['EPCDEP',EPCDEPSeasonTotal,EPCDEPSeasonAverage,EPCDEPWinTotal,EPCDEPWinAverage,EPCDEPLossTotal,EPCDEPLossAverage]\nEPCOPRow = ['EPCOP',EPCOPSeasonTotal,EPCOPSeasonAverage,EPCOPWinTotal,EPCOPWinAverage,EPCOPLossTotal,EPCOPLossAverage]\n\nfig_background_color = 'white'\nfig_border = 'black'\ndata = [headerRow,penaltiesRow,EPCRow,EPCDHPRow,EPCDEPRow,EPCOPRow]\n\n# Pop the headers from the data array\ncolumn_headers = data.pop(0)\nrow_headers = [x.pop(0) for x in data]\n\n# Table data needs to be non-numeric text. Format the data\n# while I'm at it.\ncell_text = []\nfor row in data:\n cell_text.append([f'{x:1.2f}' for x in row])\n\n# Get some lists of color specs for row and column headers\nrcolors = plt.cm.BuPu(np.full(len(row_headers), 0.1))\nccolors = plt.cm.BuPu(np.full(len(column_headers), 0.1))\n\n# Create the figure. Setting a small pad on tight_layout\n# seems to better regulate white space. 
Sometimes experimenting\n# with an explicit figsize here can produce better outcome.\nplt.figure(linewidth=2,\n edgecolor=fig_border,\n facecolor=fig_background_color,\n tight_layout={'pad':1},\n figsize=(4.5,1.75)\n )\n\n# Add a table at the bottom of the axes\nthe_table = plt.table(cellText=cell_text,\n rowLabels=row_headers,\n rowColours=rcolors,\n rowLoc='right',\n colColours=ccolors,\n colLabels=column_headers,\n loc='center')\n\n# Scaling is the only influence we have over top and bottom cell padding.\n# Make the rows taller (i.e., make cell y scale larger).\nthe_table.scale(1, 1.1)\n# Hide axes\nax = plt.gca()\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\n# Hide axes border\nplt.box(on=None)\n# Force the figure to update, so backends center objects correctly within the figure.\n# Without plt.draw() here, the title will center on the axes and not the figure.\nplt.draw()\n# Create image. plt.savefig ignores figure edge and face colors, so map them.\nfig = plt.gcf()\nplt.savefig('pyplot-table-demo.png',\n edgecolor=fig.get_edgecolor(),\n facecolor=fig.get_facecolor(),\n dpi=175\n )", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
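The script in the record above renders a pandas-derived summary table purely with Matplotlib. The core pattern it relies on is independent of the NFL data it reads from disk; a minimal sketch of the same idea, using made-up numbers that are purely illustrative, is:

import matplotlib.pyplot as plt

# Made-up cell values, only to demonstrate the plt.table pattern used above.
cell_text = [["1.00", "2.00", "3.00"], ["4.00", "5.00", "6.00"]]
row_headers = ["Row A", "Row B"]
col_headers = ["Col 1", "Col 2", "Col 3"]

fig, ax = plt.subplots(figsize=(4, 1.5))
table = ax.table(cellText=cell_text,
                 rowLabels=row_headers,
                 colLabels=col_headers,
                 loc="center")
table.scale(1, 1.2)   # make rows a little taller, as the original does with the_table.scale
ax.axis("off")        # hide the axes so only the table is rendered
fig.savefig("table-demo.png", dpi=150, bbox_inches="tight")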
def domain_name(url):
    while "https://" in url or "http://" in url or "www." in url:
        url = url.replace("https://", ' ') if "https://" in url else url.replace("http://", ' ') if "http://" in url else url.replace("www.", ' ')
    url = list(url)
    for i in range(len(url)):
        if url[i] == ".":
            return "".join(url[0:i]).strip()
print(domain_name("https://www.codewars.com/kata/514a024011ea4fb54200004b/train/python"))
normal
{ "blob_id": "2b9dfd0cfd62276330f1a4f983f318076f329437", "index": 5026, "step-1": "<mask token>\n", "step-2": "def domain_name(url):\n while 'https://' in url or 'http://' in url or 'www.' in url:\n url = url.replace('https://', ' '\n ) if 'https://' in url else url.replace('http://', ' '\n ) if 'http://' in url else url.replace('www.', ' ')\n url = list(url)\n for i in range(len(url)):\n if url[i] == '.':\n return ''.join(url[0:i]).strip()\n\n\n<mask token>\n", "step-3": "def domain_name(url):\n while 'https://' in url or 'http://' in url or 'www.' in url:\n url = url.replace('https://', ' '\n ) if 'https://' in url else url.replace('http://', ' '\n ) if 'http://' in url else url.replace('www.', ' ')\n url = list(url)\n for i in range(len(url)):\n if url[i] == '.':\n return ''.join(url[0:i]).strip()\n\n\nprint(domain_name(\n 'https://www.codewars.com/kata/514a024011ea4fb54200004b/train/python'))\n", "step-4": "def domain_name(url):\n while \"https://\" in url or \"http://\" in url or \"www.\" in url:\n url = url.replace(\"https://\", ' ') if \"https://\" in url else url.replace(\"http://\", ' ') if \"http://\" in url else url.replace(\"www.\", ' ')\n url = list(url)\n for i in range(len(url)):\n if url[i] == \".\":\n return \"\".join(url[0:i]).strip()\nprint(domain_name(\"https://www.codewars.com/kata/514a024011ea4fb54200004b/train/python\"))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
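Tracing a few sample calls through the function in the record above (assuming it is in scope) shows the intended behaviour: the text before the first dot is returned with any https://, http:// or www. prefix stripped.

# Assumes domain_name from the snippet above is defined in the current module.
assert domain_name("https://www.codewars.com/kata/514a024011ea4fb54200004b/train/python") == "codewars"
assert domain_name("http://github.com/carbonfive/raygun") == "github"
assert domain_name("www.xakep.ru") == "xakep"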
class MedianFinder:

    def __init__(self):
        """
        initialize your data structure here.
        """
        self.minheap = []
        self.maxheap = []

    def addNum(self, num: int) -> None:
        heapq.heappush (self.maxheap ,-heapq.heappushpop(self.minheap , num) )
        if len(self.maxheap) > len(self.minheap):
            heapq.heappush( self.minheap, -heapq.heappop(self.maxheap))

    def findMedian(self) -> float:
        if len(self.maxheap) == len(self.minheap):
            return (self.minheap[0] + -self.maxheap[0]) / 2.
        return self.minheap[0]


# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
normal
{ "blob_id": "e7699bb3f6080c78517f11445e2c48a0e40f3332", "index": 3209, "step-1": "class MedianFinder:\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "class MedianFinder:\n <mask token>\n <mask token>\n\n def findMedian(self) ->float:\n if len(self.maxheap) == len(self.minheap):\n return (self.minheap[0] + -self.maxheap[0]) / 2.0\n return self.minheap[0]\n", "step-3": "class MedianFinder:\n\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self.minheap = []\n self.maxheap = []\n <mask token>\n\n def findMedian(self) ->float:\n if len(self.maxheap) == len(self.minheap):\n return (self.minheap[0] + -self.maxheap[0]) / 2.0\n return self.minheap[0]\n", "step-4": "class MedianFinder:\n\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self.minheap = []\n self.maxheap = []\n\n def addNum(self, num: int) ->None:\n heapq.heappush(self.maxheap, -heapq.heappushpop(self.minheap, num))\n if len(self.maxheap) > len(self.minheap):\n heapq.heappush(self.minheap, -heapq.heappop(self.maxheap))\n\n def findMedian(self) ->float:\n if len(self.maxheap) == len(self.minheap):\n return (self.minheap[0] + -self.maxheap[0]) / 2.0\n return self.minheap[0]\n", "step-5": "class MedianFinder:\n\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self.minheap = []\n self.maxheap = []\n\n def addNum(self, num: int) -> None:\n heapq.heappush (self.maxheap ,-heapq.heappushpop(self.minheap , num) )\n if len(self.maxheap) > len(self.minheap):\n heapq.heappush( self.minheap, -heapq.heappop(self.maxheap))\n\n \n\n def findMedian(self) -> float:\n if len(self.maxheap) == len(self.minheap):\n return (self.minheap[0] + -self.maxheap[0]) / 2.\n return self.minheap[0]\n\n\n# Your MedianFinder object will be instantiated and called as such:\n# obj = MedianFinder()\n# obj.addNum(num)\n# param_2 = obj.findMedian()", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
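The class in the record above uses heapq but never imports it, so it only runs where heapq is already in scope (as on LeetCode-style judges). A self-contained sketch of the same two-heap idea, with the import added and a small check using made-up sample numbers:

import heapq


class MedianFinder:
    def __init__(self):
        self.minheap = []  # larger half of the numbers (min-heap)
        self.maxheap = []  # smaller half, stored negated so heapq acts as a max-heap

    def addNum(self, num):
        # Push into the larger half, then hand its smallest element to the smaller half.
        heapq.heappush(self.maxheap, -heapq.heappushpop(self.minheap, num))
        # Keep the larger half at least as big as the smaller half.
        if len(self.maxheap) > len(self.minheap):
            heapq.heappush(self.minheap, -heapq.heappop(self.maxheap))

    def findMedian(self):
        if len(self.maxheap) == len(self.minheap):
            return (self.minheap[0] + -self.maxheap[0]) / 2.0
        return self.minheap[0]


if __name__ == "__main__":
    mf = MedianFinder()
    for n in (5, 15, 1, 3):
        mf.addNum(n)
    print(mf.findMedian())  # 4.0 -> median of 1, 3, 5, 15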
def fun1(fun):
    return "Hai!!!! "+fun
def message():
    return "How are you"

res = fun1(message())
print(res)
normal
{ "blob_id": "e9fff1fb0a79493d4d7f3417c7d554eb10a978a0", "index": 6616, "step-1": "<mask token>\n", "step-2": "def fun1(fun):\n return 'Hai!!!! ' + fun\n\n\ndef message():\n return 'How are you'\n\n\n<mask token>\n", "step-3": "def fun1(fun):\n return 'Hai!!!! ' + fun\n\n\ndef message():\n return 'How are you'\n\n\n<mask token>\nprint(res)\n", "step-4": "def fun1(fun):\n return 'Hai!!!! ' + fun\n\n\ndef message():\n return 'How are you'\n\n\nres = fun1(message())\nprint(res)\n", "step-5": "def fun1(fun):\r\n return \"Hai!!!! \"+fun\r\ndef message():\r\n return \"How are you\"\r\n\r\nres = fun1(message())\r\nprint(res)\r\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
def solution(record):
    answer = []
    arr = dict()
    history = []
    for i in record:
        tmp = i.split()
        if tmp[0] == "Enter" :
            arr[tmp[1]] = tmp[2]
            history.append([tmp[1], "님이 들어왔습니다."])
        elif tmp[0] == "Leave" :
            history.append([tmp[1], "님이 나갔습니다."])
        elif tmp[0] == "Change" :
            arr[tmp[1]] = tmp[2]

    for i in history :
        answer.append(arr[i[0]] + i[1])
    return answer
normal
{ "blob_id": "d9f66cc3ba40292c49da08d7573d4c605a2771ae", "index": 3730, "step-1": "<mask token>\n", "step-2": "def solution(record):\n answer = []\n arr = dict()\n history = []\n for i in record:\n tmp = i.split()\n if tmp[0] == 'Enter':\n arr[tmp[1]] = tmp[2]\n history.append([tmp[1], '님이 들어왔습니다.'])\n elif tmp[0] == 'Leave':\n history.append([tmp[1], '님이 나갔습니다.'])\n elif tmp[0] == 'Change':\n arr[tmp[1]] = tmp[2]\n for i in history:\n answer.append(arr[i[0]] + i[1])\n return answer\n", "step-3": "def solution(record):\n answer = []\n arr = dict()\n history = []\n for i in record:\n tmp = i.split()\n if tmp[0] == \"Enter\" :\n arr[tmp[1]] = tmp[2]\n history.append([tmp[1], \"님이 들어왔습니다.\"])\n elif tmp[0] == \"Leave\" :\n history.append([tmp[1], \"님이 나갔습니다.\"])\n elif tmp[0] == \"Change\" :\n arr[tmp[1]] = tmp[2]\n\n for i in history :\n answer.append(arr[i[0]] + i[1])\n return answer", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
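The function in the record above replays Enter/Leave/Change chat events and reports each message with the user's final nickname (it resembles the well-known "open chatroom" programming exercise). A small traced example; the input below is an assumed sample, not taken from this record:

# Assumes solution from the snippet above is in scope.
record = [
    "Enter uid1234 Muzi",
    "Enter uid4567 Prodo",
    "Leave uid1234",
    "Enter uid1234 Prodo",
    "Change uid4567 Ryan",
]
print(solution(record))
# ['Prodo님이 들어왔습니다.', 'Ryan님이 들어왔습니다.',
#  'Prodo님이 나갔습니다.', 'Prodo님이 들어왔습니다.']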
import numpy as np
import matplotlib.pyplot as plt
import math

filename = '/home/kolan/mycode/python/dektak/data/t10_1_1_normal.csv'
#filename = '/home/kolan/mycode/python/dektak/t10_1_3_normal.csv'
#filename = '/home/kolan/mycode/python/dektak/t10_1_6_normal.csv'
#filename = '/home/kolan/mycode/python/dektak/t10_1_7_normal.csv'
#filename = '/home/kolan/mycode/python/dektak/t10_1_3_parallel.csv'


def FindHeaderLength():
    """
    Finds the positionon the 'Scan Data' and adds additional 2 lines
    to give as a result the lenght of the header in number of lines.
    This is then used in csv function
    """

    lookup = 'Lateral um'

    with open(filename) as myFile:
        for FoundPosition, line in enumerate(myFile, 1):
            if lookup in line:
                print 'Scan Data found at line:', FoundPosition
                break

    return FoundPosition+4


x=np.loadtxt(filename,dtype=float,delimiter=',',skiprows=FindHeaderLength(),usecols=(0,))
y=np.loadtxt(filename,dtype=float,delimiter=',',skiprows=FindHeaderLength(),usecols=(1,))

coefficients = np.polyfit(x, y, 1)
polynomial = np.poly1d(coefficients)
ys = polynomial(x)
print coefficients
print polynomial

yLevelled=y-ys

plt.figure(1)
plt.plot(x,y)
plt.plot(x,ys)
plt.title('Raw data plot')
plt.xlabel('Lateral [um]')
plt.ylabel('Raw Micrometer [um]')
plt.grid(True)

plt.figure(2)
plt.title('Histogram of y')
n, bins, patches = plt.hist(y, 256, normed=1, facecolor='g', alpha=0.75)
plt.grid(True)

plt.figure(3)
d = np.diff(y)
plt.plot(d)
plt.title('Derivative of y')
plt.xlabel('Point []')
plt.ylabel('Raw Micrometer [um]')
plt.grid(True)

plt.figure(4)
plt.plot(x,yLevelled)
plt.title('Levelled data plot')
plt.xlabel('Lateral [um]')
plt.ylabel('Micrometer [um]')
plt.grid(True)

plt.figure(5)
plt.title('Histogram of yLevelled')
n, bins, patches = plt.hist(yLevelled, 256, normed=1, facecolor='g', alpha=0.75)
plt.grid(True)

dataLenght = len(yLevelled)
xDiff = np.delete(x,dataLenght-1) #diff consumes one last element from the array

plt.figure(6)
d = np.diff(y)
plt.plot(xDiff,d)
plt.title('Derivative of y')
plt.xlabel('Lateral [um]')
plt.ylabel('Raw Micrometer [um]')
plt.grid(True)

yLevelledMin = np.min(yLevelled)
yLevelledZeroShift = yLevelled - yLevelledMin

plt.figure(7)
plt.plot(x,yLevelledZeroShift)
plt.title('Levelled and shifted data plot')
plt.xlabel('Lateral [um]')
plt.ylabel('Micrometer [um]')
plt.grid(True)

##FFT###########################################################################

dataLenghtFFT = len(yLevelled)/2 #divide by 2 to satify rfft
                                 # scale by the number of points so that
                                 # the magnitude does not depend on the length
                                 # of the signal or on its sampling frequency

calculatedFFT = np.fft.rfft(yLevelled)
#calculatedFFT = np.fft.rfft(yLevelledZeroShift)

amplitudeFFT = np.abs(calculatedFFT) #calculates FFT amplitude from
                                     #complex calculatedFFT output
phaseFFT = np.angle(calculatedFFT) #calculates FFT phase from
                                   #complex calculatedFFT output
phaseDegreesFFT = np.rad2deg(phaseFFT) #convert to degrees
amplitudeScaledFFT = amplitudeFFT/float(dataLenghtFFT)
                    # scale by the number of points so that
                    # the magnitude does not depend on the length
                    # of the signal
amplitudeScaledRMSFFT = amplitudeFFT/float(dataLenghtFFT)/math.sqrt(2)


# Scaling to Root mean square amplitude (dataLenghtFFT/sqrt{2}),
#############################################################################
# Plot the results
#############################################################################

xFFT = np.linspace(0,dataLenghtFFT+1,dataLenghtFFT+1)
                    #the range is two times smaller +1 for RFFT
                    #sinus signal without noise used for fit

plt.figure("FFT amplitude and phase coefficients")
plt.subplot(2,1,1)
plt.vlines(xFFT,0,amplitudeScaledFFT)
plt.title("FFT amplitude coefficients")
plt.xlabel("Harmonics")
plt.ylabel("Amplitude [V]")
plt.xlim(0,dataLenghtFFT/2+1) #adjuts the x axis to maximum of numberOfPoints
plt.grid(True)

plt.subplot(2,1,2)
plt.vlines(xFFT,0,phaseDegreesFFT)
plt.title("FFT phase coefficients")
plt.xlabel("Harmonics")
plt.ylabel("Phase [deg]")
plt.tight_layout() #removes the overlapping of the labels in subplots
plt.xlim(0,dataLenghtFFT+1)
plt.grid(True)


##############################################################################
##Moving average
##############################################################################
plt.figure('LevelledData with moving average ')
yLevelledMA = np.convolve(yLevelled, np.ones(10)/10)
plt.plot(yLevelled)
plt.hold(True)
plt.plot(yLevelledMA)
plt.title('Filtered levelled data plot')
plt.xlabel('Sample []')
plt.ylabel('Micrometer [um]')
plt.grid(True)

##orizontal line

diffMA = np.convolve(d, np.ones(10)/10)

dataLenghtDiff = len(d)
dataLenghtDiffMA = len(diffMA)

xLine = np.linspace(0,dataLenghtDiffMA,dataLenghtDiffMA)
yLine = np.linspace(0.05,0.05,dataLenghtDiffMA)

plt.figure('Derivative with moving average')
plt.plot(d)
plt.hold(True)
plt.plot(diffMA)
plt.plot(yLine)
plt.title('Derivative with moving average')
plt.xlabel('Sample []')
plt.ylabel('Micrometer [um]')
plt.grid(True)


print dataLenghtDiff
print dataLenghtDiffMA


#thresholded = np.array(diffMA)
#x = np.where(thresholded == 0.05)[0]
#print x
#plt.figure('Derivative with moving average thresholded')
#plt.plot(thresholded)
#plt.title('Derivative with moving average')
#plt.xlabel('Sample []')
#plt.ylabel('Micrometer [um]')
#plt.grid(True)
#
#itemindex = np.where(diffMA > 0.05 and diffMA < 0.051)

plt.show()
normal
{ "blob_id": "139d06497a44031f6414980ad54454477e3d0b2c", "index": 4540, "step-1": "import numpy as np \nimport matplotlib.pyplot as plt\nimport math\n\nfilename = '/home/kolan/mycode/python/dektak/data/t10_1_1_normal.csv'\n#filename = '/home/kolan/mycode/python/dektak/t10_1_3_normal.csv'\n#filename = '/home/kolan/mycode/python/dektak/t10_1_6_normal.csv'\n#filename = '/home/kolan/mycode/python/dektak/t10_1_7_normal.csv'\n#filename = '/home/kolan/mycode/python/dektak/t10_1_3_parallel.csv'\n\n\n\ndef FindHeaderLength():\n \"\"\"\n Finds the positionon the 'Scan Data' and adds additional 2 lines\n to give as a result the lenght of the header in number of lines.\n This is then used in csv function\n \"\"\"\n\n lookup = 'Lateral um'\n \n with open(filename) as myFile:\n for FoundPosition, line in enumerate(myFile, 1):\n if lookup in line:\n print 'Scan Data found at line:', FoundPosition\n break\n \n return FoundPosition+4\n\n\nx=np.loadtxt(filename,dtype=float,delimiter=',',skiprows=FindHeaderLength(),usecols=(0,))\ny=np.loadtxt(filename,dtype=float,delimiter=',',skiprows=FindHeaderLength(),usecols=(1,))\n\ncoefficients = np.polyfit(x, y, 1)\npolynomial = np.poly1d(coefficients)\nys = polynomial(x)\nprint coefficients\nprint polynomial\n\nyLevelled=y-ys\n\nplt.figure(1)\nplt.plot(x,y)\nplt.plot(x,ys)\nplt.title('Raw data plot')\nplt.xlabel('Lateral [um]')\nplt.ylabel('Raw Micrometer [um]')\nplt.grid(True)\n\nplt.figure(2)\nplt.title('Histogram of y')\nn, bins, patches = plt.hist(y, 256, normed=1, facecolor='g', alpha=0.75)\nplt.grid(True)\n\nplt.figure(3)\nd = np.diff(y)\nplt.plot(d)\nplt.title('Derivative of y')\nplt.xlabel('Point []')\nplt.ylabel('Raw Micrometer [um]')\nplt.grid(True)\n\nplt.figure(4)\nplt.plot(x,yLevelled)\nplt.title('Levelled data plot')\nplt.xlabel('Lateral [um]')\nplt.ylabel('Micrometer [um]')\nplt.grid(True)\n\nplt.figure(5)\nplt.title('Histogram of yLevelled')\nn, bins, patches = plt.hist(yLevelled, 256, normed=1, facecolor='g', alpha=0.75)\nplt.grid(True)\n\ndataLenght = len(yLevelled) \nxDiff = np.delete(x,dataLenght-1) #diff consumes one last element from the array\n\nplt.figure(6)\nd = np.diff(y)\nplt.plot(xDiff,d)\nplt.title('Derivative of y')\nplt.xlabel('Lateral [um]')\nplt.ylabel('Raw Micrometer [um]')\nplt.grid(True)\n\nyLevelledMin = np.min(yLevelled)\nyLevelledZeroShift = yLevelled - yLevelledMin\n\nplt.figure(7)\nplt.plot(x,yLevelledZeroShift)\nplt.title('Levelled and shifted data plot')\nplt.xlabel('Lateral [um]')\nplt.ylabel('Micrometer [um]')\nplt.grid(True)\n\n##FFT###########################################################################\n\ndataLenghtFFT = len(yLevelled)/2 #divide by 2 to satify rfft\n # scale by the number of points so that\n # the magnitude does not depend on the length \n # of the signal or on its sampling frequency \n\ncalculatedFFT = np.fft.rfft(yLevelled) \n#calculatedFFT = np.fft.rfft(yLevelledZeroShift) \n\namplitudeFFT = np.abs(calculatedFFT) #calculates FFT amplitude from \n #complex calculatedFFT output\nphaseFFT = np.angle(calculatedFFT) #calculates FFT phase from \n #complex calculatedFFT output\nphaseDegreesFFT = np.rad2deg(phaseFFT) #convert to degrees\namplitudeScaledFFT = amplitudeFFT/float(dataLenghtFFT)\n # scale by the number of points so that\n # the magnitude does not depend on the length \n # of the signal\namplitudeScaledRMSFFT = amplitudeFFT/float(dataLenghtFFT)/math.sqrt(2)\n\n\n# Scaling to Root mean square amplitude 
(dataLenghtFFT/sqrt{2}),\n#############################################################################\n# Plot the results\n#############################################################################\n\nxFFT = np.linspace(0,dataLenghtFFT+1,dataLenghtFFT+1) \n #the range is two times smaller +1 for RFFT\n #sinus signal without noise used for fit\n\nplt.figure(\"FFT amplitude and phase coefficients\")\nplt.subplot(2,1,1)\nplt.vlines(xFFT,0,amplitudeScaledFFT)\nplt.title(\"FFT amplitude coefficients\")\nplt.xlabel(\"Harmonics\")\nplt.ylabel(\"Amplitude [V]\")\nplt.xlim(0,dataLenghtFFT/2+1) #adjuts the x axis to maximum of numberOfPoints\nplt.grid(True)\n\nplt.subplot(2,1,2)\nplt.vlines(xFFT,0,phaseDegreesFFT)\nplt.title(\"FFT phase coefficients\")\nplt.xlabel(\"Harmonics\")\nplt.ylabel(\"Phase [deg]\")\nplt.tight_layout() #removes the overlapping of the labels in subplots\nplt.xlim(0,dataLenghtFFT+1)\nplt.grid(True)\n\n\n##############################################################################\n##Moving average\n##############################################################################\nplt.figure('LevelledData with moving average ')\nyLevelledMA = np.convolve(yLevelled, np.ones(10)/10)\nplt.plot(yLevelled)\nplt.hold(True)\nplt.plot(yLevelledMA)\nplt.title('Filtered levelled data plot')\nplt.xlabel('Sample []')\nplt.ylabel('Micrometer [um]')\nplt.grid(True)\n\n##orizontal line\n\n\n\ndiffMA = np.convolve(d, np.ones(10)/10)\n\ndataLenghtDiff = len(d)\ndataLenghtDiffMA = len(diffMA)\n\nxLine = np.linspace(0,dataLenghtDiffMA,dataLenghtDiffMA)\nyLine = np.linspace(0.05,0.05,dataLenghtDiffMA) \n\nplt.figure('Derivative with moving average')\nplt.plot(d)\nplt.hold(True)\nplt.plot(diffMA)\nplt.plot(yLine)\nplt.title('Derivative with moving average')\nplt.xlabel('Sample []')\nplt.ylabel('Micrometer [um]')\nplt.grid(True)\n\n\n\nprint dataLenghtDiff\nprint dataLenghtDiffMA\n\n\n\n#thresholded = np.array(diffMA)\n#x = np.where(thresholded == 0.05)[0]\n#print x\n#plt.figure('Derivative with moving average thresholded')\n#plt.plot(thresholded)\n#plt.title('Derivative with moving average')\n#plt.xlabel('Sample []')\n#plt.ylabel('Micrometer [um]')\n#plt.grid(True)\n#\n#itemindex = np.where(diffMA > 0.05 and diffMA < 0.051)\n\nplt.show()", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from __future__ import annotations from typing import List, Dict, NamedTuple, Union, Optional import codecs import collections import enum import json import re import struct from refinery.lib.structures import StructReader from refinery.units.formats.office.xtdoc import xtdoc, UnpackResult from refinery.lib import chunks from refinery.lib.types import ByteStr from refinery.lib.mime import FileMagicInfo from refinery.lib.tools import cached_property class MsiType(enum.IntEnum): """ Known data types for MSI table cell entries. """ Long = 0x104 Short = 0x502 Binary = 0x900 String = 0xD00 StringLocalized = 0xF00 Unknown = 0 def __str__(self): return self.name class MSITableColumnInfo(NamedTuple): """ Represents information about an MSI table column. See also: https://doxygen.reactos.org/db/de4/msipriv_8h.html """ number: int attributes: int @property def type(self) -> MsiType: try: if self.is_integer: return MsiType(self.attributes & 0xFFF) else: return MsiType(self.attributes & 0xF00) except Exception: return MsiType.Unknown @property def is_integer(self) -> bool: return self.attributes & 0x0F00 < 0x800 @property def is_key(self) -> bool: return self.attributes & 0x2000 == 0x2000 @property def is_nullable(self) -> bool: return self.attributes & 0x1000 == 0x1000 @property def length(self) -> int: vt = self.type if vt is MsiType.Long: return 4 if vt is MsiType.Short: return 2 return self.attributes & 0xFF @property def struct_format(self) -> str: vt = self.type if vt is MsiType.Long: return 'I' elif vt is MsiType.Short: return 'H' else: return 'H' class MSIStringData: def __init__(self, string_data: ByteStr, string_pool: ByteStr): data = StructReader(string_data) pool = StructReader(string_pool) self.strings: List[bytes] = [] self.provided_ref_count: List[int] = [] self.computed_ref_count: List[int] = [] self.codepage = pool.u16() self._unknown = pool.u16() while not pool.eof: size, rc = pool.read_struct('<HH') string = data.read_bytes(size) self.strings.append(string) self.provided_ref_count.append(rc) self.computed_ref_count.append(0) @cached_property def codec(self): try: return codecs.lookup(F'cp{self.codepage}').name except Exception: xtmsi.log_info('failed looking up codec', self.codepage) return 'latin1' def __len__(self): return len(self.strings) def __iter__(self): yield from range(1, len(self) + 1) def __contains__(self, index): return 0 < index <= len(self) def ref(self, index: int, increment=True) -> Union[str, bytes]: assert index > 0 index -= 1 if increment: self.computed_ref_count[index] += 1 data = self.strings[index] data = data.decode(self.codec) return data class xtmsi(xtdoc): """ Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains parsed MSI table information, similar to the output of the Orca tool. Binary streams are placed in a virtual folder called "Binary", and extracted scripts from custom actions are separately extracted in a virtual folder named "Action". 
""" _SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json' # https://learn.microsoft.com/en-us/windows/win32/msi/summary-list-of-all-custom-action-types _CUSTOM_ACTION_TYPES = { 0x01: 'DLL file stored in a Binary table stream.', 0x02: 'EXE file stored in a Binary table stream.', 0x05: 'JScript file stored in a Binary table stream.', 0x06: 'VBScript file stored in a Binary table stream.', 0x11: 'DLL file that is installed with a product.', 0x12: 'EXE file that is installed with a product.', 0x13: 'Displays a specified error message and returns failure, terminating the installation.', 0x15: 'JScript file that is installed with a product.', 0x16: 'VBScript file that is installed with a product.', 0x22: 'EXE file having a path referencing a directory.', 0x23: 'Directory set with formatted text.', 0x25: 'JScript text stored in this sequence table.', 0x26: 'VBScript text stored in this sequence table.', 0x32: 'EXE file having a path specified by a property value.', 0x33: 'Property set with formatted text.', 0x35: 'JScript text specified by a property value.', 0x36: 'VBScript text specified by a property value.', } def unpack(self, data): streams = {result.path: result for result in super().unpack(data)} def stream(name: str): return streams.pop(name).get_data() def column_formats(table: Dict[str, MSITableColumnInfo]) -> str: return ''.join(v.struct_format for v in table.values()) def stream_to_rows(data: ByteStr, row_format: str): row_size = struct.calcsize(F'<{row_format}') row_count = int(len(data) / row_size) reader = StructReader(data) columns = [reader.read_struct(F'<{sc*row_count}') for sc in row_format] for i in range(row_count): yield [c[i] for c in columns] tables: Dict[str, Dict[str, MSITableColumnInfo]] = collections.defaultdict(collections.OrderedDict) strings = MSIStringData(stream('!_StringData'), stream('!_StringPool')) for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(stream('!_Columns'), 'HHHH'): tbl_name = strings.ref(tbl_name_id) col_name = strings.ref(col_name_id) tables[tbl_name][col_name] = MSITableColumnInfo(col_number, col_attributes) table_names_given = {strings.ref(k) for k in chunks.unpack(stream('!_Tables'), 2, False)} table_names_known = set(tables) for name in table_names_known - table_names_given: self.log_warn(F'table name known but not given: {name}') for name in table_names_given - table_names_known: self.log_warn(F'table name given but not known: {name}') class ScriptItem(NamedTuple): row_index: int extension: Optional[str] processed_table_data: Dict[str, List[Dict[str, str]]] = {} tbl_properties: Dict[str, str] = {} tbl_files: Dict[str, str] = {} tbl_components: Dict[str, str] = {} postprocessing: List[ScriptItem] = [] def format_string(string: str): # https://learn.microsoft.com/en-us/windows/win32/msi/formatted def _replace(match: re.Match[str]): _replace.done = False prefix, name = match.groups() if not prefix: tbl = tbl_properties elif prefix in '%': name = name.rstrip('%').upper() return F'%{name}%' elif prefix in '!#': tbl = tbl_files elif prefix in '$': tbl = tbl_components else: raise ValueError return tbl.get(name, '') while True: _replace.done = True string = re.sub(R'''(?x) \[ # open square brackent (?![~\\]) # not followed by escapes ([%$!#]?) 
# any of the valid prefix characters ([^[\]{}]+) # no brackets or braces \]''', _replace, string) if _replace.done: break string = re.sub(r'\[\\(.)\]', r'\1', string) string = string.replace('[~]', '\0') return string for table_name, table in tables.items(): stream_name = F'!{table_name}' if stream_name not in streams: continue processed = [] info = list(table.values()) for r, row in enumerate(stream_to_rows(stream(stream_name), column_formats(table))): values = [] for index, value in enumerate(row): vt = info[index].type if vt is MsiType.Long: if value != 0: value -= 0x80000000 elif vt is MsiType.Short: if value != 0: value -= 0x8000 elif value in strings: value = strings.ref(value) elif not info[index].is_integer: value = '' values.append(value) if table_name == 'Property': tbl_properties[values[0]] = values[1] if table_name == 'File': tbl_properties[values[0]] = values[2] if table_name == 'Component': tbl_properties[values[0]] = F'%{values[2]}%' entry = dict(zip(table, values)) einfo = {t: i for t, i in zip(table, info)} if table_name == 'MsiFileHash': entry['Hash'] = struct.pack( '<IIII', row[2] ^ 0x80000000, row[3] ^ 0x80000000, row[4] ^ 0x80000000, row[5] ^ 0x80000000, ).hex() if table_name == 'CustomAction': code = row[1] & 0x3F try: entry['Comment'] = self._CUSTOM_ACTION_TYPES[code] except LookupError: pass t = einfo.get('Target') c = {0x25: 'js', 0x26: 'vbs', 0x33: None} if code in c and t and not t.is_integer: postprocessing.append(ScriptItem(r, c[code])) processed.append(entry) if processed: processed_table_data[table_name] = processed ca = processed_table_data.get('CustomAction', None) for item in postprocessing: entry = ca[item.row_index] try: path: str = entry['Action'] data: str = entry['Target'] except KeyError: continue root = F'Action/{path}' if item.extension: path = F'{root}.{item.extension}' streams[path] = UnpackResult(path, data.encode(self.codec)) continue data = format_string(data) parts = [part.partition('\x02') for part in data.split('\x01')] if not all(part[1] == '\x02' for part in parts): continue for name, _, script in parts: if not name.lower().startswith('script'): continue if not script: continue path = F'{root}.{name}' streams[path] = UnpackResult(path, script.encode(self.codec)) for ignored_stream in [ '[5]SummaryInformation', '[5]DocumentSummaryInformation', '[5]DigitalSignature', '[5]MsiDigitalSignatureEx' ]: streams.pop(ignored_stream, None) inconsistencies = 0 for k in range(len(strings)): c = strings.computed_ref_count[k] p = strings.provided_ref_count[k] if c != p and not self.log_debug(F'string reference count computed={c} provided={p}:', strings.ref(k + 1, False)): inconsistencies += 1 if inconsistencies: self.log_info(F'found {inconsistencies} incorrect string reference counts') def fix_msi_path(path: str): prefix, dot, name = path.partition('.') if dot == '.' and prefix.lower() == 'binary': path = F'{prefix}/{name}' return path streams = {fix_msi_path(path): item for path, item in streams.items()} ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME, json.dumps(processed_table_data, indent=4).encode(self.codec)) streams[ds.path] = ds for path in sorted(streams): streams[path].path = path yield streams[path] @classmethod def handles(self, data: bytearray): if not data.startswith(B'\xD0\xCF\x11\xE0'): return False return FileMagicInfo(data).extension == 'msi' xtmsi.__doc__ = xtmsi.__doc__.format(FN=xtmsi._SYNTHETIC_STREAMS_FILENAME)
normal
{ "blob_id": "566dab589cdb04332a92138b1a1faf53cd0f58b8", "index": 5419, "step-1": "<mask token>\n\n\nclass MSITableColumnInfo(NamedTuple):\n <mask token>\n number: int\n attributes: int\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def length(self) ->int:\n vt = self.type\n if vt is MsiType.Long:\n return 4\n if vt is MsiType.Short:\n return 2\n return self.attributes & 255\n <mask token>\n\n\nclass MSIStringData:\n\n def __init__(self, string_data: ByteStr, string_pool: ByteStr):\n data = StructReader(string_data)\n pool = StructReader(string_pool)\n self.strings: List[bytes] = []\n self.provided_ref_count: List[int] = []\n self.computed_ref_count: List[int] = []\n self.codepage = pool.u16()\n self._unknown = pool.u16()\n while not pool.eof:\n size, rc = pool.read_struct('<HH')\n string = data.read_bytes(size)\n self.strings.append(string)\n self.provided_ref_count.append(rc)\n self.computed_ref_count.append(0)\n\n @cached_property\n def codec(self):\n try:\n return codecs.lookup(f'cp{self.codepage}').name\n except Exception:\n xtmsi.log_info('failed looking up codec', self.codepage)\n return 'latin1'\n\n def __len__(self):\n return len(self.strings)\n\n def __iter__(self):\n yield from range(1, len(self) + 1)\n\n def __contains__(self, index):\n return 0 < index <= len(self)\n\n def ref(self, index: int, increment=True) ->Union[str, bytes]:\n assert index > 0\n index -= 1\n if increment:\n self.computed_ref_count[index] += 1\n data = self.strings[index]\n data = data.decode(self.codec)\n return data\n\n\nclass xtmsi(xtdoc):\n \"\"\"\n Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains\n parsed MSI table information, similar to the output of the Orca tool. Binary streams are placed in a\n virtual folder called \"Binary\", and extracted scripts from custom actions are separately extracted in\n a virtual folder named \"Action\".\n \"\"\"\n _SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'\n _CUSTOM_ACTION_TYPES = {(1):\n 'DLL file stored in a Binary table stream.', (2):\n 'EXE file stored in a Binary table stream.', (5):\n 'JScript file stored in a Binary table stream.', (6):\n 'VBScript file stored in a Binary table stream.', (17):\n 'DLL file that is installed with a product.', (18):\n 'EXE file that is installed with a product.', (19):\n 'Displays a specified error message and returns failure, terminating the installation.'\n , (21): 'JScript file that is installed with a product.', (22):\n 'VBScript file that is installed with a product.', (34):\n 'EXE file having a path referencing a directory.', (35):\n 'Directory set with formatted text.', (37):\n 'JScript text stored in this sequence table.', (38):\n 'VBScript text stored in this sequence table.', (50):\n 'EXE file having a path specified by a property value.', (51):\n 'Property set with formatted text.', (53):\n 'JScript text specified by a property value.', (54):\n 'VBScript text specified by a property value.'}\n\n def unpack(self, data):\n streams = {result.path: result for result in super().unpack(data)}\n\n def stream(name: str):\n return streams.pop(name).get_data()\n\n def column_formats(table: Dict[str, MSITableColumnInfo]) ->str:\n return ''.join(v.struct_format for v in table.values())\n\n def stream_to_rows(data: ByteStr, row_format: str):\n row_size = struct.calcsize(f'<{row_format}')\n row_count = int(len(data) / row_size)\n reader = StructReader(data)\n columns = [reader.read_struct(f'<{sc * row_count}') for sc in\n row_format]\n for i in 
range(row_count):\n yield [c[i] for c in columns]\n tables: Dict[str, Dict[str, MSITableColumnInfo]\n ] = collections.defaultdict(collections.OrderedDict)\n strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))\n for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(\n stream('!_Columns'), 'HHHH'):\n tbl_name = strings.ref(tbl_name_id)\n col_name = strings.ref(col_name_id)\n tables[tbl_name][col_name] = MSITableColumnInfo(col_number,\n col_attributes)\n table_names_given = {strings.ref(k) for k in chunks.unpack(stream(\n '!_Tables'), 2, False)}\n table_names_known = set(tables)\n for name in (table_names_known - table_names_given):\n self.log_warn(f'table name known but not given: {name}')\n for name in (table_names_given - table_names_known):\n self.log_warn(f'table name given but not known: {name}')\n\n\n class ScriptItem(NamedTuple):\n row_index: int\n extension: Optional[str]\n processed_table_data: Dict[str, List[Dict[str, str]]] = {}\n tbl_properties: Dict[str, str] = {}\n tbl_files: Dict[str, str] = {}\n tbl_components: Dict[str, str] = {}\n postprocessing: List[ScriptItem] = []\n\n def format_string(string: str):\n\n def _replace(match: re.Match[str]):\n _replace.done = False\n prefix, name = match.groups()\n if not prefix:\n tbl = tbl_properties\n elif prefix in '%':\n name = name.rstrip('%').upper()\n return f'%{name}%'\n elif prefix in '!#':\n tbl = tbl_files\n elif prefix in '$':\n tbl = tbl_components\n else:\n raise ValueError\n return tbl.get(name, '')\n while True:\n _replace.done = True\n string = re.sub(\n \"\"\"(?x)\n \\\\[ # open square brackent\n (?![~\\\\\\\\]) # not followed by escapes\n ([%$!#]?) # any of the valid prefix characters\n ([^[\\\\]{}]+) # no brackets or braces\n \\\\]\"\"\"\n , _replace, string)\n if _replace.done:\n break\n string = re.sub('\\\\[\\\\\\\\(.)\\\\]', '\\\\1', string)\n string = string.replace('[~]', '\\x00')\n return string\n for table_name, table in tables.items():\n stream_name = f'!{table_name}'\n if stream_name not in streams:\n continue\n processed = []\n info = list(table.values())\n for r, row in enumerate(stream_to_rows(stream(stream_name),\n column_formats(table))):\n values = []\n for index, value in enumerate(row):\n vt = info[index].type\n if vt is MsiType.Long:\n if value != 0:\n value -= 2147483648\n elif vt is MsiType.Short:\n if value != 0:\n value -= 32768\n elif value in strings:\n value = strings.ref(value)\n elif not info[index].is_integer:\n value = ''\n values.append(value)\n if table_name == 'Property':\n tbl_properties[values[0]] = values[1]\n if table_name == 'File':\n tbl_properties[values[0]] = values[2]\n if table_name == 'Component':\n tbl_properties[values[0]] = f'%{values[2]}%'\n entry = dict(zip(table, values))\n einfo = {t: i for t, i in zip(table, info)}\n if table_name == 'MsiFileHash':\n entry['Hash'] = struct.pack('<IIII', row[2] ^ \n 2147483648, row[3] ^ 2147483648, row[4] ^ \n 2147483648, row[5] ^ 2147483648).hex()\n if table_name == 'CustomAction':\n code = row[1] & 63\n try:\n entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]\n except LookupError:\n pass\n t = einfo.get('Target')\n c = {(37): 'js', (38): 'vbs', (51): None}\n if code in c and t and not t.is_integer:\n postprocessing.append(ScriptItem(r, c[code]))\n processed.append(entry)\n if processed:\n processed_table_data[table_name] = processed\n ca = processed_table_data.get('CustomAction', None)\n for item in postprocessing:\n entry = ca[item.row_index]\n try:\n path: str = entry['Action']\n data: str 
= entry['Target']\n except KeyError:\n continue\n root = f'Action/{path}'\n if item.extension:\n path = f'{root}.{item.extension}'\n streams[path] = UnpackResult(path, data.encode(self.codec))\n continue\n data = format_string(data)\n parts = [part.partition('\\x02') for part in data.split('\\x01')]\n if not all(part[1] == '\\x02' for part in parts):\n continue\n for name, _, script in parts:\n if not name.lower().startswith('script'):\n continue\n if not script:\n continue\n path = f'{root}.{name}'\n streams[path] = UnpackResult(path, script.encode(self.codec))\n for ignored_stream in ['[5]SummaryInformation',\n '[5]DocumentSummaryInformation', '[5]DigitalSignature',\n '[5]MsiDigitalSignatureEx']:\n streams.pop(ignored_stream, None)\n inconsistencies = 0\n for k in range(len(strings)):\n c = strings.computed_ref_count[k]\n p = strings.provided_ref_count[k]\n if c != p and not self.log_debug(\n f'string reference count computed={c} provided={p}:',\n strings.ref(k + 1, False)):\n inconsistencies += 1\n if inconsistencies:\n self.log_info(\n f'found {inconsistencies} incorrect string reference counts')\n\n def fix_msi_path(path: str):\n prefix, dot, name = path.partition('.')\n if dot == '.' and prefix.lower() == 'binary':\n path = f'{prefix}/{name}'\n return path\n streams = {fix_msi_path(path): item for path, item in streams.items()}\n ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME, json.dumps(\n processed_table_data, indent=4).encode(self.codec))\n streams[ds.path] = ds\n for path in sorted(streams):\n streams[path].path = path\n yield streams[path]\n\n @classmethod\n def handles(self, data: bytearray):\n if not data.startswith(b'\\xd0\\xcf\\x11\\xe0'):\n return False\n return FileMagicInfo(data).extension == 'msi'\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass MsiType(enum.IntEnum):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MSITableColumnInfo(NamedTuple):\n \"\"\"\n Represents information about an MSI table column. 
See also:\n https://doxygen.reactos.org/db/de4/msipriv_8h.html\n \"\"\"\n number: int\n attributes: int\n\n @property\n def type(self) ->MsiType:\n try:\n if self.is_integer:\n return MsiType(self.attributes & 4095)\n else:\n return MsiType(self.attributes & 3840)\n except Exception:\n return MsiType.Unknown\n\n @property\n def is_integer(self) ->bool:\n return self.attributes & 3840 < 2048\n\n @property\n def is_key(self) ->bool:\n return self.attributes & 8192 == 8192\n\n @property\n def is_nullable(self) ->bool:\n return self.attributes & 4096 == 4096\n\n @property\n def length(self) ->int:\n vt = self.type\n if vt is MsiType.Long:\n return 4\n if vt is MsiType.Short:\n return 2\n return self.attributes & 255\n\n @property\n def struct_format(self) ->str:\n vt = self.type\n if vt is MsiType.Long:\n return 'I'\n elif vt is MsiType.Short:\n return 'H'\n else:\n return 'H'\n\n\nclass MSIStringData:\n\n def __init__(self, string_data: ByteStr, string_pool: ByteStr):\n data = StructReader(string_data)\n pool = StructReader(string_pool)\n self.strings: List[bytes] = []\n self.provided_ref_count: List[int] = []\n self.computed_ref_count: List[int] = []\n self.codepage = pool.u16()\n self._unknown = pool.u16()\n while not pool.eof:\n size, rc = pool.read_struct('<HH')\n string = data.read_bytes(size)\n self.strings.append(string)\n self.provided_ref_count.append(rc)\n self.computed_ref_count.append(0)\n\n @cached_property\n def codec(self):\n try:\n return codecs.lookup(f'cp{self.codepage}').name\n except Exception:\n xtmsi.log_info('failed looking up codec', self.codepage)\n return 'latin1'\n\n def __len__(self):\n return len(self.strings)\n\n def __iter__(self):\n yield from range(1, len(self) + 1)\n\n def __contains__(self, index):\n return 0 < index <= len(self)\n\n def ref(self, index: int, increment=True) ->Union[str, bytes]:\n assert index > 0\n index -= 1\n if increment:\n self.computed_ref_count[index] += 1\n data = self.strings[index]\n data = data.decode(self.codec)\n return data\n\n\nclass xtmsi(xtdoc):\n \"\"\"\n Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains\n parsed MSI table information, similar to the output of the Orca tool. 
Binary streams are placed in a\n virtual folder called \"Binary\", and extracted scripts from custom actions are separately extracted in\n a virtual folder named \"Action\".\n \"\"\"\n _SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'\n _CUSTOM_ACTION_TYPES = {(1):\n 'DLL file stored in a Binary table stream.', (2):\n 'EXE file stored in a Binary table stream.', (5):\n 'JScript file stored in a Binary table stream.', (6):\n 'VBScript file stored in a Binary table stream.', (17):\n 'DLL file that is installed with a product.', (18):\n 'EXE file that is installed with a product.', (19):\n 'Displays a specified error message and returns failure, terminating the installation.'\n , (21): 'JScript file that is installed with a product.', (22):\n 'VBScript file that is installed with a product.', (34):\n 'EXE file having a path referencing a directory.', (35):\n 'Directory set with formatted text.', (37):\n 'JScript text stored in this sequence table.', (38):\n 'VBScript text stored in this sequence table.', (50):\n 'EXE file having a path specified by a property value.', (51):\n 'Property set with formatted text.', (53):\n 'JScript text specified by a property value.', (54):\n 'VBScript text specified by a property value.'}\n\n def unpack(self, data):\n streams = {result.path: result for result in super().unpack(data)}\n\n def stream(name: str):\n return streams.pop(name).get_data()\n\n def column_formats(table: Dict[str, MSITableColumnInfo]) ->str:\n return ''.join(v.struct_format for v in table.values())\n\n def stream_to_rows(data: ByteStr, row_format: str):\n row_size = struct.calcsize(f'<{row_format}')\n row_count = int(len(data) / row_size)\n reader = StructReader(data)\n columns = [reader.read_struct(f'<{sc * row_count}') for sc in\n row_format]\n for i in range(row_count):\n yield [c[i] for c in columns]\n tables: Dict[str, Dict[str, MSITableColumnInfo]\n ] = collections.defaultdict(collections.OrderedDict)\n strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))\n for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(\n stream('!_Columns'), 'HHHH'):\n tbl_name = strings.ref(tbl_name_id)\n col_name = strings.ref(col_name_id)\n tables[tbl_name][col_name] = MSITableColumnInfo(col_number,\n col_attributes)\n table_names_given = {strings.ref(k) for k in chunks.unpack(stream(\n '!_Tables'), 2, False)}\n table_names_known = set(tables)\n for name in (table_names_known - table_names_given):\n self.log_warn(f'table name known but not given: {name}')\n for name in (table_names_given - table_names_known):\n self.log_warn(f'table name given but not known: {name}')\n\n\n class ScriptItem(NamedTuple):\n row_index: int\n extension: Optional[str]\n processed_table_data: Dict[str, List[Dict[str, str]]] = {}\n tbl_properties: Dict[str, str] = {}\n tbl_files: Dict[str, str] = {}\n tbl_components: Dict[str, str] = {}\n postprocessing: List[ScriptItem] = []\n\n def format_string(string: str):\n\n def _replace(match: re.Match[str]):\n _replace.done = False\n prefix, name = match.groups()\n if not prefix:\n tbl = tbl_properties\n elif prefix in '%':\n name = name.rstrip('%').upper()\n return f'%{name}%'\n elif prefix in '!#':\n tbl = tbl_files\n elif prefix in '$':\n tbl = tbl_components\n else:\n raise ValueError\n return tbl.get(name, '')\n while True:\n _replace.done = True\n string = re.sub(\n \"\"\"(?x)\n \\\\[ # open square brackent\n (?![~\\\\\\\\]) # not followed by escapes\n ([%$!#]?) 
# any of the valid prefix characters\n ([^[\\\\]{}]+) # no brackets or braces\n \\\\]\"\"\"\n , _replace, string)\n if _replace.done:\n break\n string = re.sub('\\\\[\\\\\\\\(.)\\\\]', '\\\\1', string)\n string = string.replace('[~]', '\\x00')\n return string\n for table_name, table in tables.items():\n stream_name = f'!{table_name}'\n if stream_name not in streams:\n continue\n processed = []\n info = list(table.values())\n for r, row in enumerate(stream_to_rows(stream(stream_name),\n column_formats(table))):\n values = []\n for index, value in enumerate(row):\n vt = info[index].type\n if vt is MsiType.Long:\n if value != 0:\n value -= 2147483648\n elif vt is MsiType.Short:\n if value != 0:\n value -= 32768\n elif value in strings:\n value = strings.ref(value)\n elif not info[index].is_integer:\n value = ''\n values.append(value)\n if table_name == 'Property':\n tbl_properties[values[0]] = values[1]\n if table_name == 'File':\n tbl_properties[values[0]] = values[2]\n if table_name == 'Component':\n tbl_properties[values[0]] = f'%{values[2]}%'\n entry = dict(zip(table, values))\n einfo = {t: i for t, i in zip(table, info)}\n if table_name == 'MsiFileHash':\n entry['Hash'] = struct.pack('<IIII', row[2] ^ \n 2147483648, row[3] ^ 2147483648, row[4] ^ \n 2147483648, row[5] ^ 2147483648).hex()\n if table_name == 'CustomAction':\n code = row[1] & 63\n try:\n entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]\n except LookupError:\n pass\n t = einfo.get('Target')\n c = {(37): 'js', (38): 'vbs', (51): None}\n if code in c and t and not t.is_integer:\n postprocessing.append(ScriptItem(r, c[code]))\n processed.append(entry)\n if processed:\n processed_table_data[table_name] = processed\n ca = processed_table_data.get('CustomAction', None)\n for item in postprocessing:\n entry = ca[item.row_index]\n try:\n path: str = entry['Action']\n data: str = entry['Target']\n except KeyError:\n continue\n root = f'Action/{path}'\n if item.extension:\n path = f'{root}.{item.extension}'\n streams[path] = UnpackResult(path, data.encode(self.codec))\n continue\n data = format_string(data)\n parts = [part.partition('\\x02') for part in data.split('\\x01')]\n if not all(part[1] == '\\x02' for part in parts):\n continue\n for name, _, script in parts:\n if not name.lower().startswith('script'):\n continue\n if not script:\n continue\n path = f'{root}.{name}'\n streams[path] = UnpackResult(path, script.encode(self.codec))\n for ignored_stream in ['[5]SummaryInformation',\n '[5]DocumentSummaryInformation', '[5]DigitalSignature',\n '[5]MsiDigitalSignatureEx']:\n streams.pop(ignored_stream, None)\n inconsistencies = 0\n for k in range(len(strings)):\n c = strings.computed_ref_count[k]\n p = strings.provided_ref_count[k]\n if c != p and not self.log_debug(\n f'string reference count computed={c} provided={p}:',\n strings.ref(k + 1, False)):\n inconsistencies += 1\n if inconsistencies:\n self.log_info(\n f'found {inconsistencies} incorrect string reference counts')\n\n def fix_msi_path(path: str):\n prefix, dot, name = path.partition('.')\n if dot == '.' 
and prefix.lower() == 'binary':\n path = f'{prefix}/{name}'\n return path\n streams = {fix_msi_path(path): item for path, item in streams.items()}\n ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME, json.dumps(\n processed_table_data, indent=4).encode(self.codec))\n streams[ds.path] = ds\n for path in sorted(streams):\n streams[path].path = path\n yield streams[path]\n\n @classmethod\n def handles(self, data: bytearray):\n if not data.startswith(b'\\xd0\\xcf\\x11\\xe0'):\n return False\n return FileMagicInfo(data).extension == 'msi'\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass MsiType(enum.IntEnum):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.name\n\n\nclass MSITableColumnInfo(NamedTuple):\n \"\"\"\n Represents information about an MSI table column. See also:\n https://doxygen.reactos.org/db/de4/msipriv_8h.html\n \"\"\"\n number: int\n attributes: int\n\n @property\n def type(self) ->MsiType:\n try:\n if self.is_integer:\n return MsiType(self.attributes & 4095)\n else:\n return MsiType(self.attributes & 3840)\n except Exception:\n return MsiType.Unknown\n\n @property\n def is_integer(self) ->bool:\n return self.attributes & 3840 < 2048\n\n @property\n def is_key(self) ->bool:\n return self.attributes & 8192 == 8192\n\n @property\n def is_nullable(self) ->bool:\n return self.attributes & 4096 == 4096\n\n @property\n def length(self) ->int:\n vt = self.type\n if vt is MsiType.Long:\n return 4\n if vt is MsiType.Short:\n return 2\n return self.attributes & 255\n\n @property\n def struct_format(self) ->str:\n vt = self.type\n if vt is MsiType.Long:\n return 'I'\n elif vt is MsiType.Short:\n return 'H'\n else:\n return 'H'\n\n\nclass MSIStringData:\n\n def __init__(self, string_data: ByteStr, string_pool: ByteStr):\n data = StructReader(string_data)\n pool = StructReader(string_pool)\n self.strings: List[bytes] = []\n self.provided_ref_count: List[int] = []\n self.computed_ref_count: List[int] = []\n self.codepage = pool.u16()\n self._unknown = pool.u16()\n while not pool.eof:\n size, rc = pool.read_struct('<HH')\n string = data.read_bytes(size)\n self.strings.append(string)\n self.provided_ref_count.append(rc)\n self.computed_ref_count.append(0)\n\n @cached_property\n def codec(self):\n try:\n return codecs.lookup(f'cp{self.codepage}').name\n except Exception:\n xtmsi.log_info('failed looking up codec', self.codepage)\n return 'latin1'\n\n def __len__(self):\n return len(self.strings)\n\n def __iter__(self):\n yield from range(1, len(self) + 1)\n\n def __contains__(self, index):\n return 0 < index <= len(self)\n\n def ref(self, index: int, increment=True) ->Union[str, bytes]:\n assert index > 0\n index -= 1\n if increment:\n self.computed_ref_count[index] += 1\n data = self.strings[index]\n data = data.decode(self.codec)\n return data\n\n\nclass xtmsi(xtdoc):\n \"\"\"\n Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains\n parsed MSI table information, similar to the output of the Orca tool. 
Binary streams are placed in a\n virtual folder called \"Binary\", and extracted scripts from custom actions are separately extracted in\n a virtual folder named \"Action\".\n \"\"\"\n _SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'\n _CUSTOM_ACTION_TYPES = {(1):\n 'DLL file stored in a Binary table stream.', (2):\n 'EXE file stored in a Binary table stream.', (5):\n 'JScript file stored in a Binary table stream.', (6):\n 'VBScript file stored in a Binary table stream.', (17):\n 'DLL file that is installed with a product.', (18):\n 'EXE file that is installed with a product.', (19):\n 'Displays a specified error message and returns failure, terminating the installation.'\n , (21): 'JScript file that is installed with a product.', (22):\n 'VBScript file that is installed with a product.', (34):\n 'EXE file having a path referencing a directory.', (35):\n 'Directory set with formatted text.', (37):\n 'JScript text stored in this sequence table.', (38):\n 'VBScript text stored in this sequence table.', (50):\n 'EXE file having a path specified by a property value.', (51):\n 'Property set with formatted text.', (53):\n 'JScript text specified by a property value.', (54):\n 'VBScript text specified by a property value.'}\n\n def unpack(self, data):\n streams = {result.path: result for result in super().unpack(data)}\n\n def stream(name: str):\n return streams.pop(name).get_data()\n\n def column_formats(table: Dict[str, MSITableColumnInfo]) ->str:\n return ''.join(v.struct_format for v in table.values())\n\n def stream_to_rows(data: ByteStr, row_format: str):\n row_size = struct.calcsize(f'<{row_format}')\n row_count = int(len(data) / row_size)\n reader = StructReader(data)\n columns = [reader.read_struct(f'<{sc * row_count}') for sc in\n row_format]\n for i in range(row_count):\n yield [c[i] for c in columns]\n tables: Dict[str, Dict[str, MSITableColumnInfo]\n ] = collections.defaultdict(collections.OrderedDict)\n strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))\n for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(\n stream('!_Columns'), 'HHHH'):\n tbl_name = strings.ref(tbl_name_id)\n col_name = strings.ref(col_name_id)\n tables[tbl_name][col_name] = MSITableColumnInfo(col_number,\n col_attributes)\n table_names_given = {strings.ref(k) for k in chunks.unpack(stream(\n '!_Tables'), 2, False)}\n table_names_known = set(tables)\n for name in (table_names_known - table_names_given):\n self.log_warn(f'table name known but not given: {name}')\n for name in (table_names_given - table_names_known):\n self.log_warn(f'table name given but not known: {name}')\n\n\n class ScriptItem(NamedTuple):\n row_index: int\n extension: Optional[str]\n processed_table_data: Dict[str, List[Dict[str, str]]] = {}\n tbl_properties: Dict[str, str] = {}\n tbl_files: Dict[str, str] = {}\n tbl_components: Dict[str, str] = {}\n postprocessing: List[ScriptItem] = []\n\n def format_string(string: str):\n\n def _replace(match: re.Match[str]):\n _replace.done = False\n prefix, name = match.groups()\n if not prefix:\n tbl = tbl_properties\n elif prefix in '%':\n name = name.rstrip('%').upper()\n return f'%{name}%'\n elif prefix in '!#':\n tbl = tbl_files\n elif prefix in '$':\n tbl = tbl_components\n else:\n raise ValueError\n return tbl.get(name, '')\n while True:\n _replace.done = True\n string = re.sub(\n \"\"\"(?x)\n \\\\[ # open square brackent\n (?![~\\\\\\\\]) # not followed by escapes\n ([%$!#]?) 
# any of the valid prefix characters\n ([^[\\\\]{}]+) # no brackets or braces\n \\\\]\"\"\"\n , _replace, string)\n if _replace.done:\n break\n string = re.sub('\\\\[\\\\\\\\(.)\\\\]', '\\\\1', string)\n string = string.replace('[~]', '\\x00')\n return string\n for table_name, table in tables.items():\n stream_name = f'!{table_name}'\n if stream_name not in streams:\n continue\n processed = []\n info = list(table.values())\n for r, row in enumerate(stream_to_rows(stream(stream_name),\n column_formats(table))):\n values = []\n for index, value in enumerate(row):\n vt = info[index].type\n if vt is MsiType.Long:\n if value != 0:\n value -= 2147483648\n elif vt is MsiType.Short:\n if value != 0:\n value -= 32768\n elif value in strings:\n value = strings.ref(value)\n elif not info[index].is_integer:\n value = ''\n values.append(value)\n if table_name == 'Property':\n tbl_properties[values[0]] = values[1]\n if table_name == 'File':\n tbl_properties[values[0]] = values[2]\n if table_name == 'Component':\n tbl_properties[values[0]] = f'%{values[2]}%'\n entry = dict(zip(table, values))\n einfo = {t: i for t, i in zip(table, info)}\n if table_name == 'MsiFileHash':\n entry['Hash'] = struct.pack('<IIII', row[2] ^ \n 2147483648, row[3] ^ 2147483648, row[4] ^ \n 2147483648, row[5] ^ 2147483648).hex()\n if table_name == 'CustomAction':\n code = row[1] & 63\n try:\n entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]\n except LookupError:\n pass\n t = einfo.get('Target')\n c = {(37): 'js', (38): 'vbs', (51): None}\n if code in c and t and not t.is_integer:\n postprocessing.append(ScriptItem(r, c[code]))\n processed.append(entry)\n if processed:\n processed_table_data[table_name] = processed\n ca = processed_table_data.get('CustomAction', None)\n for item in postprocessing:\n entry = ca[item.row_index]\n try:\n path: str = entry['Action']\n data: str = entry['Target']\n except KeyError:\n continue\n root = f'Action/{path}'\n if item.extension:\n path = f'{root}.{item.extension}'\n streams[path] = UnpackResult(path, data.encode(self.codec))\n continue\n data = format_string(data)\n parts = [part.partition('\\x02') for part in data.split('\\x01')]\n if not all(part[1] == '\\x02' for part in parts):\n continue\n for name, _, script in parts:\n if not name.lower().startswith('script'):\n continue\n if not script:\n continue\n path = f'{root}.{name}'\n streams[path] = UnpackResult(path, script.encode(self.codec))\n for ignored_stream in ['[5]SummaryInformation',\n '[5]DocumentSummaryInformation', '[5]DigitalSignature',\n '[5]MsiDigitalSignatureEx']:\n streams.pop(ignored_stream, None)\n inconsistencies = 0\n for k in range(len(strings)):\n c = strings.computed_ref_count[k]\n p = strings.provided_ref_count[k]\n if c != p and not self.log_debug(\n f'string reference count computed={c} provided={p}:',\n strings.ref(k + 1, False)):\n inconsistencies += 1\n if inconsistencies:\n self.log_info(\n f'found {inconsistencies} incorrect string reference counts')\n\n def fix_msi_path(path: str):\n prefix, dot, name = path.partition('.')\n if dot == '.' 
and prefix.lower() == 'binary':\n path = f'{prefix}/{name}'\n return path\n streams = {fix_msi_path(path): item for path, item in streams.items()}\n ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME, json.dumps(\n processed_table_data, indent=4).encode(self.codec))\n streams[ds.path] = ds\n for path in sorted(streams):\n streams[path].path = path\n yield streams[path]\n\n @classmethod\n def handles(self, data: bytearray):\n if not data.startswith(b'\\xd0\\xcf\\x11\\xe0'):\n return False\n return FileMagicInfo(data).extension == 'msi'\n\n\n<mask token>\n", "step-4": "from __future__ import annotations\nfrom typing import List, Dict, NamedTuple, Union, Optional\nimport codecs\nimport collections\nimport enum\nimport json\nimport re\nimport struct\nfrom refinery.lib.structures import StructReader\nfrom refinery.units.formats.office.xtdoc import xtdoc, UnpackResult\nfrom refinery.lib import chunks\nfrom refinery.lib.types import ByteStr\nfrom refinery.lib.mime import FileMagicInfo\nfrom refinery.lib.tools import cached_property\n\n\nclass MsiType(enum.IntEnum):\n \"\"\"\n Known data types for MSI table cell entries.\n \"\"\"\n Long = 260\n Short = 1282\n Binary = 2304\n String = 3328\n StringLocalized = 3840\n Unknown = 0\n\n def __str__(self):\n return self.name\n\n\nclass MSITableColumnInfo(NamedTuple):\n \"\"\"\n Represents information about an MSI table column. See also:\n https://doxygen.reactos.org/db/de4/msipriv_8h.html\n \"\"\"\n number: int\n attributes: int\n\n @property\n def type(self) ->MsiType:\n try:\n if self.is_integer:\n return MsiType(self.attributes & 4095)\n else:\n return MsiType(self.attributes & 3840)\n except Exception:\n return MsiType.Unknown\n\n @property\n def is_integer(self) ->bool:\n return self.attributes & 3840 < 2048\n\n @property\n def is_key(self) ->bool:\n return self.attributes & 8192 == 8192\n\n @property\n def is_nullable(self) ->bool:\n return self.attributes & 4096 == 4096\n\n @property\n def length(self) ->int:\n vt = self.type\n if vt is MsiType.Long:\n return 4\n if vt is MsiType.Short:\n return 2\n return self.attributes & 255\n\n @property\n def struct_format(self) ->str:\n vt = self.type\n if vt is MsiType.Long:\n return 'I'\n elif vt is MsiType.Short:\n return 'H'\n else:\n return 'H'\n\n\nclass MSIStringData:\n\n def __init__(self, string_data: ByteStr, string_pool: ByteStr):\n data = StructReader(string_data)\n pool = StructReader(string_pool)\n self.strings: List[bytes] = []\n self.provided_ref_count: List[int] = []\n self.computed_ref_count: List[int] = []\n self.codepage = pool.u16()\n self._unknown = pool.u16()\n while not pool.eof:\n size, rc = pool.read_struct('<HH')\n string = data.read_bytes(size)\n self.strings.append(string)\n self.provided_ref_count.append(rc)\n self.computed_ref_count.append(0)\n\n @cached_property\n def codec(self):\n try:\n return codecs.lookup(f'cp{self.codepage}').name\n except Exception:\n xtmsi.log_info('failed looking up codec', self.codepage)\n return 'latin1'\n\n def __len__(self):\n return len(self.strings)\n\n def __iter__(self):\n yield from range(1, len(self) + 1)\n\n def __contains__(self, index):\n return 0 < index <= len(self)\n\n def ref(self, index: int, increment=True) ->Union[str, bytes]:\n assert index > 0\n index -= 1\n if increment:\n self.computed_ref_count[index] += 1\n data = self.strings[index]\n data = data.decode(self.codec)\n return data\n\n\nclass xtmsi(xtdoc):\n \"\"\"\n Extract files and metadata from Microsoft Installer (MSI) archives. 
The synthetic file {FN} contains\n parsed MSI table information, similar to the output of the Orca tool. Binary streams are placed in a\n virtual folder called \"Binary\", and extracted scripts from custom actions are separately extracted in\n a virtual folder named \"Action\".\n \"\"\"\n _SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'\n _CUSTOM_ACTION_TYPES = {(1):\n 'DLL file stored in a Binary table stream.', (2):\n 'EXE file stored in a Binary table stream.', (5):\n 'JScript file stored in a Binary table stream.', (6):\n 'VBScript file stored in a Binary table stream.', (17):\n 'DLL file that is installed with a product.', (18):\n 'EXE file that is installed with a product.', (19):\n 'Displays a specified error message and returns failure, terminating the installation.'\n , (21): 'JScript file that is installed with a product.', (22):\n 'VBScript file that is installed with a product.', (34):\n 'EXE file having a path referencing a directory.', (35):\n 'Directory set with formatted text.', (37):\n 'JScript text stored in this sequence table.', (38):\n 'VBScript text stored in this sequence table.', (50):\n 'EXE file having a path specified by a property value.', (51):\n 'Property set with formatted text.', (53):\n 'JScript text specified by a property value.', (54):\n 'VBScript text specified by a property value.'}\n\n def unpack(self, data):\n streams = {result.path: result for result in super().unpack(data)}\n\n def stream(name: str):\n return streams.pop(name).get_data()\n\n def column_formats(table: Dict[str, MSITableColumnInfo]) ->str:\n return ''.join(v.struct_format for v in table.values())\n\n def stream_to_rows(data: ByteStr, row_format: str):\n row_size = struct.calcsize(f'<{row_format}')\n row_count = int(len(data) / row_size)\n reader = StructReader(data)\n columns = [reader.read_struct(f'<{sc * row_count}') for sc in\n row_format]\n for i in range(row_count):\n yield [c[i] for c in columns]\n tables: Dict[str, Dict[str, MSITableColumnInfo]\n ] = collections.defaultdict(collections.OrderedDict)\n strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))\n for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(\n stream('!_Columns'), 'HHHH'):\n tbl_name = strings.ref(tbl_name_id)\n col_name = strings.ref(col_name_id)\n tables[tbl_name][col_name] = MSITableColumnInfo(col_number,\n col_attributes)\n table_names_given = {strings.ref(k) for k in chunks.unpack(stream(\n '!_Tables'), 2, False)}\n table_names_known = set(tables)\n for name in (table_names_known - table_names_given):\n self.log_warn(f'table name known but not given: {name}')\n for name in (table_names_given - table_names_known):\n self.log_warn(f'table name given but not known: {name}')\n\n\n class ScriptItem(NamedTuple):\n row_index: int\n extension: Optional[str]\n processed_table_data: Dict[str, List[Dict[str, str]]] = {}\n tbl_properties: Dict[str, str] = {}\n tbl_files: Dict[str, str] = {}\n tbl_components: Dict[str, str] = {}\n postprocessing: List[ScriptItem] = []\n\n def format_string(string: str):\n\n def _replace(match: re.Match[str]):\n _replace.done = False\n prefix, name = match.groups()\n if not prefix:\n tbl = tbl_properties\n elif prefix in '%':\n name = name.rstrip('%').upper()\n return f'%{name}%'\n elif prefix in '!#':\n tbl = tbl_files\n elif prefix in '$':\n tbl = tbl_components\n else:\n raise ValueError\n return tbl.get(name, '')\n while True:\n _replace.done = True\n string = re.sub(\n \"\"\"(?x)\n \\\\[ # open square brackent\n (?![~\\\\\\\\]) # not followed 
by escapes\n ([%$!#]?) # any of the valid prefix characters\n ([^[\\\\]{}]+) # no brackets or braces\n \\\\]\"\"\"\n , _replace, string)\n if _replace.done:\n break\n string = re.sub('\\\\[\\\\\\\\(.)\\\\]', '\\\\1', string)\n string = string.replace('[~]', '\\x00')\n return string\n for table_name, table in tables.items():\n stream_name = f'!{table_name}'\n if stream_name not in streams:\n continue\n processed = []\n info = list(table.values())\n for r, row in enumerate(stream_to_rows(stream(stream_name),\n column_formats(table))):\n values = []\n for index, value in enumerate(row):\n vt = info[index].type\n if vt is MsiType.Long:\n if value != 0:\n value -= 2147483648\n elif vt is MsiType.Short:\n if value != 0:\n value -= 32768\n elif value in strings:\n value = strings.ref(value)\n elif not info[index].is_integer:\n value = ''\n values.append(value)\n if table_name == 'Property':\n tbl_properties[values[0]] = values[1]\n if table_name == 'File':\n tbl_properties[values[0]] = values[2]\n if table_name == 'Component':\n tbl_properties[values[0]] = f'%{values[2]}%'\n entry = dict(zip(table, values))\n einfo = {t: i for t, i in zip(table, info)}\n if table_name == 'MsiFileHash':\n entry['Hash'] = struct.pack('<IIII', row[2] ^ \n 2147483648, row[3] ^ 2147483648, row[4] ^ \n 2147483648, row[5] ^ 2147483648).hex()\n if table_name == 'CustomAction':\n code = row[1] & 63\n try:\n entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]\n except LookupError:\n pass\n t = einfo.get('Target')\n c = {(37): 'js', (38): 'vbs', (51): None}\n if code in c and t and not t.is_integer:\n postprocessing.append(ScriptItem(r, c[code]))\n processed.append(entry)\n if processed:\n processed_table_data[table_name] = processed\n ca = processed_table_data.get('CustomAction', None)\n for item in postprocessing:\n entry = ca[item.row_index]\n try:\n path: str = entry['Action']\n data: str = entry['Target']\n except KeyError:\n continue\n root = f'Action/{path}'\n if item.extension:\n path = f'{root}.{item.extension}'\n streams[path] = UnpackResult(path, data.encode(self.codec))\n continue\n data = format_string(data)\n parts = [part.partition('\\x02') for part in data.split('\\x01')]\n if not all(part[1] == '\\x02' for part in parts):\n continue\n for name, _, script in parts:\n if not name.lower().startswith('script'):\n continue\n if not script:\n continue\n path = f'{root}.{name}'\n streams[path] = UnpackResult(path, script.encode(self.codec))\n for ignored_stream in ['[5]SummaryInformation',\n '[5]DocumentSummaryInformation', '[5]DigitalSignature',\n '[5]MsiDigitalSignatureEx']:\n streams.pop(ignored_stream, None)\n inconsistencies = 0\n for k in range(len(strings)):\n c = strings.computed_ref_count[k]\n p = strings.provided_ref_count[k]\n if c != p and not self.log_debug(\n f'string reference count computed={c} provided={p}:',\n strings.ref(k + 1, False)):\n inconsistencies += 1\n if inconsistencies:\n self.log_info(\n f'found {inconsistencies} incorrect string reference counts')\n\n def fix_msi_path(path: str):\n prefix, dot, name = path.partition('.')\n if dot == '.' 
and prefix.lower() == 'binary':\n path = f'{prefix}/{name}'\n return path\n streams = {fix_msi_path(path): item for path, item in streams.items()}\n ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME, json.dumps(\n processed_table_data, indent=4).encode(self.codec))\n streams[ds.path] = ds\n for path in sorted(streams):\n streams[path].path = path\n yield streams[path]\n\n @classmethod\n def handles(self, data: bytearray):\n if not data.startswith(b'\\xd0\\xcf\\x11\\xe0'):\n return False\n return FileMagicInfo(data).extension == 'msi'\n\n\nxtmsi.__doc__ = xtmsi.__doc__.format(FN=xtmsi._SYNTHETIC_STREAMS_FILENAME)\n", "step-5": "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nfrom __future__ import annotations\r\nfrom typing import List, Dict, NamedTuple, Union, Optional\r\n\r\nimport codecs\r\nimport collections\r\nimport enum\r\nimport json\r\nimport re\r\nimport struct\r\n\r\nfrom refinery.lib.structures import StructReader\r\nfrom refinery.units.formats.office.xtdoc import xtdoc, UnpackResult\r\nfrom refinery.lib import chunks\r\nfrom refinery.lib.types import ByteStr\r\nfrom refinery.lib.mime import FileMagicInfo\r\nfrom refinery.lib.tools import cached_property\r\n\r\n\r\nclass MsiType(enum.IntEnum):\r\n \"\"\"\r\n Known data types for MSI table cell entries.\r\n \"\"\"\r\n\r\n Long = 0x104\r\n Short = 0x502\r\n Binary = 0x900\r\n String = 0xD00\r\n StringLocalized = 0xF00\r\n Unknown = 0\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n\r\nclass MSITableColumnInfo(NamedTuple):\r\n \"\"\"\r\n Represents information about an MSI table column. See also:\r\n https://doxygen.reactos.org/db/de4/msipriv_8h.html\r\n \"\"\"\r\n number: int\r\n attributes: int\r\n\r\n @property\r\n def type(self) -> MsiType:\r\n try:\r\n if self.is_integer:\r\n return MsiType(self.attributes & 0xFFF)\r\n else:\r\n return MsiType(self.attributes & 0xF00)\r\n except Exception:\r\n return MsiType.Unknown\r\n\r\n @property\r\n def is_integer(self) -> bool:\r\n return self.attributes & 0x0F00 < 0x800\r\n\r\n @property\r\n def is_key(self) -> bool:\r\n return self.attributes & 0x2000 == 0x2000\r\n\r\n @property\r\n def is_nullable(self) -> bool:\r\n return self.attributes & 0x1000 == 0x1000\r\n\r\n @property\r\n def length(self) -> int:\r\n vt = self.type\r\n if vt is MsiType.Long:\r\n return 4\r\n if vt is MsiType.Short:\r\n return 2\r\n return self.attributes & 0xFF\r\n\r\n @property\r\n def struct_format(self) -> str:\r\n vt = self.type\r\n if vt is MsiType.Long:\r\n return 'I'\r\n elif vt is MsiType.Short:\r\n return 'H'\r\n else:\r\n return 'H'\r\n\r\n\r\nclass MSIStringData:\r\n def __init__(self, string_data: ByteStr, string_pool: ByteStr):\r\n data = StructReader(string_data)\r\n pool = StructReader(string_pool)\r\n self.strings: List[bytes] = []\r\n self.provided_ref_count: List[int] = []\r\n self.computed_ref_count: List[int] = []\r\n self.codepage = pool.u16()\r\n self._unknown = pool.u16()\r\n while not pool.eof:\r\n size, rc = pool.read_struct('<HH')\r\n string = data.read_bytes(size)\r\n self.strings.append(string)\r\n self.provided_ref_count.append(rc)\r\n self.computed_ref_count.append(0)\r\n\r\n @cached_property\r\n def codec(self):\r\n try:\r\n return codecs.lookup(F'cp{self.codepage}').name\r\n except Exception:\r\n xtmsi.log_info('failed looking up codec', self.codepage)\r\n return 'latin1'\r\n\r\n def __len__(self):\r\n return len(self.strings)\r\n\r\n def __iter__(self):\r\n yield from range(1, len(self) + 1)\r\n\r\n def __contains__(self, index):\r\n return 0 < index <= 
len(self)\r\n\r\n def ref(self, index: int, increment=True) -> Union[str, bytes]:\r\n assert index > 0\r\n index -= 1\r\n if increment:\r\n self.computed_ref_count[index] += 1\r\n data = self.strings[index]\r\n data = data.decode(self.codec)\r\n return data\r\n\r\n\r\nclass xtmsi(xtdoc):\r\n \"\"\"\r\n Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains\r\n parsed MSI table information, similar to the output of the Orca tool. Binary streams are placed in a\r\n virtual folder called \"Binary\", and extracted scripts from custom actions are separately extracted in\r\n a virtual folder named \"Action\".\r\n \"\"\"\r\n\r\n _SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'\r\n\r\n # https://learn.microsoft.com/en-us/windows/win32/msi/summary-list-of-all-custom-action-types\r\n _CUSTOM_ACTION_TYPES = {\r\n 0x01: 'DLL file stored in a Binary table stream.',\r\n 0x02: 'EXE file stored in a Binary table stream.',\r\n 0x05: 'JScript file stored in a Binary table stream.',\r\n 0x06: 'VBScript file stored in a Binary table stream.',\r\n 0x11: 'DLL file that is installed with a product.',\r\n 0x12: 'EXE file that is installed with a product.',\r\n 0x13: 'Displays a specified error message and returns failure, terminating the installation.',\r\n 0x15: 'JScript file that is installed with a product.',\r\n 0x16: 'VBScript file that is installed with a product.',\r\n 0x22: 'EXE file having a path referencing a directory.',\r\n 0x23: 'Directory set with formatted text.',\r\n 0x25: 'JScript text stored in this sequence table.',\r\n 0x26: 'VBScript text stored in this sequence table.',\r\n 0x32: 'EXE file having a path specified by a property value.',\r\n 0x33: 'Property set with formatted text.',\r\n 0x35: 'JScript text specified by a property value.',\r\n 0x36: 'VBScript text specified by a property value.',\r\n }\r\n\r\n def unpack(self, data):\r\n streams = {result.path: result for result in super().unpack(data)}\r\n\r\n def stream(name: str):\r\n return streams.pop(name).get_data()\r\n\r\n def column_formats(table: Dict[str, MSITableColumnInfo]) -> str:\r\n return ''.join(v.struct_format for v in table.values())\r\n\r\n def stream_to_rows(data: ByteStr, row_format: str):\r\n row_size = struct.calcsize(F'<{row_format}')\r\n row_count = int(len(data) / row_size)\r\n reader = StructReader(data)\r\n columns = [reader.read_struct(F'<{sc*row_count}') for sc in row_format]\r\n for i in range(row_count):\r\n yield [c[i] for c in columns]\r\n\r\n tables: Dict[str, Dict[str, MSITableColumnInfo]] = collections.defaultdict(collections.OrderedDict)\r\n strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))\r\n\r\n for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(stream('!_Columns'), 'HHHH'):\r\n tbl_name = strings.ref(tbl_name_id)\r\n col_name = strings.ref(col_name_id)\r\n tables[tbl_name][col_name] = MSITableColumnInfo(col_number, col_attributes)\r\n\r\n table_names_given = {strings.ref(k) for k in chunks.unpack(stream('!_Tables'), 2, False)}\r\n table_names_known = set(tables)\r\n\r\n for name in table_names_known - table_names_given:\r\n self.log_warn(F'table name known but not given: {name}')\r\n for name in table_names_given - table_names_known:\r\n self.log_warn(F'table name given but not known: {name}')\r\n\r\n class ScriptItem(NamedTuple):\r\n row_index: int\r\n extension: Optional[str]\r\n\r\n processed_table_data: Dict[str, List[Dict[str, str]]] = {}\r\n tbl_properties: Dict[str, str] = {}\r\n tbl_files: Dict[str, str] 
= {}\r\n tbl_components: Dict[str, str] = {}\r\n postprocessing: List[ScriptItem] = []\r\n\r\n def format_string(string: str):\r\n # https://learn.microsoft.com/en-us/windows/win32/msi/formatted\r\n def _replace(match: re.Match[str]):\r\n _replace.done = False\r\n prefix, name = match.groups()\r\n if not prefix:\r\n tbl = tbl_properties\r\n elif prefix in '%':\r\n name = name.rstrip('%').upper()\r\n return F'%{name}%'\r\n elif prefix in '!#':\r\n tbl = tbl_files\r\n elif prefix in '$':\r\n tbl = tbl_components\r\n else:\r\n raise ValueError\r\n return tbl.get(name, '')\r\n while True:\r\n _replace.done = True\r\n string = re.sub(R'''(?x)\r\n \\[ # open square brackent\r\n (?![~\\\\]) # not followed by escapes\r\n ([%$!#]?) # any of the valid prefix characters\r\n ([^[\\]{}]+) # no brackets or braces\r\n \\]''', _replace, string)\r\n if _replace.done:\r\n break\r\n string = re.sub(r'\\[\\\\(.)\\]', r'\\1', string)\r\n string = string.replace('[~]', '\\0')\r\n return string\r\n\r\n for table_name, table in tables.items():\r\n stream_name = F'!{table_name}'\r\n if stream_name not in streams:\r\n continue\r\n processed = []\r\n info = list(table.values())\r\n for r, row in enumerate(stream_to_rows(stream(stream_name), column_formats(table))):\r\n values = []\r\n for index, value in enumerate(row):\r\n vt = info[index].type\r\n if vt is MsiType.Long:\r\n if value != 0:\r\n value -= 0x80000000\r\n elif vt is MsiType.Short:\r\n if value != 0:\r\n value -= 0x8000\r\n elif value in strings:\r\n value = strings.ref(value)\r\n elif not info[index].is_integer:\r\n value = ''\r\n values.append(value)\r\n if table_name == 'Property':\r\n tbl_properties[values[0]] = values[1]\r\n if table_name == 'File':\r\n tbl_properties[values[0]] = values[2]\r\n if table_name == 'Component':\r\n tbl_properties[values[0]] = F'%{values[2]}%'\r\n entry = dict(zip(table, values))\r\n einfo = {t: i for t, i in zip(table, info)}\r\n if table_name == 'MsiFileHash':\r\n entry['Hash'] = struct.pack(\r\n '<IIII',\r\n row[2] ^ 0x80000000,\r\n row[3] ^ 0x80000000,\r\n row[4] ^ 0x80000000,\r\n row[5] ^ 0x80000000,\r\n ).hex()\r\n if table_name == 'CustomAction':\r\n code = row[1] & 0x3F\r\n try:\r\n entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]\r\n except LookupError:\r\n pass\r\n t = einfo.get('Target')\r\n c = {0x25: 'js', 0x26: 'vbs', 0x33: None}\r\n if code in c and t and not t.is_integer:\r\n postprocessing.append(ScriptItem(r, c[code]))\r\n processed.append(entry)\r\n if processed:\r\n processed_table_data[table_name] = processed\r\n\r\n ca = processed_table_data.get('CustomAction', None)\r\n for item in postprocessing:\r\n entry = ca[item.row_index]\r\n try:\r\n path: str = entry['Action']\r\n data: str = entry['Target']\r\n except KeyError:\r\n continue\r\n root = F'Action/{path}'\r\n if item.extension:\r\n path = F'{root}.{item.extension}'\r\n streams[path] = UnpackResult(path, data.encode(self.codec))\r\n continue\r\n data = format_string(data)\r\n parts = [part.partition('\\x02') for part in data.split('\\x01')]\r\n if not all(part[1] == '\\x02' for part in parts):\r\n continue\r\n for name, _, script in parts:\r\n if not name.lower().startswith('script'):\r\n continue\r\n if not script:\r\n continue\r\n path = F'{root}.{name}'\r\n streams[path] = UnpackResult(path, script.encode(self.codec))\r\n\r\n for ignored_stream in [\r\n '[5]SummaryInformation',\r\n '[5]DocumentSummaryInformation',\r\n '[5]DigitalSignature',\r\n '[5]MsiDigitalSignatureEx'\r\n ]:\r\n streams.pop(ignored_stream, None)\r\n\r\n inconsistencies 
= 0\r\n for k in range(len(strings)):\r\n c = strings.computed_ref_count[k]\r\n p = strings.provided_ref_count[k]\r\n if c != p and not self.log_debug(F'string reference count computed={c} provided={p}:', strings.ref(k + 1, False)):\r\n inconsistencies += 1\r\n if inconsistencies:\r\n self.log_info(F'found {inconsistencies} incorrect string reference counts')\r\n\r\n def fix_msi_path(path: str):\r\n prefix, dot, name = path.partition('.')\r\n if dot == '.' and prefix.lower() == 'binary':\r\n path = F'{prefix}/{name}'\r\n return path\r\n\r\n streams = {fix_msi_path(path): item for path, item in streams.items()}\r\n ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME,\r\n json.dumps(processed_table_data, indent=4).encode(self.codec))\r\n streams[ds.path] = ds\r\n\r\n for path in sorted(streams):\r\n streams[path].path = path\r\n yield streams[path]\r\n\r\n @classmethod\r\n def handles(self, data: bytearray):\r\n if not data.startswith(B'\\xD0\\xCF\\x11\\xE0'):\r\n return False\r\n return FileMagicInfo(data).extension == 'msi'\r\n\r\n\r\nxtmsi.__doc__ = xtmsi.__doc__.format(FN=xtmsi._SYNTHETIC_STREAMS_FILENAME)\r\n", "step-ids": [ 14, 21, 22, 26, 27 ] }
[ 14, 21, 22, 26, 27 ]
# -*- coding: utf-8 -*-

__all__ = ["kepler", "quad_solution_vector", "contact_points"]


import numpy as np

from .. import driver


def kepler(mean_anomaly, eccentricity):
    mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)
    eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)
    sinf = np.empty_like(mean_anomaly)
    cosf = np.empty_like(mean_anomaly)
    driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)
    return sinf, cosf


def quad_solution_vector(b, r):
    b = np.ascontiguousarray(b, dtype=np.float64)
    r = np.ascontiguousarray(r, dtype=np.float64)
    s = np.empty(r.shape + (3,), dtype=np.float64)
    driver.quad_solution_vector(b, r, s)
    return s


def contact_points(a, e, cosw, sinw, cosi, sini, L):
    a = np.ascontiguousarray(a, dtype=np.float64)
    e = np.ascontiguousarray(e, dtype=np.float64)
    cosw = np.ascontiguousarray(cosw, dtype=np.float64)
    sinw = np.ascontiguousarray(sinw, dtype=np.float64)
    cosi = np.ascontiguousarray(cosi, dtype=np.float64)
    sini = np.ascontiguousarray(sini, dtype=np.float64)
    L = np.ascontiguousarray(L, dtype=np.float64)
    M_left = np.empty_like(a)
    M_right = np.empty_like(a)
    flag = np.empty_like(a, dtype=np.int32)
    driver.contact_points(
        a, e, cosw, sinw, cosi, sini, L, M_left, M_right, flag
    )
    return M_left, M_right, flag
normal
{ "blob_id": "ccd32a6ca98c205a6f5d4936288392251522db29", "index": 4896, "step-1": "<mask token>\n\n\ndef kepler(mean_anomaly, eccentricity):\n mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)\n eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)\n sinf = np.empty_like(mean_anomaly)\n cosf = np.empty_like(mean_anomaly)\n driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)\n return sinf, cosf\n\n\n<mask token>\n\n\ndef contact_points(a, e, cosw, sinw, cosi, sini, L):\n a = np.ascontiguousarray(a, dtype=np.float64)\n e = np.ascontiguousarray(e, dtype=np.float64)\n cosw = np.ascontiguousarray(cosw, dtype=np.float64)\n sinw = np.ascontiguousarray(sinw, dtype=np.float64)\n cosi = np.ascontiguousarray(cosi, dtype=np.float64)\n sini = np.ascontiguousarray(sini, dtype=np.float64)\n L = np.ascontiguousarray(L, dtype=np.float64)\n M_left = np.empty_like(a)\n M_right = np.empty_like(a)\n flag = np.empty_like(a, dtype=np.int32)\n driver.contact_points(a, e, cosw, sinw, cosi, sini, L, M_left, M_right,\n flag)\n return M_left, M_right, flag\n", "step-2": "<mask token>\n\n\ndef kepler(mean_anomaly, eccentricity):\n mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)\n eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)\n sinf = np.empty_like(mean_anomaly)\n cosf = np.empty_like(mean_anomaly)\n driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)\n return sinf, cosf\n\n\ndef quad_solution_vector(b, r):\n b = np.ascontiguousarray(b, dtype=np.float64)\n r = np.ascontiguousarray(r, dtype=np.float64)\n s = np.empty(r.shape + (3,), dtype=np.float64)\n driver.quad_solution_vector(b, r, s)\n return s\n\n\ndef contact_points(a, e, cosw, sinw, cosi, sini, L):\n a = np.ascontiguousarray(a, dtype=np.float64)\n e = np.ascontiguousarray(e, dtype=np.float64)\n cosw = np.ascontiguousarray(cosw, dtype=np.float64)\n sinw = np.ascontiguousarray(sinw, dtype=np.float64)\n cosi = np.ascontiguousarray(cosi, dtype=np.float64)\n sini = np.ascontiguousarray(sini, dtype=np.float64)\n L = np.ascontiguousarray(L, dtype=np.float64)\n M_left = np.empty_like(a)\n M_right = np.empty_like(a)\n flag = np.empty_like(a, dtype=np.int32)\n driver.contact_points(a, e, cosw, sinw, cosi, sini, L, M_left, M_right,\n flag)\n return M_left, M_right, flag\n", "step-3": "__all__ = ['kepler', 'quad_solution_vector', 'contact_points']\n<mask token>\n\n\ndef kepler(mean_anomaly, eccentricity):\n mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)\n eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)\n sinf = np.empty_like(mean_anomaly)\n cosf = np.empty_like(mean_anomaly)\n driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)\n return sinf, cosf\n\n\ndef quad_solution_vector(b, r):\n b = np.ascontiguousarray(b, dtype=np.float64)\n r = np.ascontiguousarray(r, dtype=np.float64)\n s = np.empty(r.shape + (3,), dtype=np.float64)\n driver.quad_solution_vector(b, r, s)\n return s\n\n\ndef contact_points(a, e, cosw, sinw, cosi, sini, L):\n a = np.ascontiguousarray(a, dtype=np.float64)\n e = np.ascontiguousarray(e, dtype=np.float64)\n cosw = np.ascontiguousarray(cosw, dtype=np.float64)\n sinw = np.ascontiguousarray(sinw, dtype=np.float64)\n cosi = np.ascontiguousarray(cosi, dtype=np.float64)\n sini = np.ascontiguousarray(sini, dtype=np.float64)\n L = np.ascontiguousarray(L, dtype=np.float64)\n M_left = np.empty_like(a)\n M_right = np.empty_like(a)\n flag = np.empty_like(a, dtype=np.int32)\n driver.contact_points(a, e, cosw, sinw, cosi, sini, L, 
M_left, M_right,\n flag)\n return M_left, M_right, flag\n", "step-4": "__all__ = ['kepler', 'quad_solution_vector', 'contact_points']\nimport numpy as np\nfrom .. import driver\n\n\ndef kepler(mean_anomaly, eccentricity):\n mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)\n eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)\n sinf = np.empty_like(mean_anomaly)\n cosf = np.empty_like(mean_anomaly)\n driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)\n return sinf, cosf\n\n\ndef quad_solution_vector(b, r):\n b = np.ascontiguousarray(b, dtype=np.float64)\n r = np.ascontiguousarray(r, dtype=np.float64)\n s = np.empty(r.shape + (3,), dtype=np.float64)\n driver.quad_solution_vector(b, r, s)\n return s\n\n\ndef contact_points(a, e, cosw, sinw, cosi, sini, L):\n a = np.ascontiguousarray(a, dtype=np.float64)\n e = np.ascontiguousarray(e, dtype=np.float64)\n cosw = np.ascontiguousarray(cosw, dtype=np.float64)\n sinw = np.ascontiguousarray(sinw, dtype=np.float64)\n cosi = np.ascontiguousarray(cosi, dtype=np.float64)\n sini = np.ascontiguousarray(sini, dtype=np.float64)\n L = np.ascontiguousarray(L, dtype=np.float64)\n M_left = np.empty_like(a)\n M_right = np.empty_like(a)\n flag = np.empty_like(a, dtype=np.int32)\n driver.contact_points(a, e, cosw, sinw, cosi, sini, L, M_left, M_right,\n flag)\n return M_left, M_right, flag\n", "step-5": "# -*- coding: utf-8 -*-\n\n__all__ = [\"kepler\", \"quad_solution_vector\", \"contact_points\"]\n\n\nimport numpy as np\n\nfrom .. import driver\n\n\ndef kepler(mean_anomaly, eccentricity):\n mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)\n eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)\n sinf = np.empty_like(mean_anomaly)\n cosf = np.empty_like(mean_anomaly)\n driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)\n return sinf, cosf\n\n\ndef quad_solution_vector(b, r):\n b = np.ascontiguousarray(b, dtype=np.float64)\n r = np.ascontiguousarray(r, dtype=np.float64)\n s = np.empty(r.shape + (3,), dtype=np.float64)\n driver.quad_solution_vector(b, r, s)\n return s\n\n\ndef contact_points(a, e, cosw, sinw, cosi, sini, L):\n a = np.ascontiguousarray(a, dtype=np.float64)\n e = np.ascontiguousarray(e, dtype=np.float64)\n cosw = np.ascontiguousarray(cosw, dtype=np.float64)\n sinw = np.ascontiguousarray(sinw, dtype=np.float64)\n cosi = np.ascontiguousarray(cosi, dtype=np.float64)\n sini = np.ascontiguousarray(sini, dtype=np.float64)\n L = np.ascontiguousarray(L, dtype=np.float64)\n M_left = np.empty_like(a)\n M_right = np.empty_like(a)\n flag = np.empty_like(a, dtype=np.int32)\n driver.contact_points(\n a, e, cosw, sinw, cosi, sini, L, M_left, M_right, flag\n )\n return M_left, M_right, flag\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
#! /usr/bin/env python3

import os
import requests
# import json

external_ip = "xx"
data_path = "/data/feedback/"
url = "http://{}/feedback/".format(external_ip)

def read():
    # read file
    file_list = os.listdir(data_path)

    result_list = []
    for file in file_list:
        with open(data_path + file) as f:
            # read line, title, name, date, feedback
            content = f.readlines()
            # envolope to dictionary
            dict = {}
            dict["title"] = content[0]
            dict["name"] = content[1]
            dict["date"] = content[2]
            dict["feedback"] = content[3]
            result_list.append(dict)
            f.close()
    return result_list


def send(list):
    for dict in list:
        response = requests.post(url, json=dict)
        if(response.status_code == 200):
            forDEBUG("SEND_SUCC", dict["title"])
        else:
            forDEBUG("SEND_FAIL", dict["title"])

def forDEBUG(p1, p2):
    print("DEBUG:: {}, {}".format(p1, p2))

def action():
    plist = read()
    send(plist)

action()
normal
{ "blob_id": "6f1bb9fde9ed9667ab81baa9e8ec965d711a0556", "index": 9853, "step-1": "<mask token>\n\n\ndef read():\n file_list = os.listdir(data_path)\n result_list = []\n for file in file_list:\n with open(data_path + file) as f:\n content = f.readlines()\n dict = {}\n dict['title'] = content[0]\n dict['name'] = content[1]\n dict['date'] = content[2]\n dict['feedback'] = content[3]\n result_list.append(dict)\n f.close()\n return result_list\n\n\ndef send(list):\n for dict in list:\n response = requests.post(url, json=dict)\n if response.status_code == 200:\n forDEBUG('SEND_SUCC', dict['title'])\n else:\n forDEBUG('SEND_FAIL', dict['title'])\n\n\ndef forDEBUG(p1, p2):\n print('DEBUG:: {}, {}'.format(p1, p2))\n\n\ndef action():\n plist = read()\n send(plist)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef read():\n file_list = os.listdir(data_path)\n result_list = []\n for file in file_list:\n with open(data_path + file) as f:\n content = f.readlines()\n dict = {}\n dict['title'] = content[0]\n dict['name'] = content[1]\n dict['date'] = content[2]\n dict['feedback'] = content[3]\n result_list.append(dict)\n f.close()\n return result_list\n\n\ndef send(list):\n for dict in list:\n response = requests.post(url, json=dict)\n if response.status_code == 200:\n forDEBUG('SEND_SUCC', dict['title'])\n else:\n forDEBUG('SEND_FAIL', dict['title'])\n\n\ndef forDEBUG(p1, p2):\n print('DEBUG:: {}, {}'.format(p1, p2))\n\n\ndef action():\n plist = read()\n send(plist)\n\n\naction()\n", "step-3": "<mask token>\nexternal_ip = 'xx'\ndata_path = '/data/feedback/'\nurl = 'http://{}/feedback/'.format(external_ip)\n\n\ndef read():\n file_list = os.listdir(data_path)\n result_list = []\n for file in file_list:\n with open(data_path + file) as f:\n content = f.readlines()\n dict = {}\n dict['title'] = content[0]\n dict['name'] = content[1]\n dict['date'] = content[2]\n dict['feedback'] = content[3]\n result_list.append(dict)\n f.close()\n return result_list\n\n\ndef send(list):\n for dict in list:\n response = requests.post(url, json=dict)\n if response.status_code == 200:\n forDEBUG('SEND_SUCC', dict['title'])\n else:\n forDEBUG('SEND_FAIL', dict['title'])\n\n\ndef forDEBUG(p1, p2):\n print('DEBUG:: {}, {}'.format(p1, p2))\n\n\ndef action():\n plist = read()\n send(plist)\n\n\naction()\n", "step-4": "import os\nimport requests\nexternal_ip = 'xx'\ndata_path = '/data/feedback/'\nurl = 'http://{}/feedback/'.format(external_ip)\n\n\ndef read():\n file_list = os.listdir(data_path)\n result_list = []\n for file in file_list:\n with open(data_path + file) as f:\n content = f.readlines()\n dict = {}\n dict['title'] = content[0]\n dict['name'] = content[1]\n dict['date'] = content[2]\n dict['feedback'] = content[3]\n result_list.append(dict)\n f.close()\n return result_list\n\n\ndef send(list):\n for dict in list:\n response = requests.post(url, json=dict)\n if response.status_code == 200:\n forDEBUG('SEND_SUCC', dict['title'])\n else:\n forDEBUG('SEND_FAIL', dict['title'])\n\n\ndef forDEBUG(p1, p2):\n print('DEBUG:: {}, {}'.format(p1, p2))\n\n\ndef action():\n plist = read()\n send(plist)\n\n\naction()\n", "step-5": "#! 
/usr/bin/env python3\n\nimport os\nimport requests\n# import json\n\nexternal_ip = \"xx\"\ndata_path = \"/data/feedback/\"\nurl = \"http://{}/feedback/\".format(external_ip)\n\ndef read():\n # read file\n file_list = os.listdir(data_path)\n\n result_list = []\n for file in file_list:\n with open(data_path + file) as f:\n # read line, title, name, date, feedback\n content = f.readlines()\n # envolope to dictionary\n dict = {}\n dict[\"title\"] = content[0]\n dict[\"name\"] = content[1]\n dict[\"date\"] = content[2]\n dict[\"feedback\"] = content[3]\n result_list.append(dict)\n f.close()\n return result_list\n\n \ndef send(list):\n for dict in list:\n response = requests.post(url, json=dict)\n if(response.status_code == 200):\n forDEBUG(\"SEND_SUCC\", dict[\"title\"])\n else:\n forDEBUG(\"SEND_FAIL\", dict[\"title\"])\n\ndef forDEBUG(p1, p2):\n print(\"DEBUG:: {}, {}\".format(p1, p2))\n\ndef action():\n plist = read()\n send(plist)\n\naction()", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
from threading import Thread
import time


def sleeping():
    time.sleep(5)
    print('Ended')


Thread(target=sleeping, daemon=True).start()
print('Hello world')
time.sleep(5.5)
normal
{ "blob_id": "628fdf848079d0ecf5bf4f5bd46e07ad6cd10358", "index": 5070, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef sleeping():\n time.sleep(5)\n print('Ended')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef sleeping():\n time.sleep(5)\n print('Ended')\n\n\nThread(target=sleeping, daemon=True).start()\nprint('Hello world')\ntime.sleep(5.5)\n", "step-4": "from threading import Thread\nimport time\n\n\ndef sleeping():\n time.sleep(5)\n print('Ended')\n\n\nThread(target=sleeping, daemon=True).start()\nprint('Hello world')\ntime.sleep(5.5)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import nltk
import spacy
import textacy
from keras.layers import Embedding, Bidirectional, Dense, Dropout, BatchNormalization
from keras_preprocessing.sequence import pad_sequences
from keras_preprocessing.text import Tokenizer
from nltk import word_tokenize, re
from rasa import model
import pandas as pd
from spacy import lemmatizer

nlp = spacy.load('en_core_web_sm')

text=input("Enter the text to find the triplet: ")
str=nlp(text)

def load_dataset(filename):
    df = pd.read_csv(filename, encoding="latin1",
                     names=["Sentence", "Intent"])
    intent = df["Intent"]
    unique_intent = list(set(intent))
    sentences = list(df["Sentence"])

    return (intent, unique_intent, sentences)


def cleaning(sentences):
    words = []
    for s in sentences:
        clean = re.sub(r'[^ a-z A-Z 0-9]', " ", s)
        w = nltk.word_tokenize(clean)
        # lemmatizing
        words.append([lemmatizer.lemmatize(i.lower()) for i in w])

    return words

def create_tokenizer(words,
                     filters = '!"#$%&()*+,-./:;<=>?@[\]^_`{|}~'):
    token = Tokenizer(filters = filters)
    token.fit_on_texts(words)
    return token

def max_length(words):
    return(len(max(words, key = len)))

def encoding_doc(token, words):
    return(token.texts_to_sequences(words))

def findTriplets(str):
    tuple_data=textacy.extract.subject_verb_object_triples(str)
    return tuple_data

def creatingLists(tuple_data):
    tuple_to_lists=list(tuple_data)
    return tuple_to_lists


def displaySubjectVerbObject(tuples_to_lists):
    for item in tuples_to_lists:
        print(item)


tuple_data=findTriplets(str)
list=creatingLists(tuple_data)
displaySubjectVerbObject(list)
normal
{ "blob_id": "707855a4e07b68d9ae97c2e1dc8bfd52f11c314c", "index": 1812, "step-1": "<mask token>\n\n\ndef load_dataset(filename):\n df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])\n intent = df['Intent']\n unique_intent = list(set(intent))\n sentences = list(df['Sentence'])\n return intent, unique_intent, sentences\n\n\ndef cleaning(sentences):\n words = []\n for s in sentences:\n clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)\n w = nltk.word_tokenize(clean)\n words.append([lemmatizer.lemmatize(i.lower()) for i in w])\n return words\n\n\ndef create_tokenizer(words, filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~'):\n token = Tokenizer(filters=filters)\n token.fit_on_texts(words)\n return token\n\n\ndef max_length(words):\n return len(max(words, key=len))\n\n\n<mask token>\n\n\ndef findTriplets(str):\n tuple_data = textacy.extract.subject_verb_object_triples(str)\n return tuple_data\n\n\ndef creatingLists(tuple_data):\n tuple_to_lists = list(tuple_data)\n return tuple_to_lists\n\n\ndef displaySubjectVerbObject(tuples_to_lists):\n for item in tuples_to_lists:\n print(item)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef load_dataset(filename):\n df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])\n intent = df['Intent']\n unique_intent = list(set(intent))\n sentences = list(df['Sentence'])\n return intent, unique_intent, sentences\n\n\ndef cleaning(sentences):\n words = []\n for s in sentences:\n clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)\n w = nltk.word_tokenize(clean)\n words.append([lemmatizer.lemmatize(i.lower()) for i in w])\n return words\n\n\ndef create_tokenizer(words, filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~'):\n token = Tokenizer(filters=filters)\n token.fit_on_texts(words)\n return token\n\n\ndef max_length(words):\n return len(max(words, key=len))\n\n\ndef encoding_doc(token, words):\n return token.texts_to_sequences(words)\n\n\ndef findTriplets(str):\n tuple_data = textacy.extract.subject_verb_object_triples(str)\n return tuple_data\n\n\ndef creatingLists(tuple_data):\n tuple_to_lists = list(tuple_data)\n return tuple_to_lists\n\n\ndef displaySubjectVerbObject(tuples_to_lists):\n for item in tuples_to_lists:\n print(item)\n\n\n<mask token>\ndisplaySubjectVerbObject(list)\n", "step-3": "<mask token>\nnlp = spacy.load('en_core_web_sm')\ntext = input('Enter the text to find the triplet: ')\nstr = nlp(text)\n\n\ndef load_dataset(filename):\n df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])\n intent = df['Intent']\n unique_intent = list(set(intent))\n sentences = list(df['Sentence'])\n return intent, unique_intent, sentences\n\n\ndef cleaning(sentences):\n words = []\n for s in sentences:\n clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)\n w = nltk.word_tokenize(clean)\n words.append([lemmatizer.lemmatize(i.lower()) for i in w])\n return words\n\n\ndef create_tokenizer(words, filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~'):\n token = Tokenizer(filters=filters)\n token.fit_on_texts(words)\n return token\n\n\ndef max_length(words):\n return len(max(words, key=len))\n\n\ndef encoding_doc(token, words):\n return token.texts_to_sequences(words)\n\n\ndef findTriplets(str):\n tuple_data = textacy.extract.subject_verb_object_triples(str)\n return tuple_data\n\n\ndef creatingLists(tuple_data):\n tuple_to_lists = list(tuple_data)\n return tuple_to_lists\n\n\ndef displaySubjectVerbObject(tuples_to_lists):\n for item in tuples_to_lists:\n print(item)\n\n\ntuple_data = findTriplets(str)\nlist = 
creatingLists(tuple_data)\ndisplaySubjectVerbObject(list)\n", "step-4": "import nltk\nimport spacy\nimport textacy\nfrom keras.layers import Embedding, Bidirectional, Dense, Dropout, BatchNormalization\nfrom keras_preprocessing.sequence import pad_sequences\nfrom keras_preprocessing.text import Tokenizer\nfrom nltk import word_tokenize, re\nfrom rasa import model\nimport pandas as pd\nfrom spacy import lemmatizer\nnlp = spacy.load('en_core_web_sm')\ntext = input('Enter the text to find the triplet: ')\nstr = nlp(text)\n\n\ndef load_dataset(filename):\n df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])\n intent = df['Intent']\n unique_intent = list(set(intent))\n sentences = list(df['Sentence'])\n return intent, unique_intent, sentences\n\n\ndef cleaning(sentences):\n words = []\n for s in sentences:\n clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)\n w = nltk.word_tokenize(clean)\n words.append([lemmatizer.lemmatize(i.lower()) for i in w])\n return words\n\n\ndef create_tokenizer(words, filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~'):\n token = Tokenizer(filters=filters)\n token.fit_on_texts(words)\n return token\n\n\ndef max_length(words):\n return len(max(words, key=len))\n\n\ndef encoding_doc(token, words):\n return token.texts_to_sequences(words)\n\n\ndef findTriplets(str):\n tuple_data = textacy.extract.subject_verb_object_triples(str)\n return tuple_data\n\n\ndef creatingLists(tuple_data):\n tuple_to_lists = list(tuple_data)\n return tuple_to_lists\n\n\ndef displaySubjectVerbObject(tuples_to_lists):\n for item in tuples_to_lists:\n print(item)\n\n\ntuple_data = findTriplets(str)\nlist = creatingLists(tuple_data)\ndisplaySubjectVerbObject(list)\n", "step-5": "import nltk\nimport spacy\nimport textacy\nfrom keras.layers import Embedding, Bidirectional, Dense, Dropout, BatchNormalization\nfrom keras_preprocessing.sequence import pad_sequences\nfrom keras_preprocessing.text import Tokenizer\nfrom nltk import word_tokenize, re\nfrom rasa import model\nimport pandas as pd\nfrom spacy import lemmatizer\n\nnlp = spacy.load('en_core_web_sm')\n\ntext=input(\"Enter the text to find the triplet: \")\nstr=nlp(text)\n\ndef load_dataset(filename):\n df = pd.read_csv(filename, encoding=\"latin1\",\n names=[\"Sentence\", \"Intent\"])\n intent = df[\"Intent\"]\n unique_intent = list(set(intent))\n sentences = list(df[\"Sentence\"])\n\n return (intent, unique_intent, sentences)\n\n\ndef cleaning(sentences):\n words = []\n for s in sentences:\n clean = re.sub(r'[^ a-z A-Z 0-9]', \" \", s)\n w = nltk.word_tokenize(clean)\n # lemmatizing\n words.append([lemmatizer.lemmatize(i.lower()) for i in w])\n\n\n return words\n\ndef create_tokenizer(words,\n filters = '!\"#$%&()*+,-./:;<=>?@[\\]^_`{|}~'):\n token = Tokenizer(filters = filters)\n token.fit_on_texts(words)\n return token\ndef max_length(words):\n return(len(max(words, key = len)))\ndef encoding_doc(token, words):\n return(token.texts_to_sequences(words))\n\ndef findTriplets(str):\n tuple_data=textacy.extract.subject_verb_object_triples(str)\n return tuple_data\n\ndef creatingLists(tuple_data):\n tuple_to_lists=list(tuple_data)\n return tuple_to_lists\n\n\ndef displaySubjectVerbObject(tuples_to_lists):\n for item in tuples_to_lists:\n print(item)\n\n\ntuple_data=findTriplets(str)\nlist=creatingLists(tuple_data)\ndisplaySubjectVerbObject(list)", "step-ids": [ 7, 9, 10, 11, 12 ] }
[ 7, 9, 10, 11, 12 ]
import scrapy
from yijing64.items import Yijing64Item
# import pymysql


class ZhouyiSpider(scrapy.Spider):
    name = 'zhouyi'
    allowed_domains = ['m.zhouyi.cc']
    start_urls = ['https://m.zhouyi.cc/zhouyi/yijing64/']

    def parse(self, response):
        li_list = response.xpath("//div[@class='gualist1 tip_text']/ul/li")

        for li in li_list:
            item = Yijing64Item()
            item['name'] = li.xpath("./a/text()").extract_first()
            # item['urls'] = li.xpath("./a/@href").extract_first()
            detail_urls = 'https://m.zhouyi.cc' + \
                li.xpath("./a/@href").extract_first()

            if detail_urls is not None:
                yield scrapy.Request(detail_urls, callback=self.parse_detail, meta={'item': item})

    def parse_detail(self, response):
        item = response.meta["item"]
        item['hexagram1'] = response.xpath("//div/table/tbody/tr[3]/td[1]/text()").extract_first().strip()
        item['hexagram2'] = response.xpath("//div/table/tbody/tr[3]/td[2]/text()").extract_first().strip()
        item['hexagram3'] = response.xpath("//div/table/tbody/tr[3]/td[3]/text()").extract_first().strip()
        item['hexagram4'] = response.xpath("//div/table/tbody/tr[3]/td[4]/text()").extract_first().strip()
        # item['hexagram'] = response.xpath("//div[@class='tip_text'][1]").extract_first().strip()
        # item['one_yao'] = response.xpath("//div[@class='tip_text'][2]").extract_first().strip()
        # item['two_yao'] = response.xpath("//div[@class='tip_text'][3]").extract_first().strip()
        # item['san_yao'] = response.xpath("//div[@class='tip_text'][4]").extract_first().strip()
        # item['si_yao'] = response.xpath("//div[@class='tip_text'][5]").extract_first().strip()
        # item['wu_yao'] = response.xpath("//div[@class='tip_text'][6]").extract_first().strip()
        # item['liu_yao'] = response.xpath("//div[@class='tip_text'][7]").extract_first().strip()
        yield item
        # hexagram_list = response.xpath(
        #     "//div/table/tbody/tr[3]/td/text()").extract()
        # for i, v in enumerate(hexagram_list):
        #     # print("=="*10)
        #     # print(i,index)
        #     if i == 0:
        #         item['hexagram1'] = v.strip()
        #     elif i == 1:
        #         item['hexagram2'] = v.strip()
        #     elif i == 2:
        #         item['hexagram3'] = v.strip()
        #     else:
        #         item['hexagram4'] = v.strip()
        #     yield item
        # print(item)

    # def __init__(self):
    #     con = pymysql.connect(host=settings['MYSQL_HOST'], user=settings['MYSQL_USER'], passwd=settings['MYSQL_PASS'], db=settings['MYSQL_DB'],charset='utf8')
    #     cur = con.cursor()  # 创建数据库连接,定义连接指针
    #     con.close()
normal
{ "blob_id": "cd9f25a2810b02f5588e4e9e8445e7aaec056bf8", "index": 7704, "step-1": "<mask token>\n\n\nclass ZhouyiSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def parse_detail(self, response):\n item = response.meta['item']\n item['hexagram1'] = response.xpath(\n '//div/table/tbody/tr[3]/td[1]/text()').extract_first().strip()\n item['hexagram2'] = response.xpath(\n '//div/table/tbody/tr[3]/td[2]/text()').extract_first().strip()\n item['hexagram3'] = response.xpath(\n '//div/table/tbody/tr[3]/td[3]/text()').extract_first().strip()\n item['hexagram4'] = response.xpath(\n '//div/table/tbody/tr[3]/td[4]/text()').extract_first().strip()\n yield item\n", "step-2": "<mask token>\n\n\nclass ZhouyiSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n li_list = response.xpath(\"//div[@class='gualist1 tip_text']/ul/li\")\n for li in li_list:\n item = Yijing64Item()\n item['name'] = li.xpath('./a/text()').extract_first()\n detail_urls = 'https://m.zhouyi.cc' + li.xpath('./a/@href'\n ).extract_first()\n if detail_urls is not None:\n yield scrapy.Request(detail_urls, callback=self.\n parse_detail, meta={'item': item})\n\n def parse_detail(self, response):\n item = response.meta['item']\n item['hexagram1'] = response.xpath(\n '//div/table/tbody/tr[3]/td[1]/text()').extract_first().strip()\n item['hexagram2'] = response.xpath(\n '//div/table/tbody/tr[3]/td[2]/text()').extract_first().strip()\n item['hexagram3'] = response.xpath(\n '//div/table/tbody/tr[3]/td[3]/text()').extract_first().strip()\n item['hexagram4'] = response.xpath(\n '//div/table/tbody/tr[3]/td[4]/text()').extract_first().strip()\n yield item\n", "step-3": "<mask token>\n\n\nclass ZhouyiSpider(scrapy.Spider):\n name = 'zhouyi'\n allowed_domains = ['m.zhouyi.cc']\n start_urls = ['https://m.zhouyi.cc/zhouyi/yijing64/']\n\n def parse(self, response):\n li_list = response.xpath(\"//div[@class='gualist1 tip_text']/ul/li\")\n for li in li_list:\n item = Yijing64Item()\n item['name'] = li.xpath('./a/text()').extract_first()\n detail_urls = 'https://m.zhouyi.cc' + li.xpath('./a/@href'\n ).extract_first()\n if detail_urls is not None:\n yield scrapy.Request(detail_urls, callback=self.\n parse_detail, meta={'item': item})\n\n def parse_detail(self, response):\n item = response.meta['item']\n item['hexagram1'] = response.xpath(\n '//div/table/tbody/tr[3]/td[1]/text()').extract_first().strip()\n item['hexagram2'] = response.xpath(\n '//div/table/tbody/tr[3]/td[2]/text()').extract_first().strip()\n item['hexagram3'] = response.xpath(\n '//div/table/tbody/tr[3]/td[3]/text()').extract_first().strip()\n item['hexagram4'] = response.xpath(\n '//div/table/tbody/tr[3]/td[4]/text()').extract_first().strip()\n yield item\n", "step-4": "import scrapy\nfrom yijing64.items import Yijing64Item\n\n\nclass ZhouyiSpider(scrapy.Spider):\n name = 'zhouyi'\n allowed_domains = ['m.zhouyi.cc']\n start_urls = ['https://m.zhouyi.cc/zhouyi/yijing64/']\n\n def parse(self, response):\n li_list = response.xpath(\"//div[@class='gualist1 tip_text']/ul/li\")\n for li in li_list:\n item = Yijing64Item()\n item['name'] = li.xpath('./a/text()').extract_first()\n detail_urls = 'https://m.zhouyi.cc' + li.xpath('./a/@href'\n ).extract_first()\n if detail_urls is not None:\n yield scrapy.Request(detail_urls, callback=self.\n parse_detail, meta={'item': item})\n\n def parse_detail(self, response):\n item = response.meta['item']\n item['hexagram1'] = response.xpath(\n 
'//div/table/tbody/tr[3]/td[1]/text()').extract_first().strip()\n item['hexagram2'] = response.xpath(\n '//div/table/tbody/tr[3]/td[2]/text()').extract_first().strip()\n item['hexagram3'] = response.xpath(\n '//div/table/tbody/tr[3]/td[3]/text()').extract_first().strip()\n item['hexagram4'] = response.xpath(\n '//div/table/tbody/tr[3]/td[4]/text()').extract_first().strip()\n yield item\n", "step-5": "import scrapy\nfrom yijing64.items import Yijing64Item\n# import pymysql\n\n\nclass ZhouyiSpider(scrapy.Spider):\n name = 'zhouyi'\n allowed_domains = ['m.zhouyi.cc']\n start_urls = ['https://m.zhouyi.cc/zhouyi/yijing64/']\n\n def parse(self, response):\n li_list = response.xpath(\"//div[@class='gualist1 tip_text']/ul/li\")\n\n for li in li_list:\n item = Yijing64Item()\n item['name'] = li.xpath(\"./a/text()\").extract_first()\n # item['urls'] = li.xpath(\"./a/@href\").extract_first()\n detail_urls = 'https://m.zhouyi.cc' + \\\n li.xpath(\"./a/@href\").extract_first()\n\n if detail_urls is not None:\n yield scrapy.Request(detail_urls, callback=self.parse_detail, meta={'item': item})\n\n def parse_detail(self, response):\n item = response.meta[\"item\"]\n item['hexagram1'] = response.xpath(\"//div/table/tbody/tr[3]/td[1]/text()\").extract_first().strip()\n item['hexagram2'] = response.xpath(\"//div/table/tbody/tr[3]/td[2]/text()\").extract_first().strip()\n item['hexagram3'] = response.xpath(\"//div/table/tbody/tr[3]/td[3]/text()\").extract_first().strip()\n item['hexagram4'] = response.xpath(\"//div/table/tbody/tr[3]/td[4]/text()\").extract_first().strip()\n # item['hexagram'] = response.xpath(\"//div[@class='tip_text'][1]\").extract_first().strip()\n # item['one_yao'] = response.xpath(\"//div[@class='tip_text'][2]\").extract_first().strip()\n # item['two_yao'] = response.xpath(\"//div[@class='tip_text'][3]\").extract_first().strip()\n # item['san_yao'] = response.xpath(\"//div[@class='tip_text'][4]\").extract_first().strip()\n # item['si_yao'] = response.xpath(\"//div[@class='tip_text'][5]\").extract_first().strip()\n # item['wu_yao'] = response.xpath(\"//div[@class='tip_text'][6]\").extract_first().strip()\n # item['liu_yao'] = response.xpath(\"//div[@class='tip_text'][7]\").extract_first().strip()\n yield item\n # hexagram_list = response.xpath(\n # \"//div/table/tbody/tr[3]/td/text()\").extract()\n # for i, v in enumerate(hexagram_list):\n # # print(\"==\"*10)\n # # print(i,index)\n # if i == 0:\n # item['hexagram1'] = v.strip()\n # elif i == 1:\n # item['hexagram2'] = v.strip()\n # elif i == 2:\n # item['hexagram3'] = v.strip()\n # else:\n # item['hexagram4'] = v.strip()\n # yield item\n # print(item)\n\n # def __init__(self):\n # \tcon = pymysql.connect(host=settings['MYSQL_HOST'], user=settings['MYSQL_USER'], passwd=settings['MYSQL_PASS'], db=settings['MYSQL_DB'],charset='utf8')\n # \tcur = con.cursor() # 创建数据库连接,定义连接指针\n # \tcon.close()\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from .base import GnuRecipe


class CAresRecipe(GnuRecipe):
    def __init__(self, *args, **kwargs):
        super(CAresRecipe, self).__init__(*args, **kwargs)
        self.sha256 = '45d3c1fd29263ceec2afc8ff9cd06d5f' \
                      '8f889636eb4e80ce3cc7f0eaf7aadc6e'
        self.name = 'c-ares'
        self.version = '1.14.0'
        self.url = 'https://c-ares.haxx.se/download/$name-$version.tar.gz'
normal
{ "blob_id": "bf7676dc2c47d9cd2f1ce2d436202ae2c5061265", "index": 8634, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass CAresRecipe(GnuRecipe):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass CAresRecipe(GnuRecipe):\n\n def __init__(self, *args, **kwargs):\n super(CAresRecipe, self).__init__(*args, **kwargs)\n self.sha256 = (\n '45d3c1fd29263ceec2afc8ff9cd06d5f8f889636eb4e80ce3cc7f0eaf7aadc6e')\n self.name = 'c-ares'\n self.version = '1.14.0'\n self.url = 'https://c-ares.haxx.se/download/$name-$version.tar.gz'\n", "step-4": "from .base import GnuRecipe\n\n\nclass CAresRecipe(GnuRecipe):\n\n def __init__(self, *args, **kwargs):\n super(CAresRecipe, self).__init__(*args, **kwargs)\n self.sha256 = (\n '45d3c1fd29263ceec2afc8ff9cd06d5f8f889636eb4e80ce3cc7f0eaf7aadc6e')\n self.name = 'c-ares'\n self.version = '1.14.0'\n self.url = 'https://c-ares.haxx.se/download/$name-$version.tar.gz'\n", "step-5": "from .base import GnuRecipe\n\n\nclass CAresRecipe(GnuRecipe):\n def __init__(self, *args, **kwargs):\n super(CAresRecipe, self).__init__(*args, **kwargs)\n self.sha256 = '45d3c1fd29263ceec2afc8ff9cd06d5f' \\\n '8f889636eb4e80ce3cc7f0eaf7aadc6e'\n self.name = 'c-ares'\n self.version = '1.14.0'\n self.url = 'https://c-ares.haxx.se/download/$name-$version.tar.gz'\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Copyright (c) 2015, the Fletch project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE.md file.

{
  'variables': {
    'mac_asan_dylib': '<(PRODUCT_DIR)/libclang_rt.asan_osx_dynamic.dylib',
  },

  'targets': [
    {
      'target_name': 'fletch-vm',
      'type': 'none',
      'dependencies': [
        'src/vm/vm.gyp:fletch-vm',
      ],
    },
    {
      'target_name': 'c_test_library',
      'type': 'none',
      'dependencies': [
        'src/vm/vm.gyp:ffi_test_library',
      ],
    },
    {
      'target_name': 'natives_json',
      'type': 'none',
      'toolsets': ['host'],
      'dependencies': [
        'src/shared/shared.gyp:natives_json',
      ],
    },
    {
      'target_name': 'toplevel_fletch',
      'type': 'none',
      'toolsets': ['target'],
      'dependencies': [
        'src/tools/driver/driver.gyp:fletch',
        'copy_dart#host',
      ],
    },
    {
      # C based test executables. See also tests/cc_tests/README.md.
      'target_name': 'cc_tests',
      'type': 'none',
      'toolsets': ['target'],
      'dependencies': [
        'src/shared/shared.gyp:shared_cc_tests',
        'src/vm/vm.gyp:vm_cc_tests',
        'copy_asan',
      ],
    },
    {
      # The actual snapshots used in these tests are generated at test time.
      # TODO(zerny): Compile these programs at test time and remove this target.
      'target_name': 'snapshot_tests',
      'type': 'none',
      'toolsets': ['target'],
      'dependencies': [
        'src/vm/vm.gyp:fletch-vm',
        'copy_dart#host',
        'tests/service_tests/service_tests.gyp:service_performance_test',
        'tests/service_tests/service_tests.gyp:service_conformance_test',
        'samples/todomvc/todomvc.gyp:todomvc_sample',
        'copy_asan',
      ],
    },
    {
      'target_name': 'copy_asan',
      'type': 'none',
      'conditions': [
        [ 'OS=="mac"', {
          'copies': [
            {
              # The asan dylib file sets its install name as
              # @executable_path/..., and by copying to PRODUCT_DIR, we avoid
              # having to set DYLD_LIBRARY_PATH.
              'destination': '<(PRODUCT_DIR)',
              'files': [
                'third_party/clang/mac/lib/clang/3.7.0/'
                'lib/darwin/libclang_rt.asan_osx_dynamic.dylib',
              ],
            },
          ],
        }, { # OS!="mac"
          'actions': [
            {
              'action_name': 'touch_asan_dylib',
              'inputs': [
              ],
              'outputs': [
                '<(mac_asan_dylib)',
              ],
              'action': [
                'touch', '<@(_outputs)'
              ],
            },
          ],
        }],
      ],
    },
    {
      'target_name': 'copy_dart',
      'type': 'none',
      'toolsets': ['host'],
      'copies': [
        {
          'destination': '<(PRODUCT_DIR)',
          'files': [
            'third_party/bin/<(OS)/dart',
          ],
        },
      ],
    },
  ],
}
normal
{ "blob_id": "84b98ebf6e44d03d16f792f3586be1248c1d0221", "index": 6957, "step-1": "<mask token>\n", "step-2": "{'variables': {'mac_asan_dylib':\n '<(PRODUCT_DIR)/libclang_rt.asan_osx_dynamic.dylib'}, 'targets': [{\n 'target_name': 'fletch-vm', 'type': 'none', 'dependencies': [\n 'src/vm/vm.gyp:fletch-vm']}, {'target_name': 'c_test_library', 'type':\n 'none', 'dependencies': ['src/vm/vm.gyp:ffi_test_library']}, {\n 'target_name': 'natives_json', 'type': 'none', 'toolsets': ['host'],\n 'dependencies': ['src/shared/shared.gyp:natives_json']}, {'target_name':\n 'toplevel_fletch', 'type': 'none', 'toolsets': ['target'],\n 'dependencies': ['src/tools/driver/driver.gyp:fletch', 'copy_dart#host'\n ]}, {'target_name': 'cc_tests', 'type': 'none', 'toolsets': ['target'],\n 'dependencies': ['src/shared/shared.gyp:shared_cc_tests',\n 'src/vm/vm.gyp:vm_cc_tests', 'copy_asan']}, {'target_name':\n 'snapshot_tests', 'type': 'none', 'toolsets': ['target'],\n 'dependencies': ['src/vm/vm.gyp:fletch-vm', 'copy_dart#host',\n 'tests/service_tests/service_tests.gyp:service_performance_test',\n 'tests/service_tests/service_tests.gyp:service_conformance_test',\n 'samples/todomvc/todomvc.gyp:todomvc_sample', 'copy_asan']}, {\n 'target_name': 'copy_asan', 'type': 'none', 'conditions': [['OS==\"mac\"',\n {'copies': [{'destination': '<(PRODUCT_DIR)', 'files': [\n 'third_party/clang/mac/lib/clang/3.7.0/lib/darwin/libclang_rt.asan_osx_dynamic.dylib'\n ]}]}, {'actions': [{'action_name': 'touch_asan_dylib', 'inputs': [],\n 'outputs': ['<(mac_asan_dylib)'], 'action': ['touch', '<@(_outputs)']}]\n }]]}, {'target_name': 'copy_dart', 'type': 'none', 'toolsets': ['host'],\n 'copies': [{'destination': '<(PRODUCT_DIR)', 'files': [\n 'third_party/bin/<(OS)/dart']}]}]}\n", "step-3": "# Copyright (c) 2015, the Fletch project authors. Please see the AUTHORS file\n# for details. All rights reserved. Use of this source code is governed by a\n# BSD-style license that can be found in the LICENSE.md file.\n\n{\n 'variables': {\n 'mac_asan_dylib': '<(PRODUCT_DIR)/libclang_rt.asan_osx_dynamic.dylib',\n },\n\n 'targets': [\n {\n 'target_name': 'fletch-vm',\n 'type': 'none',\n 'dependencies': [\n 'src/vm/vm.gyp:fletch-vm',\n ],\n },\n {\n 'target_name': 'c_test_library',\n 'type': 'none',\n 'dependencies': [\n 'src/vm/vm.gyp:ffi_test_library',\n ],\n },\n {\n 'target_name': 'natives_json',\n 'type': 'none',\n 'toolsets': ['host'],\n 'dependencies': [\n 'src/shared/shared.gyp:natives_json',\n ],\n },\n {\n 'target_name': 'toplevel_fletch',\n 'type': 'none',\n 'toolsets': ['target'],\n 'dependencies': [\n 'src/tools/driver/driver.gyp:fletch',\n 'copy_dart#host',\n ],\n },\n {\n # C based test executables. 
See also tests/cc_tests/README.md.\n 'target_name': 'cc_tests',\n 'type': 'none',\n 'toolsets': ['target'],\n 'dependencies': [\n 'src/shared/shared.gyp:shared_cc_tests',\n 'src/vm/vm.gyp:vm_cc_tests',\n 'copy_asan',\n ],\n },\n {\n # The actual snapshots used in these tests are generated at test time.\n # TODO(zerny): Compile these programs at test time and remove this target.\n 'target_name': 'snapshot_tests',\n 'type': 'none',\n 'toolsets': ['target'],\n 'dependencies': [\n 'src/vm/vm.gyp:fletch-vm',\n 'copy_dart#host',\n 'tests/service_tests/service_tests.gyp:service_performance_test',\n 'tests/service_tests/service_tests.gyp:service_conformance_test',\n 'samples/todomvc/todomvc.gyp:todomvc_sample',\n 'copy_asan',\n ],\n },\n {\n 'target_name': 'copy_asan',\n 'type': 'none',\n 'conditions': [\n [ 'OS==\"mac\"', {\n 'copies': [\n {\n # The asan dylib file sets its install name as\n # @executable_path/..., and by copying to PRODUCT_DIR, we avoid\n # having to set DYLD_LIBRARY_PATH.\n 'destination': '<(PRODUCT_DIR)',\n 'files': [\n 'third_party/clang/mac/lib/clang/3.7.0/'\n 'lib/darwin/libclang_rt.asan_osx_dynamic.dylib',\n ],\n },\n ],\n }, { # OS!=\"mac\"\n 'actions': [\n {\n 'action_name': 'touch_asan_dylib',\n 'inputs': [\n ],\n 'outputs': [\n '<(mac_asan_dylib)',\n ],\n 'action': [\n 'touch', '<@(_outputs)'\n ],\n },\n ],\n }],\n ],\n },\n {\n 'target_name': 'copy_dart',\n 'type': 'none',\n 'toolsets': ['host'],\n 'copies': [\n {\n 'destination': '<(PRODUCT_DIR)',\n 'files': [\n 'third_party/bin/<(OS)/dart',\n ],\n },\n ],\n },\n ],\n}\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from django.core.exceptions import ValidationError
from django.utils import timezone


def year_validator(value):
    if value < 1 or value > timezone.now().year:
        raise ValidationError(
            ('%s is not a correct year!' % value)
        )


def raiting_validator(value):
    if value < 1 or value > 10:
        raise ValidationError(
            ('%s is not a caorrect raiting!' % value)
        )
normal
{ "blob_id": "7a6d5309580b673413f57047e631a08e61e837cf", "index": 4447, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef raiting_validator(value):\n if value < 1 or value > 10:\n raise ValidationError('%s is not a caorrect raiting!' % value)\n", "step-3": "<mask token>\n\n\ndef year_validator(value):\n if value < 1 or value > timezone.now().year:\n raise ValidationError('%s is not a correct year!' % value)\n\n\ndef raiting_validator(value):\n if value < 1 or value > 10:\n raise ValidationError('%s is not a caorrect raiting!' % value)\n", "step-4": "from django.core.exceptions import ValidationError\nfrom django.utils import timezone\n\n\ndef year_validator(value):\n if value < 1 or value > timezone.now().year:\n raise ValidationError('%s is not a correct year!' % value)\n\n\ndef raiting_validator(value):\n if value < 1 or value > 10:\n raise ValidationError('%s is not a caorrect raiting!' % value)\n", "step-5": "from django.core.exceptions import ValidationError\nfrom django.utils import timezone\n\n\ndef year_validator(value):\n if value < 1 or value > timezone.now().year:\n raise ValidationError(\n ('%s is not a correct year!' % value)\n )\n\n\ndef raiting_validator(value):\n if value < 1 or value > 10:\n raise ValidationError(\n ('%s is not a caorrect raiting!' % value)\n )\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import argparse
import gc
import gcsfs
import nibabel as nib
import nilearn
import nobrainer
import numpy as np
import os
import os.path as op
import pandas as pd
import tensorflow as tf


def interpolate_images(baseline, image, alphas):
    alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]
    baseline_x = tf.expand_dims(baseline, axis=0)
    input_x = tf.expand_dims(image, axis=0)
    delta = input_x - baseline_x
    images = baseline_x + alphas_x * delta
    return images


def compute_gradients(model, images, target_class):
    with tf.GradientTape() as tape:
        tape.watch(images)
        raw_probs = model(images)
        probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class

    gradients = tape.gradient(probs, images)
    return gradients


def integral_approximation(gradients):
    # riemann_trapezoidal
    grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)
    return tf.math.reduce_mean(grads, axis=0)


@tf.function
def integrated_gradients(
    model, baseline, image, target_class, m_steps=50, batch_size=32
):
    # 1. Generate alphas.
    alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps + 1)

    # Initialize TensorArray outside loop to collect gradients.
    gradient_batches = tf.TensorArray(tf.float32, size=m_steps + 1)

    # Iterate alphas range and batch computation for speed, memory efficiency, and scaling to larger m_steps.
    for alpha in tf.range(0, len(alphas), batch_size):
        from_ = alpha
        to = tf.minimum(from_ + batch_size, len(alphas))
        alpha_batch = alphas[from_:to]

        # 2. Generate interpolated inputs between baseline and input.
        interpolated_path_input_batch = interpolate_images(
            baseline=baseline, image=image, alphas=alpha_batch
        )

        # 3. Compute gradients between model outputs and interpolated inputs.
        gradient_batch = compute_gradients(
            model=model,
            images=interpolated_path_input_batch,
            target_class=target_class,
        )

        # Write batch indices and gradients to extend TensorArray.
        gradient_batches = gradient_batches.scatter(tf.range(from_, to), gradient_batch)

    # Stack path gradients together row-wise into single tensor.
    total_gradients = gradient_batches.stack()

    # 4. Integral approximation through averaging gradients.
    avg_gradients = integral_approximation(gradients=total_gradients)

    # 5. Scale integrated gradients with respect to input.
    return (image - baseline) * avg_gradients


def main(
    gcs_bucket,
    n_channels=5,
    dataset_name="b0-tensorfa-dwiqc",
    model_dir="b0_tensorfa_dwiqc",
    dataset_seed=8,
    target_class=1,
    confusion_class="true_pos",
):
    print("Setting gpu thread mode to gpu_private.")
    os.environ["TF_GPU_THREAD_MODE"] = "gpu_private"

    print("Configuring distribution strategy")
    use_tpu = False

    try:
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="")
        tf.config.experimental_connect_to_cluster(resolver)
        # This is the TPU initialization code that has to be at the beginning.
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)
        use_tpu = True
        print("TPU detected.")
        print("All devices: ", tf.config.list_logical_devices("TPU"))
    except ValueError:
        strategy = tf.distribute.MirroredStrategy()
        print("GPUs detected.")
        print("Number of accelerators: ", strategy.num_replicas_in_sync)

    # Train using mixed-precision policy
    tf.keras.mixed_precision.set_global_policy("mixed_float16")

    scope = strategy.scope()

    # Setting location were training logs and checkpoints will be stored
    GCS_BASE_PATH = f"gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}"
    GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, "saved_model")
    GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, "integrated_gradients")

    fs = gcsfs.GCSFileSystem()

    LOCAL_SAVED_MODEL_DIR = "saved_model"
    LOCAL_OUTPUT_DIR = "output"
    os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)
    os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)
    fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)

    # Specify the datasets on GCP storage
    GCS_DATA_PATH = f"gs://{gcs_bucket}"
    GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, "tfrecs", dataset_name, "all-data")

    if use_tpu:
        device_alldata_dir = GCS_ALLDATA_DIR
    else:
        LOCAL_ALLDATA_DIR = op.join(".", "tfrecs", dataset_name, "all-data")
        os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)
        fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)
        device_alldata_dir = LOCAL_ALLDATA_DIR

    volume_shape = (128, 128, 128, n_channels)
    element_spec = (
        tf.TensorSpec(shape=(), dtype=tf.int64, name=None),
        (
            tf.TensorSpec(shape=(1, 128, 128, 128, 5), dtype=tf.float32, name=None),
            tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None),
        ),
    )

    dataset = tf.data.experimental.load(
        op.join(device_alldata_dir, confusion_class),
        element_spec=element_spec,
    )
    volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]
    baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)

    print("Computing integrated gradients")
    with scope:
        model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)
        ig_attributions = [
            integrated_gradients(
                model=model,
                baseline=baseline,
                image=volume,
                target_class=target_class,
                m_steps=128,
                batch_size=1,
            )
            for volume in volumes
        ]

    if target_class == 1:
        postfix = "attribution_pass"
    else:
        postfix = "attribution_fail"

    ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))
    tf.data.experimental.save(
        ig_dataset,
        op.join(LOCAL_OUTPUT_DIR, f"ig_{confusion_class}_{postfix}"),
    )

    affine = np.diag([1, 1, 1, 1])
    volume_niftis = [
        {
            "b0": nib.Nifti1Image(volume[:, :, :, 3].numpy(), affine),
            "color_fa": nib.Nifti1Image(volume[:, :, :, :3].numpy(), affine),
        }
        for volume in volumes
    ]
    ig_niftis = [
        {
            "b0": nib.Nifti1Image(attribution[:, :, :, 3].numpy(), affine),
            "color_fa": nib.Nifti1Image(attribution[:, :, :, :3].numpy(), affine),
            "sum": nib.Nifti1Image(
                tf.math.reduce_sum(attribution[:, :, :, :4], axis=-1).numpy(), affine
            ),
        }
        for attribution in ig_attributions
    ]

    for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis, ig_niftis)):
        for key, value in volume_nifti.items():
            nib.save(
                value,
                op.join(LOCAL_OUTPUT_DIR, f"{confusion_class}_{key}_{idx}.nii.gz"),
            )
        for key, value in ig_nifti.items():
            nib.save(
                value,
                op.join(
                    LOCAL_OUTPUT_DIR, f"{confusion_class}_{postfix}_{key}_{idx}.nii.gz"
                ),
            )

    fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--gcs_bucket",
        type=str,
        help=(
            "The name of the gcs bucket that will contain the saved models, "
            "checkpoints, etc."
        ),
    )
    parser.add_argument(
        "--n_channels",
        type=int,
        help="The number of channels in the data.",
        default=5,
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        help="The name of the dataset in the tfrecs folder of the GCS bucket.",
        default="b0-tensorfa-dwiqc",
    )
    parser.add_argument(
        "--model_dir",
        type=str,
        help="The name of the GCS directory in which the tensorflow model is saved.",
        default="b0_tensorfa_dwiqc",
    )
    parser.add_argument(
        "--dataset_seed",
        type=int,
        help="The seed for the dataset",
        default=8,
    )
    parser.add_argument(
        "--target_class",
        type=int,
        help="The target class for the integrated gradients.",
        default=1,
    )
    parser.add_argument(
        "--confusion_class",
        type=str,
        help="The confusion class for which to compute integrated gradients",
        default="true_pos",
    )

    args = parser.parse_args()

    main(
        gcs_bucket=args.gcs_bucket,
        n_channels=args.n_channels,
        dataset_name=args.dataset_name,
        model_dir=args.model_dir,
        dataset_seed=args.dataset_seed,
        target_class=args.target_class,
        confusion_class=args.confusion_class,
    )
normal
{ "blob_id": "848e4abcd0b4f118030fc62f1272a19bfce9db4e", "index": 178, "step-1": "<mask token>\n\n\ndef interpolate_images(baseline, image, alphas):\n alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]\n baseline_x = tf.expand_dims(baseline, axis=0)\n input_x = tf.expand_dims(image, axis=0)\n delta = input_x - baseline_x\n images = baseline_x + alphas_x * delta\n return images\n\n\ndef compute_gradients(model, images, target_class):\n with tf.GradientTape() as tape:\n tape.watch(images)\n raw_probs = model(images)\n probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class\n gradients = tape.gradient(probs, images)\n return gradients\n\n\ndef integral_approximation(gradients):\n grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)\n return tf.math.reduce_mean(grads, axis=0)\n\n\n<mask token>\n\n\ndef main(gcs_bucket, n_channels=5, dataset_name='b0-tensorfa-dwiqc',\n model_dir='b0_tensorfa_dwiqc', dataset_seed=8, target_class=1,\n confusion_class='true_pos'):\n print('Setting gpu thread mode to gpu_private.')\n os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'\n print('Configuring distribution strategy')\n use_tpu = False\n try:\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')\n tf.config.experimental_connect_to_cluster(resolver)\n tf.tpu.experimental.initialize_tpu_system(resolver)\n strategy = tf.distribute.TPUStrategy(resolver)\n use_tpu = True\n print('TPU detected.')\n print('All devices: ', tf.config.list_logical_devices('TPU'))\n except ValueError:\n strategy = tf.distribute.MirroredStrategy()\n print('GPUs detected.')\n print('Number of accelerators: ', strategy.num_replicas_in_sync)\n tf.keras.mixed_precision.set_global_policy('mixed_float16')\n scope = strategy.scope()\n GCS_BASE_PATH = f'gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}'\n GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, 'saved_model')\n GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, 'integrated_gradients')\n fs = gcsfs.GCSFileSystem()\n LOCAL_SAVED_MODEL_DIR = 'saved_model'\n LOCAL_OUTPUT_DIR = 'output'\n os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)\n os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)\n fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)\n GCS_DATA_PATH = f'gs://{gcs_bucket}'\n GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, 'tfrecs', dataset_name, 'all-data'\n )\n if use_tpu:\n device_alldata_dir = GCS_ALLDATA_DIR\n else:\n LOCAL_ALLDATA_DIR = op.join('.', 'tfrecs', dataset_name, 'all-data')\n os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)\n fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)\n device_alldata_dir = LOCAL_ALLDATA_DIR\n volume_shape = 128, 128, 128, n_channels\n element_spec = tf.TensorSpec(shape=(), dtype=tf.int64, name=None), (tf.\n TensorSpec(shape=(1, 128, 128, 128, 5), dtype=tf.float32, name=None\n ), tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None))\n dataset = tf.data.experimental.load(op.join(device_alldata_dir,\n confusion_class), element_spec=element_spec)\n volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]\n baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)\n print('Computing integrated gradients')\n with scope:\n model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)\n ig_attributions = [integrated_gradients(model=model, baseline=\n baseline, image=volume, target_class=target_class, m_steps=128,\n batch_size=1) for volume in volumes]\n if target_class == 1:\n postfix = 'attribution_pass'\n else:\n postfix = 'attribution_fail'\n ig_dataset = 
tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))\n tf.data.experimental.save(ig_dataset, op.join(LOCAL_OUTPUT_DIR,\n f'ig_{confusion_class}_{postfix}'))\n affine = np.diag([1, 1, 1, 1])\n volume_niftis = [{'b0': nib.Nifti1Image(volume[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(volume[:, :, :, :3].numpy(),\n affine)} for volume in volumes]\n ig_niftis = [{'b0': nib.Nifti1Image(attribution[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(attribution[:, :, :, :3].numpy\n (), affine), 'sum': nib.Nifti1Image(tf.math.reduce_sum(attribution[\n :, :, :, :4], axis=-1).numpy(), affine)} for attribution in\n ig_attributions]\n for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis,\n ig_niftis)):\n for key, value in volume_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{key}_{idx}.nii.gz'))\n for key, value in ig_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{postfix}_{key}_{idx}.nii.gz'))\n fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef interpolate_images(baseline, image, alphas):\n alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]\n baseline_x = tf.expand_dims(baseline, axis=0)\n input_x = tf.expand_dims(image, axis=0)\n delta = input_x - baseline_x\n images = baseline_x + alphas_x * delta\n return images\n\n\ndef compute_gradients(model, images, target_class):\n with tf.GradientTape() as tape:\n tape.watch(images)\n raw_probs = model(images)\n probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class\n gradients = tape.gradient(probs, images)\n return gradients\n\n\ndef integral_approximation(gradients):\n grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)\n return tf.math.reduce_mean(grads, axis=0)\n\n\n@tf.function\ndef integrated_gradients(model, baseline, image, target_class, m_steps=50,\n batch_size=32):\n alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps + 1)\n gradient_batches = tf.TensorArray(tf.float32, size=m_steps + 1)\n for alpha in tf.range(0, len(alphas), batch_size):\n from_ = alpha\n to = tf.minimum(from_ + batch_size, len(alphas))\n alpha_batch = alphas[from_:to]\n interpolated_path_input_batch = interpolate_images(baseline=\n baseline, image=image, alphas=alpha_batch)\n gradient_batch = compute_gradients(model=model, images=\n interpolated_path_input_batch, target_class=target_class)\n gradient_batches = gradient_batches.scatter(tf.range(from_, to),\n gradient_batch)\n total_gradients = gradient_batches.stack()\n avg_gradients = integral_approximation(gradients=total_gradients)\n return (image - baseline) * avg_gradients\n\n\ndef main(gcs_bucket, n_channels=5, dataset_name='b0-tensorfa-dwiqc',\n model_dir='b0_tensorfa_dwiqc', dataset_seed=8, target_class=1,\n confusion_class='true_pos'):\n print('Setting gpu thread mode to gpu_private.')\n os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'\n print('Configuring distribution strategy')\n use_tpu = False\n try:\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')\n tf.config.experimental_connect_to_cluster(resolver)\n tf.tpu.experimental.initialize_tpu_system(resolver)\n strategy = tf.distribute.TPUStrategy(resolver)\n use_tpu = True\n print('TPU detected.')\n print('All devices: ', tf.config.list_logical_devices('TPU'))\n except ValueError:\n strategy = tf.distribute.MirroredStrategy()\n print('GPUs detected.')\n print('Number of accelerators: ', strategy.num_replicas_in_sync)\n 
tf.keras.mixed_precision.set_global_policy('mixed_float16')\n scope = strategy.scope()\n GCS_BASE_PATH = f'gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}'\n GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, 'saved_model')\n GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, 'integrated_gradients')\n fs = gcsfs.GCSFileSystem()\n LOCAL_SAVED_MODEL_DIR = 'saved_model'\n LOCAL_OUTPUT_DIR = 'output'\n os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)\n os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)\n fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)\n GCS_DATA_PATH = f'gs://{gcs_bucket}'\n GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, 'tfrecs', dataset_name, 'all-data'\n )\n if use_tpu:\n device_alldata_dir = GCS_ALLDATA_DIR\n else:\n LOCAL_ALLDATA_DIR = op.join('.', 'tfrecs', dataset_name, 'all-data')\n os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)\n fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)\n device_alldata_dir = LOCAL_ALLDATA_DIR\n volume_shape = 128, 128, 128, n_channels\n element_spec = tf.TensorSpec(shape=(), dtype=tf.int64, name=None), (tf.\n TensorSpec(shape=(1, 128, 128, 128, 5), dtype=tf.float32, name=None\n ), tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None))\n dataset = tf.data.experimental.load(op.join(device_alldata_dir,\n confusion_class), element_spec=element_spec)\n volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]\n baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)\n print('Computing integrated gradients')\n with scope:\n model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)\n ig_attributions = [integrated_gradients(model=model, baseline=\n baseline, image=volume, target_class=target_class, m_steps=128,\n batch_size=1) for volume in volumes]\n if target_class == 1:\n postfix = 'attribution_pass'\n else:\n postfix = 'attribution_fail'\n ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))\n tf.data.experimental.save(ig_dataset, op.join(LOCAL_OUTPUT_DIR,\n f'ig_{confusion_class}_{postfix}'))\n affine = np.diag([1, 1, 1, 1])\n volume_niftis = [{'b0': nib.Nifti1Image(volume[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(volume[:, :, :, :3].numpy(),\n affine)} for volume in volumes]\n ig_niftis = [{'b0': nib.Nifti1Image(attribution[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(attribution[:, :, :, :3].numpy\n (), affine), 'sum': nib.Nifti1Image(tf.math.reduce_sum(attribution[\n :, :, :, :4], axis=-1).numpy(), affine)} for attribution in\n ig_attributions]\n for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis,\n ig_niftis)):\n for key, value in volume_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{key}_{idx}.nii.gz'))\n for key, value in ig_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{postfix}_{key}_{idx}.nii.gz'))\n fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef interpolate_images(baseline, image, alphas):\n alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]\n baseline_x = tf.expand_dims(baseline, axis=0)\n input_x = tf.expand_dims(image, axis=0)\n delta = input_x - baseline_x\n images = baseline_x + alphas_x * delta\n return images\n\n\ndef compute_gradients(model, images, target_class):\n with tf.GradientTape() as tape:\n tape.watch(images)\n raw_probs = model(images)\n probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class\n gradients = tape.gradient(probs, images)\n return gradients\n\n\ndef 
integral_approximation(gradients):\n grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)\n return tf.math.reduce_mean(grads, axis=0)\n\n\n@tf.function\ndef integrated_gradients(model, baseline, image, target_class, m_steps=50,\n batch_size=32):\n alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps + 1)\n gradient_batches = tf.TensorArray(tf.float32, size=m_steps + 1)\n for alpha in tf.range(0, len(alphas), batch_size):\n from_ = alpha\n to = tf.minimum(from_ + batch_size, len(alphas))\n alpha_batch = alphas[from_:to]\n interpolated_path_input_batch = interpolate_images(baseline=\n baseline, image=image, alphas=alpha_batch)\n gradient_batch = compute_gradients(model=model, images=\n interpolated_path_input_batch, target_class=target_class)\n gradient_batches = gradient_batches.scatter(tf.range(from_, to),\n gradient_batch)\n total_gradients = gradient_batches.stack()\n avg_gradients = integral_approximation(gradients=total_gradients)\n return (image - baseline) * avg_gradients\n\n\ndef main(gcs_bucket, n_channels=5, dataset_name='b0-tensorfa-dwiqc',\n model_dir='b0_tensorfa_dwiqc', dataset_seed=8, target_class=1,\n confusion_class='true_pos'):\n print('Setting gpu thread mode to gpu_private.')\n os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'\n print('Configuring distribution strategy')\n use_tpu = False\n try:\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')\n tf.config.experimental_connect_to_cluster(resolver)\n tf.tpu.experimental.initialize_tpu_system(resolver)\n strategy = tf.distribute.TPUStrategy(resolver)\n use_tpu = True\n print('TPU detected.')\n print('All devices: ', tf.config.list_logical_devices('TPU'))\n except ValueError:\n strategy = tf.distribute.MirroredStrategy()\n print('GPUs detected.')\n print('Number of accelerators: ', strategy.num_replicas_in_sync)\n tf.keras.mixed_precision.set_global_policy('mixed_float16')\n scope = strategy.scope()\n GCS_BASE_PATH = f'gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}'\n GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, 'saved_model')\n GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, 'integrated_gradients')\n fs = gcsfs.GCSFileSystem()\n LOCAL_SAVED_MODEL_DIR = 'saved_model'\n LOCAL_OUTPUT_DIR = 'output'\n os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)\n os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)\n fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)\n GCS_DATA_PATH = f'gs://{gcs_bucket}'\n GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, 'tfrecs', dataset_name, 'all-data'\n )\n if use_tpu:\n device_alldata_dir = GCS_ALLDATA_DIR\n else:\n LOCAL_ALLDATA_DIR = op.join('.', 'tfrecs', dataset_name, 'all-data')\n os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)\n fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)\n device_alldata_dir = LOCAL_ALLDATA_DIR\n volume_shape = 128, 128, 128, n_channels\n element_spec = tf.TensorSpec(shape=(), dtype=tf.int64, name=None), (tf.\n TensorSpec(shape=(1, 128, 128, 128, 5), dtype=tf.float32, name=None\n ), tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None))\n dataset = tf.data.experimental.load(op.join(device_alldata_dir,\n confusion_class), element_spec=element_spec)\n volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]\n baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)\n print('Computing integrated gradients')\n with scope:\n model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)\n ig_attributions = [integrated_gradients(model=model, baseline=\n baseline, image=volume, target_class=target_class, m_steps=128,\n batch_size=1) for volume in 
volumes]\n if target_class == 1:\n postfix = 'attribution_pass'\n else:\n postfix = 'attribution_fail'\n ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))\n tf.data.experimental.save(ig_dataset, op.join(LOCAL_OUTPUT_DIR,\n f'ig_{confusion_class}_{postfix}'))\n affine = np.diag([1, 1, 1, 1])\n volume_niftis = [{'b0': nib.Nifti1Image(volume[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(volume[:, :, :, :3].numpy(),\n affine)} for volume in volumes]\n ig_niftis = [{'b0': nib.Nifti1Image(attribution[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(attribution[:, :, :, :3].numpy\n (), affine), 'sum': nib.Nifti1Image(tf.math.reduce_sum(attribution[\n :, :, :, :4], axis=-1).numpy(), affine)} for attribution in\n ig_attributions]\n for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis,\n ig_niftis)):\n for key, value in volume_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{key}_{idx}.nii.gz'))\n for key, value in ig_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{postfix}_{key}_{idx}.nii.gz'))\n fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gcs_bucket', type=str, help=\n 'The name of the gcs bucket that will contain the saved models, checkpoints, etc.'\n )\n parser.add_argument('--n_channels', type=int, help=\n 'The number of channels in the data.', default=5)\n parser.add_argument('--dataset_name', type=str, help=\n 'The name of the dataset in the tfrecs folder of the GCS bucket.',\n default='b0-tensorfa-dwiqc')\n parser.add_argument('--model_dir', type=str, help=\n 'The name of the GCS directory in which the tensorflow model is saved.'\n , default='b0_tensorfa_dwiqc')\n parser.add_argument('--dataset_seed', type=int, help=\n 'The seed for the dataset', default=8)\n parser.add_argument('--target_class', type=int, help=\n 'The target class for the integrated gradients.', default=1)\n parser.add_argument('--confusion_class', type=str, help=\n 'The confusion class for which to compute integrated gradients',\n default='true_pos')\n args = parser.parse_args()\n main(gcs_bucket=args.gcs_bucket, n_channels=args.n_channels,\n dataset_name=args.dataset_name, model_dir=args.model_dir,\n dataset_seed=args.dataset_seed, target_class=args.target_class,\n confusion_class=args.confusion_class)\n", "step-4": "import argparse\nimport gc\nimport gcsfs\nimport nibabel as nib\nimport nilearn\nimport nobrainer\nimport numpy as np\nimport os\nimport os.path as op\nimport pandas as pd\nimport tensorflow as tf\n\n\ndef interpolate_images(baseline, image, alphas):\n alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]\n baseline_x = tf.expand_dims(baseline, axis=0)\n input_x = tf.expand_dims(image, axis=0)\n delta = input_x - baseline_x\n images = baseline_x + alphas_x * delta\n return images\n\n\ndef compute_gradients(model, images, target_class):\n with tf.GradientTape() as tape:\n tape.watch(images)\n raw_probs = model(images)\n probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class\n gradients = tape.gradient(probs, images)\n return gradients\n\n\ndef integral_approximation(gradients):\n grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)\n return tf.math.reduce_mean(grads, axis=0)\n\n\n@tf.function\ndef integrated_gradients(model, baseline, image, target_class, m_steps=50,\n batch_size=32):\n alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps 
+ 1)\n gradient_batches = tf.TensorArray(tf.float32, size=m_steps + 1)\n for alpha in tf.range(0, len(alphas), batch_size):\n from_ = alpha\n to = tf.minimum(from_ + batch_size, len(alphas))\n alpha_batch = alphas[from_:to]\n interpolated_path_input_batch = interpolate_images(baseline=\n baseline, image=image, alphas=alpha_batch)\n gradient_batch = compute_gradients(model=model, images=\n interpolated_path_input_batch, target_class=target_class)\n gradient_batches = gradient_batches.scatter(tf.range(from_, to),\n gradient_batch)\n total_gradients = gradient_batches.stack()\n avg_gradients = integral_approximation(gradients=total_gradients)\n return (image - baseline) * avg_gradients\n\n\ndef main(gcs_bucket, n_channels=5, dataset_name='b0-tensorfa-dwiqc',\n model_dir='b0_tensorfa_dwiqc', dataset_seed=8, target_class=1,\n confusion_class='true_pos'):\n print('Setting gpu thread mode to gpu_private.')\n os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'\n print('Configuring distribution strategy')\n use_tpu = False\n try:\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')\n tf.config.experimental_connect_to_cluster(resolver)\n tf.tpu.experimental.initialize_tpu_system(resolver)\n strategy = tf.distribute.TPUStrategy(resolver)\n use_tpu = True\n print('TPU detected.')\n print('All devices: ', tf.config.list_logical_devices('TPU'))\n except ValueError:\n strategy = tf.distribute.MirroredStrategy()\n print('GPUs detected.')\n print('Number of accelerators: ', strategy.num_replicas_in_sync)\n tf.keras.mixed_precision.set_global_policy('mixed_float16')\n scope = strategy.scope()\n GCS_BASE_PATH = f'gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}'\n GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, 'saved_model')\n GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, 'integrated_gradients')\n fs = gcsfs.GCSFileSystem()\n LOCAL_SAVED_MODEL_DIR = 'saved_model'\n LOCAL_OUTPUT_DIR = 'output'\n os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)\n os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)\n fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)\n GCS_DATA_PATH = f'gs://{gcs_bucket}'\n GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, 'tfrecs', dataset_name, 'all-data'\n )\n if use_tpu:\n device_alldata_dir = GCS_ALLDATA_DIR\n else:\n LOCAL_ALLDATA_DIR = op.join('.', 'tfrecs', dataset_name, 'all-data')\n os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)\n fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)\n device_alldata_dir = LOCAL_ALLDATA_DIR\n volume_shape = 128, 128, 128, n_channels\n element_spec = tf.TensorSpec(shape=(), dtype=tf.int64, name=None), (tf.\n TensorSpec(shape=(1, 128, 128, 128, 5), dtype=tf.float32, name=None\n ), tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None))\n dataset = tf.data.experimental.load(op.join(device_alldata_dir,\n confusion_class), element_spec=element_spec)\n volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]\n baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)\n print('Computing integrated gradients')\n with scope:\n model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)\n ig_attributions = [integrated_gradients(model=model, baseline=\n baseline, image=volume, target_class=target_class, m_steps=128,\n batch_size=1) for volume in volumes]\n if target_class == 1:\n postfix = 'attribution_pass'\n else:\n postfix = 'attribution_fail'\n ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))\n tf.data.experimental.save(ig_dataset, op.join(LOCAL_OUTPUT_DIR,\n f'ig_{confusion_class}_{postfix}'))\n affine = np.diag([1, 1, 
1, 1])\n volume_niftis = [{'b0': nib.Nifti1Image(volume[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(volume[:, :, :, :3].numpy(),\n affine)} for volume in volumes]\n ig_niftis = [{'b0': nib.Nifti1Image(attribution[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(attribution[:, :, :, :3].numpy\n (), affine), 'sum': nib.Nifti1Image(tf.math.reduce_sum(attribution[\n :, :, :, :4], axis=-1).numpy(), affine)} for attribution in\n ig_attributions]\n for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis,\n ig_niftis)):\n for key, value in volume_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{key}_{idx}.nii.gz'))\n for key, value in ig_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{postfix}_{key}_{idx}.nii.gz'))\n fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gcs_bucket', type=str, help=\n 'The name of the gcs bucket that will contain the saved models, checkpoints, etc.'\n )\n parser.add_argument('--n_channels', type=int, help=\n 'The number of channels in the data.', default=5)\n parser.add_argument('--dataset_name', type=str, help=\n 'The name of the dataset in the tfrecs folder of the GCS bucket.',\n default='b0-tensorfa-dwiqc')\n parser.add_argument('--model_dir', type=str, help=\n 'The name of the GCS directory in which the tensorflow model is saved.'\n , default='b0_tensorfa_dwiqc')\n parser.add_argument('--dataset_seed', type=int, help=\n 'The seed for the dataset', default=8)\n parser.add_argument('--target_class', type=int, help=\n 'The target class for the integrated gradients.', default=1)\n parser.add_argument('--confusion_class', type=str, help=\n 'The confusion class for which to compute integrated gradients',\n default='true_pos')\n args = parser.parse_args()\n main(gcs_bucket=args.gcs_bucket, n_channels=args.n_channels,\n dataset_name=args.dataset_name, model_dir=args.model_dir,\n dataset_seed=args.dataset_seed, target_class=args.target_class,\n confusion_class=args.confusion_class)\n", "step-5": "import argparse\nimport gc\nimport gcsfs\nimport nibabel as nib\nimport nilearn\nimport nobrainer\nimport numpy as np\nimport os\nimport os.path as op\nimport pandas as pd\nimport tensorflow as tf\n\n\ndef interpolate_images(baseline, image, alphas):\n alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]\n baseline_x = tf.expand_dims(baseline, axis=0)\n input_x = tf.expand_dims(image, axis=0)\n delta = input_x - baseline_x\n images = baseline_x + alphas_x * delta\n return images\n\n\ndef compute_gradients(model, images, target_class):\n with tf.GradientTape() as tape:\n tape.watch(images)\n raw_probs = model(images)\n probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class\n\n gradients = tape.gradient(probs, images)\n return gradients\n\n\ndef integral_approximation(gradients):\n # riemann_trapezoidal\n grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)\n return tf.math.reduce_mean(grads, axis=0)\n\n\n@tf.function\ndef integrated_gradients(\n model, baseline, image, target_class, m_steps=50, batch_size=32\n):\n # 1. 
Generate alphas.\n alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps + 1)\n\n # Initialize TensorArray outside loop to collect gradients.\n gradient_batches = tf.TensorArray(tf.float32, size=m_steps + 1)\n\n # Iterate alphas range and batch computation for speed, memory efficiency, and scaling to larger m_steps.\n for alpha in tf.range(0, len(alphas), batch_size):\n from_ = alpha\n to = tf.minimum(from_ + batch_size, len(alphas))\n alpha_batch = alphas[from_:to]\n\n # 2. Generate interpolated inputs between baseline and input.\n interpolated_path_input_batch = interpolate_images(\n baseline=baseline, image=image, alphas=alpha_batch\n )\n\n # 3. Compute gradients between model outputs and interpolated inputs.\n gradient_batch = compute_gradients(\n model=model,\n images=interpolated_path_input_batch,\n target_class=target_class,\n )\n\n # Write batch indices and gradients to extend TensorArray.\n gradient_batches = gradient_batches.scatter(tf.range(from_, to), gradient_batch)\n\n # Stack path gradients together row-wise into single tensor.\n total_gradients = gradient_batches.stack()\n\n # 4. Integral approximation through averaging gradients.\n avg_gradients = integral_approximation(gradients=total_gradients)\n\n # 5. Scale integrated gradients with respect to input.\n return (image - baseline) * avg_gradients\n\n\ndef main(\n gcs_bucket,\n n_channels=5,\n dataset_name=\"b0-tensorfa-dwiqc\",\n model_dir=\"b0_tensorfa_dwiqc\",\n dataset_seed=8,\n target_class=1,\n confusion_class=\"true_pos\",\n):\n print(\"Setting gpu thread mode to gpu_private.\")\n os.environ[\"TF_GPU_THREAD_MODE\"] = \"gpu_private\"\n\n print(\"Configuring distribution strategy\")\n use_tpu = False\n\n try:\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=\"\")\n tf.config.experimental_connect_to_cluster(resolver)\n # This is the TPU initialization code that has to be at the beginning.\n tf.tpu.experimental.initialize_tpu_system(resolver)\n strategy = tf.distribute.TPUStrategy(resolver)\n\n use_tpu = True\n print(\"TPU detected.\")\n print(\"All devices: \", tf.config.list_logical_devices(\"TPU\"))\n except ValueError:\n strategy = tf.distribute.MirroredStrategy()\n print(\"GPUs detected.\")\n print(\"Number of accelerators: \", strategy.num_replicas_in_sync)\n\n # Train using mixed-precision policy\n tf.keras.mixed_precision.set_global_policy(\"mixed_float16\")\n\n scope = strategy.scope()\n\n # Setting location were training logs and checkpoints will be stored\n GCS_BASE_PATH = f\"gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}\"\n GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, \"saved_model\")\n GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, \"integrated_gradients\")\n\n fs = gcsfs.GCSFileSystem()\n\n LOCAL_SAVED_MODEL_DIR = \"saved_model\"\n LOCAL_OUTPUT_DIR = \"output\"\n os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)\n os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)\n\n fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)\n\n # Specify the datasets on GCP storage\n GCS_DATA_PATH = f\"gs://{gcs_bucket}\"\n GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, \"tfrecs\", dataset_name, \"all-data\")\n\n if use_tpu:\n device_alldata_dir = GCS_ALLDATA_DIR\n else:\n LOCAL_ALLDATA_DIR = op.join(\".\", \"tfrecs\", dataset_name, \"all-data\")\n os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)\n fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)\n device_alldata_dir = LOCAL_ALLDATA_DIR\n\n volume_shape = (128, 128, 128, n_channels)\n element_spec = (\n tf.TensorSpec(shape=(), dtype=tf.int64, 
name=None),\n (\n tf.TensorSpec(shape=(1, 128, 128, 128, 5), dtype=tf.float32, name=None),\n tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None),\n ),\n )\n\n dataset = tf.data.experimental.load(\n op.join(device_alldata_dir, confusion_class),\n element_spec=element_spec,\n )\n volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]\n baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)\n\n print(\"Computing integrated gradients\")\n\n with scope:\n model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)\n\n ig_attributions = [\n integrated_gradients(\n model=model,\n baseline=baseline,\n image=volume,\n target_class=target_class,\n m_steps=128,\n batch_size=1,\n )\n for volume in volumes\n ]\n\n if target_class == 1:\n postfix = \"attribution_pass\"\n else:\n postfix = \"attribution_fail\"\n\n ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))\n tf.data.experimental.save(\n ig_dataset,\n op.join(LOCAL_OUTPUT_DIR, f\"ig_{confusion_class}_{postfix}\"),\n )\n\n affine = np.diag([1, 1, 1, 1])\n volume_niftis = [\n {\n \"b0\": nib.Nifti1Image(volume[:, :, :, 3].numpy(), affine),\n \"color_fa\": nib.Nifti1Image(volume[:, :, :, :3].numpy(), affine),\n }\n for volume in volumes\n ]\n ig_niftis = [\n {\n \"b0\": nib.Nifti1Image(attribution[:, :, :, 3].numpy(), affine),\n \"color_fa\": nib.Nifti1Image(attribution[:, :, :, :3].numpy(), affine),\n \"sum\": nib.Nifti1Image(\n tf.math.reduce_sum(attribution[:, :, :, :4], axis=-1).numpy(), affine\n ),\n }\n for attribution in ig_attributions\n ]\n\n for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis, ig_niftis)):\n for key, value in volume_nifti.items():\n nib.save(\n value,\n op.join(LOCAL_OUTPUT_DIR, f\"{confusion_class}_{key}_{idx}.nii.gz\"),\n )\n\n for key, value in ig_nifti.items():\n nib.save(\n value,\n op.join(\n LOCAL_OUTPUT_DIR, f\"{confusion_class}_{postfix}_{key}_{idx}.nii.gz\"\n ),\n )\n\n fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--gcs_bucket\",\n type=str,\n help=(\n \"The name of the gcs bucket that will contain the saved models, \"\n \"checkpoints, etc.\"\n ),\n )\n parser.add_argument(\n \"--n_channels\",\n type=int,\n help=\"The number of channels in the data.\",\n default=5,\n )\n parser.add_argument(\n \"--dataset_name\",\n type=str,\n help=\"The name of the dataset in the tfrecs folder of the GCS bucket.\",\n default=\"b0-tensorfa-dwiqc\",\n )\n parser.add_argument(\n \"--model_dir\",\n type=str,\n help=\"The name of the GCS directory in which the tensorflow model is saved.\",\n default=\"b0_tensorfa_dwiqc\",\n )\n parser.add_argument(\n \"--dataset_seed\",\n type=int,\n help=\"The seed for the dataset\",\n default=8,\n )\n parser.add_argument(\n \"--target_class\",\n type=int,\n help=\"The target class for the integrated gradients.\",\n default=1,\n )\n parser.add_argument(\n \"--confusion_class\",\n type=str,\n help=\"The confusion class for which to compute integrated gradients\",\n default=\"true_pos\",\n )\n\n args = parser.parse_args()\n\n main(\n gcs_bucket=args.gcs_bucket,\n n_channels=args.n_channels,\n dataset_name=args.dataset_name,\n model_dir=args.model_dir,\n dataset_seed=args.dataset_seed,\n target_class=args.target_class,\n confusion_class=args.confusion_class,\n )\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
#!/usr/bin/env python
"""
Calculate trigger efficiency error
"""

__author__ = "XIAO Suyu<xiaosuyu@ihep.ac.cn>"
__copyright__ = "Copyright (c) XIAO Suyu"
__created__ = "[2018-02-06 Tue 15:25]"

import math

n1 = 4212.0
n2 = 4237.0
N = 5000.0

eff = n1 / n2
# binomial uncertainty on the efficiency
err = math.sqrt(eff*(1-eff)/N)

print('trig_eff = %.4f +- %f' % (eff, err))
normal
{ "blob_id": "bac3f78b8eb9c4595bc9e8b85587819f92329729", "index": 2295, "step-1": "#!/usr/bin/env python\n\"\"\"\nCalculate trigger efficiency error\n\"\"\"\n\n__author__ = \"XIAO Suyu<xiaosuyu@ihep.ac.cn>\"\n__copyright__ = \"Copyright (c) XIAO Suyu\"\n__created__ = \"[2018-02-06 Tue 15:25]\"\n\nimport math\n\nn1 = 4212.0\nn2 = 4237.0\nN = 5000.0\n\neff = n1 / n2\nerr = math.sqrt(eff*(1-eff)/N)\n\nprint 'trig_eff = %.4f +- %f' % (eff, err)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
class Order: """ Initiated a new order for the store """ def __init__(self, order_number, product_id, item_type, name, product_details, factory, quantity, holiday): """ Construct a new order :param order_number: str :param product_id: str :param item_type: str :param name: str :param product_details: str :param factory: Factory :param quantity: int :param holiday: str """ self._order_number = order_number self._product_id = product_id self._item_type = item_type self._name = name self._product_details = product_details self._factory = factory self._quantity = quantity self._holiday = holiday self._is_valid = True self._invalid_notes = "" @property def quantity(self): """ Return quantity of the order. :return: int """ return self._quantity @property def order_num(self): """ Return order num of the order. :return: str """ return self._order_number @property def product_id(self): """ Return product id of the order. :return: str """ return self._product_id @property def item_type(self): """ Return item type of the order. :return: str """ return self._item_type @property def name(self): """ Return item name of the order. :return: str """ return self._name @property def product_details(self): """ Return other details of the item of the order. :return: str """ return self._product_details @property def factory(self): """ Return the factory that can generate the item. :return: Factory """ return self._factory @property def holiday(self): """ Return the holiday that the item for. :return: str """ return self._holiday @property def invalid_notes(self): """ Return the invalid notes if the item is invalid. :return: str """ return self._invalid_notes @property def is_valid(self): """ Return the valid status. :return: str """ return self._is_valid def is_invalid(self): """ Set the status to invalid. """ self._is_valid = False def set_invalid_notes(self, error): """ Set the invalid notes. :param error: str """ self._invalid_notes = error def __str__(self): """ String method of the class. """ return f"Order Number: {self._order_number} " \ f"Product ID: {self._product_id} " \ f"Item: {self._item_type} " \ f"Name: {self._name} " \ f"Quantity: {self._quantity} " \ f"Product details: {self._product_details} "
normal
{ "blob_id": "0dce4ea8ef21f2535194330b82ce5706ae694247", "index": 4676, "step-1": "class Order:\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def order_num(self):\n \"\"\"\n Return order num of the order.\n :return: str\n \"\"\"\n return self._order_number\n <mask token>\n <mask token>\n\n @property\n def name(self):\n \"\"\"\n Return item name of the order.\n :return: str\n \"\"\"\n return self._name\n <mask token>\n <mask token>\n\n @property\n def holiday(self):\n \"\"\"\n Return the holiday that the item for.\n :return: str\n \"\"\"\n return self._holiday\n <mask token>\n\n @property\n def is_valid(self):\n \"\"\"\n Return the valid status.\n :return: str\n \"\"\"\n return self._is_valid\n\n def is_invalid(self):\n \"\"\"\n Set the status to invalid.\n \"\"\"\n self._is_valid = False\n\n def set_invalid_notes(self, error):\n \"\"\"\n Set the invalid notes.\n :param error: str\n \"\"\"\n self._invalid_notes = error\n <mask token>\n", "step-2": "class Order:\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def order_num(self):\n \"\"\"\n Return order num of the order.\n :return: str\n \"\"\"\n return self._order_number\n\n @property\n def product_id(self):\n \"\"\"\n Return product id of the order.\n :return: str\n \"\"\"\n return self._product_id\n\n @property\n def item_type(self):\n \"\"\"\n Return item type of the order.\n :return: str\n \"\"\"\n return self._item_type\n\n @property\n def name(self):\n \"\"\"\n Return item name of the order.\n :return: str\n \"\"\"\n return self._name\n <mask token>\n <mask token>\n\n @property\n def holiday(self):\n \"\"\"\n Return the holiday that the item for.\n :return: str\n \"\"\"\n return self._holiday\n <mask token>\n\n @property\n def is_valid(self):\n \"\"\"\n Return the valid status.\n :return: str\n \"\"\"\n return self._is_valid\n\n def is_invalid(self):\n \"\"\"\n Set the status to invalid.\n \"\"\"\n self._is_valid = False\n\n def set_invalid_notes(self, error):\n \"\"\"\n Set the invalid notes.\n :param error: str\n \"\"\"\n self._invalid_notes = error\n\n def __str__(self):\n \"\"\"\n String method of the class.\n \"\"\"\n return (\n f'Order Number: {self._order_number} Product ID: {self._product_id} Item: {self._item_type} Name: {self._name} Quantity: {self._quantity} Product details: {self._product_details} '\n )\n", "step-3": "class Order:\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def order_num(self):\n \"\"\"\n Return order num of the order.\n :return: str\n \"\"\"\n return self._order_number\n\n @property\n def product_id(self):\n \"\"\"\n Return product id of the order.\n :return: str\n \"\"\"\n return self._product_id\n\n @property\n def item_type(self):\n \"\"\"\n Return item type of the order.\n :return: str\n \"\"\"\n return self._item_type\n\n @property\n def name(self):\n \"\"\"\n Return item name of the order.\n :return: str\n \"\"\"\n return self._name\n\n @property\n def product_details(self):\n \"\"\"\n Return other details of the item of the order.\n :return: str\n \"\"\"\n return self._product_details\n <mask token>\n\n @property\n def holiday(self):\n \"\"\"\n Return the holiday that the item for.\n :return: str\n \"\"\"\n return self._holiday\n <mask token>\n\n @property\n def is_valid(self):\n \"\"\"\n Return the valid status.\n :return: str\n \"\"\"\n return self._is_valid\n\n def is_invalid(self):\n \"\"\"\n Set the status to invalid.\n \"\"\"\n self._is_valid = False\n\n def set_invalid_notes(self, error):\n \"\"\"\n Set the invalid notes.\n :param error: str\n 
\"\"\"\n self._invalid_notes = error\n\n def __str__(self):\n \"\"\"\n String method of the class.\n \"\"\"\n return (\n f'Order Number: {self._order_number} Product ID: {self._product_id} Item: {self._item_type} Name: {self._name} Quantity: {self._quantity} Product details: {self._product_details} '\n )\n", "step-4": "class Order:\n <mask token>\n\n def __init__(self, order_number, product_id, item_type, name,\n product_details, factory, quantity, holiday):\n \"\"\"\n Construct a new order\n :param order_number: str\n :param product_id: str\n :param item_type: str\n :param name: str\n :param product_details: str\n :param factory: Factory\n :param quantity: int\n :param holiday: str\n \"\"\"\n self._order_number = order_number\n self._product_id = product_id\n self._item_type = item_type\n self._name = name\n self._product_details = product_details\n self._factory = factory\n self._quantity = quantity\n self._holiday = holiday\n self._is_valid = True\n self._invalid_notes = ''\n\n @property\n def quantity(self):\n \"\"\"\n Return quantity of the order.\n :return: int\n \"\"\"\n return self._quantity\n\n @property\n def order_num(self):\n \"\"\"\n Return order num of the order.\n :return: str\n \"\"\"\n return self._order_number\n\n @property\n def product_id(self):\n \"\"\"\n Return product id of the order.\n :return: str\n \"\"\"\n return self._product_id\n\n @property\n def item_type(self):\n \"\"\"\n Return item type of the order.\n :return: str\n \"\"\"\n return self._item_type\n\n @property\n def name(self):\n \"\"\"\n Return item name of the order.\n :return: str\n \"\"\"\n return self._name\n\n @property\n def product_details(self):\n \"\"\"\n Return other details of the item of the order.\n :return: str\n \"\"\"\n return self._product_details\n\n @property\n def factory(self):\n \"\"\"\n Return the factory that can generate the item.\n :return: Factory\n \"\"\"\n return self._factory\n\n @property\n def holiday(self):\n \"\"\"\n Return the holiday that the item for.\n :return: str\n \"\"\"\n return self._holiday\n\n @property\n def invalid_notes(self):\n \"\"\"\n Return the invalid notes if the item is invalid.\n :return: str\n \"\"\"\n return self._invalid_notes\n\n @property\n def is_valid(self):\n \"\"\"\n Return the valid status.\n :return: str\n \"\"\"\n return self._is_valid\n\n def is_invalid(self):\n \"\"\"\n Set the status to invalid.\n \"\"\"\n self._is_valid = False\n\n def set_invalid_notes(self, error):\n \"\"\"\n Set the invalid notes.\n :param error: str\n \"\"\"\n self._invalid_notes = error\n\n def __str__(self):\n \"\"\"\n String method of the class.\n \"\"\"\n return (\n f'Order Number: {self._order_number} Product ID: {self._product_id} Item: {self._item_type} Name: {self._name} Quantity: {self._quantity} Product details: {self._product_details} '\n )\n", "step-5": "class Order:\n \"\"\"\n Initiated a new order for the store\n \"\"\"\n\n def __init__(self, order_number, product_id, item_type, name, product_details, factory, quantity, holiday):\n \"\"\"\n Construct a new order\n :param order_number: str\n :param product_id: str\n :param item_type: str\n :param name: str\n :param product_details: str\n :param factory: Factory\n :param quantity: int\n :param holiday: str\n \"\"\"\n self._order_number = order_number\n self._product_id = product_id\n self._item_type = item_type\n self._name = name\n self._product_details = product_details\n self._factory = factory\n self._quantity = quantity\n self._holiday = holiday\n self._is_valid = True\n self._invalid_notes = 
\"\"\n\n @property\n def quantity(self):\n \"\"\"\n Return quantity of the order.\n :return: int\n \"\"\"\n return self._quantity\n\n @property\n def order_num(self):\n \"\"\"\n Return order num of the order.\n :return: str\n \"\"\"\n return self._order_number\n\n @property\n def product_id(self):\n \"\"\"\n Return product id of the order.\n :return: str\n \"\"\"\n return self._product_id\n\n @property\n def item_type(self):\n \"\"\"\n Return item type of the order.\n :return: str\n \"\"\"\n return self._item_type\n\n @property\n def name(self):\n \"\"\"\n Return item name of the order.\n :return: str\n \"\"\"\n return self._name\n\n @property\n def product_details(self):\n \"\"\"\n Return other details of the item of the order.\n :return: str\n \"\"\"\n return self._product_details\n\n @property\n def factory(self):\n \"\"\"\n Return the factory that can generate the item.\n :return: Factory\n \"\"\"\n return self._factory\n\n @property\n def holiday(self):\n \"\"\"\n Return the holiday that the item for.\n :return: str\n \"\"\"\n return self._holiday\n\n @property\n def invalid_notes(self):\n \"\"\"\n Return the invalid notes if the item is invalid.\n :return: str\n \"\"\"\n return self._invalid_notes\n\n @property\n def is_valid(self):\n \"\"\"\n Return the valid status.\n :return: str\n \"\"\"\n return self._is_valid\n\n def is_invalid(self):\n \"\"\"\n Set the status to invalid.\n \"\"\"\n self._is_valid = False\n\n def set_invalid_notes(self, error):\n \"\"\"\n Set the invalid notes.\n :param error: str\n \"\"\"\n self._invalid_notes = error\n\n def __str__(self):\n \"\"\"\n String method of the class.\n \"\"\"\n return f\"Order Number: {self._order_number} \" \\\n f\"Product ID: {self._product_id} \" \\\n f\"Item: {self._item_type} \" \\\n f\"Name: {self._name} \" \\\n f\"Quantity: {self._quantity} \" \\\n f\"Product details: {self._product_details} \"\n", "step-ids": [ 7, 10, 11, 15, 17 ] }
[ 7, 10, 11, 15, 17 ]
# Bradley N. Miller, David L. Ranum # Introduction to Data Structures and Algorithms in Python # Copyright 2005 # __all__=['BinaryTree', 'Stack'] class Stack: def __init__(self): self.items = [] def isEmpty(self): return self.items == [] def push(self, item): self.items.append(item) def pop(self): return self.items.pop() def peek(self): return self.items[len(self.items)-1] def size(self): return len(self.items) class BinaryTree: """ A recursive implementation of Binary Tree Using links and Nodes approach. """ def __init__(self,rootObj): self.key = rootObj self.leftChild = None self.rightChild = None self.parent = None def insertLeft(self,newNode): if self.leftChild == None: self.leftChild = BinaryTree(newNode) if not isinstance(newNode, BinaryTree) else newNode else: t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree) else newNode # t.left = self.leftChild self.leftChild = t self.leftChild.parent = self def insertRight(self,newNode): if self.rightChild == None: self.rightChild = BinaryTree(newNode) if not isinstance(newNode, BinaryTree) else newNode else: t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree) else newNode # t.right = self.rightChild self.rightChild = t self.rightChild.parent = self def isLeaf(self): return ((not self.leftChild) and (not self.rightChild)) def isRoot(self): return not self.parent def getRightChild(self): return self.rightChild def getLeftChild(self): return self.leftChild def getSibling(self): if self.isRoot(): return None Rsib = self.parent.getRightChild() return Rsib if Rsib != self else self.parent.getLeftChild() def hasChild(self): return (self.rightChild != None) or (self.leftChild != None) def hasParent(self): return (self.key != None) def setRootVal(self,obj): self.key = obj def getRootVal(self): return self.key def inorder(self): if self.leftChild: self.leftChild.inorder() print(self.key) if self.rightChild: self.rightChild.inorder() def postorder(self): if self.leftChild: self.leftChild.postorder() if self.rightChild: self.rightChild.postorder() print(self.key) def preorder(self): print(self.key) if self.leftChild: self.leftChild.preorder() if self.rightChild: self.rightChild.preorder() def printexp(self): sVal = "" if self: sVal = '(' if self.hasChild() else '' sVal += printexp(self.getLeftChild()) sVal = sVal + str(self.getRootVal()) sVal = sVal + printexp(self.getRightChild()) sVal += ')' if self.hasChild() else '' return sVal def __str__(self): return self.printexp() def hasLeftChild(self): return self.leftChild def hasRightChild(self): return self.rightChild def __iter__(self): """The standard inorder traversal of a binary tree.""" if self: if self.hasLeftChild(): for elem in self.leftChild: yield elem yield self.key if self.hasRightChild(): for elem in self.rightChild: yield elem def postordereval(self, opers = None): if not opers: opers = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv} res1 = None res2 = None if self.leftChild: res1 = self.leftChild.postordereval() #// \label{peleft} if self.rightChild: res2 = self.rightChild.postordereval() #// \label{peright} if res1 and res2: return opers[self.key](res1,res2) #// \label{peeval} else: return self.key def inorder(tree): if tree != None: inorder(tree.getLeftChild()) print(tree.getRootVal()) inorder(tree.getRightChild()) # def printexp(tree): # if tree.leftChild: # print'( ' # printexp(tree.getLeftChild()) # print '%s '%tree.getRootVal() # if tree.rightChild: # printexp(tree.getRightChild()) # print') ' def printexp(tree): sVal = "" if tree: sVal 
= '(' if tree.hasChild() else '' sVal += printexp(tree.getLeftChild()) sVal = sVal + str(tree.getRootVal()) sVal = sVal + printexp(tree.getRightChild()) sVal += ')' if tree.hasChild() else '' return sVal def postordereval(tree): opers = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv} res1 = None res2 = None if tree: res1 = postordereval(tree.getLeftChild()) #// \label{peleft} res2 = postordereval(tree.getRightChild()) #// \label{peright} if res1 and res2: return opers[tree.getRootVal()](res1,res2) #// \label{peeval} else: return tree.getRootVal() def height(tree): if tree == None: return -1 else: return 1 + max(height(tree.leftChild),height(tree.rightChild)) if __name__ == '__main__': t = BinaryTree(7) t.insertLeft(3) t.insertRight(9) inorder(t) # import operator x = BinaryTree('*') x.insertLeft('+') l = x.getLeftChild() l.insertLeft(4) l.insertRight(5) x.insertRight(7) print(printexp(x)) # print(postordereval(x)) print(height(x))
normal
{ "blob_id": "5f48c7a68cb9734d84dee2cf8ff4d7be490cf328", "index": 2888, "step-1": "<mask token>\n\n\nclass BinaryTree:\n <mask token>\n\n def __init__(self, rootObj):\n self.key = rootObj\n self.leftChild = None\n self.rightChild = None\n self.parent = None\n\n def insertLeft(self, newNode):\n if self.leftChild == None:\n self.leftChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.leftChild = t\n self.leftChild.parent = self\n\n def insertRight(self, newNode):\n if self.rightChild == None:\n self.rightChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.rightChild = t\n self.rightChild.parent = self\n <mask token>\n <mask token>\n <mask token>\n\n def getLeftChild(self):\n return self.leftChild\n\n def getSibling(self):\n if self.isRoot():\n return None\n Rsib = self.parent.getRightChild()\n return Rsib if Rsib != self else self.parent.getLeftChild()\n <mask token>\n\n def hasParent(self):\n return self.key != None\n\n def setRootVal(self, obj):\n self.key = obj\n <mask token>\n\n def inorder(self):\n if self.leftChild:\n self.leftChild.inorder()\n print(self.key)\n if self.rightChild:\n self.rightChild.inorder()\n <mask token>\n <mask token>\n\n def printexp(self):\n sVal = ''\n if self:\n sVal = '(' if self.hasChild() else ''\n sVal += printexp(self.getLeftChild())\n sVal = sVal + str(self.getRootVal())\n sVal = sVal + printexp(self.getRightChild())\n sVal += ')' if self.hasChild() else ''\n return sVal\n\n def __str__(self):\n return self.printexp()\n <mask token>\n\n def hasRightChild(self):\n return self.rightChild\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass BinaryTree:\n <mask token>\n\n def __init__(self, rootObj):\n self.key = rootObj\n self.leftChild = None\n self.rightChild = None\n self.parent = None\n\n def insertLeft(self, newNode):\n if self.leftChild == None:\n self.leftChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.leftChild = t\n self.leftChild.parent = self\n\n def insertRight(self, newNode):\n if self.rightChild == None:\n self.rightChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.rightChild = t\n self.rightChild.parent = self\n\n def isLeaf(self):\n return not self.leftChild and not self.rightChild\n <mask token>\n\n def getRightChild(self):\n return self.rightChild\n\n def getLeftChild(self):\n return self.leftChild\n\n def getSibling(self):\n if self.isRoot():\n return None\n Rsib = self.parent.getRightChild()\n return Rsib if Rsib != self else self.parent.getLeftChild()\n\n def hasChild(self):\n return self.rightChild != None or self.leftChild != None\n\n def hasParent(self):\n return self.key != None\n\n def setRootVal(self, obj):\n self.key = obj\n\n def getRootVal(self):\n return self.key\n\n def inorder(self):\n if self.leftChild:\n self.leftChild.inorder()\n print(self.key)\n if self.rightChild:\n self.rightChild.inorder()\n <mask token>\n\n def preorder(self):\n print(self.key)\n if self.leftChild:\n self.leftChild.preorder()\n if self.rightChild:\n self.rightChild.preorder()\n\n def printexp(self):\n sVal = ''\n if 
self:\n sVal = '(' if self.hasChild() else ''\n sVal += printexp(self.getLeftChild())\n sVal = sVal + str(self.getRootVal())\n sVal = sVal + printexp(self.getRightChild())\n sVal += ')' if self.hasChild() else ''\n return sVal\n\n def __str__(self):\n return self.printexp()\n <mask token>\n\n def hasRightChild(self):\n return self.rightChild\n\n def __iter__(self):\n \"\"\"The standard inorder traversal of a binary tree.\"\"\"\n if self:\n if self.hasLeftChild():\n for elem in self.leftChild:\n yield elem\n yield self.key\n if self.hasRightChild():\n for elem in self.rightChild:\n yield elem\n\n def postordereval(self, opers=None):\n if not opers:\n opers = {'+': operator.add, '-': operator.sub, '*': operator.\n mul, '/': operator.truediv}\n res1 = None\n res2 = None\n if self.leftChild:\n res1 = self.leftChild.postordereval()\n if self.rightChild:\n res2 = self.rightChild.postordereval()\n if res1 and res2:\n return opers[self.key](res1, res2)\n else:\n return self.key\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Stack:\n\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n return self.items.pop()\n\n def peek(self):\n return self.items[len(self.items) - 1]\n\n def size(self):\n return len(self.items)\n\n\nclass BinaryTree:\n \"\"\"\n A recursive implementation of Binary Tree\n Using links and Nodes approach.\n \"\"\"\n\n def __init__(self, rootObj):\n self.key = rootObj\n self.leftChild = None\n self.rightChild = None\n self.parent = None\n\n def insertLeft(self, newNode):\n if self.leftChild == None:\n self.leftChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.leftChild = t\n self.leftChild.parent = self\n\n def insertRight(self, newNode):\n if self.rightChild == None:\n self.rightChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.rightChild = t\n self.rightChild.parent = self\n\n def isLeaf(self):\n return not self.leftChild and not self.rightChild\n\n def isRoot(self):\n return not self.parent\n\n def getRightChild(self):\n return self.rightChild\n\n def getLeftChild(self):\n return self.leftChild\n\n def getSibling(self):\n if self.isRoot():\n return None\n Rsib = self.parent.getRightChild()\n return Rsib if Rsib != self else self.parent.getLeftChild()\n\n def hasChild(self):\n return self.rightChild != None or self.leftChild != None\n\n def hasParent(self):\n return self.key != None\n\n def setRootVal(self, obj):\n self.key = obj\n\n def getRootVal(self):\n return self.key\n\n def inorder(self):\n if self.leftChild:\n self.leftChild.inorder()\n print(self.key)\n if self.rightChild:\n self.rightChild.inorder()\n\n def postorder(self):\n if self.leftChild:\n self.leftChild.postorder()\n if self.rightChild:\n self.rightChild.postorder()\n print(self.key)\n\n def preorder(self):\n print(self.key)\n if self.leftChild:\n self.leftChild.preorder()\n if self.rightChild:\n self.rightChild.preorder()\n\n def printexp(self):\n sVal = ''\n if self:\n sVal = '(' if self.hasChild() else ''\n sVal += printexp(self.getLeftChild())\n sVal = sVal + str(self.getRootVal())\n sVal = sVal + printexp(self.getRightChild())\n sVal += ')' if self.hasChild() else ''\n return sVal\n\n def __str__(self):\n return self.printexp()\n\n def 
hasLeftChild(self):\n return self.leftChild\n\n def hasRightChild(self):\n return self.rightChild\n\n def __iter__(self):\n \"\"\"The standard inorder traversal of a binary tree.\"\"\"\n if self:\n if self.hasLeftChild():\n for elem in self.leftChild:\n yield elem\n yield self.key\n if self.hasRightChild():\n for elem in self.rightChild:\n yield elem\n\n def postordereval(self, opers=None):\n if not opers:\n opers = {'+': operator.add, '-': operator.sub, '*': operator.\n mul, '/': operator.truediv}\n res1 = None\n res2 = None\n if self.leftChild:\n res1 = self.leftChild.postordereval()\n if self.rightChild:\n res2 = self.rightChild.postordereval()\n if res1 and res2:\n return opers[self.key](res1, res2)\n else:\n return self.key\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass Stack:\n\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n return self.items.pop()\n\n def peek(self):\n return self.items[len(self.items) - 1]\n\n def size(self):\n return len(self.items)\n\n\nclass BinaryTree:\n \"\"\"\n A recursive implementation of Binary Tree\n Using links and Nodes approach.\n \"\"\"\n\n def __init__(self, rootObj):\n self.key = rootObj\n self.leftChild = None\n self.rightChild = None\n self.parent = None\n\n def insertLeft(self, newNode):\n if self.leftChild == None:\n self.leftChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.leftChild = t\n self.leftChild.parent = self\n\n def insertRight(self, newNode):\n if self.rightChild == None:\n self.rightChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.rightChild = t\n self.rightChild.parent = self\n\n def isLeaf(self):\n return not self.leftChild and not self.rightChild\n\n def isRoot(self):\n return not self.parent\n\n def getRightChild(self):\n return self.rightChild\n\n def getLeftChild(self):\n return self.leftChild\n\n def getSibling(self):\n if self.isRoot():\n return None\n Rsib = self.parent.getRightChild()\n return Rsib if Rsib != self else self.parent.getLeftChild()\n\n def hasChild(self):\n return self.rightChild != None or self.leftChild != None\n\n def hasParent(self):\n return self.key != None\n\n def setRootVal(self, obj):\n self.key = obj\n\n def getRootVal(self):\n return self.key\n\n def inorder(self):\n if self.leftChild:\n self.leftChild.inorder()\n print(self.key)\n if self.rightChild:\n self.rightChild.inorder()\n\n def postorder(self):\n if self.leftChild:\n self.leftChild.postorder()\n if self.rightChild:\n self.rightChild.postorder()\n print(self.key)\n\n def preorder(self):\n print(self.key)\n if self.leftChild:\n self.leftChild.preorder()\n if self.rightChild:\n self.rightChild.preorder()\n\n def printexp(self):\n sVal = ''\n if self:\n sVal = '(' if self.hasChild() else ''\n sVal += printexp(self.getLeftChild())\n sVal = sVal + str(self.getRootVal())\n sVal = sVal + printexp(self.getRightChild())\n sVal += ')' if self.hasChild() else ''\n return sVal\n\n def __str__(self):\n return self.printexp()\n\n def hasLeftChild(self):\n return self.leftChild\n\n def hasRightChild(self):\n return self.rightChild\n\n def __iter__(self):\n \"\"\"The standard inorder traversal of a binary tree.\"\"\"\n if self:\n if self.hasLeftChild():\n for elem in 
self.leftChild:\n yield elem\n yield self.key\n if self.hasRightChild():\n for elem in self.rightChild:\n yield elem\n\n def postordereval(self, opers=None):\n if not opers:\n opers = {'+': operator.add, '-': operator.sub, '*': operator.\n mul, '/': operator.truediv}\n res1 = None\n res2 = None\n if self.leftChild:\n res1 = self.leftChild.postordereval()\n if self.rightChild:\n res2 = self.rightChild.postordereval()\n if res1 and res2:\n return opers[self.key](res1, res2)\n else:\n return self.key\n\n\ndef inorder(tree):\n if tree != None:\n inorder(tree.getLeftChild())\n print(tree.getRootVal())\n inorder(tree.getRightChild())\n\n\n<mask token>\n", "step-5": "# Bradley N. Miller, David L. Ranum\n# Introduction to Data Structures and Algorithms in Python\n# Copyright 2005\n# \n\n\n__all__=['BinaryTree', 'Stack']\n\n\nclass Stack:\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n return self.items.pop()\n\n def peek(self):\n return self.items[len(self.items)-1]\n\n def size(self):\n return len(self.items)\n\n\n\nclass BinaryTree:\n \"\"\"\n A recursive implementation of Binary Tree\n Using links and Nodes approach.\n \"\"\" \n def __init__(self,rootObj):\n self.key = rootObj\n self.leftChild = None\n self.rightChild = None\n self.parent = None\n\n def insertLeft(self,newNode):\n if self.leftChild == None:\n self.leftChild = BinaryTree(newNode) if not isinstance(newNode, BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree) else newNode\n # t.left = self.leftChild\n self.leftChild = t\n self.leftChild.parent = self \n \n def insertRight(self,newNode):\n if self.rightChild == None:\n self.rightChild = BinaryTree(newNode) if not isinstance(newNode, BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree) else newNode\n # t.right = self.rightChild\n self.rightChild = t\n self.rightChild.parent = self \n\n def isLeaf(self):\n return ((not self.leftChild) and (not self.rightChild))\n\n def isRoot(self):\n return not self.parent\n \n def getRightChild(self):\n return self.rightChild\n\n def getLeftChild(self):\n return self.leftChild\n \n def getSibling(self):\n if self.isRoot(): \n return None\n Rsib = self.parent.getRightChild() \n return Rsib if Rsib != self else self.parent.getLeftChild()\n\n def hasChild(self):\n return (self.rightChild != None) or (self.leftChild != None) \n\n def hasParent(self):\n return (self.key != None)\n\n def setRootVal(self,obj):\n self.key = obj\n\n def getRootVal(self):\n return self.key\n\n def inorder(self):\n if self.leftChild:\n self.leftChild.inorder()\n print(self.key)\n if self.rightChild:\n self.rightChild.inorder()\n\n def postorder(self):\n if self.leftChild:\n self.leftChild.postorder()\n if self.rightChild:\n self.rightChild.postorder()\n print(self.key)\n\n\n def preorder(self):\n print(self.key)\n if self.leftChild:\n self.leftChild.preorder()\n if self.rightChild:\n self.rightChild.preorder()\n\n def printexp(self):\n sVal = \"\"\n if self:\n sVal = '(' if self.hasChild() else ''\n sVal += printexp(self.getLeftChild())\n sVal = sVal + str(self.getRootVal())\n sVal = sVal + printexp(self.getRightChild()) \n sVal += ')' if self.hasChild() else ''\n return sVal\n\n def __str__(self):\n return self.printexp()\n\n def hasLeftChild(self):\n return self.leftChild\n\n def hasRightChild(self):\n return self.rightChild\n\n def __iter__(self):\n \"\"\"The standard 
inorder traversal of a binary tree.\"\"\"\n if self:\n if self.hasLeftChild():\n for elem in self.leftChild:\n yield elem\n yield self.key\n if self.hasRightChild():\n for elem in self.rightChild:\n yield elem\n\n\n def postordereval(self, opers = None):\n if not opers:\n opers = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv}\n res1 = None\n res2 = None\n if self.leftChild:\n res1 = self.leftChild.postordereval() #// \\label{peleft}\n if self.rightChild:\n res2 = self.rightChild.postordereval() #// \\label{peright}\n if res1 and res2:\n return opers[self.key](res1,res2) #// \\label{peeval}\n else:\n return self.key\n\ndef inorder(tree):\n if tree != None:\n inorder(tree.getLeftChild())\n print(tree.getRootVal())\n inorder(tree.getRightChild())\n\n# def printexp(tree):\n# if tree.leftChild:\n# print'( '\n# printexp(tree.getLeftChild())\n# print '%s '%tree.getRootVal()\n# if tree.rightChild:\n# printexp(tree.getRightChild())\n# print') '\n\ndef printexp(tree):\n sVal = \"\"\n if tree:\n sVal = '(' if tree.hasChild() else ''\n sVal += printexp(tree.getLeftChild())\n sVal = sVal + str(tree.getRootVal())\n sVal = sVal + printexp(tree.getRightChild()) \n sVal += ')' if tree.hasChild() else ''\n return sVal\n\ndef postordereval(tree):\n opers = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv}\n res1 = None\n res2 = None\n if tree:\n res1 = postordereval(tree.getLeftChild()) #// \\label{peleft}\n res2 = postordereval(tree.getRightChild()) #// \\label{peright}\n if res1 and res2:\n return opers[tree.getRootVal()](res1,res2) #// \\label{peeval}\n else:\n return tree.getRootVal()\n\ndef height(tree):\n if tree == None:\n return -1\n else:\n return 1 + max(height(tree.leftChild),height(tree.rightChild))\n\nif __name__ == '__main__':\n t = BinaryTree(7)\n t.insertLeft(3)\n t.insertRight(9)\n inorder(t)\n # import operator\n x = BinaryTree('*')\n x.insertLeft('+')\n l = x.getLeftChild()\n l.insertLeft(4)\n l.insertRight(5)\n x.insertRight(7)\n print(printexp(x))\n # print(postordereval(x))\n print(height(x))\n", "step-ids": [ 12, 19, 30, 31, 37 ] }
[ 12, 19, 30, 31, 37 ]
from typing import Any from typing import List from xsdata.codegen.mixins import RelativeHandlerInterface from xsdata.codegen.models import Attr from xsdata.codegen.models import Class from xsdata.models.enums import Tag from xsdata.utils.namespaces import build_qname class ClassEnumerationHandler(RelativeHandlerInterface): """Enumeration class processor.""" __slots__ = () def process(self, target: Class): """ Process class receiver. Steps: 1. Filter attrs not derived from xs:enumeration 2. Flatten attrs derived from xs:union of enumerations 3. Promote inner enumeration classes to root classes """ self.filter(target) self.flatten(target) self.promote(target) @classmethod def filter(cls, target: Class): """Filter attrs not derived from xs:enumeration if there are any xs:enumeration attrs.""" enumerations = [attr for attr in target.attrs if attr.is_enumeration] if enumerations: target.attrs = enumerations def flatten(self, target: Class): """ Flatten attrs derived from xs:union of enumeration classes. Find the enumeration classes and merge all of their members in the target class. """ if len(target.attrs) != 1 or target.attrs[0].tag != Tag.UNION: return enums: List[Any] = [] for attr_type in target.attrs[0].types: if attr_type.forward: enums.extend(target.inner) elif not attr_type.native: enums.append(self.container.find(attr_type.qname)) else: enums.append(None) merge = all(isinstance(x, Class) and x.is_enumeration for x in enums) if merge: target.attrs.clear() target.inner.clear() target.attrs.extend(attr.clone() for enum in enums for attr in enum.attrs) def promote(self, target: Class): """ Promote inner enumeration classes to root classes. Steps: 1. Find inner enumerations 2. Clone and update their qualified name 3. Update attributes types """ for inner in list(target.inner): if inner.is_enumeration: target.inner.remove(inner) clone = self.clone_enumeration(inner, target.name) self.container.add(clone) for attr in target.attrs: self.update_types(attr, inner.qname, clone.qname) @classmethod def clone_enumeration(cls, inner: Class, name: str) -> Class: clone = inner.clone() clone.qname = build_qname(clone.target_namespace, f"{name}_{clone.name}") return clone @classmethod def update_types(cls, attr: Attr, search: str, replace: str): for attr_type in attr.types: if attr_type.qname == search and attr_type.forward: attr_type.qname = replace attr_type.forward = False
normal
{ "blob_id": "4d9064add28302fe173a8b0a81ee7d187db8aead", "index": 6029, "step-1": "<mask token>\n\n\nclass ClassEnumerationHandler(RelativeHandlerInterface):\n <mask token>\n <mask token>\n\n def process(self, target: Class):\n \"\"\"\n Process class receiver.\n\n Steps:\n 1. Filter attrs not derived from xs:enumeration\n 2. Flatten attrs derived from xs:union of enumerations\n 3. Promote inner enumeration classes to root classes\n \"\"\"\n self.filter(target)\n self.flatten(target)\n self.promote(target)\n\n @classmethod\n def filter(cls, target: Class):\n \"\"\"Filter attrs not derived from xs:enumeration if there are any\n xs:enumeration attrs.\"\"\"\n enumerations = [attr for attr in target.attrs if attr.is_enumeration]\n if enumerations:\n target.attrs = enumerations\n <mask token>\n\n def promote(self, target: Class):\n \"\"\"\n Promote inner enumeration classes to root classes.\n\n Steps:\n 1. Find inner enumerations\n 2. Clone and update their qualified name\n 3. Update attributes types\n \"\"\"\n for inner in list(target.inner):\n if inner.is_enumeration:\n target.inner.remove(inner)\n clone = self.clone_enumeration(inner, target.name)\n self.container.add(clone)\n for attr in target.attrs:\n self.update_types(attr, inner.qname, clone.qname)\n\n @classmethod\n def clone_enumeration(cls, inner: Class, name: str) ->Class:\n clone = inner.clone()\n clone.qname = build_qname(clone.target_namespace,\n f'{name}_{clone.name}')\n return clone\n\n @classmethod\n def update_types(cls, attr: Attr, search: str, replace: str):\n for attr_type in attr.types:\n if attr_type.qname == search and attr_type.forward:\n attr_type.qname = replace\n attr_type.forward = False\n", "step-2": "<mask token>\n\n\nclass ClassEnumerationHandler(RelativeHandlerInterface):\n <mask token>\n <mask token>\n\n def process(self, target: Class):\n \"\"\"\n Process class receiver.\n\n Steps:\n 1. Filter attrs not derived from xs:enumeration\n 2. Flatten attrs derived from xs:union of enumerations\n 3. Promote inner enumeration classes to root classes\n \"\"\"\n self.filter(target)\n self.flatten(target)\n self.promote(target)\n\n @classmethod\n def filter(cls, target: Class):\n \"\"\"Filter attrs not derived from xs:enumeration if there are any\n xs:enumeration attrs.\"\"\"\n enumerations = [attr for attr in target.attrs if attr.is_enumeration]\n if enumerations:\n target.attrs = enumerations\n\n def flatten(self, target: Class):\n \"\"\"\n Flatten attrs derived from xs:union of enumeration classes.\n\n Find the enumeration classes and merge all of their members in\n the target class.\n \"\"\"\n if len(target.attrs) != 1 or target.attrs[0].tag != Tag.UNION:\n return\n enums: List[Any] = []\n for attr_type in target.attrs[0].types:\n if attr_type.forward:\n enums.extend(target.inner)\n elif not attr_type.native:\n enums.append(self.container.find(attr_type.qname))\n else:\n enums.append(None)\n merge = all(isinstance(x, Class) and x.is_enumeration for x in enums)\n if merge:\n target.attrs.clear()\n target.inner.clear()\n target.attrs.extend(attr.clone() for enum in enums for attr in\n enum.attrs)\n\n def promote(self, target: Class):\n \"\"\"\n Promote inner enumeration classes to root classes.\n\n Steps:\n 1. Find inner enumerations\n 2. Clone and update their qualified name\n 3. 
Update attributes types\n \"\"\"\n for inner in list(target.inner):\n if inner.is_enumeration:\n target.inner.remove(inner)\n clone = self.clone_enumeration(inner, target.name)\n self.container.add(clone)\n for attr in target.attrs:\n self.update_types(attr, inner.qname, clone.qname)\n\n @classmethod\n def clone_enumeration(cls, inner: Class, name: str) ->Class:\n clone = inner.clone()\n clone.qname = build_qname(clone.target_namespace,\n f'{name}_{clone.name}')\n return clone\n\n @classmethod\n def update_types(cls, attr: Attr, search: str, replace: str):\n for attr_type in attr.types:\n if attr_type.qname == search and attr_type.forward:\n attr_type.qname = replace\n attr_type.forward = False\n", "step-3": "<mask token>\n\n\nclass ClassEnumerationHandler(RelativeHandlerInterface):\n \"\"\"Enumeration class processor.\"\"\"\n __slots__ = ()\n\n def process(self, target: Class):\n \"\"\"\n Process class receiver.\n\n Steps:\n 1. Filter attrs not derived from xs:enumeration\n 2. Flatten attrs derived from xs:union of enumerations\n 3. Promote inner enumeration classes to root classes\n \"\"\"\n self.filter(target)\n self.flatten(target)\n self.promote(target)\n\n @classmethod\n def filter(cls, target: Class):\n \"\"\"Filter attrs not derived from xs:enumeration if there are any\n xs:enumeration attrs.\"\"\"\n enumerations = [attr for attr in target.attrs if attr.is_enumeration]\n if enumerations:\n target.attrs = enumerations\n\n def flatten(self, target: Class):\n \"\"\"\n Flatten attrs derived from xs:union of enumeration classes.\n\n Find the enumeration classes and merge all of their members in\n the target class.\n \"\"\"\n if len(target.attrs) != 1 or target.attrs[0].tag != Tag.UNION:\n return\n enums: List[Any] = []\n for attr_type in target.attrs[0].types:\n if attr_type.forward:\n enums.extend(target.inner)\n elif not attr_type.native:\n enums.append(self.container.find(attr_type.qname))\n else:\n enums.append(None)\n merge = all(isinstance(x, Class) and x.is_enumeration for x in enums)\n if merge:\n target.attrs.clear()\n target.inner.clear()\n target.attrs.extend(attr.clone() for enum in enums for attr in\n enum.attrs)\n\n def promote(self, target: Class):\n \"\"\"\n Promote inner enumeration classes to root classes.\n\n Steps:\n 1. Find inner enumerations\n 2. Clone and update their qualified name\n 3. 
Update attributes types\n \"\"\"\n for inner in list(target.inner):\n if inner.is_enumeration:\n target.inner.remove(inner)\n clone = self.clone_enumeration(inner, target.name)\n self.container.add(clone)\n for attr in target.attrs:\n self.update_types(attr, inner.qname, clone.qname)\n\n @classmethod\n def clone_enumeration(cls, inner: Class, name: str) ->Class:\n clone = inner.clone()\n clone.qname = build_qname(clone.target_namespace,\n f'{name}_{clone.name}')\n return clone\n\n @classmethod\n def update_types(cls, attr: Attr, search: str, replace: str):\n for attr_type in attr.types:\n if attr_type.qname == search and attr_type.forward:\n attr_type.qname = replace\n attr_type.forward = False\n", "step-4": "from typing import Any\nfrom typing import List\nfrom xsdata.codegen.mixins import RelativeHandlerInterface\nfrom xsdata.codegen.models import Attr\nfrom xsdata.codegen.models import Class\nfrom xsdata.models.enums import Tag\nfrom xsdata.utils.namespaces import build_qname\n\n\nclass ClassEnumerationHandler(RelativeHandlerInterface):\n \"\"\"Enumeration class processor.\"\"\"\n __slots__ = ()\n\n def process(self, target: Class):\n \"\"\"\n Process class receiver.\n\n Steps:\n 1. Filter attrs not derived from xs:enumeration\n 2. Flatten attrs derived from xs:union of enumerations\n 3. Promote inner enumeration classes to root classes\n \"\"\"\n self.filter(target)\n self.flatten(target)\n self.promote(target)\n\n @classmethod\n def filter(cls, target: Class):\n \"\"\"Filter attrs not derived from xs:enumeration if there are any\n xs:enumeration attrs.\"\"\"\n enumerations = [attr for attr in target.attrs if attr.is_enumeration]\n if enumerations:\n target.attrs = enumerations\n\n def flatten(self, target: Class):\n \"\"\"\n Flatten attrs derived from xs:union of enumeration classes.\n\n Find the enumeration classes and merge all of their members in\n the target class.\n \"\"\"\n if len(target.attrs) != 1 or target.attrs[0].tag != Tag.UNION:\n return\n enums: List[Any] = []\n for attr_type in target.attrs[0].types:\n if attr_type.forward:\n enums.extend(target.inner)\n elif not attr_type.native:\n enums.append(self.container.find(attr_type.qname))\n else:\n enums.append(None)\n merge = all(isinstance(x, Class) and x.is_enumeration for x in enums)\n if merge:\n target.attrs.clear()\n target.inner.clear()\n target.attrs.extend(attr.clone() for enum in enums for attr in\n enum.attrs)\n\n def promote(self, target: Class):\n \"\"\"\n Promote inner enumeration classes to root classes.\n\n Steps:\n 1. Find inner enumerations\n 2. Clone and update their qualified name\n 3. 
Update attributes types\n \"\"\"\n for inner in list(target.inner):\n if inner.is_enumeration:\n target.inner.remove(inner)\n clone = self.clone_enumeration(inner, target.name)\n self.container.add(clone)\n for attr in target.attrs:\n self.update_types(attr, inner.qname, clone.qname)\n\n @classmethod\n def clone_enumeration(cls, inner: Class, name: str) ->Class:\n clone = inner.clone()\n clone.qname = build_qname(clone.target_namespace,\n f'{name}_{clone.name}')\n return clone\n\n @classmethod\n def update_types(cls, attr: Attr, search: str, replace: str):\n for attr_type in attr.types:\n if attr_type.qname == search and attr_type.forward:\n attr_type.qname = replace\n attr_type.forward = False\n", "step-5": "from typing import Any\nfrom typing import List\n\nfrom xsdata.codegen.mixins import RelativeHandlerInterface\nfrom xsdata.codegen.models import Attr\nfrom xsdata.codegen.models import Class\nfrom xsdata.models.enums import Tag\nfrom xsdata.utils.namespaces import build_qname\n\n\nclass ClassEnumerationHandler(RelativeHandlerInterface):\n \"\"\"Enumeration class processor.\"\"\"\n\n __slots__ = ()\n\n def process(self, target: Class):\n \"\"\"\n Process class receiver.\n\n Steps:\n 1. Filter attrs not derived from xs:enumeration\n 2. Flatten attrs derived from xs:union of enumerations\n 3. Promote inner enumeration classes to root classes\n \"\"\"\n self.filter(target)\n self.flatten(target)\n self.promote(target)\n\n @classmethod\n def filter(cls, target: Class):\n \"\"\"Filter attrs not derived from xs:enumeration if there are any\n xs:enumeration attrs.\"\"\"\n enumerations = [attr for attr in target.attrs if attr.is_enumeration]\n if enumerations:\n target.attrs = enumerations\n\n def flatten(self, target: Class):\n \"\"\"\n Flatten attrs derived from xs:union of enumeration classes.\n\n Find the enumeration classes and merge all of their members in\n the target class.\n \"\"\"\n if len(target.attrs) != 1 or target.attrs[0].tag != Tag.UNION:\n return\n\n enums: List[Any] = []\n for attr_type in target.attrs[0].types:\n if attr_type.forward:\n enums.extend(target.inner)\n elif not attr_type.native:\n enums.append(self.container.find(attr_type.qname))\n else:\n enums.append(None)\n\n merge = all(isinstance(x, Class) and x.is_enumeration for x in enums)\n if merge:\n target.attrs.clear()\n target.inner.clear()\n\n target.attrs.extend(attr.clone() for enum in enums for attr in enum.attrs)\n\n def promote(self, target: Class):\n \"\"\"\n Promote inner enumeration classes to root classes.\n\n Steps:\n 1. Find inner enumerations\n 2. Clone and update their qualified name\n 3. Update attributes types\n \"\"\"\n for inner in list(target.inner):\n if inner.is_enumeration:\n target.inner.remove(inner)\n clone = self.clone_enumeration(inner, target.name)\n self.container.add(clone)\n for attr in target.attrs:\n self.update_types(attr, inner.qname, clone.qname)\n\n @classmethod\n def clone_enumeration(cls, inner: Class, name: str) -> Class:\n clone = inner.clone()\n clone.qname = build_qname(clone.target_namespace, f\"{name}_{clone.name}\")\n return clone\n\n @classmethod\n def update_types(cls, attr: Attr, search: str, replace: str):\n for attr_type in attr.types:\n if attr_type.qname == search and attr_type.forward:\n attr_type.qname = replace\n attr_type.forward = False\n", "step-ids": [ 6, 7, 9, 10, 11 ] }
[ 6, 7, 9, 10, 11 ]
import logging import os import callbacks import commands import dice import echo import inline import keyboards import mybot import myenigma import poll import rocketgram import send import unknown # avoid to remove "unused" imports by optimizers def fix_imports(): _ = callbacks _ = commands _ = echo _ = keyboards _ = myenigma _ = inline _ = send _ = dice _ = unknown _ = poll logger = logging.getLogger('minibots.engine') def main(): mode = os.environ.get('MODE') if mode is None and 'DYNO' in os.environ: mode = 'heroku' if mode not in ('updates', 'webhook', 'heroku'): raise TypeError('MODE must be `updates` or `webhook` or `heroku`!') logging.basicConfig(format='%(asctime)s - %(levelname)-5s - %(name)-25s: %(message)s') logging.basicConfig(level=logging.ERROR) logging.getLogger('engine').setLevel(logging.INFO) logging.getLogger('mybot').setLevel(logging.DEBUG) logging.getLogger('rocketgram').setLevel(logging.DEBUG) logging.getLogger('rocketgram.raw.in').setLevel(logging.INFO) logging.getLogger('rocketgram.raw.out').setLevel(logging.INFO) logger.info('Starting bot''s template in %s...', mode) bot = mybot.get_bot(os.environ['TOKEN'].strip()) if mode == 'updates': rocketgram.UpdatesExecutor.run(bot, drop_updates=bool(int(os.environ.get('DROP_UPDATES', 0)))) else: port = int(os.environ['PORT']) if mode == 'heroku' else int(os.environ.get('WEBHOOK_PORT', 8080)) rocketgram.AioHttpExecutor.run(bot, os.environ['WEBHOOK_URL'].strip(), os.environ.get('WEBHOOK_PATH', '/').strip(), host='0.0.0.0', port=port, drop_updates=bool(int(os.environ.get('DROP_UPDATES', 0))), webhook_remove=not mode == 'heroku') logger.info('Bye!') if __name__ == '__main__': main()
normal
{ "blob_id": "fd904c70b350c650362c55ccb3b915371f24e267", "index": 9623, "step-1": "import logging\nimport os\n\nimport callbacks\nimport commands\nimport dice\nimport echo\nimport inline\nimport keyboards\nimport mybot\nimport myenigma\nimport poll\nimport rocketgram\nimport send\nimport unknown\n\n\n# avoid to remove \"unused\" imports by optimizers\ndef fix_imports():\n _ = callbacks\n _ = commands\n _ = echo\n _ = keyboards\n _ = myenigma\n _ = inline\n _ = send\n _ = dice\n _ = unknown\n _ = poll\n\n\nlogger = logging.getLogger('minibots.engine')\n\n\ndef main():\n mode = os.environ.get('MODE')\n if mode is None and 'DYNO' in os.environ:\n mode = 'heroku'\n\n if mode not in ('updates', 'webhook', 'heroku'):\n raise TypeError('MODE must be `updates` or `webhook` or `heroku`!')\n\n logging.basicConfig(format='%(asctime)s - %(levelname)-5s - %(name)-25s: %(message)s')\n logging.basicConfig(level=logging.ERROR)\n logging.getLogger('engine').setLevel(logging.INFO)\n logging.getLogger('mybot').setLevel(logging.DEBUG)\n logging.getLogger('rocketgram').setLevel(logging.DEBUG)\n logging.getLogger('rocketgram.raw.in').setLevel(logging.INFO)\n logging.getLogger('rocketgram.raw.out').setLevel(logging.INFO)\n\n logger.info('Starting bot''s template in %s...', mode)\n\n bot = mybot.get_bot(os.environ['TOKEN'].strip())\n\n if mode == 'updates':\n rocketgram.UpdatesExecutor.run(bot, drop_updates=bool(int(os.environ.get('DROP_UPDATES', 0))))\n else:\n port = int(os.environ['PORT']) if mode == 'heroku' else int(os.environ.get('WEBHOOK_PORT', 8080))\n rocketgram.AioHttpExecutor.run(bot,\n os.environ['WEBHOOK_URL'].strip(),\n os.environ.get('WEBHOOK_PATH', '/').strip(),\n host='0.0.0.0', port=port,\n drop_updates=bool(int(os.environ.get('DROP_UPDATES', 0))),\n webhook_remove=not mode == 'heroku')\n\n logger.info('Bye!')\n\n\nif __name__ == '__main__':\n main()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import datetime import traceback import sys import os def getErrorReport(): errorReport = ErrorReport() return errorReport class ErrorReport(): def __init__(self): return def startLog(self): timestamp = str(datetime.datetime.now()) fileName = 'Log_'+timestamp+'.txt.' self.logFile = open(fileName,'w') def endLog(self): self.logFile.close() def writeError(self): traceback.print_exc(file=self.logFile) self.logFile.write('\n') self.logFile.flush() os.fsync(self.logFile) def writeMessage(self, message=''): self.logFile.write(message) self.logFile.write('\n\n') self.logFile.flush() os.fsync(self.logFile)
normal
{ "blob_id": "6abc8b97117257e16da1f7b730b09ee0f7bd4c6e", "index": 4715, "step-1": "<mask token>\n\n\nclass ErrorReport:\n <mask token>\n\n def startLog(self):\n timestamp = str(datetime.datetime.now())\n fileName = 'Log_' + timestamp + '.txt.'\n self.logFile = open(fileName, 'w')\n\n def endLog(self):\n self.logFile.close()\n\n def writeError(self):\n traceback.print_exc(file=self.logFile)\n self.logFile.write('\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n <mask token>\n", "step-2": "<mask token>\n\n\nclass ErrorReport:\n\n def __init__(self):\n return\n\n def startLog(self):\n timestamp = str(datetime.datetime.now())\n fileName = 'Log_' + timestamp + '.txt.'\n self.logFile = open(fileName, 'w')\n\n def endLog(self):\n self.logFile.close()\n\n def writeError(self):\n traceback.print_exc(file=self.logFile)\n self.logFile.write('\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n\n def writeMessage(self, message=''):\n self.logFile.write(message)\n self.logFile.write('\\n\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n", "step-3": "<mask token>\n\n\ndef getErrorReport():\n errorReport = ErrorReport()\n return errorReport\n\n\nclass ErrorReport:\n\n def __init__(self):\n return\n\n def startLog(self):\n timestamp = str(datetime.datetime.now())\n fileName = 'Log_' + timestamp + '.txt.'\n self.logFile = open(fileName, 'w')\n\n def endLog(self):\n self.logFile.close()\n\n def writeError(self):\n traceback.print_exc(file=self.logFile)\n self.logFile.write('\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n\n def writeMessage(self, message=''):\n self.logFile.write(message)\n self.logFile.write('\\n\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n", "step-4": "import datetime\nimport traceback\nimport sys\nimport os\n\n\ndef getErrorReport():\n errorReport = ErrorReport()\n return errorReport\n\n\nclass ErrorReport:\n\n def __init__(self):\n return\n\n def startLog(self):\n timestamp = str(datetime.datetime.now())\n fileName = 'Log_' + timestamp + '.txt.'\n self.logFile = open(fileName, 'w')\n\n def endLog(self):\n self.logFile.close()\n\n def writeError(self):\n traceback.print_exc(file=self.logFile)\n self.logFile.write('\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n\n def writeMessage(self, message=''):\n self.logFile.write(message)\n self.logFile.write('\\n\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n", "step-5": "import datetime\nimport traceback\nimport sys\nimport os\n\n\ndef getErrorReport():\n errorReport = ErrorReport()\n return errorReport\n\n\nclass ErrorReport(): \n\n def __init__(self):\n return\n\n def startLog(self):\n timestamp = str(datetime.datetime.now())\n fileName = 'Log_'+timestamp+'.txt.'\n self.logFile = open(fileName,'w')\n\n def endLog(self):\n self.logFile.close()\n\n def writeError(self):\n traceback.print_exc(file=self.logFile)\n self.logFile.write('\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n\n def writeMessage(self, message=''):\n self.logFile.write(message)\n self.logFile.write('\\n\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n", "step-ids": [ 4, 6, 7, 8, 9 ] }
[ 4, 6, 7, 8, 9 ]
from datetime import datetime import warnings import numpy as np import xarray as xr from .common import HDF4, expects_file_info pyhdf_is_installed = False try: from pyhdf import HDF, VS, V from pyhdf.SD import SD, SDC pyhdf_is_installed = True except ImportError: pass __all__ = [ 'CloudSat', ] class CloudSat(HDF4): """File handler for CloudSat data in HDF4 files. """ # This file handler always wants to return at least time, lat and lon # fields. These fields are required for this: standard_fields = { "UTC_start", "Profile_time", "Latitude", "Longitude" } # Map the standard fields to standard names: mapping = { "Latitude": "lat", "Longitude": "lon", "dim_0": "scnline", } def __init__(self, **kwargs): # Call the base class initializer super().__init__(**kwargs) @expects_file_info() def get_info(self, file_info, **kwargs): """Return a :class:`FileInfo` object with parameters about the file content. Args: file_info: Path and name of the file of which to retrieve the info about. **kwargs: Additional keyword arguments. Returns: A FileInfo object. """ file = SD(file_info.path, SDC.READ) file_info.times[0] = \ datetime.strptime(getattr(file, 'start_time'), "%Y%m%d%H%M%S") file_info.times[1] = \ datetime.strptime(getattr(file, 'end_time'), "%Y%m%d%H%M%S") return file_info @expects_file_info() def read(self, file_info, **kwargs): """Read and parse HDF4 files and load them to a xarray.Dataset A description about all variables in CloudSat dataset can be found in http://www.cloudsat.cira.colostate.edu/data-products/level-2c/2c-ice?term=53. Args: file_info: Path and name of the file as string or FileInfo object. **kwargs: Additional keyword arguments that are valid for :class:`typhon.files.handlers.common.HDF4`. Returns: A xarray.Dataset object. """ # We need to import at least the standard fields user_fields = kwargs.pop("fields", {}) fields = self.standard_fields | set(user_fields) # We catch the user mapping here, since we do not want to deal with # user-defined names in the further processing. Instead, we use our own # mapping user_mapping = kwargs.pop("mapping", None) # Load the dataset from the file: dataset = super().read( file_info, fields=fields, mapping=self.mapping, **kwargs ) dataset["time"] = self._get_time_field(dataset, file_info) # Remove fields that we do not need any longer (expect the user asked # for them explicitly) dataset = dataset.drop_vars( {"UTC_start", "Profile_time"} - set(user_fields), ) if user_mapping is not None: dataset = dataset.rename(user_mapping) return dataset def _get_time_field(self, dataset, file_info): # This gives us the starting time of the first profile in seconds # since midnight in UTC: first_profile_time = round(dataset['UTC_start'].item(0)) # This gives us the starting time of all other profiles in seconds # since the start of the first profile. profile_times = dataset['Profile_time'] # Convert the seconds to milliseconds profile_times *= 1000 profile_times = profile_times.astype("int") try: date = file_info.times[0].date() except AttributeError: # We have to load the info by ourselves: date = self.get_info(file_info).times[0].date() # Put all times together so we obtain one full timestamp # (date + time) for each data point. We are using the # starting date coming from parsing the filename. profile_times = \ np.datetime64(date) \ + np.timedelta64(first_profile_time, "s") \ + profile_times.astype("timedelta64[ms]") return profile_times
normal
{ "blob_id": "4328d526da14db756fad8d05457724a23e3e3ef6", "index": 3939, "step-1": "<mask token>\n\n\nclass CloudSat(HDF4):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n @expects_file_info()\n def get_info(self, file_info, **kwargs):\n \"\"\"Return a :class:`FileInfo` object with parameters about the\n file content.\n\n Args:\n file_info: Path and name of the file of which to retrieve the info\n about.\n **kwargs: Additional keyword arguments.\n\n Returns:\n A FileInfo object.\n \"\"\"\n file = SD(file_info.path, SDC.READ)\n file_info.times[0] = datetime.strptime(getattr(file, 'start_time'),\n '%Y%m%d%H%M%S')\n file_info.times[1] = datetime.strptime(getattr(file, 'end_time'),\n '%Y%m%d%H%M%S')\n return file_info\n <mask token>\n\n def _get_time_field(self, dataset, file_info):\n first_profile_time = round(dataset['UTC_start'].item(0))\n profile_times = dataset['Profile_time']\n profile_times *= 1000\n profile_times = profile_times.astype('int')\n try:\n date = file_info.times[0].date()\n except AttributeError:\n date = self.get_info(file_info).times[0].date()\n profile_times = np.datetime64(date) + np.timedelta64(first_profile_time\n , 's') + profile_times.astype('timedelta64[ms]')\n return profile_times\n", "step-2": "<mask token>\n\n\nclass CloudSat(HDF4):\n \"\"\"File handler for CloudSat data in HDF4 files.\n \"\"\"\n standard_fields = {'UTC_start', 'Profile_time', 'Latitude', 'Longitude'}\n mapping = {'Latitude': 'lat', 'Longitude': 'lon', 'dim_0': 'scnline'}\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n @expects_file_info()\n def get_info(self, file_info, **kwargs):\n \"\"\"Return a :class:`FileInfo` object with parameters about the\n file content.\n\n Args:\n file_info: Path and name of the file of which to retrieve the info\n about.\n **kwargs: Additional keyword arguments.\n\n Returns:\n A FileInfo object.\n \"\"\"\n file = SD(file_info.path, SDC.READ)\n file_info.times[0] = datetime.strptime(getattr(file, 'start_time'),\n '%Y%m%d%H%M%S')\n file_info.times[1] = datetime.strptime(getattr(file, 'end_time'),\n '%Y%m%d%H%M%S')\n return file_info\n\n @expects_file_info()\n def read(self, file_info, **kwargs):\n \"\"\"Read and parse HDF4 files and load them to a xarray.Dataset\n\n A description about all variables in CloudSat dataset can be found in\n http://www.cloudsat.cira.colostate.edu/data-products/level-2c/2c-ice?term=53.\n\n Args:\n file_info: Path and name of the file as string or FileInfo object.\n **kwargs: Additional keyword arguments that are valid for\n :class:`typhon.files.handlers.common.HDF4`.\n\n Returns:\n A xarray.Dataset object.\n \"\"\"\n user_fields = kwargs.pop('fields', {})\n fields = self.standard_fields | set(user_fields)\n user_mapping = kwargs.pop('mapping', None)\n dataset = super().read(file_info, fields=fields, mapping=self.\n mapping, **kwargs)\n dataset['time'] = self._get_time_field(dataset, file_info)\n dataset = dataset.drop_vars({'UTC_start', 'Profile_time'} - set(\n user_fields))\n if user_mapping is not None:\n dataset = dataset.rename(user_mapping)\n return dataset\n\n def _get_time_field(self, dataset, file_info):\n first_profile_time = round(dataset['UTC_start'].item(0))\n profile_times = dataset['Profile_time']\n profile_times *= 1000\n profile_times = profile_times.astype('int')\n try:\n date = file_info.times[0].date()\n except AttributeError:\n date = self.get_info(file_info).times[0].date()\n profile_times = np.datetime64(date) + 
np.timedelta64(first_profile_time\n , 's') + profile_times.astype('timedelta64[ms]')\n return profile_times\n", "step-3": "<mask token>\ntry:\n from pyhdf import HDF, VS, V\n from pyhdf.SD import SD, SDC\n pyhdf_is_installed = True\nexcept ImportError:\n pass\n<mask token>\n\n\nclass CloudSat(HDF4):\n \"\"\"File handler for CloudSat data in HDF4 files.\n \"\"\"\n standard_fields = {'UTC_start', 'Profile_time', 'Latitude', 'Longitude'}\n mapping = {'Latitude': 'lat', 'Longitude': 'lon', 'dim_0': 'scnline'}\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n @expects_file_info()\n def get_info(self, file_info, **kwargs):\n \"\"\"Return a :class:`FileInfo` object with parameters about the\n file content.\n\n Args:\n file_info: Path and name of the file of which to retrieve the info\n about.\n **kwargs: Additional keyword arguments.\n\n Returns:\n A FileInfo object.\n \"\"\"\n file = SD(file_info.path, SDC.READ)\n file_info.times[0] = datetime.strptime(getattr(file, 'start_time'),\n '%Y%m%d%H%M%S')\n file_info.times[1] = datetime.strptime(getattr(file, 'end_time'),\n '%Y%m%d%H%M%S')\n return file_info\n\n @expects_file_info()\n def read(self, file_info, **kwargs):\n \"\"\"Read and parse HDF4 files and load them to a xarray.Dataset\n\n A description about all variables in CloudSat dataset can be found in\n http://www.cloudsat.cira.colostate.edu/data-products/level-2c/2c-ice?term=53.\n\n Args:\n file_info: Path and name of the file as string or FileInfo object.\n **kwargs: Additional keyword arguments that are valid for\n :class:`typhon.files.handlers.common.HDF4`.\n\n Returns:\n A xarray.Dataset object.\n \"\"\"\n user_fields = kwargs.pop('fields', {})\n fields = self.standard_fields | set(user_fields)\n user_mapping = kwargs.pop('mapping', None)\n dataset = super().read(file_info, fields=fields, mapping=self.\n mapping, **kwargs)\n dataset['time'] = self._get_time_field(dataset, file_info)\n dataset = dataset.drop_vars({'UTC_start', 'Profile_time'} - set(\n user_fields))\n if user_mapping is not None:\n dataset = dataset.rename(user_mapping)\n return dataset\n\n def _get_time_field(self, dataset, file_info):\n first_profile_time = round(dataset['UTC_start'].item(0))\n profile_times = dataset['Profile_time']\n profile_times *= 1000\n profile_times = profile_times.astype('int')\n try:\n date = file_info.times[0].date()\n except AttributeError:\n date = self.get_info(file_info).times[0].date()\n profile_times = np.datetime64(date) + np.timedelta64(first_profile_time\n , 's') + profile_times.astype('timedelta64[ms]')\n return profile_times\n", "step-4": "<mask token>\npyhdf_is_installed = False\ntry:\n from pyhdf import HDF, VS, V\n from pyhdf.SD import SD, SDC\n pyhdf_is_installed = True\nexcept ImportError:\n pass\n__all__ = ['CloudSat']\n\n\nclass CloudSat(HDF4):\n \"\"\"File handler for CloudSat data in HDF4 files.\n \"\"\"\n standard_fields = {'UTC_start', 'Profile_time', 'Latitude', 'Longitude'}\n mapping = {'Latitude': 'lat', 'Longitude': 'lon', 'dim_0': 'scnline'}\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n @expects_file_info()\n def get_info(self, file_info, **kwargs):\n \"\"\"Return a :class:`FileInfo` object with parameters about the\n file content.\n\n Args:\n file_info: Path and name of the file of which to retrieve the info\n about.\n **kwargs: Additional keyword arguments.\n\n Returns:\n A FileInfo object.\n \"\"\"\n file = SD(file_info.path, SDC.READ)\n file_info.times[0] = datetime.strptime(getattr(file, 'start_time'),\n '%Y%m%d%H%M%S')\n 
file_info.times[1] = datetime.strptime(getattr(file, 'end_time'),\n '%Y%m%d%H%M%S')\n return file_info\n\n @expects_file_info()\n def read(self, file_info, **kwargs):\n \"\"\"Read and parse HDF4 files and load them to a xarray.Dataset\n\n A description about all variables in CloudSat dataset can be found in\n http://www.cloudsat.cira.colostate.edu/data-products/level-2c/2c-ice?term=53.\n\n Args:\n file_info: Path and name of the file as string or FileInfo object.\n **kwargs: Additional keyword arguments that are valid for\n :class:`typhon.files.handlers.common.HDF4`.\n\n Returns:\n A xarray.Dataset object.\n \"\"\"\n user_fields = kwargs.pop('fields', {})\n fields = self.standard_fields | set(user_fields)\n user_mapping = kwargs.pop('mapping', None)\n dataset = super().read(file_info, fields=fields, mapping=self.\n mapping, **kwargs)\n dataset['time'] = self._get_time_field(dataset, file_info)\n dataset = dataset.drop_vars({'UTC_start', 'Profile_time'} - set(\n user_fields))\n if user_mapping is not None:\n dataset = dataset.rename(user_mapping)\n return dataset\n\n def _get_time_field(self, dataset, file_info):\n first_profile_time = round(dataset['UTC_start'].item(0))\n profile_times = dataset['Profile_time']\n profile_times *= 1000\n profile_times = profile_times.astype('int')\n try:\n date = file_info.times[0].date()\n except AttributeError:\n date = self.get_info(file_info).times[0].date()\n profile_times = np.datetime64(date) + np.timedelta64(first_profile_time\n , 's') + profile_times.astype('timedelta64[ms]')\n return profile_times\n", "step-5": "from datetime import datetime\nimport warnings\n\nimport numpy as np\nimport xarray as xr\n\nfrom .common import HDF4, expects_file_info\n\npyhdf_is_installed = False\ntry:\n from pyhdf import HDF, VS, V\n from pyhdf.SD import SD, SDC\n pyhdf_is_installed = True\nexcept ImportError:\n pass\n\n__all__ = [\n 'CloudSat',\n]\n\n\nclass CloudSat(HDF4):\n \"\"\"File handler for CloudSat data in HDF4 files.\n \"\"\"\n\n # This file handler always wants to return at least time, lat and lon\n # fields. 
These fields are required for this:\n standard_fields = {\n \"UTC_start\",\n \"Profile_time\",\n \"Latitude\",\n \"Longitude\"\n }\n\n # Map the standard fields to standard names:\n mapping = {\n \"Latitude\": \"lat\",\n \"Longitude\": \"lon\",\n \"dim_0\": \"scnline\",\n }\n\n def __init__(self, **kwargs):\n\n # Call the base class initializer\n super().__init__(**kwargs)\n\n @expects_file_info()\n def get_info(self, file_info, **kwargs):\n \"\"\"Return a :class:`FileInfo` object with parameters about the\n file content.\n\n Args:\n file_info: Path and name of the file of which to retrieve the info\n about.\n **kwargs: Additional keyword arguments.\n\n Returns:\n A FileInfo object.\n \"\"\"\n\n file = SD(file_info.path, SDC.READ)\n file_info.times[0] = \\\n datetime.strptime(getattr(file, 'start_time'), \"%Y%m%d%H%M%S\")\n file_info.times[1] = \\\n datetime.strptime(getattr(file, 'end_time'), \"%Y%m%d%H%M%S\")\n\n return file_info\n\n @expects_file_info()\n def read(self, file_info, **kwargs):\n \"\"\"Read and parse HDF4 files and load them to a xarray.Dataset\n\n A description about all variables in CloudSat dataset can be found in\n http://www.cloudsat.cira.colostate.edu/data-products/level-2c/2c-ice?term=53.\n\n Args:\n file_info: Path and name of the file as string or FileInfo object.\n **kwargs: Additional keyword arguments that are valid for\n :class:`typhon.files.handlers.common.HDF4`.\n\n Returns:\n A xarray.Dataset object.\n \"\"\"\n\n # We need to import at least the standard fields\n user_fields = kwargs.pop(\"fields\", {})\n fields = self.standard_fields | set(user_fields)\n\n # We catch the user mapping here, since we do not want to deal with\n # user-defined names in the further processing. Instead, we use our own\n # mapping\n user_mapping = kwargs.pop(\"mapping\", None)\n\n # Load the dataset from the file:\n dataset = super().read(\n file_info, fields=fields, mapping=self.mapping, **kwargs\n )\n\n dataset[\"time\"] = self._get_time_field(dataset, file_info)\n\n # Remove fields that we do not need any longer (expect the user asked\n # for them explicitly)\n dataset = dataset.drop_vars(\n {\"UTC_start\", \"Profile_time\"} - set(user_fields),\n )\n\n if user_mapping is not None:\n dataset = dataset.rename(user_mapping)\n\n return dataset\n\n def _get_time_field(self, dataset, file_info):\n # This gives us the starting time of the first profile in seconds\n # since midnight in UTC:\n first_profile_time = round(dataset['UTC_start'].item(0))\n\n # This gives us the starting time of all other profiles in seconds\n # since the start of the first profile.\n profile_times = dataset['Profile_time']\n\n # Convert the seconds to milliseconds\n profile_times *= 1000\n profile_times = profile_times.astype(\"int\")\n\n try:\n date = file_info.times[0].date()\n except AttributeError:\n # We have to load the info by ourselves:\n date = self.get_info(file_info).times[0].date()\n\n # Put all times together so we obtain one full timestamp\n # (date + time) for each data point. We are using the\n # starting date coming from parsing the filename.\n profile_times = \\\n np.datetime64(date) \\\n + np.timedelta64(first_profile_time, \"s\") \\\n + profile_times.astype(\"timedelta64[ms]\")\n\n return profile_times\n", "step-ids": [ 4, 7, 8, 9, 11 ] }
[ 4, 7, 8, 9, 11 ]
from dataclasses import dataclass from models.user import User class Customer(User): def __init__(self, first_name: str, last_name: str, user_name: str, email: str, password: str): super(Customer, self).__init__(first_name, last_name, user_name, email, password) # def __str__(self): # return f"'Firstname' : {self.get__first_name},"
normal
{ "blob_id": "254f34c923d49374e09b579c5bc1b17b8c69c0e4", "index": 2661, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Customer(User):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Customer(User):\n\n def __init__(self, first_name: str, last_name: str, user_name: str,\n email: str, password: str):\n super(Customer, self).__init__(first_name, last_name, user_name,\n email, password)\n", "step-4": "from dataclasses import dataclass\nfrom models.user import User\n\n\nclass Customer(User):\n\n def __init__(self, first_name: str, last_name: str, user_name: str,\n email: str, password: str):\n super(Customer, self).__init__(first_name, last_name, user_name,\n email, password)\n", "step-5": "from dataclasses import dataclass\n\nfrom models.user import User\n\n\nclass Customer(User):\n def __init__(self, first_name: str, last_name: str, user_name: str, email: str, password: str):\n super(Customer, self).__init__(first_name, last_name, user_name, email, password)\n\n # def __str__(self):\n # return f\"'Firstname' : {self.get__first_name},\"\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from collections import defaultdict, deque N = int(input()) adj_list = defaultdict(list) E = [] V_number = [None] * N for _ in range(N - 1): a, b = map(int, input().split()) E.append((a, b)) adj_list[a].append(b) adj_list[b].append(a) C = sorted(list(map(int, input().split())), reverse=True) q = deque([1]) i = 0 while q: v = q.popleft() V_number[v - 1] = C[i] i += 1 for u in adj_list[v]: if V_number[u - 1] is None: q.append(u) print(sum(C[1:])) print(*V_number)
normal
{ "blob_id": "b93f6c3192f8dd58b96dfdc6ea2b17e12cce34d0", "index": 9752, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor _ in range(N - 1):\n a, b = map(int, input().split())\n E.append((a, b))\n adj_list[a].append(b)\n adj_list[b].append(a)\n<mask token>\nwhile q:\n v = q.popleft()\n V_number[v - 1] = C[i]\n i += 1\n for u in adj_list[v]:\n if V_number[u - 1] is None:\n q.append(u)\nprint(sum(C[1:]))\nprint(*V_number)\n", "step-3": "<mask token>\nN = int(input())\nadj_list = defaultdict(list)\nE = []\nV_number = [None] * N\nfor _ in range(N - 1):\n a, b = map(int, input().split())\n E.append((a, b))\n adj_list[a].append(b)\n adj_list[b].append(a)\nC = sorted(list(map(int, input().split())), reverse=True)\nq = deque([1])\ni = 0\nwhile q:\n v = q.popleft()\n V_number[v - 1] = C[i]\n i += 1\n for u in adj_list[v]:\n if V_number[u - 1] is None:\n q.append(u)\nprint(sum(C[1:]))\nprint(*V_number)\n", "step-4": "from collections import defaultdict, deque\nN = int(input())\nadj_list = defaultdict(list)\nE = []\nV_number = [None] * N\nfor _ in range(N - 1):\n a, b = map(int, input().split())\n E.append((a, b))\n adj_list[a].append(b)\n adj_list[b].append(a)\nC = sorted(list(map(int, input().split())), reverse=True)\nq = deque([1])\ni = 0\nwhile q:\n v = q.popleft()\n V_number[v - 1] = C[i]\n i += 1\n for u in adj_list[v]:\n if V_number[u - 1] is None:\n q.append(u)\nprint(sum(C[1:]))\nprint(*V_number)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# encoding = utf-8 """ A flask session memcached store """ from datetime import timedelta, datetime from uuid import uuid4 __author__ = 'zou' import memcache import pickle from flask.sessions import SessionMixin, SessionInterface from werkzeug.datastructures import CallbackDict class MemcachedSession(CallbackDict, SessionMixin): """""" def __init__(self, initial=None, sid=None, new=False): def on_update(self): self.modified = True CallbackDict.__init__(self, initial, on_update) self.sid = sid self.new = new self.modified = False class MemcachedSessionInterface(SessionInterface): serializer = pickle session_class = MemcachedSession def generate_sid(self): return str(uuid4()) def get_memcache_expiration_time(self, app, session): if session.permanent: return app.permanent_session_lifetime return timedelta(days=8) def __init__(self, client=None, prefix="session:"): if client is None: client = memcache.Client() self.client = client self.prefix = prefix def open_session(self, app, request): sid = request.args.get("sessionid", None) or request.cookies.get(app.session_cookie_name) if not sid: sid = self.generate_sid() return self.session_class(sid=sid) val = self.client.get(str(self.prefix + sid)) if val is not None: data = self.serializer.loads(val) self.client.set(self.prefix + str(sid), val, int(timedelta(days=8).total_seconds())) return self.session_class(data, sid=sid) new_sid = self.generate_sid() return self.session_class(sid=new_sid, new=True) def save_session(self, app, session, response): domain = self.get_cookie_domain(app) if not session: self.client.delete(str(self.prefix + session.sid)) if session.modified: response.delete_cookie(app.session_cookie_name, domain=domain) return memcache_exp = self.get_memcache_expiration_time(app, session) cookie_exp = self.get_expiration_time(app, session) val = self.serializer.dumps(dict(session)) self.client.set(self.prefix + str(session.sid), val, int(memcache_exp.total_seconds())) response.set_cookie(app.session_cookie_name, session.sid, expires=cookie_exp, httponly=True, domain=domain, max_age= 7*24*60*60) def set_cas_ticket_to_session_mapping(self, app, session, ticket): memcache_exp = self.get_memcache_expiration_time(app, session) val = str(session.sid) self.client.set(str(ticket), val, int(memcache_exp.total_seconds())) def del_ticket_session_mapping(self, ticket): session_sid = self.client.get(str(ticket)) if session_sid: r = self.client.delete(self.prefix + str(session_sid)) # if r == 1: # print 'already delete session id= ' + session_sid r = self.client.delete(str(ticket)) # if r == 1: # print 'already delete ticket = ' + ticket
normal
{ "blob_id": "e4761c925643417f4fe906e8dd2c9356ae970d52", "index": 3706, "step-1": "<mask token>\n\n\nclass MemcachedSessionInterface(SessionInterface):\n <mask token>\n <mask token>\n\n def generate_sid(self):\n return str(uuid4())\n\n def get_memcache_expiration_time(self, app, session):\n if session.permanent:\n return app.permanent_session_lifetime\n return timedelta(days=8)\n\n def __init__(self, client=None, prefix='session:'):\n if client is None:\n client = memcache.Client()\n self.client = client\n self.prefix = prefix\n\n def open_session(self, app, request):\n sid = request.args.get('sessionid', None) or request.cookies.get(app\n .session_cookie_name)\n if not sid:\n sid = self.generate_sid()\n return self.session_class(sid=sid)\n val = self.client.get(str(self.prefix + sid))\n if val is not None:\n data = self.serializer.loads(val)\n self.client.set(self.prefix + str(sid), val, int(timedelta(days\n =8).total_seconds()))\n return self.session_class(data, sid=sid)\n new_sid = self.generate_sid()\n return self.session_class(sid=new_sid, new=True)\n\n def save_session(self, app, session, response):\n domain = self.get_cookie_domain(app)\n if not session:\n self.client.delete(str(self.prefix + session.sid))\n if session.modified:\n response.delete_cookie(app.session_cookie_name, domain=domain)\n return\n memcache_exp = self.get_memcache_expiration_time(app, session)\n cookie_exp = self.get_expiration_time(app, session)\n val = self.serializer.dumps(dict(session))\n self.client.set(self.prefix + str(session.sid), val, int(\n memcache_exp.total_seconds()))\n response.set_cookie(app.session_cookie_name, session.sid, expires=\n cookie_exp, httponly=True, domain=domain, max_age=7 * 24 * 60 * 60)\n\n def set_cas_ticket_to_session_mapping(self, app, session, ticket):\n memcache_exp = self.get_memcache_expiration_time(app, session)\n val = str(session.sid)\n self.client.set(str(ticket), val, int(memcache_exp.total_seconds()))\n\n def del_ticket_session_mapping(self, ticket):\n session_sid = self.client.get(str(ticket))\n if session_sid:\n r = self.client.delete(self.prefix + str(session_sid))\n r = self.client.delete(str(ticket))\n", "step-2": "<mask token>\n\n\nclass MemcachedSession(CallbackDict, SessionMixin):\n <mask token>\n <mask token>\n\n\nclass MemcachedSessionInterface(SessionInterface):\n serializer = pickle\n session_class = MemcachedSession\n\n def generate_sid(self):\n return str(uuid4())\n\n def get_memcache_expiration_time(self, app, session):\n if session.permanent:\n return app.permanent_session_lifetime\n return timedelta(days=8)\n\n def __init__(self, client=None, prefix='session:'):\n if client is None:\n client = memcache.Client()\n self.client = client\n self.prefix = prefix\n\n def open_session(self, app, request):\n sid = request.args.get('sessionid', None) or request.cookies.get(app\n .session_cookie_name)\n if not sid:\n sid = self.generate_sid()\n return self.session_class(sid=sid)\n val = self.client.get(str(self.prefix + sid))\n if val is not None:\n data = self.serializer.loads(val)\n self.client.set(self.prefix + str(sid), val, int(timedelta(days\n =8).total_seconds()))\n return self.session_class(data, sid=sid)\n new_sid = self.generate_sid()\n return self.session_class(sid=new_sid, new=True)\n\n def save_session(self, app, session, response):\n domain = self.get_cookie_domain(app)\n if not session:\n self.client.delete(str(self.prefix + session.sid))\n if session.modified:\n response.delete_cookie(app.session_cookie_name, domain=domain)\n return\n 
memcache_exp = self.get_memcache_expiration_time(app, session)\n cookie_exp = self.get_expiration_time(app, session)\n val = self.serializer.dumps(dict(session))\n self.client.set(self.prefix + str(session.sid), val, int(\n memcache_exp.total_seconds()))\n response.set_cookie(app.session_cookie_name, session.sid, expires=\n cookie_exp, httponly=True, domain=domain, max_age=7 * 24 * 60 * 60)\n\n def set_cas_ticket_to_session_mapping(self, app, session, ticket):\n memcache_exp = self.get_memcache_expiration_time(app, session)\n val = str(session.sid)\n self.client.set(str(ticket), val, int(memcache_exp.total_seconds()))\n\n def del_ticket_session_mapping(self, ticket):\n session_sid = self.client.get(str(ticket))\n if session_sid:\n r = self.client.delete(self.prefix + str(session_sid))\n r = self.client.delete(str(ticket))\n", "step-3": "<mask token>\n\n\nclass MemcachedSession(CallbackDict, SessionMixin):\n \"\"\"\"\"\"\n\n def __init__(self, initial=None, sid=None, new=False):\n\n def on_update(self):\n self.modified = True\n CallbackDict.__init__(self, initial, on_update)\n self.sid = sid\n self.new = new\n self.modified = False\n\n\nclass MemcachedSessionInterface(SessionInterface):\n serializer = pickle\n session_class = MemcachedSession\n\n def generate_sid(self):\n return str(uuid4())\n\n def get_memcache_expiration_time(self, app, session):\n if session.permanent:\n return app.permanent_session_lifetime\n return timedelta(days=8)\n\n def __init__(self, client=None, prefix='session:'):\n if client is None:\n client = memcache.Client()\n self.client = client\n self.prefix = prefix\n\n def open_session(self, app, request):\n sid = request.args.get('sessionid', None) or request.cookies.get(app\n .session_cookie_name)\n if not sid:\n sid = self.generate_sid()\n return self.session_class(sid=sid)\n val = self.client.get(str(self.prefix + sid))\n if val is not None:\n data = self.serializer.loads(val)\n self.client.set(self.prefix + str(sid), val, int(timedelta(days\n =8).total_seconds()))\n return self.session_class(data, sid=sid)\n new_sid = self.generate_sid()\n return self.session_class(sid=new_sid, new=True)\n\n def save_session(self, app, session, response):\n domain = self.get_cookie_domain(app)\n if not session:\n self.client.delete(str(self.prefix + session.sid))\n if session.modified:\n response.delete_cookie(app.session_cookie_name, domain=domain)\n return\n memcache_exp = self.get_memcache_expiration_time(app, session)\n cookie_exp = self.get_expiration_time(app, session)\n val = self.serializer.dumps(dict(session))\n self.client.set(self.prefix + str(session.sid), val, int(\n memcache_exp.total_seconds()))\n response.set_cookie(app.session_cookie_name, session.sid, expires=\n cookie_exp, httponly=True, domain=domain, max_age=7 * 24 * 60 * 60)\n\n def set_cas_ticket_to_session_mapping(self, app, session, ticket):\n memcache_exp = self.get_memcache_expiration_time(app, session)\n val = str(session.sid)\n self.client.set(str(ticket), val, int(memcache_exp.total_seconds()))\n\n def del_ticket_session_mapping(self, ticket):\n session_sid = self.client.get(str(ticket))\n if session_sid:\n r = self.client.delete(self.prefix + str(session_sid))\n r = self.client.delete(str(ticket))\n", "step-4": "<mask token>\nfrom datetime import timedelta, datetime\nfrom uuid import uuid4\n__author__ = 'zou'\nimport memcache\nimport pickle\nfrom flask.sessions import SessionMixin, SessionInterface\nfrom werkzeug.datastructures import CallbackDict\n\n\nclass MemcachedSession(CallbackDict, 
SessionMixin):\n \"\"\"\"\"\"\n\n def __init__(self, initial=None, sid=None, new=False):\n\n def on_update(self):\n self.modified = True\n CallbackDict.__init__(self, initial, on_update)\n self.sid = sid\n self.new = new\n self.modified = False\n\n\nclass MemcachedSessionInterface(SessionInterface):\n serializer = pickle\n session_class = MemcachedSession\n\n def generate_sid(self):\n return str(uuid4())\n\n def get_memcache_expiration_time(self, app, session):\n if session.permanent:\n return app.permanent_session_lifetime\n return timedelta(days=8)\n\n def __init__(self, client=None, prefix='session:'):\n if client is None:\n client = memcache.Client()\n self.client = client\n self.prefix = prefix\n\n def open_session(self, app, request):\n sid = request.args.get('sessionid', None) or request.cookies.get(app\n .session_cookie_name)\n if not sid:\n sid = self.generate_sid()\n return self.session_class(sid=sid)\n val = self.client.get(str(self.prefix + sid))\n if val is not None:\n data = self.serializer.loads(val)\n self.client.set(self.prefix + str(sid), val, int(timedelta(days\n =8).total_seconds()))\n return self.session_class(data, sid=sid)\n new_sid = self.generate_sid()\n return self.session_class(sid=new_sid, new=True)\n\n def save_session(self, app, session, response):\n domain = self.get_cookie_domain(app)\n if not session:\n self.client.delete(str(self.prefix + session.sid))\n if session.modified:\n response.delete_cookie(app.session_cookie_name, domain=domain)\n return\n memcache_exp = self.get_memcache_expiration_time(app, session)\n cookie_exp = self.get_expiration_time(app, session)\n val = self.serializer.dumps(dict(session))\n self.client.set(self.prefix + str(session.sid), val, int(\n memcache_exp.total_seconds()))\n response.set_cookie(app.session_cookie_name, session.sid, expires=\n cookie_exp, httponly=True, domain=domain, max_age=7 * 24 * 60 * 60)\n\n def set_cas_ticket_to_session_mapping(self, app, session, ticket):\n memcache_exp = self.get_memcache_expiration_time(app, session)\n val = str(session.sid)\n self.client.set(str(ticket), val, int(memcache_exp.total_seconds()))\n\n def del_ticket_session_mapping(self, ticket):\n session_sid = self.client.get(str(ticket))\n if session_sid:\n r = self.client.delete(self.prefix + str(session_sid))\n r = self.client.delete(str(ticket))\n", "step-5": "# encoding = utf-8\n\"\"\"\nA flask session memcached store\n\"\"\"\nfrom datetime import timedelta, datetime\nfrom uuid import uuid4\n\n__author__ = 'zou'\nimport memcache\nimport pickle\nfrom flask.sessions import SessionMixin, SessionInterface\nfrom werkzeug.datastructures import CallbackDict\n\n\nclass MemcachedSession(CallbackDict, SessionMixin):\n \"\"\"\"\"\"\n def __init__(self, initial=None, sid=None, new=False):\n def on_update(self):\n self.modified = True\n CallbackDict.__init__(self, initial, on_update)\n self.sid = sid\n self.new = new\n self.modified = False\n\n\nclass MemcachedSessionInterface(SessionInterface):\n serializer = pickle\n session_class = MemcachedSession\n\n def generate_sid(self):\n return str(uuid4())\n\n def get_memcache_expiration_time(self, app, session):\n if session.permanent:\n return app.permanent_session_lifetime\n return timedelta(days=8)\n\n def __init__(self, client=None, prefix=\"session:\"):\n if client is None:\n client = memcache.Client()\n self.client = client\n self.prefix = prefix\n\n def open_session(self, app, request):\n sid = request.args.get(\"sessionid\", None) or request.cookies.get(app.session_cookie_name)\n if not sid:\n 
sid = self.generate_sid()\n return self.session_class(sid=sid)\n val = self.client.get(str(self.prefix + sid))\n if val is not None:\n data = self.serializer.loads(val)\n self.client.set(self.prefix + str(sid), val, int(timedelta(days=8).total_seconds()))\n return self.session_class(data, sid=sid)\n new_sid = self.generate_sid()\n return self.session_class(sid=new_sid, new=True)\n\n def save_session(self, app, session, response):\n domain = self.get_cookie_domain(app)\n if not session:\n self.client.delete(str(self.prefix + session.sid))\n if session.modified:\n response.delete_cookie(app.session_cookie_name, domain=domain)\n return\n memcache_exp = self.get_memcache_expiration_time(app, session)\n cookie_exp = self.get_expiration_time(app, session)\n val = self.serializer.dumps(dict(session))\n self.client.set(self.prefix + str(session.sid), val, int(memcache_exp.total_seconds()))\n response.set_cookie(app.session_cookie_name, session.sid, expires=cookie_exp, httponly=True, domain=domain, max_age= 7*24*60*60)\n\n def set_cas_ticket_to_session_mapping(self, app, session, ticket):\n memcache_exp = self.get_memcache_expiration_time(app, session)\n val = str(session.sid)\n self.client.set(str(ticket), val, int(memcache_exp.total_seconds()))\n\n def del_ticket_session_mapping(self, ticket):\n session_sid = self.client.get(str(ticket))\n if session_sid:\n r = self.client.delete(self.prefix + str(session_sid))\n# if r == 1:\n# print 'already delete session id= ' + session_sid\n r = self.client.delete(str(ticket))\n# if r == 1:\n# print 'already delete ticket = ' + ticket\n", "step-ids": [ 8, 10, 12, 14, 15 ] }
[ 8, 10, 12, 14, 15 ]
from numpy import array, zeros, arange, concatenate, searchsorted, where, unique

from pyNastran.bdf.fieldWriter import print_card_8
from pyNastran.bdf.bdfInterface.assign_type import (integer, integer_or_blank,
    double_or_blank, integer_double_or_blank, blank)


class PBAR(object):
    type = 'PBAR'

    def __init__(self, model):
        """
        Defines the PCOMP object.

        :param self: the PCOMP object
        :param model: the BDF object
        :param cards: the list of PCOMP cards
        """
        self.model = model
        self.n = 0
        self._cards = []
        self._comments = []

    def add(self, card, comment):
        self._cards.append(card)
        self._comments.append(comment)

    def build(self):
        cards = self._cards
        ncards = len(cards)
        self.n = ncards
        if ncards:
            #: Property ID
            self.property_id = zeros(ncards, 'int32')
            self.material_id = zeros(ncards, 'int32')
            self.area = zeros(ncards, 'float64')
            self.I1 = zeros(ncards, 'float64')
            self.I2 = zeros(ncards, 'float64')
            self.J = zeros(ncards, 'float64')
            self.nsm = zeros(ncards, 'float64')

            for i, card in enumerate(cards):
                #: property ID
                self.property_id[i] = integer(card, 1, 'property_id')

                #: material ID
                self.material_id[i] = integer(card, 2, 'material_id')

                #: material ID
                self.area[i] = double_or_blank(card, 3, 'area', 0.0)

                #: I1
                self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)

                #: I2
                self.I2[i] = double_or_blank(card, 5, 'I2', 0.0)

                #: Polar Moment of Inertia J -> use J()
                #: default=1/2(I1+I2) for SOL=600, otherwise 0.0
                #: .. todo:: support SOL 600 default
                Jdefault = 0.5 * (self.I1[i] + self.I2[i])
                self.J[i] = double_or_blank(card, 6, 'J', Jdefault)
                self.nsm[i] = double_or_blank(card, 7, 'non-structural_mass', 0.0)

                if 0:
                    self.C1 = double_or_blank(card, 9, 'C1', 0.0)
                    self.C2 = double_or_blank(card, 10, 'C2', 0.0)
                    self.D1 = double_or_blank(card, 11, 'D1', 0.0)
                    self.D2 = double_or_blank(card, 12, 'D2', 0.0)
                    self.E1 = double_or_blank(card, 13, 'E1', 0.0)
                    self.E2 = double_or_blank(card, 14, 'E2', 0.0)
                    self.F1 = double_or_blank(card, 15, 'F1', 0.0)
                    self.F2 = double_or_blank(card, 16, 'F2', 0.0)

                    #: default=infinite; assume 1e8
                    self.K1 = double_or_blank(card, 17, 'K1', 1e8)
                    #: default=infinite; assume 1e8
                    self.K2 = double_or_blank(card, 18, 'K2', 1e8)
                    #: I12 -> use I12()
                    self.i12 = double_or_blank(card, 19, 'I12', 0.0)
                    if self.A == 0.0 and self.i12 == 0.0:
                        assert self.K1 is None, 'K1 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K1=%r' % (self.A, self.i12, self.K1)
                        assert self.K2 is None, 'K2 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K2=%r' % (self.A, self.i12, self.K2)
                    assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)

            i = self.property_id.argsort()
            self.property_id = self.property_id[i]
            self.material_id = self.material_id[i]
            self.area = self.area[i]
            self.I1 = self.I1[i]
            self.I2 = self.I2[i]
            self.J = self.J[i]
            self.nsm = self.nsm[i]

            unique_pids = unique(self.property_id)
            if len(unique_pids) != len(self.property_id):
                raise RuntimeError('There are duplicate PCOMP IDs...')
            self._cards = []
            self._comments = []

    #=========================================================================
    def get_index(self, property_ids):
        if isinstance(property_ids, int):
            property_ids = array([property_ids])
        if property_ids is None:
            return arange(self.n)
        indexs = searchsorted(self.property_id, property_ids)
        assert len(indexs) == len(property_ids), 'indexs=%s pids=%s' % (indexs, property_ids)
        return indexs

    #=========================================================================
    def write_bdf(self, f, size=8, property_ids=None):
        if self.n:
            if property_ids is None:
                i = arange(self.n)
            else:
                i = searchsorted(self.property_id, property_ids)

            for (pid, mid, area, I1, I2, J) in zip(self.property_id[i], self.material_id[i], self.area[i], self.I1[i], self.I2[i], self.J[i]):
                card = ['PBAR', pid, mid, area, I1, I2, J]
                f.write(print_card_8(card))
normal
{ "blob_id": "8f960ad465d0a7bf48752db35c73169be6da27d8", "index": 9092, "step-1": "<mask token>\n\n\nclass PBAR(object):\n <mask token>\n\n def __init__(self, model):\n \"\"\"\n Defines the PCOMP object.\n\n :param self: the PCOMP object\n :param model: the BDF object\n :param cards: the list of PCOMP cards\n \"\"\"\n self.model = model\n self.n = 0\n self._cards = []\n self._comments = []\n <mask token>\n\n def build(self):\n cards = self._cards\n ncards = len(cards)\n self.n = ncards\n if ncards:\n self.property_id = zeros(ncards, 'int32')\n self.material_id = zeros(ncards, 'int32')\n self.area = zeros(ncards, 'float64')\n self.I1 = zeros(ncards, 'float64')\n self.I2 = zeros(ncards, 'float64')\n self.J = zeros(ncards, 'float64')\n self.nsm = zeros(ncards, 'float64')\n for i, card in enumerate(cards):\n self.property_id[i] = integer(card, 1, 'property_id')\n self.material_id[i] = integer(card, 2, 'material_id')\n self.area[i] = double_or_blank(card, 3, 'area', 0.0)\n self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)\n self.I2[i] = double_or_blank(card, 5, 'I2', 0.0)\n Jdefault = 0.5 * (self.I1[i] + self.I2[i])\n self.J[i] = double_or_blank(card, 6, 'J', Jdefault)\n self.nsm[i] = double_or_blank(card, 7,\n 'non-structural_mass', 0.0)\n if 0:\n self.C1 = double_or_blank(card, 9, 'C1', 0.0)\n self.C2 = double_or_blank(card, 10, 'C2', 0.0)\n self.D1 = double_or_blank(card, 11, 'D1', 0.0)\n self.D2 = double_or_blank(card, 12, 'D2', 0.0)\n self.E1 = double_or_blank(card, 13, 'E1', 0.0)\n self.E2 = double_or_blank(card, 14, 'E2', 0.0)\n self.F1 = double_or_blank(card, 15, 'F1', 0.0)\n self.F2 = double_or_blank(card, 16, 'F2', 0.0)\n self.K1 = double_or_blank(card, 17, 'K1', 100000000.0)\n self.K2 = double_or_blank(card, 18, 'K2', 100000000.0)\n self.i12 = double_or_blank(card, 19, 'I12', 0.0)\n if self.A == 0.0 and self.i12 == 0.0:\n assert self.K1 is None, 'K1 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K1=%r' % (\n self.A, self.i12, self.K1)\n assert self.K2 is None, 'K2 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K2=%r' % (\n self.A, self.i12, self.K2)\n assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)\n i = self.property_id.argsort()\n self.property_id = self.property_id[i]\n self.material_id = self.material_id[i]\n self.area = self.area[i]\n self.I1 = self.I1[i]\n self.I2 = self.I2[i]\n self.J = self.J[i]\n self.nsm = self.nsm[i]\n unique_pids = unique(self.property_id)\n if len(unique_pids) != len(self.property_id):\n raise RuntimeError('There are duplicate PCOMP IDs...')\n self._cards = []\n self._comments = []\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass PBAR(object):\n <mask token>\n\n def __init__(self, model):\n \"\"\"\n Defines the PCOMP object.\n\n :param self: the PCOMP object\n :param model: the BDF object\n :param cards: the list of PCOMP cards\n \"\"\"\n self.model = model\n self.n = 0\n self._cards = []\n self._comments = []\n\n def add(self, card, comment):\n self._cards.append(card)\n self._comments.append(comment)\n\n def build(self):\n cards = self._cards\n ncards = len(cards)\n self.n = ncards\n if ncards:\n self.property_id = zeros(ncards, 'int32')\n self.material_id = zeros(ncards, 'int32')\n self.area = zeros(ncards, 'float64')\n self.I1 = zeros(ncards, 'float64')\n self.I2 = zeros(ncards, 'float64')\n self.J = zeros(ncards, 'float64')\n self.nsm = zeros(ncards, 'float64')\n for i, card in enumerate(cards):\n self.property_id[i] = integer(card, 1, 'property_id')\n self.material_id[i] = integer(card, 2, 'material_id')\n 
self.area[i] = double_or_blank(card, 3, 'area', 0.0)\n self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)\n self.I2[i] = double_or_blank(card, 5, 'I2', 0.0)\n Jdefault = 0.5 * (self.I1[i] + self.I2[i])\n self.J[i] = double_or_blank(card, 6, 'J', Jdefault)\n self.nsm[i] = double_or_blank(card, 7,\n 'non-structural_mass', 0.0)\n if 0:\n self.C1 = double_or_blank(card, 9, 'C1', 0.0)\n self.C2 = double_or_blank(card, 10, 'C2', 0.0)\n self.D1 = double_or_blank(card, 11, 'D1', 0.0)\n self.D2 = double_or_blank(card, 12, 'D2', 0.0)\n self.E1 = double_or_blank(card, 13, 'E1', 0.0)\n self.E2 = double_or_blank(card, 14, 'E2', 0.0)\n self.F1 = double_or_blank(card, 15, 'F1', 0.0)\n self.F2 = double_or_blank(card, 16, 'F2', 0.0)\n self.K1 = double_or_blank(card, 17, 'K1', 100000000.0)\n self.K2 = double_or_blank(card, 18, 'K2', 100000000.0)\n self.i12 = double_or_blank(card, 19, 'I12', 0.0)\n if self.A == 0.0 and self.i12 == 0.0:\n assert self.K1 is None, 'K1 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K1=%r' % (\n self.A, self.i12, self.K1)\n assert self.K2 is None, 'K2 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K2=%r' % (\n self.A, self.i12, self.K2)\n assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)\n i = self.property_id.argsort()\n self.property_id = self.property_id[i]\n self.material_id = self.material_id[i]\n self.area = self.area[i]\n self.I1 = self.I1[i]\n self.I2 = self.I2[i]\n self.J = self.J[i]\n self.nsm = self.nsm[i]\n unique_pids = unique(self.property_id)\n if len(unique_pids) != len(self.property_id):\n raise RuntimeError('There are duplicate PCOMP IDs...')\n self._cards = []\n self._comments = []\n\n def get_index(self, property_ids):\n if isinstance(property_ids, int):\n property_ids = array([property_ids])\n if property_ids is None:\n return arange(self.n)\n indexs = searchsorted(self.property_id, property_ids)\n assert len(indexs) == len(property_ids), 'indexs=%s pids=%s' % (indexs,\n property_ids)\n return indexs\n\n def write_bdf(self, f, size=8, property_ids=None):\n if self.n:\n if property_ids is None:\n i = arange(self.n)\n else:\n i = searchsorted(self.property_id, property_ids)\n for pid, mid, area, I1, I2, J in zip(self.property_id[i], self.\n material_id[i], self.area[i], self.I1[i], self.I2[i], self.J[i]\n ):\n card = ['PBAR', pid, mid, area, I1, I2, J]\n f.write(print_card_8(card))\n", "step-3": "<mask token>\n\n\nclass PBAR(object):\n type = 'PBAR'\n\n def __init__(self, model):\n \"\"\"\n Defines the PCOMP object.\n\n :param self: the PCOMP object\n :param model: the BDF object\n :param cards: the list of PCOMP cards\n \"\"\"\n self.model = model\n self.n = 0\n self._cards = []\n self._comments = []\n\n def add(self, card, comment):\n self._cards.append(card)\n self._comments.append(comment)\n\n def build(self):\n cards = self._cards\n ncards = len(cards)\n self.n = ncards\n if ncards:\n self.property_id = zeros(ncards, 'int32')\n self.material_id = zeros(ncards, 'int32')\n self.area = zeros(ncards, 'float64')\n self.I1 = zeros(ncards, 'float64')\n self.I2 = zeros(ncards, 'float64')\n self.J = zeros(ncards, 'float64')\n self.nsm = zeros(ncards, 'float64')\n for i, card in enumerate(cards):\n self.property_id[i] = integer(card, 1, 'property_id')\n self.material_id[i] = integer(card, 2, 'material_id')\n self.area[i] = double_or_blank(card, 3, 'area', 0.0)\n self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)\n self.I2[i] = double_or_blank(card, 5, 'I2', 0.0)\n Jdefault = 0.5 * (self.I1[i] + self.I2[i])\n self.J[i] = double_or_blank(card, 6, 'J', 
Jdefault)\n self.nsm[i] = double_or_blank(card, 7,\n 'non-structural_mass', 0.0)\n if 0:\n self.C1 = double_or_blank(card, 9, 'C1', 0.0)\n self.C2 = double_or_blank(card, 10, 'C2', 0.0)\n self.D1 = double_or_blank(card, 11, 'D1', 0.0)\n self.D2 = double_or_blank(card, 12, 'D2', 0.0)\n self.E1 = double_or_blank(card, 13, 'E1', 0.0)\n self.E2 = double_or_blank(card, 14, 'E2', 0.0)\n self.F1 = double_or_blank(card, 15, 'F1', 0.0)\n self.F2 = double_or_blank(card, 16, 'F2', 0.0)\n self.K1 = double_or_blank(card, 17, 'K1', 100000000.0)\n self.K2 = double_or_blank(card, 18, 'K2', 100000000.0)\n self.i12 = double_or_blank(card, 19, 'I12', 0.0)\n if self.A == 0.0 and self.i12 == 0.0:\n assert self.K1 is None, 'K1 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K1=%r' % (\n self.A, self.i12, self.K1)\n assert self.K2 is None, 'K2 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K2=%r' % (\n self.A, self.i12, self.K2)\n assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)\n i = self.property_id.argsort()\n self.property_id = self.property_id[i]\n self.material_id = self.material_id[i]\n self.area = self.area[i]\n self.I1 = self.I1[i]\n self.I2 = self.I2[i]\n self.J = self.J[i]\n self.nsm = self.nsm[i]\n unique_pids = unique(self.property_id)\n if len(unique_pids) != len(self.property_id):\n raise RuntimeError('There are duplicate PCOMP IDs...')\n self._cards = []\n self._comments = []\n\n def get_index(self, property_ids):\n if isinstance(property_ids, int):\n property_ids = array([property_ids])\n if property_ids is None:\n return arange(self.n)\n indexs = searchsorted(self.property_id, property_ids)\n assert len(indexs) == len(property_ids), 'indexs=%s pids=%s' % (indexs,\n property_ids)\n return indexs\n\n def write_bdf(self, f, size=8, property_ids=None):\n if self.n:\n if property_ids is None:\n i = arange(self.n)\n else:\n i = searchsorted(self.property_id, property_ids)\n for pid, mid, area, I1, I2, J in zip(self.property_id[i], self.\n material_id[i], self.area[i], self.I1[i], self.I2[i], self.J[i]\n ):\n card = ['PBAR', pid, mid, area, I1, I2, J]\n f.write(print_card_8(card))\n", "step-4": "from numpy import array, zeros, arange, concatenate, searchsorted, where, unique\nfrom pyNastran.bdf.fieldWriter import print_card_8\nfrom pyNastran.bdf.bdfInterface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, blank\n\n\nclass PBAR(object):\n type = 'PBAR'\n\n def __init__(self, model):\n \"\"\"\n Defines the PCOMP object.\n\n :param self: the PCOMP object\n :param model: the BDF object\n :param cards: the list of PCOMP cards\n \"\"\"\n self.model = model\n self.n = 0\n self._cards = []\n self._comments = []\n\n def add(self, card, comment):\n self._cards.append(card)\n self._comments.append(comment)\n\n def build(self):\n cards = self._cards\n ncards = len(cards)\n self.n = ncards\n if ncards:\n self.property_id = zeros(ncards, 'int32')\n self.material_id = zeros(ncards, 'int32')\n self.area = zeros(ncards, 'float64')\n self.I1 = zeros(ncards, 'float64')\n self.I2 = zeros(ncards, 'float64')\n self.J = zeros(ncards, 'float64')\n self.nsm = zeros(ncards, 'float64')\n for i, card in enumerate(cards):\n self.property_id[i] = integer(card, 1, 'property_id')\n self.material_id[i] = integer(card, 2, 'material_id')\n self.area[i] = double_or_blank(card, 3, 'area', 0.0)\n self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)\n self.I2[i] = double_or_blank(card, 5, 'I2', 0.0)\n Jdefault = 0.5 * (self.I1[i] + self.I2[i])\n self.J[i] = double_or_blank(card, 6, 'J', 
Jdefault)\n self.nsm[i] = double_or_blank(card, 7,\n 'non-structural_mass', 0.0)\n if 0:\n self.C1 = double_or_blank(card, 9, 'C1', 0.0)\n self.C2 = double_or_blank(card, 10, 'C2', 0.0)\n self.D1 = double_or_blank(card, 11, 'D1', 0.0)\n self.D2 = double_or_blank(card, 12, 'D2', 0.0)\n self.E1 = double_or_blank(card, 13, 'E1', 0.0)\n self.E2 = double_or_blank(card, 14, 'E2', 0.0)\n self.F1 = double_or_blank(card, 15, 'F1', 0.0)\n self.F2 = double_or_blank(card, 16, 'F2', 0.0)\n self.K1 = double_or_blank(card, 17, 'K1', 100000000.0)\n self.K2 = double_or_blank(card, 18, 'K2', 100000000.0)\n self.i12 = double_or_blank(card, 19, 'I12', 0.0)\n if self.A == 0.0 and self.i12 == 0.0:\n assert self.K1 is None, 'K1 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K1=%r' % (\n self.A, self.i12, self.K1)\n assert self.K2 is None, 'K2 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K2=%r' % (\n self.A, self.i12, self.K2)\n assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)\n i = self.property_id.argsort()\n self.property_id = self.property_id[i]\n self.material_id = self.material_id[i]\n self.area = self.area[i]\n self.I1 = self.I1[i]\n self.I2 = self.I2[i]\n self.J = self.J[i]\n self.nsm = self.nsm[i]\n unique_pids = unique(self.property_id)\n if len(unique_pids) != len(self.property_id):\n raise RuntimeError('There are duplicate PCOMP IDs...')\n self._cards = []\n self._comments = []\n\n def get_index(self, property_ids):\n if isinstance(property_ids, int):\n property_ids = array([property_ids])\n if property_ids is None:\n return arange(self.n)\n indexs = searchsorted(self.property_id, property_ids)\n assert len(indexs) == len(property_ids), 'indexs=%s pids=%s' % (indexs,\n property_ids)\n return indexs\n\n def write_bdf(self, f, size=8, property_ids=None):\n if self.n:\n if property_ids is None:\n i = arange(self.n)\n else:\n i = searchsorted(self.property_id, property_ids)\n for pid, mid, area, I1, I2, J in zip(self.property_id[i], self.\n material_id[i], self.area[i], self.I1[i], self.I2[i], self.J[i]\n ):\n card = ['PBAR', pid, mid, area, I1, I2, J]\n f.write(print_card_8(card))\n", "step-5": "from numpy import array, zeros, arange, concatenate, searchsorted, where, unique\n\nfrom pyNastran.bdf.fieldWriter import print_card_8\nfrom pyNastran.bdf.bdfInterface.assign_type import (integer, integer_or_blank,\n double_or_blank, integer_double_or_blank, blank)\n\n\nclass PBAR(object):\n type = 'PBAR'\n def __init__(self, model):\n \"\"\"\n Defines the PCOMP object.\n\n :param self: the PCOMP object\n :param model: the BDF object\n :param cards: the list of PCOMP cards\n \"\"\"\n self.model = model\n self.n = 0\n self._cards = []\n self._comments = []\n\n def add(self, card, comment):\n self._cards.append(card)\n self._comments.append(comment)\n\n def build(self):\n cards = self._cards\n ncards = len(cards)\n self.n = ncards\n\n if ncards:\n #: Property ID\n self.property_id = zeros(ncards, 'int32')\n self.material_id = zeros(ncards, 'int32')\n self.area = zeros(ncards, 'float64')\n self.I1 = zeros(ncards, 'float64')\n self.I2 = zeros(ncards, 'float64')\n self.J = zeros(ncards, 'float64')\n self.nsm = zeros(ncards, 'float64')\n\n for i, card in enumerate(cards):\n #: property ID\n self.property_id[i] = integer(card, 1, 'property_id')\n\n #: material ID\n self.material_id[i] = integer(card, 2, 'material_id')\n\n\n #: material ID\n self.area[i] = double_or_blank(card, 3, 'area', 0.0)\n\n #: I1\n self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)\n\n #: I2\n self.I2[i] = double_or_blank(card, 5, 'I2', 
0.0)\n\n #: Polar Moment of Inertia J -> use J()\n #: default=1/2(I1+I2) for SOL=600, otherwise 0.0\n #: .. todo:: support SOL 600 default\n\n Jdefault = 0.5 * (self.I1[i] + self.I2[i])\n self.J[i] = double_or_blank(card, 6, 'J', Jdefault)\n self.nsm[i] = double_or_blank(card, 7, 'non-structural_mass', 0.0)\n\n if 0:\n self.C1 = double_or_blank(card, 9, 'C1', 0.0)\n self.C2 = double_or_blank(card, 10, 'C2', 0.0)\n self.D1 = double_or_blank(card, 11, 'D1', 0.0)\n self.D2 = double_or_blank(card, 12, 'D2', 0.0)\n self.E1 = double_or_blank(card, 13, 'E1', 0.0)\n self.E2 = double_or_blank(card, 14, 'E2', 0.0)\n self.F1 = double_or_blank(card, 15, 'F1', 0.0)\n self.F2 = double_or_blank(card, 16, 'F2', 0.0)\n\n #: default=infinite; assume 1e8\n self.K1 = double_or_blank(card, 17, 'K1', 1e8)\n #: default=infinite; assume 1e8\n self.K2 = double_or_blank(card, 18, 'K2', 1e8)\n #: I12 -> use I12()\n self.i12 = double_or_blank(card, 19, 'I12', 0.0)\n if self.A == 0.0 and self.i12 == 0.0:\n assert self.K1 is None, 'K1 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K1=%r' % (self.A, self.i12, self.K1)\n assert self.K2 is None, 'K2 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K2=%r' % (self.A, self.i12, self.K2)\n assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)\n\n i = self.property_id.argsort()\n self.property_id = self.property_id[i]\n self.material_id = self.material_id[i]\n\n self.area = self.area[i]\n self.I1 = self.I1[i]\n self.I2 = self.I2[i]\n self.J = self.J[i]\n self.nsm = self.nsm[i]\n\n unique_pids = unique(self.property_id)\n\n if len(unique_pids) != len(self.property_id):\n raise RuntimeError('There are duplicate PCOMP IDs...')\n self._cards = []\n self._comments = []\n\n #=========================================================================\n def get_index(self, property_ids):\n if isinstance(property_ids, int):\n property_ids = array([property_ids])\n if property_ids is None:\n return arange(self.n)\n\n indexs = searchsorted(self.property_id, property_ids)\n assert len(indexs) == len(property_ids), 'indexs=%s pids=%s' % (indexs, property_ids)\n return indexs\n\n #=========================================================================\n def write_bdf(self, f, size=8, property_ids=None):\n if self.n:\n if property_ids is None:\n i = arange(self.n)\n else:\n i = searchsorted(self.property_id, property_ids)\n\n for (pid, mid, area, I1, I2, J) in zip(self.property_id[i], self.material_id[i],\n self.area[i], self.I1[i], self.I2[i], self.J[i]):\n card = ['PBAR', pid, mid, area, I1, I2, J]\n f.write(print_card_8(card))\n", "step-ids": [ 3, 6, 7, 8, 9 ] }
[ 3, 6, 7, 8, 9 ]
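Editor's note on the record above: the PBAR container follows a small add/build/write_bdf lifecycle. The sketch below is an illustrative smoke test of that call order only; it assumes the class above is importable (numpy and pyNastran present) and that any `card` passed to add() would be a pyNastran BDFCard-style field list, which is not constructed here.

# Minimal sketch of the add/build/write_bdf call order shown in the record above.
# Assumption: PBAR from the record is in scope; real cards would be BDFCard field lists.
import sys

prop = PBAR(model=None)       # the container only stores the model reference
# prop.add(card, comment='')  # one call per PBAR card (BDFCard assumed, not built here)
prop.build()                  # with no cards queued, n stays 0 and no arrays are allocated
prop.write_bdf(sys.stdout)    # write_bdf is a no-op while n == 0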
from flask import Flask, request, redirect, render_template, session, flash from mysqlconnection import MySQLConnector import re EMAIL_REGEX = re.compile(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)") app = Flask(__name__) app.secret_key = "ThisIsSecret" mysql = MySQLConnector(app,'mydb') @app.route('/') def index(): return render_template('index.html') @app.route('/process', methods=["POST"]) def process(): if len(request.form['email']) < 1: flash("Email cannot be blank!") return redirect('/') elif not EMAIL_REGEX.match(request.form['email']): flash("Invalid Email Address!") return redirect('/') else: flash("The email address you entered " + str(request.form['email'] + " is VALID. Thank you!")) query = "INSERT INTO email_validation_assignment (email, created_at, updated_at)VALUES (:email, NOW(), NOW())" data = { "email": request.form['email'] } mysql.query_db(query, data) return redirect('/success') @app.route('/success') def success(): query = "SELECT email, DATE_FORMAT(created_at, '%m %d %Y %h:%i %p') as date_created FROM email_validation_assignment" emails = mysql.query_db(query) return render_template('success.html', all_emails = emails) @app.route('/delete', methods=["POST"]) def delete(): query = "DELETE FROM email_validation_assignment WHERE email = :email" data = { 'email': request.form['email']} mysql.query_db(query, data) return redirect('/success') app.run(debug=True)
normal
{ "blob_id": "187cf160b520001b6fe3a8d343391de1c04b3acd", "index": 1754, "step-1": "<mask token>\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/process', methods=['POST'])\ndef process():\n if len(request.form['email']) < 1:\n flash('Email cannot be blank!')\n return redirect('/')\n elif not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid Email Address!')\n return redirect('/')\n else:\n flash('The email address you entered ' + str(request.form['email'] +\n ' is VALID. Thank you!'))\n query = (\n 'INSERT INTO email_validation_assignment (email, created_at, updated_at)VALUES (:email, NOW(), NOW())'\n )\n data = {'email': request.form['email']}\n mysql.query_db(query, data)\n return redirect('/success')\n\n\n@app.route('/success')\ndef success():\n query = (\n \"SELECT email, DATE_FORMAT(created_at, '%m %d %Y %h:%i %p') as date_created FROM email_validation_assignment\"\n )\n emails = mysql.query_db(query)\n return render_template('success.html', all_emails=emails)\n\n\n@app.route('/delete', methods=['POST'])\ndef delete():\n query = 'DELETE FROM email_validation_assignment WHERE email = :email'\n data = {'email': request.form['email']}\n mysql.query_db(query, data)\n return redirect('/success')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/process', methods=['POST'])\ndef process():\n if len(request.form['email']) < 1:\n flash('Email cannot be blank!')\n return redirect('/')\n elif not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid Email Address!')\n return redirect('/')\n else:\n flash('The email address you entered ' + str(request.form['email'] +\n ' is VALID. Thank you!'))\n query = (\n 'INSERT INTO email_validation_assignment (email, created_at, updated_at)VALUES (:email, NOW(), NOW())'\n )\n data = {'email': request.form['email']}\n mysql.query_db(query, data)\n return redirect('/success')\n\n\n@app.route('/success')\ndef success():\n query = (\n \"SELECT email, DATE_FORMAT(created_at, '%m %d %Y %h:%i %p') as date_created FROM email_validation_assignment\"\n )\n emails = mysql.query_db(query)\n return render_template('success.html', all_emails=emails)\n\n\n@app.route('/delete', methods=['POST'])\ndef delete():\n query = 'DELETE FROM email_validation_assignment WHERE email = :email'\n data = {'email': request.form['email']}\n mysql.query_db(query, data)\n return redirect('/success')\n\n\napp.run(debug=True)\n", "step-3": "<mask token>\nEMAIL_REGEX = re.compile('(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)')\napp = Flask(__name__)\napp.secret_key = 'ThisIsSecret'\nmysql = MySQLConnector(app, 'mydb')\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/process', methods=['POST'])\ndef process():\n if len(request.form['email']) < 1:\n flash('Email cannot be blank!')\n return redirect('/')\n elif not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid Email Address!')\n return redirect('/')\n else:\n flash('The email address you entered ' + str(request.form['email'] +\n ' is VALID. 
Thank you!'))\n query = (\n 'INSERT INTO email_validation_assignment (email, created_at, updated_at)VALUES (:email, NOW(), NOW())'\n )\n data = {'email': request.form['email']}\n mysql.query_db(query, data)\n return redirect('/success')\n\n\n@app.route('/success')\ndef success():\n query = (\n \"SELECT email, DATE_FORMAT(created_at, '%m %d %Y %h:%i %p') as date_created FROM email_validation_assignment\"\n )\n emails = mysql.query_db(query)\n return render_template('success.html', all_emails=emails)\n\n\n@app.route('/delete', methods=['POST'])\ndef delete():\n query = 'DELETE FROM email_validation_assignment WHERE email = :email'\n data = {'email': request.form['email']}\n mysql.query_db(query, data)\n return redirect('/success')\n\n\napp.run(debug=True)\n", "step-4": "from flask import Flask, request, redirect, render_template, session, flash\nfrom mysqlconnection import MySQLConnector\nimport re\nEMAIL_REGEX = re.compile('(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)')\napp = Flask(__name__)\napp.secret_key = 'ThisIsSecret'\nmysql = MySQLConnector(app, 'mydb')\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/process', methods=['POST'])\ndef process():\n if len(request.form['email']) < 1:\n flash('Email cannot be blank!')\n return redirect('/')\n elif not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid Email Address!')\n return redirect('/')\n else:\n flash('The email address you entered ' + str(request.form['email'] +\n ' is VALID. Thank you!'))\n query = (\n 'INSERT INTO email_validation_assignment (email, created_at, updated_at)VALUES (:email, NOW(), NOW())'\n )\n data = {'email': request.form['email']}\n mysql.query_db(query, data)\n return redirect('/success')\n\n\n@app.route('/success')\ndef success():\n query = (\n \"SELECT email, DATE_FORMAT(created_at, '%m %d %Y %h:%i %p') as date_created FROM email_validation_assignment\"\n )\n emails = mysql.query_db(query)\n return render_template('success.html', all_emails=emails)\n\n\n@app.route('/delete', methods=['POST'])\ndef delete():\n query = 'DELETE FROM email_validation_assignment WHERE email = :email'\n data = {'email': request.form['email']}\n mysql.query_db(query, data)\n return redirect('/success')\n\n\napp.run(debug=True)\n", "step-5": "from flask import Flask, request, redirect, render_template, session, flash\nfrom mysqlconnection import MySQLConnector\nimport re\n\nEMAIL_REGEX = re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\")\napp = Flask(__name__)\napp.secret_key = \"ThisIsSecret\"\nmysql = MySQLConnector(app,'mydb')\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/process', methods=[\"POST\"])\ndef process():\n if len(request.form['email']) < 1:\n flash(\"Email cannot be blank!\")\n return redirect('/')\n elif not EMAIL_REGEX.match(request.form['email']):\n flash(\"Invalid Email Address!\")\n return redirect('/')\n else:\n flash(\"The email address you entered \" + str(request.form['email'] + \" is VALID. 
Thank you!\"))\n query = \"INSERT INTO email_validation_assignment (email, created_at, updated_at)VALUES (:email, NOW(), NOW())\"\n data = { \"email\": request.form['email'] }\n mysql.query_db(query, data)\n return redirect('/success')\n\n@app.route('/success')\ndef success():\n query = \"SELECT email, DATE_FORMAT(created_at, '%m %d %Y %h:%i %p') as date_created FROM email_validation_assignment\"\n emails = mysql.query_db(query)\n return render_template('success.html', all_emails = emails)\n\n\n@app.route('/delete', methods=[\"POST\"])\ndef delete():\n query = \"DELETE FROM email_validation_assignment WHERE email = :email\"\n data = { 'email': request.form['email']}\n mysql.query_db(query, data)\n return redirect('/success')\napp.run(debug=True)", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
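Editor's note on the record above: the Flask record wires four routes around one MySQL table. The sketch below exercises the /process route with Flask's built-in test client; the module name `server` is an assumption, the `mydb` database with the email_validation_assignment table must actually be reachable for the insert to succeed, and the trailing app.run(debug=True) call would need to be removed or guarded behind an `if __name__ == '__main__':` block so the import does not start the dev server.

# Editor-added sketch: drive the /process route from the record above with the test client.
# Assumptions: code saved as server.py with app.run() guarded; MySQL database available.
from server import app  # hypothetical module name

with app.test_client() as client:
    resp = client.post('/process', data={'email': 'user@example.com'})
    # A valid address is stored and the view redirects to /success (HTTP 302).
    print(resp.status_code, resp.headers.get('Location'))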
from valuate.predict import * def get_profit_rate(intent, popularity): """ 获取畅销系数 """ # 按畅销程度分级,各交易方式相比于标价的固定比例 profits = gl.PROFITS profit = profits[popularity] # 计算各交易方式的价格相比于标价的固定比例 if intent == 'sell': # 商家收购价相比加权平均价的比例 profit_rate = 1 - profit[0] - profit[1] elif intent == 'buy': # 商家真实售价相比加权平均价的比例 profit_rate = 1 - profit[0] elif intent == 'release': # 建议标价相比加权平均价的比例 profit_rate = 1 elif intent == 'private': # C2C价格相比加权平均价的比例 profit_rate = 1 - profit[0] - profit[2] elif intent == 'lowest': # 最低成交价相比加权平均价的比例 profit_rate = 1 - profit[0] - profit[1] - profit[3] elif intent == 'cpo': # 认证二手车价相比加权平均价的差异比例 profit_rate = 1 - profit[0] - profit[8] elif intent == 'replace': # 4S店置换价相比加权平均价的比例 profit_rate = 1 - profit[0] - profit[4] elif intent == 'auction': # 拍卖价相比加权平均价的差异比例 profit_rate = 1 - profit[0] - profit[5] elif intent == 'avg-buy': # 平均买车价相比加权平均价的差异比例 profit_rate = 1 - profit[0] - profit[7] elif intent == 'avg-sell': # 平均卖车价价相比加权平均价的差异比例 profit_rate = 1 - profit[0] - profit[6] return profit_rate def cal_intent_condition(prices, price_bn): """ 计算所有交易方式的4个级别车况价 """ if(prices[2] * 1.03) > price_bn: rate = (prices[2] * 1.03) / price_bn prices = prices / rate df1 = pd.DataFrame(prices) df2 = pd.DataFrame([gl.CAR_CONDITION_COEFFICIENT_VALUES]) all_map = df1.dot(df2) all_map.columns = ['excellent', 'good', 'fair', 'bad'] all_map['intent'] = pd.Series(gl.INTENT_TYPE).values all_map = all_map.loc[:, ['intent', 'excellent', 'good', 'fair', 'bad']] all_map[['excellent', 'good', 'fair', 'bad']] = all_map[['excellent', 'good', 'fair', 'bad']].astype(int) return all_map def process_mile(price, use_time, mile): """ mile处理 """ # 正常行驶的车辆以一年2.5万公里为正常基数,低于2.5万公里的价格的浮动在+3.5%以内 # 大于2.5万公里的若每年的平均行驶里程大于2.5万公里小于5万公里价格浮动在-3.5-7.5% # 若年平均形式里程大于5万公里及以上影响价格在-7.5-12.5%之间 mile_per_month = mile / use_time if mile_per_month < gl.MILE_THRESHOLD_2_5: return price + 0.035 * (1 - mile_per_month/gl.MILE_THRESHOLD_2_5) * price elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5: return price - (0.04 * (mile_per_month/gl.MILE_THRESHOLD_5)+0.035) * price elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10: return price - (0.05 * (mile_per_month/gl.MILE_THRESHOLD_5)+0.075) * price else: return price - 0.125 * price def process_profit_rate(df): """ 畅销系数处理 """ return get_profit_rate(df['intent'], df['popularity']) def process_buy_profit_rate(df): """ 畅销系数处理 """ return get_profit_rate(df['intent_source'], df['popularity']) def process_unreasonable_history_price(data, nums): """ 处理不合理历史价格趋势 """ if nums == 0: return data temp = data[1:] temp.sort() for i, value in enumerate(temp): data[i+1] = temp[i] for i in range(0, nums): rate = (data[i + 1] - data[i]) / data[i + 1] if (data[i] >= data[i + 1]) | (0.003 > rate) | (0.0157 < rate): data[i + 1] = int(data[i] * 1.0083) return data def process_unreasonable_future_price(data, nums): """ 处理不合理未来价格趋势 """ temp = data[1:] temp.sort(reverse=True) for i, value in enumerate(temp): data[i+1] = temp[i] for i in range(0, nums): rate = (data[i] - data[i + 1]) / data[i] if (data[i] <= data[i + 1]) | (0.036 > rate) | (0.188 < rate): data[i + 1] = int(data[i] * 0.9) return data def process_fill_zero(hedge): temp = hedge if len(hedge) < 18: for i in range(0, (18-len(hedge))): temp = '0'+temp return temp def predict_from_db(model_detail_slug, city, use_time): """ 从生产库查询预测 """ # 查找city和model_detail_slug编号 city_id = province_city_map.loc[city, 'city_id'] model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id'] # 计算查询字段编号和月编号 if (use_time 
% 6) == 0: column_num = str(int(use_time / 6) - 1) month_num = 6 else: column_num = str(int(use_time / 6)) month_num = use_time % 6 # 查询 record = db_operate.query_valuate(model_detail_slug_id, city_id, column_num, use_time) # 查找对应值 dealer_hedge = str(record.loc[0, 'b2c_year_'+column_num]) dealer_hedge = process_fill_zero(dealer_hedge) dealer_hedge = dealer_hedge[(month_num-1)*3:month_num*3] dealer_hedge = int(dealer_hedge) / 1000 cpersonal_hedge = str(record.loc[0, 'c2c_year_'+column_num]) cpersonal_hedge = process_fill_zero(cpersonal_hedge) cpersonal_hedge = cpersonal_hedge[(month_num-1)*3:month_num*3] cpersonal_hedge = int(cpersonal_hedge) / 1000 return dealer_hedge, cpersonal_hedge def predict_from_db_history(model_detail_slug, city, use_time): """ 从生产库查询预测 """ # 查找city和model_detail_slug编号 city_id = province_city_map.loc[city, 'city_id'] model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id'] # 计算查询字段编号和月编号 if (use_time % 6) == 0: column_num = int(use_time / 6) - 1 month_num = 6 else: column_num = int(use_time / 6) month_num = use_time % 6 # 查询 dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(model_detail_slug_id, city_id, column_num, use_time) # 查找对应值 result = [] if len(dealer_hedge) == 1: dealer_hedge = process_fill_zero(dealer_hedge[0]) cpersonal_hedge = process_fill_zero(cpersonal_hedge[0]) for i in range(0, use_time): dealer_per = dealer_hedge[i*3:(i+1)*3] cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3] result.append([int(dealer_per)/1000, int(cpersonal_per)/1000, use_time]) result.reverse() elif len(dealer_hedge) == 2: dealer_hedge = process_fill_zero(dealer_hedge[0])+process_fill_zero(dealer_hedge[1]) cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])+process_fill_zero(cpersonal_hedge[1]) for i in range(month_num-1, month_num+6): dealer_per = dealer_hedge[i*3:(i+1)*3] cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3] result.append([int(dealer_per)/1000, int(cpersonal_per)/1000, use_time]) result.reverse() return result def predict_from_db_future(model_detail_slug, city, use_time, times): """ 从生产库查询预测 """ # 查找city和model_detail_slug编号 city_id = province_city_map.loc[city, 'city_id'] model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id'] # 计算查询字段编号和月编号 if (use_time % 6) == 0: column_num = int(use_time / 6) - 1 month_num = 6 else: column_num = int(use_time / 6) month_num = use_time % 6 # 查询 record = db_operate.query_valuate_future(model_detail_slug_id, city_id) # 查找对应值 result = [] for i in range(0, times): dealer_hedge = str(record.loc[0, 'b2c_year_' + str(column_num+i*2)]) dealer_hedge = process_fill_zero(dealer_hedge) dealer_hedge = dealer_hedge[(month_num - 1) * 3:month_num * 3] dealer_hedge = int(dealer_hedge) / 1000 cpersonal_hedge = str(record.loc[0, 'c2c_year_' + str(column_num+i*2)]) cpersonal_hedge = process_fill_zero(cpersonal_hedge) cpersonal_hedge = cpersonal_hedge[(month_num - 1) * 3:month_num * 3] cpersonal_hedge = int(cpersonal_hedge) / 1000 result.append([dealer_hedge, cpersonal_hedge, use_time+i*12]) return result def process_prices_relate(dealer_price, cpersonal_price): """ 人工处理三类价格的相关性 """ buy = dealer_price private = cpersonal_price # 计算buy与private的比例关系 private_buy_rate = (buy - private) / private # 人工处理预测不合理的三类价格 if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12): private = int(buy * (1 - 0.0875)) sell = int(private * (1 - 0.0525)) return buy, private, sell def process_adjust_profit(model_detail_slug, popularity): """ 调整值调整 """ index = 
str(model_detail_slug)+'_'+str(popularity) if index in model_detail_slug_popularity_index: rate = adjust_profit.loc[index, 'rate'] else: rate = 0 return rate def check_params_value(city, model_detail_slug, use_time, mile, category): """ 校验参数 """ # 校验city if city not in cities: raise ApiParamsValueError('city', city, 'Unknown city!') # 校验model if model_detail_slug not in models: raise ApiParamsValueError('model_detail_slug', model_detail_slug, 'Unknown model!') # 校验mile if not ((isinstance(mile, int)) | (isinstance(mile, float))): raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!') elif mile < 0: raise ApiParamsValueError('mile', mile, 'Mile must be greater than zero!') # 校验use_time if not isinstance(use_time, int): raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!') if category == 'valuate': if (use_time < 1) | (use_time > 240): raise ApiParamsValueError('use_time', use_time, 'The use_time of Forecast must be in 1-240!') elif category == 'history': if (use_time < 1) | (use_time > 240): raise ApiParamsValueError('use_time', use_time, 'The use_time of historical trend must be in 1-240!') elif category == 'future': if (use_time < 1) | (use_time > 240): raise ApiParamsValueError('use_time', use_time, 'The use_time of future trend must be in 1-240!') class Predict(object): def __init__(self): """ 加载各类匹配表和模型 """ self.result = [] self.valuate_model = [] def add_process_intent(self, buy, private, sell, popularity, price_bn): """ 根据交易方式修正预测值 """ # 组合结果 self.result = result_map.copy() self.result.loc[(self.result['intent'] == 'buy'), 'predict_price'] = buy self.result.loc[(self.result['intent'] == 'private'), 'predict_price'] = private self.result.loc[(self.result['intent'] == 'sell'), 'predict_price'] = sell self.result['predict_price'] = self.result['predict_price'].fillna(buy) self.result['popularity'] = popularity self.result['profit_rate'] = self.result.apply(process_profit_rate, axis=1) self.result['buy_profit_rate'] = self.result.apply(process_buy_profit_rate, axis=1) self.result['predict_price'] = self.result['predict_price'] / self.result['buy_profit_rate'] self.result['predict_price'] = self.result['profit_rate'] * self.result['predict_price'] # 计算所有交易类型 self.result = cal_intent_condition(self.result.predict_price.values, price_bn) def follow_process(self, use_time, mile, price_bn, dealer_hedge, cpersonal_hedge, province, model_slug, model_detail_slug): """ 后续跟进处理 """ # 获取价格 dealer_price, cpersonal_price = dealer_hedge * price_bn, cpersonal_hedge * price_bn # 处理mile dealer_price = process_mile(dealer_price, use_time, mile) cpersonal_price = process_mile(cpersonal_price, use_time, mile) # 处理价格之间的相关性 buy, private, sell = process_prices_relate(dealer_price, cpersonal_price) # 获取流行度 index = str(model_slug) + '_' + str(province) if index in province_popularity_index: popularity = province_popularity_map.loc[index, 'popularity'] else: popularity = 'C' # 进行调整值最终调整 rate = process_adjust_profit(model_detail_slug, popularity) buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (1 + rate) return buy, private, sell, popularity def predict(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2, ret_type='records'): """ 预测返回 """ # 校验参数 check_params_value(city, model_detail_slug, use_time, mile, category='valuate') # 查找款型对应的新车指导价,调整后的款型 price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn'] price_bn = price_bn * 10000 province = province_city_map.loc[city, 'province'] model_slug = model_detail_map.loc[model_detail_slug, 
'model_slug'] final_model_detail_slug = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug'] # 预测返回保值率 dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug, city, use_time) buy, private, sell, popularity = self.follow_process(use_time, mile, price_bn, dealer_hedge, cpersonal_hedge, province, model_slug, model_detail_slug) # 根据交易方式修正预测值 self.add_process_intent(buy, private, sell, popularity, price_bn) if ret_type == 'records': return self.result.to_dict('records') else: return self.result def predict_for_history(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2): """ 预测历史数据返回 """ # 校验参数 check_params_value(city, model_detail_slug, use_time, mile, category='valuate') # 查找款型对应的新车指导价,调整后的款型 price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn'] price_bn = price_bn * 10000 province = province_city_map.loc[city, 'province'] model_slug = model_detail_map.loc[model_detail_slug, 'model_slug'] final_model_detail_slug = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug'] # 预测返回保值率 data_buy = [] data_sell = [] data_private = [] result = predict_from_db_history(final_model_detail_slug, city, use_time) for dealer_hedge, cpersonal_hedge, use_time_per in result: buy, private, sell, popularity = self.follow_process(use_time_per, mile, price_bn, dealer_hedge, cpersonal_hedge, province, model_slug, model_detail_slug) data_buy.append(int(buy)) data_private.append(int(private)) data_sell.append(int(sell)) return data_buy, data_private, data_sell def predict_for_future(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2, times=3): """ 预测历史数据返回 """ # 校验参数 check_params_value(city, model_detail_slug, use_time, mile, category='valuate') # 查找款型对应的新车指导价,调整后的款型 price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn'] price_bn = price_bn * 10000 province = province_city_map.loc[city, 'province'] model_slug = model_detail_map.loc[model_detail_slug, 'model_slug'] final_model_detail_slug = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug'] # 预测返回保值率 data_buy = [] data_sell = [] data_private = [] result = predict_from_db_future(final_model_detail_slug, city, use_time, times) for dealer_hedge, cpersonal_hedge, use_time_per in result: buy, private, sell, popularity = self.follow_process(use_time_per, mile, price_bn, dealer_hedge, cpersonal_hedge, province, model_slug, model_detail_slug) data_buy.append(int(buy)) data_private.append(int(private)) data_sell.append(int(sell)) return data_buy, data_private, data_sell def history_price_trend(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2, ret_type='records'): """ 计算历史价格趋势 """ # 校验参数 check_params_value(city, model_detail_slug, use_time, mile, category='history') # 计算时间 times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6'] nums = 6 if use_time <= 6: times_str = [] nums = use_time-1 for i in range(0, nums+1): times_str.append(str(-i)) # 计算车商交易价,车商收购价的历史价格走势 data_buy, data_private, data_sell = self.predict_for_history(city, model_detail_slug, use_time, mile) # 处理异常值 data_buy = process_unreasonable_history_price(data_buy, nums) data_sell = process_unreasonable_history_price(data_sell, nums) data_private = process_unreasonable_history_price(data_private, nums) result_b_2_c = pd.DataFrame([data_buy], columns=times_str) result_b_2_c['type'] = 'buy' result_c_2_b = pd.DataFrame([data_sell], columns=times_str) result_c_2_b['type'] = 'sell' result_c_2_c = pd.DataFrame([data_private], columns=times_str) result_c_2_c['type'] = 'private' 
result = result_b_2_c.append(result_c_2_b, ignore_index=True) result = result.append(result_c_2_c, ignore_index=True) if ret_type == 'records': return result.to_dict('records') else: return result def future_price_trend(self, city='深圳', model_detail_slug='model_25023_cs', use_time=365, mile=2, ret_type='records'): """ 计算未来价格趋势 """ # 校验参数 check_params_value(city, model_detail_slug, use_time, mile, category='future') # 计算时间 times_str = ['0', '12', '24', '36'] nums = 3 if use_time > 204: times_str = [] nums = int((240-use_time) / 12) for i in range(0, nums+1): times_str.append(str(i*12)) # 计算个人交易价的未来价格趋势 data_buy, data_private, data_sell = self.predict_for_future(city, model_detail_slug, use_time, mile, len(times_str)) data_buy = process_unreasonable_future_price(data_buy, nums) data_sell = process_unreasonable_future_price(data_sell, nums) data_private = process_unreasonable_future_price(data_private, nums) result_b_2_c = pd.DataFrame([data_buy], columns=times_str) result_b_2_c['type'] = 'buy' result_c_2_b = pd.DataFrame([data_sell], columns=times_str) result_c_2_b['type'] = 'sell' result_c_2_c = pd.DataFrame([data_private], columns=times_str) result_c_2_c['type'] = 'private' result = result_b_2_c.append(result_c_2_b, ignore_index=True) result = result.append(result_c_2_c, ignore_index=True) if ret_type == 'records': return result.to_dict('records') else: return result
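Editor's note on the code above, ahead of its masked variants: the Predict class exposes point valuation plus historical and future trend queries. The sketch below is illustrative only; it assumes the module above is importable and that its DB-backed lookups (model_detail_map, province_city_map, db_operate and the other names pulled in via `from valuate.predict import *`) are initialised in the running environment. Argument values reuse the defaults visible in the method signatures.

# Editor-added sketch: exercising the Predict interface defined above.
# Assumption: the surrounding valuate.predict environment (maps, DB helpers) is available.
p = Predict()
valuation = p.predict(city='深圳', model_detail_slug='model_25023_cs',
                      use_time=24, mile=3.0, ret_type='records')
history = p.history_price_trend(city='深圳', model_detail_slug='model_25023_cs',
                                use_time=24, mile=3.0)
future = p.future_price_trend(city='深圳', model_detail_slug='model_25023_cs',
                              use_time=24, mile=3.0)
print(valuation, history, future)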
normal
{ "blob_id": "1f01989f10be5404d415d4abd1ef9ab6c8695aba", "index": 6069, "step-1": "<mask token>\n\n\ndef process_mile(price, use_time, mile):\n \"\"\"\n mile处理\n \"\"\"\n mile_per_month = mile / use_time\n if mile_per_month < gl.MILE_THRESHOLD_2_5:\n return price + 0.035 * (1 - mile_per_month / gl.MILE_THRESHOLD_2_5\n ) * price\n elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:\n return price - (0.04 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.035\n ) * price\n elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:\n return price - (0.05 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.075\n ) * price\n else:\n return price - 0.125 * price\n\n\n<mask token>\n\n\ndef process_prices_relate(dealer_price, cpersonal_price):\n \"\"\"\n 人工处理三类价格的相关性\n \"\"\"\n buy = dealer_price\n private = cpersonal_price\n private_buy_rate = (buy - private) / private\n if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):\n private = int(buy * (1 - 0.0875))\n sell = int(private * (1 - 0.0525))\n return buy, private, sell\n\n\n<mask token>\n\n\ndef check_params_value(city, model_detail_slug, use_time, mile, category):\n \"\"\"\n 校验参数\n \"\"\"\n if city not in cities:\n raise ApiParamsValueError('city', city, 'Unknown city!')\n if model_detail_slug not in models:\n raise ApiParamsValueError('model_detail_slug', model_detail_slug,\n 'Unknown model!')\n if not isinstance(mile, int) | isinstance(mile, float):\n raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')\n elif mile < 0:\n raise ApiParamsValueError('mile', mile,\n 'Mile must be greater than zero!')\n if not isinstance(use_time, int):\n raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')\n if category == 'valuate':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of Forecast must be in 1-240!')\n elif category == 'history':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of historical trend must be in 1-240!')\n elif category == 'future':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of future trend must be in 1-240!')\n\n\nclass Predict(object):\n\n def __init__(self):\n \"\"\"\n 加载各类匹配表和模型\n \"\"\"\n self.result = []\n self.valuate_model = []\n\n def add_process_intent(self, buy, private, sell, popularity, price_bn):\n \"\"\"\n 根据交易方式修正预测值\n \"\"\"\n self.result = result_map.copy()\n self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy\n self.result.loc[self.result['intent'] == 'private', 'predict_price'\n ] = private\n self.result.loc[self.result['intent'] == 'sell', 'predict_price'\n ] = sell\n self.result['predict_price'] = self.result['predict_price'].fillna(buy)\n self.result['popularity'] = popularity\n self.result['profit_rate'] = self.result.apply(process_profit_rate,\n axis=1)\n self.result['buy_profit_rate'] = self.result.apply(\n process_buy_profit_rate, axis=1)\n self.result['predict_price'] = self.result['predict_price'\n ] / self.result['buy_profit_rate']\n self.result['predict_price'] = self.result['profit_rate'\n ] * self.result['predict_price']\n self.result = cal_intent_condition(self.result.predict_price.values,\n price_bn)\n\n def follow_process(self, use_time, mile, price_bn, dealer_hedge,\n cpersonal_hedge, province, model_slug, model_detail_slug):\n \"\"\"\n 后续跟进处理\n \"\"\"\n dealer_price, cpersonal_price = (dealer_hedge * price_bn, \n cpersonal_hedge * price_bn)\n 
dealer_price = process_mile(dealer_price, use_time, mile)\n cpersonal_price = process_mile(cpersonal_price, use_time, mile)\n buy, private, sell = process_prices_relate(dealer_price,\n cpersonal_price)\n index = str(model_slug) + '_' + str(province)\n if index in province_popularity_index:\n popularity = province_popularity_map.loc[index, 'popularity']\n else:\n popularity = 'C'\n rate = process_adjust_profit(model_detail_slug, popularity)\n buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (\n 1 + rate)\n return buy, private, sell, popularity\n\n def predict(self, city='深圳', model_detail_slug='model_25023_cs',\n use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 预测返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug\n , city, use_time)\n buy, private, sell, popularity = self.follow_process(use_time, mile,\n price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,\n model_detail_slug)\n self.add_process_intent(buy, private, sell, popularity, price_bn)\n if ret_type == 'records':\n return self.result.to_dict('records')\n else:\n return self.result\n\n def predict_for_history(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_history(final_model_detail_slug, city,\n use_time)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def predict_for_future(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, times=3):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_future(final_model_detail_slug, city,\n use_time, times)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n 
data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def history_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 计算历史价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='history')\n times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']\n nums = 6\n if use_time <= 6:\n times_str = []\n nums = use_time - 1\n for i in range(0, nums + 1):\n times_str.append(str(-i))\n data_buy, data_private, data_sell = self.predict_for_history(city,\n model_detail_slug, use_time, mile)\n data_buy = process_unreasonable_history_price(data_buy, nums)\n data_sell = process_unreasonable_history_price(data_sell, nums)\n data_private = process_unreasonable_history_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n def future_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=365, mile=2, ret_type='records'):\n \"\"\"\n 计算未来价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='future')\n times_str = ['0', '12', '24', '36']\n nums = 3\n if use_time > 204:\n times_str = []\n nums = int((240 - use_time) / 12)\n for i in range(0, nums + 1):\n times_str.append(str(i * 12))\n data_buy, data_private, data_sell = self.predict_for_future(city,\n model_detail_slug, use_time, mile, len(times_str))\n data_buy = process_unreasonable_future_price(data_buy, nums)\n data_sell = process_unreasonable_future_price(data_sell, nums)\n data_private = process_unreasonable_future_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n", "step-2": "<mask token>\n\n\ndef process_mile(price, use_time, mile):\n \"\"\"\n mile处理\n \"\"\"\n mile_per_month = mile / use_time\n if mile_per_month < gl.MILE_THRESHOLD_2_5:\n return price + 0.035 * (1 - mile_per_month / gl.MILE_THRESHOLD_2_5\n ) * price\n elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:\n return price - (0.04 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.035\n ) * price\n elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:\n return price - (0.05 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.075\n ) * price\n else:\n return price - 0.125 * price\n\n\n<mask token>\n\n\ndef process_unreasonable_history_price(data, nums):\n \"\"\"\n 处理不合理历史价格趋势\n \"\"\"\n if nums == 0:\n return data\n temp = data[1:]\n temp.sort()\n for i, value in enumerate(temp):\n data[i + 1] = temp[i]\n for i in range(0, nums):\n rate = (data[i + 1] - data[i]) / data[i + 1]\n if (data[i] >= data[i + 1]) | (0.003 > rate) | (0.0157 < rate):\n data[i + 1] = int(data[i] * 1.0083)\n 
return data\n\n\n<mask token>\n\n\ndef process_fill_zero(hedge):\n temp = hedge\n if len(hedge) < 18:\n for i in range(0, 18 - len(hedge)):\n temp = '0' + temp\n return temp\n\n\n<mask token>\n\n\ndef predict_from_db_history(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug_id']\n if use_time % 6 == 0:\n column_num = int(use_time / 6) - 1\n month_num = 6\n else:\n column_num = int(use_time / 6)\n month_num = use_time % 6\n dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(\n model_detail_slug_id, city_id, column_num, use_time)\n result = []\n if len(dealer_hedge) == 1:\n dealer_hedge = process_fill_zero(dealer_hedge[0])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])\n for i in range(0, use_time):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n elif len(dealer_hedge) == 2:\n dealer_hedge = process_fill_zero(dealer_hedge[0]) + process_fill_zero(\n dealer_hedge[1])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0]\n ) + process_fill_zero(cpersonal_hedge[1])\n for i in range(month_num - 1, month_num + 6):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n return result\n\n\n<mask token>\n\n\ndef process_prices_relate(dealer_price, cpersonal_price):\n \"\"\"\n 人工处理三类价格的相关性\n \"\"\"\n buy = dealer_price\n private = cpersonal_price\n private_buy_rate = (buy - private) / private\n if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):\n private = int(buy * (1 - 0.0875))\n sell = int(private * (1 - 0.0525))\n return buy, private, sell\n\n\n<mask token>\n\n\ndef check_params_value(city, model_detail_slug, use_time, mile, category):\n \"\"\"\n 校验参数\n \"\"\"\n if city not in cities:\n raise ApiParamsValueError('city', city, 'Unknown city!')\n if model_detail_slug not in models:\n raise ApiParamsValueError('model_detail_slug', model_detail_slug,\n 'Unknown model!')\n if not isinstance(mile, int) | isinstance(mile, float):\n raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')\n elif mile < 0:\n raise ApiParamsValueError('mile', mile,\n 'Mile must be greater than zero!')\n if not isinstance(use_time, int):\n raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')\n if category == 'valuate':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of Forecast must be in 1-240!')\n elif category == 'history':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of historical trend must be in 1-240!')\n elif category == 'future':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of future trend must be in 1-240!')\n\n\nclass Predict(object):\n\n def __init__(self):\n \"\"\"\n 加载各类匹配表和模型\n \"\"\"\n self.result = []\n self.valuate_model = []\n\n def add_process_intent(self, buy, private, sell, popularity, price_bn):\n \"\"\"\n 根据交易方式修正预测值\n \"\"\"\n self.result = result_map.copy()\n self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy\n self.result.loc[self.result['intent'] == 'private', 'predict_price'\n ] = 
private\n self.result.loc[self.result['intent'] == 'sell', 'predict_price'\n ] = sell\n self.result['predict_price'] = self.result['predict_price'].fillna(buy)\n self.result['popularity'] = popularity\n self.result['profit_rate'] = self.result.apply(process_profit_rate,\n axis=1)\n self.result['buy_profit_rate'] = self.result.apply(\n process_buy_profit_rate, axis=1)\n self.result['predict_price'] = self.result['predict_price'\n ] / self.result['buy_profit_rate']\n self.result['predict_price'] = self.result['profit_rate'\n ] * self.result['predict_price']\n self.result = cal_intent_condition(self.result.predict_price.values,\n price_bn)\n\n def follow_process(self, use_time, mile, price_bn, dealer_hedge,\n cpersonal_hedge, province, model_slug, model_detail_slug):\n \"\"\"\n 后续跟进处理\n \"\"\"\n dealer_price, cpersonal_price = (dealer_hedge * price_bn, \n cpersonal_hedge * price_bn)\n dealer_price = process_mile(dealer_price, use_time, mile)\n cpersonal_price = process_mile(cpersonal_price, use_time, mile)\n buy, private, sell = process_prices_relate(dealer_price,\n cpersonal_price)\n index = str(model_slug) + '_' + str(province)\n if index in province_popularity_index:\n popularity = province_popularity_map.loc[index, 'popularity']\n else:\n popularity = 'C'\n rate = process_adjust_profit(model_detail_slug, popularity)\n buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (\n 1 + rate)\n return buy, private, sell, popularity\n\n def predict(self, city='深圳', model_detail_slug='model_25023_cs',\n use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 预测返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug\n , city, use_time)\n buy, private, sell, popularity = self.follow_process(use_time, mile,\n price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,\n model_detail_slug)\n self.add_process_intent(buy, private, sell, popularity, price_bn)\n if ret_type == 'records':\n return self.result.to_dict('records')\n else:\n return self.result\n\n def predict_for_history(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_history(final_model_detail_slug, city,\n use_time)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def predict_for_future(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, 
times=3):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_future(final_model_detail_slug, city,\n use_time, times)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def history_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 计算历史价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='history')\n times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']\n nums = 6\n if use_time <= 6:\n times_str = []\n nums = use_time - 1\n for i in range(0, nums + 1):\n times_str.append(str(-i))\n data_buy, data_private, data_sell = self.predict_for_history(city,\n model_detail_slug, use_time, mile)\n data_buy = process_unreasonable_history_price(data_buy, nums)\n data_sell = process_unreasonable_history_price(data_sell, nums)\n data_private = process_unreasonable_history_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n def future_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=365, mile=2, ret_type='records'):\n \"\"\"\n 计算未来价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='future')\n times_str = ['0', '12', '24', '36']\n nums = 3\n if use_time > 204:\n times_str = []\n nums = int((240 - use_time) / 12)\n for i in range(0, nums + 1):\n times_str.append(str(i * 12))\n data_buy, data_private, data_sell = self.predict_for_future(city,\n model_detail_slug, use_time, mile, len(times_str))\n data_buy = process_unreasonable_future_price(data_buy, nums)\n data_sell = process_unreasonable_future_price(data_sell, nums)\n data_private = process_unreasonable_future_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n", "step-3": "<mask token>\n\n\ndef process_mile(price, use_time, mile):\n \"\"\"\n mile处理\n \"\"\"\n mile_per_month = mile / use_time\n if 
mile_per_month < gl.MILE_THRESHOLD_2_5:\n return price + 0.035 * (1 - mile_per_month / gl.MILE_THRESHOLD_2_5\n ) * price\n elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:\n return price - (0.04 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.035\n ) * price\n elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:\n return price - (0.05 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.075\n ) * price\n else:\n return price - 0.125 * price\n\n\n<mask token>\n\n\ndef process_unreasonable_history_price(data, nums):\n \"\"\"\n 处理不合理历史价格趋势\n \"\"\"\n if nums == 0:\n return data\n temp = data[1:]\n temp.sort()\n for i, value in enumerate(temp):\n data[i + 1] = temp[i]\n for i in range(0, nums):\n rate = (data[i + 1] - data[i]) / data[i + 1]\n if (data[i] >= data[i + 1]) | (0.003 > rate) | (0.0157 < rate):\n data[i + 1] = int(data[i] * 1.0083)\n return data\n\n\n<mask token>\n\n\ndef process_fill_zero(hedge):\n temp = hedge\n if len(hedge) < 18:\n for i in range(0, 18 - len(hedge)):\n temp = '0' + temp\n return temp\n\n\ndef predict_from_db(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug_id']\n if use_time % 6 == 0:\n column_num = str(int(use_time / 6) - 1)\n month_num = 6\n else:\n column_num = str(int(use_time / 6))\n month_num = use_time % 6\n record = db_operate.query_valuate(model_detail_slug_id, city_id,\n column_num, use_time)\n dealer_hedge = str(record.loc[0, 'b2c_year_' + column_num])\n dealer_hedge = process_fill_zero(dealer_hedge)\n dealer_hedge = dealer_hedge[(month_num - 1) * 3:month_num * 3]\n dealer_hedge = int(dealer_hedge) / 1000\n cpersonal_hedge = str(record.loc[0, 'c2c_year_' + column_num])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge)\n cpersonal_hedge = cpersonal_hedge[(month_num - 1) * 3:month_num * 3]\n cpersonal_hedge = int(cpersonal_hedge) / 1000\n return dealer_hedge, cpersonal_hedge\n\n\ndef predict_from_db_history(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug_id']\n if use_time % 6 == 0:\n column_num = int(use_time / 6) - 1\n month_num = 6\n else:\n column_num = int(use_time / 6)\n month_num = use_time % 6\n dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(\n model_detail_slug_id, city_id, column_num, use_time)\n result = []\n if len(dealer_hedge) == 1:\n dealer_hedge = process_fill_zero(dealer_hedge[0])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])\n for i in range(0, use_time):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n elif len(dealer_hedge) == 2:\n dealer_hedge = process_fill_zero(dealer_hedge[0]) + process_fill_zero(\n dealer_hedge[1])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0]\n ) + process_fill_zero(cpersonal_hedge[1])\n for i in range(month_num - 1, month_num + 6):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n return result\n\n\n<mask token>\n\n\ndef process_prices_relate(dealer_price, cpersonal_price):\n \"\"\"\n 人工处理三类价格的相关性\n \"\"\"\n buy = dealer_price\n 
private = cpersonal_price\n private_buy_rate = (buy - private) / private\n if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):\n private = int(buy * (1 - 0.0875))\n sell = int(private * (1 - 0.0525))\n return buy, private, sell\n\n\n<mask token>\n\n\ndef check_params_value(city, model_detail_slug, use_time, mile, category):\n \"\"\"\n 校验参数\n \"\"\"\n if city not in cities:\n raise ApiParamsValueError('city', city, 'Unknown city!')\n if model_detail_slug not in models:\n raise ApiParamsValueError('model_detail_slug', model_detail_slug,\n 'Unknown model!')\n if not isinstance(mile, int) | isinstance(mile, float):\n raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')\n elif mile < 0:\n raise ApiParamsValueError('mile', mile,\n 'Mile must be greater than zero!')\n if not isinstance(use_time, int):\n raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')\n if category == 'valuate':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of Forecast must be in 1-240!')\n elif category == 'history':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of historical trend must be in 1-240!')\n elif category == 'future':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of future trend must be in 1-240!')\n\n\nclass Predict(object):\n\n def __init__(self):\n \"\"\"\n 加载各类匹配表和模型\n \"\"\"\n self.result = []\n self.valuate_model = []\n\n def add_process_intent(self, buy, private, sell, popularity, price_bn):\n \"\"\"\n 根据交易方式修正预测值\n \"\"\"\n self.result = result_map.copy()\n self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy\n self.result.loc[self.result['intent'] == 'private', 'predict_price'\n ] = private\n self.result.loc[self.result['intent'] == 'sell', 'predict_price'\n ] = sell\n self.result['predict_price'] = self.result['predict_price'].fillna(buy)\n self.result['popularity'] = popularity\n self.result['profit_rate'] = self.result.apply(process_profit_rate,\n axis=1)\n self.result['buy_profit_rate'] = self.result.apply(\n process_buy_profit_rate, axis=1)\n self.result['predict_price'] = self.result['predict_price'\n ] / self.result['buy_profit_rate']\n self.result['predict_price'] = self.result['profit_rate'\n ] * self.result['predict_price']\n self.result = cal_intent_condition(self.result.predict_price.values,\n price_bn)\n\n def follow_process(self, use_time, mile, price_bn, dealer_hedge,\n cpersonal_hedge, province, model_slug, model_detail_slug):\n \"\"\"\n 后续跟进处理\n \"\"\"\n dealer_price, cpersonal_price = (dealer_hedge * price_bn, \n cpersonal_hedge * price_bn)\n dealer_price = process_mile(dealer_price, use_time, mile)\n cpersonal_price = process_mile(cpersonal_price, use_time, mile)\n buy, private, sell = process_prices_relate(dealer_price,\n cpersonal_price)\n index = str(model_slug) + '_' + str(province)\n if index in province_popularity_index:\n popularity = province_popularity_map.loc[index, 'popularity']\n else:\n popularity = 'C'\n rate = process_adjust_profit(model_detail_slug, popularity)\n buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (\n 1 + rate)\n return buy, private, sell, popularity\n\n def predict(self, city='深圳', model_detail_slug='model_25023_cs',\n use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 预测返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = 
model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug\n , city, use_time)\n buy, private, sell, popularity = self.follow_process(use_time, mile,\n price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,\n model_detail_slug)\n self.add_process_intent(buy, private, sell, popularity, price_bn)\n if ret_type == 'records':\n return self.result.to_dict('records')\n else:\n return self.result\n\n def predict_for_history(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_history(final_model_detail_slug, city,\n use_time)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def predict_for_future(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, times=3):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_future(final_model_detail_slug, city,\n use_time, times)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def history_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 计算历史价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='history')\n times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']\n nums = 6\n if use_time <= 6:\n times_str = []\n nums = use_time - 1\n for i in range(0, nums + 1):\n times_str.append(str(-i))\n data_buy, data_private, data_sell = self.predict_for_history(city,\n model_detail_slug, use_time, mile)\n data_buy = process_unreasonable_history_price(data_buy, nums)\n data_sell = process_unreasonable_history_price(data_sell, nums)\n data_private = process_unreasonable_history_price(data_private, nums)\n result_b_2_c = 
pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n def future_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=365, mile=2, ret_type='records'):\n \"\"\"\n 计算未来价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='future')\n times_str = ['0', '12', '24', '36']\n nums = 3\n if use_time > 204:\n times_str = []\n nums = int((240 - use_time) / 12)\n for i in range(0, nums + 1):\n times_str.append(str(i * 12))\n data_buy, data_private, data_sell = self.predict_for_future(city,\n model_detail_slug, use_time, mile, len(times_str))\n data_buy = process_unreasonable_future_price(data_buy, nums)\n data_sell = process_unreasonable_future_price(data_sell, nums)\n data_private = process_unreasonable_future_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n", "step-4": "<mask token>\n\n\ndef get_profit_rate(intent, popularity):\n \"\"\"\n 获取畅销系数\n \"\"\"\n profits = gl.PROFITS\n profit = profits[popularity]\n if intent == 'sell':\n profit_rate = 1 - profit[0] - profit[1]\n elif intent == 'buy':\n profit_rate = 1 - profit[0]\n elif intent == 'release':\n profit_rate = 1\n elif intent == 'private':\n profit_rate = 1 - profit[0] - profit[2]\n elif intent == 'lowest':\n profit_rate = 1 - profit[0] - profit[1] - profit[3]\n elif intent == 'cpo':\n profit_rate = 1 - profit[0] - profit[8]\n elif intent == 'replace':\n profit_rate = 1 - profit[0] - profit[4]\n elif intent == 'auction':\n profit_rate = 1 - profit[0] - profit[5]\n elif intent == 'avg-buy':\n profit_rate = 1 - profit[0] - profit[7]\n elif intent == 'avg-sell':\n profit_rate = 1 - profit[0] - profit[6]\n return profit_rate\n\n\ndef cal_intent_condition(prices, price_bn):\n \"\"\"\n 计算所有交易方式的4个级别车况价\n \"\"\"\n if prices[2] * 1.03 > price_bn:\n rate = prices[2] * 1.03 / price_bn\n prices = prices / rate\n df1 = pd.DataFrame(prices)\n df2 = pd.DataFrame([gl.CAR_CONDITION_COEFFICIENT_VALUES])\n all_map = df1.dot(df2)\n all_map.columns = ['excellent', 'good', 'fair', 'bad']\n all_map['intent'] = pd.Series(gl.INTENT_TYPE).values\n all_map = all_map.loc[:, ['intent', 'excellent', 'good', 'fair', 'bad']]\n all_map[['excellent', 'good', 'fair', 'bad']] = all_map[['excellent',\n 'good', 'fair', 'bad']].astype(int)\n return all_map\n\n\ndef process_mile(price, use_time, mile):\n \"\"\"\n mile处理\n \"\"\"\n mile_per_month = mile / use_time\n if mile_per_month < gl.MILE_THRESHOLD_2_5:\n return price + 0.035 * (1 - mile_per_month / gl.MILE_THRESHOLD_2_5\n ) * price\n elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:\n return price - (0.04 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.035\n ) * 
price\n elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:\n return price - (0.05 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.075\n ) * price\n else:\n return price - 0.125 * price\n\n\n<mask token>\n\n\ndef process_unreasonable_history_price(data, nums):\n \"\"\"\n 处理不合理历史价格趋势\n \"\"\"\n if nums == 0:\n return data\n temp = data[1:]\n temp.sort()\n for i, value in enumerate(temp):\n data[i + 1] = temp[i]\n for i in range(0, nums):\n rate = (data[i + 1] - data[i]) / data[i + 1]\n if (data[i] >= data[i + 1]) | (0.003 > rate) | (0.0157 < rate):\n data[i + 1] = int(data[i] * 1.0083)\n return data\n\n\n<mask token>\n\n\ndef process_fill_zero(hedge):\n temp = hedge\n if len(hedge) < 18:\n for i in range(0, 18 - len(hedge)):\n temp = '0' + temp\n return temp\n\n\ndef predict_from_db(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug_id']\n if use_time % 6 == 0:\n column_num = str(int(use_time / 6) - 1)\n month_num = 6\n else:\n column_num = str(int(use_time / 6))\n month_num = use_time % 6\n record = db_operate.query_valuate(model_detail_slug_id, city_id,\n column_num, use_time)\n dealer_hedge = str(record.loc[0, 'b2c_year_' + column_num])\n dealer_hedge = process_fill_zero(dealer_hedge)\n dealer_hedge = dealer_hedge[(month_num - 1) * 3:month_num * 3]\n dealer_hedge = int(dealer_hedge) / 1000\n cpersonal_hedge = str(record.loc[0, 'c2c_year_' + column_num])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge)\n cpersonal_hedge = cpersonal_hedge[(month_num - 1) * 3:month_num * 3]\n cpersonal_hedge = int(cpersonal_hedge) / 1000\n return dealer_hedge, cpersonal_hedge\n\n\ndef predict_from_db_history(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug_id']\n if use_time % 6 == 0:\n column_num = int(use_time / 6) - 1\n month_num = 6\n else:\n column_num = int(use_time / 6)\n month_num = use_time % 6\n dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(\n model_detail_slug_id, city_id, column_num, use_time)\n result = []\n if len(dealer_hedge) == 1:\n dealer_hedge = process_fill_zero(dealer_hedge[0])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])\n for i in range(0, use_time):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n elif len(dealer_hedge) == 2:\n dealer_hedge = process_fill_zero(dealer_hedge[0]) + process_fill_zero(\n dealer_hedge[1])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0]\n ) + process_fill_zero(cpersonal_hedge[1])\n for i in range(month_num - 1, month_num + 6):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n return result\n\n\n<mask token>\n\n\ndef process_prices_relate(dealer_price, cpersonal_price):\n \"\"\"\n 人工处理三类价格的相关性\n \"\"\"\n buy = dealer_price\n private = cpersonal_price\n private_buy_rate = (buy - private) / private\n if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):\n private = int(buy * (1 - 0.0875))\n sell = int(private * (1 - 0.0525))\n return buy, private, sell\n\n\n<mask token>\n\n\ndef 
check_params_value(city, model_detail_slug, use_time, mile, category):\n \"\"\"\n 校验参数\n \"\"\"\n if city not in cities:\n raise ApiParamsValueError('city', city, 'Unknown city!')\n if model_detail_slug not in models:\n raise ApiParamsValueError('model_detail_slug', model_detail_slug,\n 'Unknown model!')\n if not isinstance(mile, int) | isinstance(mile, float):\n raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')\n elif mile < 0:\n raise ApiParamsValueError('mile', mile,\n 'Mile must be greater than zero!')\n if not isinstance(use_time, int):\n raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')\n if category == 'valuate':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of Forecast must be in 1-240!')\n elif category == 'history':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of historical trend must be in 1-240!')\n elif category == 'future':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of future trend must be in 1-240!')\n\n\nclass Predict(object):\n\n def __init__(self):\n \"\"\"\n 加载各类匹配表和模型\n \"\"\"\n self.result = []\n self.valuate_model = []\n\n def add_process_intent(self, buy, private, sell, popularity, price_bn):\n \"\"\"\n 根据交易方式修正预测值\n \"\"\"\n self.result = result_map.copy()\n self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy\n self.result.loc[self.result['intent'] == 'private', 'predict_price'\n ] = private\n self.result.loc[self.result['intent'] == 'sell', 'predict_price'\n ] = sell\n self.result['predict_price'] = self.result['predict_price'].fillna(buy)\n self.result['popularity'] = popularity\n self.result['profit_rate'] = self.result.apply(process_profit_rate,\n axis=1)\n self.result['buy_profit_rate'] = self.result.apply(\n process_buy_profit_rate, axis=1)\n self.result['predict_price'] = self.result['predict_price'\n ] / self.result['buy_profit_rate']\n self.result['predict_price'] = self.result['profit_rate'\n ] * self.result['predict_price']\n self.result = cal_intent_condition(self.result.predict_price.values,\n price_bn)\n\n def follow_process(self, use_time, mile, price_bn, dealer_hedge,\n cpersonal_hedge, province, model_slug, model_detail_slug):\n \"\"\"\n 后续跟进处理\n \"\"\"\n dealer_price, cpersonal_price = (dealer_hedge * price_bn, \n cpersonal_hedge * price_bn)\n dealer_price = process_mile(dealer_price, use_time, mile)\n cpersonal_price = process_mile(cpersonal_price, use_time, mile)\n buy, private, sell = process_prices_relate(dealer_price,\n cpersonal_price)\n index = str(model_slug) + '_' + str(province)\n if index in province_popularity_index:\n popularity = province_popularity_map.loc[index, 'popularity']\n else:\n popularity = 'C'\n rate = process_adjust_profit(model_detail_slug, popularity)\n buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (\n 1 + rate)\n return buy, private, sell, popularity\n\n def predict(self, city='深圳', model_detail_slug='model_25023_cs',\n use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 预测返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 
'final_model_detail_slug']\n dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug\n , city, use_time)\n buy, private, sell, popularity = self.follow_process(use_time, mile,\n price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,\n model_detail_slug)\n self.add_process_intent(buy, private, sell, popularity, price_bn)\n if ret_type == 'records':\n return self.result.to_dict('records')\n else:\n return self.result\n\n def predict_for_history(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_history(final_model_detail_slug, city,\n use_time)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def predict_for_future(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, times=3):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_future(final_model_detail_slug, city,\n use_time, times)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def history_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 计算历史价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='history')\n times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']\n nums = 6\n if use_time <= 6:\n times_str = []\n nums = use_time - 1\n for i in range(0, nums + 1):\n times_str.append(str(-i))\n data_buy, data_private, data_sell = self.predict_for_history(city,\n model_detail_slug, use_time, mile)\n data_buy = process_unreasonable_history_price(data_buy, nums)\n data_sell = process_unreasonable_history_price(data_sell, nums)\n data_private = process_unreasonable_history_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = 
result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n def future_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=365, mile=2, ret_type='records'):\n \"\"\"\n 计算未来价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='future')\n times_str = ['0', '12', '24', '36']\n nums = 3\n if use_time > 204:\n times_str = []\n nums = int((240 - use_time) / 12)\n for i in range(0, nums + 1):\n times_str.append(str(i * 12))\n data_buy, data_private, data_sell = self.predict_for_future(city,\n model_detail_slug, use_time, mile, len(times_str))\n data_buy = process_unreasonable_future_price(data_buy, nums)\n data_sell = process_unreasonable_future_price(data_sell, nums)\n data_private = process_unreasonable_future_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n", "step-5": "from valuate.predict import *\n\n\ndef get_profit_rate(intent, popularity):\n \"\"\"\n 获取畅销系数\n \"\"\"\n # 按畅销程度分级,各交易方式相比于标价的固定比例\n profits = gl.PROFITS\n profit = profits[popularity]\n # 计算各交易方式的价格相比于标价的固定比例\n if intent == 'sell':\n # 商家收购价相比加权平均价的比例\n profit_rate = 1 - profit[0] - profit[1]\n elif intent == 'buy':\n # 商家真实售价相比加权平均价的比例\n profit_rate = 1 - profit[0]\n elif intent == 'release':\n # 建议标价相比加权平均价的比例\n profit_rate = 1\n elif intent == 'private':\n # C2C价格相比加权平均价的比例\n profit_rate = 1 - profit[0] - profit[2]\n elif intent == 'lowest':\n # 最低成交价相比加权平均价的比例\n profit_rate = 1 - profit[0] - profit[1] - profit[3]\n elif intent == 'cpo':\n # 认证二手车价相比加权平均价的差异比例\n profit_rate = 1 - profit[0] - profit[8]\n elif intent == 'replace':\n # 4S店置换价相比加权平均价的比例\n profit_rate = 1 - profit[0] - profit[4]\n elif intent == 'auction':\n # 拍卖价相比加权平均价的差异比例\n profit_rate = 1 - profit[0] - profit[5]\n elif intent == 'avg-buy':\n # 平均买车价相比加权平均价的差异比例\n profit_rate = 1 - profit[0] - profit[7]\n elif intent == 'avg-sell':\n # 平均卖车价价相比加权平均价的差异比例\n profit_rate = 1 - profit[0] - profit[6]\n return profit_rate\n\n\ndef cal_intent_condition(prices, price_bn):\n \"\"\"\n 计算所有交易方式的4个级别车况价\n \"\"\"\n if(prices[2] * 1.03) > price_bn:\n rate = (prices[2] * 1.03) / price_bn\n prices = prices / rate\n\n df1 = pd.DataFrame(prices)\n df2 = pd.DataFrame([gl.CAR_CONDITION_COEFFICIENT_VALUES])\n all_map = df1.dot(df2)\n all_map.columns = ['excellent', 'good', 'fair', 'bad']\n all_map['intent'] = pd.Series(gl.INTENT_TYPE).values\n all_map = all_map.loc[:, ['intent', 'excellent', 'good', 'fair', 'bad']]\n all_map[['excellent', 'good', 'fair', 'bad']] = all_map[['excellent', 'good', 'fair', 'bad']].astype(int)\n return all_map\n\n\ndef process_mile(price, use_time, mile):\n \"\"\"\n mile处理\n \"\"\"\n # 正常行驶的车辆以一年2.5万公里为正常基数,低于2.5万公里的价格的浮动在+3.5%以内\n # 大于2.5万公里的若每年的平均行驶里程大于2.5万公里小于5万公里价格浮动在-3.5-7.5%\n # 若年平均形式里程大于5万公里及以上影响价格在-7.5-12.5%之间\n mile_per_month = mile / use_time\n if mile_per_month < gl.MILE_THRESHOLD_2_5:\n return price + 0.035 * (1 - mile_per_month/gl.MILE_THRESHOLD_2_5) * price\n elif 
gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:\n return price - (0.04 * (mile_per_month/gl.MILE_THRESHOLD_5)+0.035) * price\n elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:\n return price - (0.05 * (mile_per_month/gl.MILE_THRESHOLD_5)+0.075) * price\n else:\n return price - 0.125 * price\n\n\ndef process_profit_rate(df):\n \"\"\"\n 畅销系数处理\n \"\"\"\n return get_profit_rate(df['intent'], df['popularity'])\n\n\ndef process_buy_profit_rate(df):\n \"\"\"\n 畅销系数处理\n \"\"\"\n return get_profit_rate(df['intent_source'], df['popularity'])\n\n\ndef process_unreasonable_history_price(data, nums):\n \"\"\"\n 处理不合理历史价格趋势\n \"\"\"\n if nums == 0:\n return data\n\n temp = data[1:]\n temp.sort()\n for i, value in enumerate(temp):\n data[i+1] = temp[i]\n\n for i in range(0, nums):\n rate = (data[i + 1] - data[i]) / data[i + 1]\n if (data[i] >= data[i + 1]) | (0.003 > rate) | (0.0157 < rate):\n data[i + 1] = int(data[i] * 1.0083)\n\n return data\n\n\ndef process_unreasonable_future_price(data, nums):\n \"\"\"\n 处理不合理未来价格趋势\n \"\"\"\n temp = data[1:]\n temp.sort(reverse=True)\n for i, value in enumerate(temp):\n data[i+1] = temp[i]\n\n for i in range(0, nums):\n rate = (data[i] - data[i + 1]) / data[i]\n if (data[i] <= data[i + 1]) | (0.036 > rate) | (0.188 < rate):\n data[i + 1] = int(data[i] * 0.9)\n\n return data\n\n\ndef process_fill_zero(hedge):\n temp = hedge\n if len(hedge) < 18:\n for i in range(0, (18-len(hedge))):\n temp = '0'+temp\n return temp\n\n\ndef predict_from_db(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n # 查找city和model_detail_slug编号\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id']\n # 计算查询字段编号和月编号\n if (use_time % 6) == 0:\n column_num = str(int(use_time / 6) - 1)\n month_num = 6\n else:\n column_num = str(int(use_time / 6))\n month_num = use_time % 6\n # 查询\n record = db_operate.query_valuate(model_detail_slug_id, city_id, column_num, use_time)\n # 查找对应值\n dealer_hedge = str(record.loc[0, 'b2c_year_'+column_num])\n dealer_hedge = process_fill_zero(dealer_hedge)\n dealer_hedge = dealer_hedge[(month_num-1)*3:month_num*3]\n dealer_hedge = int(dealer_hedge) / 1000\n cpersonal_hedge = str(record.loc[0, 'c2c_year_'+column_num])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge)\n cpersonal_hedge = cpersonal_hedge[(month_num-1)*3:month_num*3]\n cpersonal_hedge = int(cpersonal_hedge) / 1000\n return dealer_hedge, cpersonal_hedge\n\n\ndef predict_from_db_history(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n # 查找city和model_detail_slug编号\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id']\n # 计算查询字段编号和月编号\n if (use_time % 6) == 0:\n column_num = int(use_time / 6) - 1\n month_num = 6\n else:\n column_num = int(use_time / 6)\n month_num = use_time % 6\n # 查询\n dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(model_detail_slug_id, city_id, column_num, use_time)\n # 查找对应值\n result = []\n if len(dealer_hedge) == 1:\n dealer_hedge = process_fill_zero(dealer_hedge[0])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])\n for i in range(0, use_time):\n dealer_per = dealer_hedge[i*3:(i+1)*3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per)/1000, int(cpersonal_per)/1000, use_time])\n result.reverse()\n elif len(dealer_hedge) == 2:\n dealer_hedge = 
process_fill_zero(dealer_hedge[0])+process_fill_zero(dealer_hedge[1])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])+process_fill_zero(cpersonal_hedge[1])\n for i in range(month_num-1, month_num+6):\n dealer_per = dealer_hedge[i*3:(i+1)*3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per)/1000, int(cpersonal_per)/1000, use_time])\n result.reverse()\n return result\n\n\ndef predict_from_db_future(model_detail_slug, city, use_time, times):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n # 查找city和model_detail_slug编号\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id']\n # 计算查询字段编号和月编号\n if (use_time % 6) == 0:\n column_num = int(use_time / 6) - 1\n month_num = 6\n else:\n column_num = int(use_time / 6)\n month_num = use_time % 6\n # 查询\n record = db_operate.query_valuate_future(model_detail_slug_id, city_id)\n # 查找对应值\n result = []\n for i in range(0, times):\n dealer_hedge = str(record.loc[0, 'b2c_year_' + str(column_num+i*2)])\n dealer_hedge = process_fill_zero(dealer_hedge)\n dealer_hedge = dealer_hedge[(month_num - 1) * 3:month_num * 3]\n dealer_hedge = int(dealer_hedge) / 1000\n cpersonal_hedge = str(record.loc[0, 'c2c_year_' + str(column_num+i*2)])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge)\n cpersonal_hedge = cpersonal_hedge[(month_num - 1) * 3:month_num * 3]\n cpersonal_hedge = int(cpersonal_hedge) / 1000\n result.append([dealer_hedge, cpersonal_hedge, use_time+i*12])\n return result\n\n\ndef process_prices_relate(dealer_price, cpersonal_price):\n \"\"\"\n 人工处理三类价格的相关性\n \"\"\"\n buy = dealer_price\n private = cpersonal_price\n # 计算buy与private的比例关系\n private_buy_rate = (buy - private) / private\n # 人工处理预测不合理的三类价格\n if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):\n private = int(buy * (1 - 0.0875))\n\n sell = int(private * (1 - 0.0525))\n return buy, private, sell\n\n\ndef process_adjust_profit(model_detail_slug, popularity):\n \"\"\"\n 调整值调整\n \"\"\"\n index = str(model_detail_slug)+'_'+str(popularity)\n if index in model_detail_slug_popularity_index:\n rate = adjust_profit.loc[index, 'rate']\n else:\n rate = 0\n return rate\n\n\ndef check_params_value(city, model_detail_slug, use_time, mile, category):\n \"\"\"\n 校验参数\n \"\"\"\n # 校验city\n if city not in cities:\n raise ApiParamsValueError('city', city, 'Unknown city!')\n # 校验model\n if model_detail_slug not in models:\n raise ApiParamsValueError('model_detail_slug', model_detail_slug, 'Unknown model!')\n # 校验mile\n if not ((isinstance(mile, int)) | (isinstance(mile, float))):\n raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')\n elif mile < 0:\n raise ApiParamsValueError('mile', mile, 'Mile must be greater than zero!')\n # 校验use_time\n if not isinstance(use_time, int):\n raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')\n if category == 'valuate':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time, 'The use_time of Forecast must be in 1-240!')\n elif category == 'history':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time, 'The use_time of historical trend must be in 1-240!')\n elif category == 'future':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time, 'The use_time of future trend must be in 1-240!')\n\n\nclass Predict(object):\n\n def __init__(self):\n \"\"\"\n 加载各类匹配表和模型\n \"\"\"\n self.result = []\n self.valuate_model = []\n\n 
def add_process_intent(self, buy, private, sell, popularity, price_bn):\n \"\"\"\n 根据交易方式修正预测值\n \"\"\"\n # 组合结果\n self.result = result_map.copy()\n self.result.loc[(self.result['intent'] == 'buy'), 'predict_price'] = buy\n self.result.loc[(self.result['intent'] == 'private'), 'predict_price'] = private\n self.result.loc[(self.result['intent'] == 'sell'), 'predict_price'] = sell\n self.result['predict_price'] = self.result['predict_price'].fillna(buy)\n\n self.result['popularity'] = popularity\n self.result['profit_rate'] = self.result.apply(process_profit_rate, axis=1)\n self.result['buy_profit_rate'] = self.result.apply(process_buy_profit_rate, axis=1)\n self.result['predict_price'] = self.result['predict_price'] / self.result['buy_profit_rate']\n self.result['predict_price'] = self.result['profit_rate'] * self.result['predict_price']\n\n # 计算所有交易类型\n self.result = cal_intent_condition(self.result.predict_price.values, price_bn)\n\n def follow_process(self, use_time, mile, price_bn, dealer_hedge, cpersonal_hedge, province, model_slug, model_detail_slug):\n \"\"\"\n 后续跟进处理\n \"\"\"\n # 获取价格\n dealer_price, cpersonal_price = dealer_hedge * price_bn, cpersonal_hedge * price_bn\n # 处理mile\n dealer_price = process_mile(dealer_price, use_time, mile)\n cpersonal_price = process_mile(cpersonal_price, use_time, mile)\n # 处理价格之间的相关性\n buy, private, sell = process_prices_relate(dealer_price, cpersonal_price)\n # 获取流行度\n index = str(model_slug) + '_' + str(province)\n if index in province_popularity_index:\n popularity = province_popularity_map.loc[index, 'popularity']\n else:\n popularity = 'C'\n # 进行调整值最终调整\n rate = process_adjust_profit(model_detail_slug, popularity)\n buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (1 + rate)\n return buy, private, sell, popularity\n\n def predict(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 预测返回\n \"\"\"\n # 校验参数\n check_params_value(city, model_detail_slug, use_time, mile, category='valuate')\n\n # 查找款型对应的新车指导价,调整后的款型\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug']\n # 预测返回保值率\n dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug, city, use_time)\n buy, private, sell, popularity = self.follow_process(use_time, mile, price_bn, dealer_hedge, cpersonal_hedge, province, model_slug, model_detail_slug)\n # 根据交易方式修正预测值\n self.add_process_intent(buy, private, sell, popularity, price_bn)\n\n if ret_type == 'records':\n return self.result.to_dict('records')\n else:\n return self.result\n\n def predict_for_history(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n # 校验参数\n check_params_value(city, model_detail_slug, use_time, mile, category='valuate')\n\n # 查找款型对应的新车指导价,调整后的款型\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug']\n\n # 预测返回保值率\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_history(final_model_detail_slug, city, use_time)\n for dealer_hedge, 
cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per, mile, price_bn, dealer_hedge,\n cpersonal_hedge, province, model_slug,\n model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def predict_for_future(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2, times=3):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n # 校验参数\n check_params_value(city, model_detail_slug, use_time, mile, category='valuate')\n\n # 查找款型对应的新车指导价,调整后的款型\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug']\n\n # 预测返回保值率\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_future(final_model_detail_slug, city, use_time, times)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per, mile, price_bn, dealer_hedge,\n cpersonal_hedge, province, model_slug,\n model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def history_price_trend(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 计算历史价格趋势\n \"\"\"\n # 校验参数\n check_params_value(city, model_detail_slug, use_time, mile, category='history')\n # 计算时间\n times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']\n nums = 6\n if use_time <= 6:\n times_str = []\n nums = use_time-1\n for i in range(0, nums+1):\n times_str.append(str(-i))\n # 计算车商交易价,车商收购价的历史价格走势\n data_buy, data_private, data_sell = self.predict_for_history(city, model_detail_slug, use_time, mile)\n\n # 处理异常值\n data_buy = process_unreasonable_history_price(data_buy, nums)\n data_sell = process_unreasonable_history_price(data_sell, nums)\n data_private = process_unreasonable_history_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n def future_price_trend(self, city='深圳', model_detail_slug='model_25023_cs', use_time=365, mile=2, ret_type='records'):\n \"\"\"\n 计算未来价格趋势\n \"\"\"\n # 校验参数\n check_params_value(city, model_detail_slug, use_time, mile, category='future')\n # 计算时间\n times_str = ['0', '12', '24', '36']\n nums = 3\n if use_time > 204:\n times_str = []\n nums = int((240-use_time) / 12)\n for i in range(0, nums+1):\n times_str.append(str(i*12))\n # 计算个人交易价的未来价格趋势\n data_buy, data_private, data_sell = self.predict_for_future(city, model_detail_slug, use_time, mile, len(times_str))\n\n data_buy = process_unreasonable_future_price(data_buy, nums)\n data_sell = process_unreasonable_future_price(data_sell, nums)\n data_private = process_unreasonable_future_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 
'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n", "step-ids": [ 12, 15, 16, 18, 25 ] }
[ 12, 15, 16, 18, 25 ]
import csv from matplotlib import pyplot as plt from datetime import datetime file_one = 'data/dwifh_all_sales.csv' file_two = 'data/dwifh_bc_sales.csv' # create code to automatically build a dictionary for each album? with open(file_one) as fo: reader = csv.reader(fo) header = next(reader) album = {} dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [], [] for row in reader: if row[2].strip() == 'Harm\'s Way': dates.append(float(row[0].strip())) cd_income.append(int(float(row[4].strip()))) dd_income.append(int(float(row[5].strip()))) total_profit.append(int(float(row[7].strip()))) artist_payout.append(int(float(row[8].strip()))) else: pass album_alltime_profit = sum(total_profit) artist_alltime_payout = sum(artist_payout) # complete the dictionary for this album album['title'] = 'Harm\'s Way' album['period of sales'] = dates album['cd_income_data'] = cd_income album['dd_income_data'] = dd_income album['all_time_profit'] = album_alltime_profit album['all_time_payout'] = artist_alltime_payout for key, value in album.items(): print(f'{key}: {value}') plt.style.use('seaborn') fig, ax = plt.subplots() ax.plot(album['period of sales'], album['dd_income_data'], c='red') ax.plot(album['period of sales'], album['cd_income_data'], c = 'blue') plt.title('{} Sales - All Time'.format(album['title'])) plt.xlabel('', fontsize=16) fig.autofmt_xdate() plt.ylabel('CD (blue) and DD (red)', fontsize=16) plt.tick_params(axis='both', which='major', labelsize=16) #plt.show() # TASK: # 1. get the names of the albums from the .csv file and store # them in a list. make sure there are no duplicates. # parse the csv file and create a dictionary for each album, # assigning it the name taken from the name list. # use: for album in album_list: so the process is done once # for each album name. # the dict created for each album contains all the data pulled # from the csv file. create the dict, then append it to # a list of dicts. this list will, when done, contain four # dictionaries, one for each album. # but since it's done in a loop, all four dicts get created # automatically, but they contain different data, respective to # each album.
normal
{ "blob_id": "53380810a3d9787fe7c373cf1829f2d849a91c3c", "index": 8456, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open(file_one) as fo:\n reader = csv.reader(fo)\n header = next(reader)\n album = {}\n dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [\n ], []\n for row in reader:\n if row[2].strip() == \"Harm's Way\":\n dates.append(float(row[0].strip()))\n cd_income.append(int(float(row[4].strip())))\n dd_income.append(int(float(row[5].strip())))\n total_profit.append(int(float(row[7].strip())))\n artist_payout.append(int(float(row[8].strip())))\n else:\n pass\n<mask token>\nfor key, value in album.items():\n print(f'{key}: {value}')\nplt.style.use('seaborn')\n<mask token>\nax.plot(album['period of sales'], album['dd_income_data'], c='red')\nax.plot(album['period of sales'], album['cd_income_data'], c='blue')\nplt.title('{} Sales - All Time'.format(album['title']))\nplt.xlabel('', fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel('CD (blue) and DD (red)', fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\n", "step-3": "<mask token>\nfile_one = 'data/dwifh_all_sales.csv'\nfile_two = 'data/dwifh_bc_sales.csv'\nwith open(file_one) as fo:\n reader = csv.reader(fo)\n header = next(reader)\n album = {}\n dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [\n ], []\n for row in reader:\n if row[2].strip() == \"Harm's Way\":\n dates.append(float(row[0].strip()))\n cd_income.append(int(float(row[4].strip())))\n dd_income.append(int(float(row[5].strip())))\n total_profit.append(int(float(row[7].strip())))\n artist_payout.append(int(float(row[8].strip())))\n else:\n pass\nalbum_alltime_profit = sum(total_profit)\nartist_alltime_payout = sum(artist_payout)\nalbum['title'] = \"Harm's Way\"\nalbum['period of sales'] = dates\nalbum['cd_income_data'] = cd_income\nalbum['dd_income_data'] = dd_income\nalbum['all_time_profit'] = album_alltime_profit\nalbum['all_time_payout'] = artist_alltime_payout\nfor key, value in album.items():\n print(f'{key}: {value}')\nplt.style.use('seaborn')\nfig, ax = plt.subplots()\nax.plot(album['period of sales'], album['dd_income_data'], c='red')\nax.plot(album['period of sales'], album['cd_income_data'], c='blue')\nplt.title('{} Sales - All Time'.format(album['title']))\nplt.xlabel('', fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel('CD (blue) and DD (red)', fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\n", "step-4": "import csv\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\nfile_one = 'data/dwifh_all_sales.csv'\nfile_two = 'data/dwifh_bc_sales.csv'\nwith open(file_one) as fo:\n reader = csv.reader(fo)\n header = next(reader)\n album = {}\n dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [\n ], []\n for row in reader:\n if row[2].strip() == \"Harm's Way\":\n dates.append(float(row[0].strip()))\n cd_income.append(int(float(row[4].strip())))\n dd_income.append(int(float(row[5].strip())))\n total_profit.append(int(float(row[7].strip())))\n artist_payout.append(int(float(row[8].strip())))\n else:\n pass\nalbum_alltime_profit = sum(total_profit)\nartist_alltime_payout = sum(artist_payout)\nalbum['title'] = \"Harm's Way\"\nalbum['period of sales'] = dates\nalbum['cd_income_data'] = cd_income\nalbum['dd_income_data'] = dd_income\nalbum['all_time_profit'] = album_alltime_profit\nalbum['all_time_payout'] = artist_alltime_payout\nfor key, value in album.items():\n print(f'{key}: {value}')\nplt.style.use('seaborn')\nfig, ax = 
plt.subplots()\nax.plot(album['period of sales'], album['dd_income_data'], c='red')\nax.plot(album['period of sales'], album['cd_income_data'], c='blue')\nplt.title('{} Sales - All Time'.format(album['title']))\nplt.xlabel('', fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel('CD (blue) and DD (red)', fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\n", "step-5": "import csv\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\n\nfile_one = 'data/dwifh_all_sales.csv'\nfile_two = 'data/dwifh_bc_sales.csv'\n\n# create code to automatically build a dictionary for each album?\n\nwith open(file_one) as fo:\n reader = csv.reader(fo)\n header = next(reader)\n\n album = {}\n dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [], []\n\n for row in reader:\n if row[2].strip() == 'Harm\\'s Way':\n dates.append(float(row[0].strip()))\n cd_income.append(int(float(row[4].strip())))\n dd_income.append(int(float(row[5].strip())))\n total_profit.append(int(float(row[7].strip())))\n artist_payout.append(int(float(row[8].strip())))\n else:\n pass\n\nalbum_alltime_profit = sum(total_profit)\nartist_alltime_payout = sum(artist_payout)\n\n# complete the dictionary for this album\nalbum['title'] = 'Harm\\'s Way'\nalbum['period of sales'] = dates\nalbum['cd_income_data'] = cd_income\nalbum['dd_income_data'] = dd_income\nalbum['all_time_profit'] = album_alltime_profit\nalbum['all_time_payout'] = artist_alltime_payout\n\nfor key, value in album.items():\n print(f'{key}: {value}')\n\nplt.style.use('seaborn')\nfig, ax = plt.subplots()\nax.plot(album['period of sales'], album['dd_income_data'], c='red')\nax.plot(album['period of sales'], album['cd_income_data'], c = 'blue')\n\nplt.title('{} Sales - All Time'.format(album['title']))\nplt.xlabel('', fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel('CD (blue) and DD (red)', fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\n\n#plt.show()\n\n# TASK:\n# 1. get the names of the albums from the .csv file and store\n# them in a list. make sure there are no duplicates.\n\n# parse the csv file and create a dictionary for each album,\n# assigning it the name taken from the name list.\n# use: for album in album_list: so the process is done once\n# for each album name.\n# the dict created for each album contains all the data pulled\n# from the csv file. create the dict, then append it to\n# a list of dicts. this list will, when done, contain four\n# dictionaries, one for each album.\n# but since it's done in a loop, all four dicts get created\n# automatically, but they contain different data, respective to\n# each album.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/env python # -*- coding: utf-8 -*- # staticbox.py import wx class StaticBox(wx.Dialog): def __init__(self, parent, id, title): wx.Dialog.__init__(self, parent, id, title, size = (250, 230)) wx.StaticBox(self, -1, 'Personal Info', (5, 5), size = (240, 170)) wx.CheckBox(self, -1, 'Male', (15, 30)) wx.CheckBox(self, -1, 'Married', (15, 55)) wx.StaticText(self, -1, 'Age', (15, 95)) wx.SpinCtrl(self, -1, '1', (55, 90), (60, -1), min = 1, max = 120) wx.Button(self, 1, 'Ok', (90, 185), (60, -1)) self.Bind(wx.EVT_BUTTON, self.OnClose, id = 1) self.Center() self.ShowModal() self.Destroy() def OnClose(self, event): self.Close() if __name__ == '__main__': app = wx.App() StaticBox(None, -1, 'staticbox.py') app.MainLoop()
normal
{ "blob_id": "96bf6220bfc884e3a19f70a63d9ecba449e2e7e2", "index": 6108, "step-1": "<mask token>\n\n\nclass StaticBox(wx.Dialog):\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass StaticBox(wx.Dialog):\n\n def __init__(self, parent, id, title):\n wx.Dialog.__init__(self, parent, id, title, size=(250, 230))\n wx.StaticBox(self, -1, 'Personal Info', (5, 5), size=(240, 170))\n wx.CheckBox(self, -1, 'Male', (15, 30))\n wx.CheckBox(self, -1, 'Married', (15, 55))\n wx.StaticText(self, -1, 'Age', (15, 95))\n wx.SpinCtrl(self, -1, '1', (55, 90), (60, -1), min=1, max=120)\n wx.Button(self, 1, 'Ok', (90, 185), (60, -1))\n self.Bind(wx.EVT_BUTTON, self.OnClose, id=1)\n self.Center()\n self.ShowModal()\n self.Destroy()\n\n def OnClose(self, event):\n self.Close()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass StaticBox(wx.Dialog):\n\n def __init__(self, parent, id, title):\n wx.Dialog.__init__(self, parent, id, title, size=(250, 230))\n wx.StaticBox(self, -1, 'Personal Info', (5, 5), size=(240, 170))\n wx.CheckBox(self, -1, 'Male', (15, 30))\n wx.CheckBox(self, -1, 'Married', (15, 55))\n wx.StaticText(self, -1, 'Age', (15, 95))\n wx.SpinCtrl(self, -1, '1', (55, 90), (60, -1), min=1, max=120)\n wx.Button(self, 1, 'Ok', (90, 185), (60, -1))\n self.Bind(wx.EVT_BUTTON, self.OnClose, id=1)\n self.Center()\n self.ShowModal()\n self.Destroy()\n\n def OnClose(self, event):\n self.Close()\n\n\nif __name__ == '__main__':\n app = wx.App()\n StaticBox(None, -1, 'staticbox.py')\n app.MainLoop()\n", "step-4": "import wx\n\n\nclass StaticBox(wx.Dialog):\n\n def __init__(self, parent, id, title):\n wx.Dialog.__init__(self, parent, id, title, size=(250, 230))\n wx.StaticBox(self, -1, 'Personal Info', (5, 5), size=(240, 170))\n wx.CheckBox(self, -1, 'Male', (15, 30))\n wx.CheckBox(self, -1, 'Married', (15, 55))\n wx.StaticText(self, -1, 'Age', (15, 95))\n wx.SpinCtrl(self, -1, '1', (55, 90), (60, -1), min=1, max=120)\n wx.Button(self, 1, 'Ok', (90, 185), (60, -1))\n self.Bind(wx.EVT_BUTTON, self.OnClose, id=1)\n self.Center()\n self.ShowModal()\n self.Destroy()\n\n def OnClose(self, event):\n self.Close()\n\n\nif __name__ == '__main__':\n app = wx.App()\n StaticBox(None, -1, 'staticbox.py')\n app.MainLoop()\n", "step-5": "#!/usr/bin/env python \n# -*- coding: utf-8 -*- \n\n# staticbox.py\n\nimport wx\n\nclass StaticBox(wx.Dialog):\n def __init__(self, parent, id, title):\n wx.Dialog.__init__(self, parent, id, title, size = (250, 230))\n\n wx.StaticBox(self, -1, 'Personal Info', (5, 5), size = (240, 170))\n wx.CheckBox(self, -1, 'Male', (15, 30))\n wx.CheckBox(self, -1, 'Married', (15, 55))\n wx.StaticText(self, -1, 'Age', (15, 95))\n wx.SpinCtrl(self, -1, '1', (55, 90), (60, -1), min = 1, max = 120)\n wx.Button(self, 1, 'Ok', (90, 185), (60, -1))\n\n self.Bind(wx.EVT_BUTTON, self.OnClose, id = 1)\n\n self.Center()\n self.ShowModal()\n self.Destroy()\n\n def OnClose(self, event):\n self.Close()\n\nif __name__ == '__main__':\n app = wx.App()\n StaticBox(None, -1, 'staticbox.py')\n app.MainLoop()\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
from fractions import Fraction
from functools import reduce  # needed on Python 3, where reduce is no longer a builtin
import itertools

# With MOD
MOD = 10**9+7
def ncomb(n, r):
    return reduce(lambda a, b: (a*b)%MOD, (Fraction(n-i, i+1) for i in range(r)), 1)

# No MOD (this definition shadows the MOD variant above)
def ncomb(n, r):
    return reduce(lambda a, b: (a*b), (Fraction(n-i, i+1) for i in range(r)), 1)

def comb(a, l):
    return [subset for subset in itertools.combinations(a, l)]

def comball(a):
    r = []
    for l in range(0, len(a)+1):
        r.extend(comb(a, l))
    return r
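# Illustrative usage (added; not part of the original snippet) showing what the
# helpers above return:
if __name__ == '__main__':
    print(ncomb(5, 2))         # 10, i.e. C(5, 2), returned as a Fraction
    print(comb([1, 2, 3], 2))  # [(1, 2), (1, 3), (2, 3)]
    print(comball([1, 2]))     # [(), (1,), (2,), (1, 2)]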
normal
{ "blob_id": "2bc0d76e17f2f52fce9cc1925a3a0e0f53f5b81d", "index": 7953, "step-1": "<mask token>\n\n\ndef ncomb(n, r):\n return reduce(lambda a, b: a * b % MOD, (Fraction(n - i, i + 1) for i in\n range(r)), 1)\n\n\n<mask token>\n\n\ndef comball(a):\n r = []\n for l in range(0, len(a) + 1):\n r.extend(comb(a, l))\n return r\n", "step-2": "<mask token>\n\n\ndef ncomb(n, r):\n return reduce(lambda a, b: a * b % MOD, (Fraction(n - i, i + 1) for i in\n range(r)), 1)\n\n\n<mask token>\n\n\ndef comb(a, l):\n return [subset for subset in itertools.combinations(a, l)]\n\n\ndef comball(a):\n r = []\n for l in range(0, len(a) + 1):\n r.extend(comb(a, l))\n return r\n", "step-3": "<mask token>\n\n\ndef ncomb(n, r):\n return reduce(lambda a, b: a * b % MOD, (Fraction(n - i, i + 1) for i in\n range(r)), 1)\n\n\ndef ncomb(n, r):\n return reduce(lambda a, b: a * b, (Fraction(n - i, i + 1) for i in\n range(r)), 1)\n\n\ndef comb(a, l):\n return [subset for subset in itertools.combinations(a, l)]\n\n\ndef comball(a):\n r = []\n for l in range(0, len(a) + 1):\n r.extend(comb(a, l))\n return r\n", "step-4": "<mask token>\nMOD = 10 ** 9 + 7\n\n\ndef ncomb(n, r):\n return reduce(lambda a, b: a * b % MOD, (Fraction(n - i, i + 1) for i in\n range(r)), 1)\n\n\ndef ncomb(n, r):\n return reduce(lambda a, b: a * b, (Fraction(n - i, i + 1) for i in\n range(r)), 1)\n\n\ndef comb(a, l):\n return [subset for subset in itertools.combinations(a, l)]\n\n\ndef comball(a):\n r = []\n for l in range(0, len(a) + 1):\n r.extend(comb(a, l))\n return r\n", "step-5": "from fractions import Fraction\nimport itertools\n\n# With MOD\nMOD = 10**9+7\ndef ncomb(n, r):\n return reduce(lambda a, b: (a*b)%MOD, (Fraction(n-i, i+1) for i in range(r)), 1)\n\n# No MOD\ndef ncomb(n, r):\n return reduce(lambda a, b: (a*b), (Fraction(n-i, i+1) for i in range(r)), 1)\n\ndef comb(a, l):\n return [subset for subset in itertools.combinations(a, l)]\n\ndef comball(a):\n r = []\n for l in range(0, len(a)+1):\n r.extend(comb(a, l))\n return r\n", "step-ids": [ 2, 3, 4, 5, 7 ] }
[ 2, 3, 4, 5, 7 ]
'''
Created on Nov 1, 2013

@author: hanchensu
'''
from numpy import *
import numpy as np

def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
    # Only the setup of the simplified SMO routine is kept in this snippet.
    dataMatrix = mat(dataMatIn); labelMat = mat(classLabels).transpose()
    b = 0; m,n = shape(dataMatrix)

matrix = mat([[1,2],[3,4],[5,6]])
m,n = shape(matrix)

matA = mat([[1,2],[2,3],[5,6]])
matB = mat([1,2,3]).transpose()
print(matA)
print(matB)
print(multiply(matA,matB))

# x1 = np.arange(9.0).reshape((3, 3))
# x2 = np.arange(3.0)
# print(x1)
# print(x2)
# print(np.multiply(x1, x2))
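# Illustration (added; not part of the original script): multiply() broadcasts
# matB's single column across matA's two columns, so the element-wise product is
# [[1*1, 2*1], [2*2, 3*2], [5*3, 6*3]].
expected = mat([[1, 2], [4, 6], [15, 18]])
assert (multiply(matA, matB) == expected).all()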
normal
{ "blob_id": "9bf8834b12bcace0f6daf64adae1babe78bb04fa", "index": 5553, "step-1": "'''\nCreated on Nov 1, 2013\n\n@author: hanchensu\n'''\nfrom numpy import *\nimport numpy as np\n\ndef smoSimple(dataMatIn, classLabels, C, toler, maxIter):\n dataMatrix = mat(dataMatIn); labelMat = mat(classLabels).transpose()\n b = 0; m,n = shape(dataMatrix)\n \nmatrix = mat([[1,2],[3,4],[5,6]])\nm,n= shape(matrix)\n\nmatA = mat([[1,2],[2,3],[5,6]])\nmatB = mat([1,2,3]).transpose()\nprint matA\nprint matB\nprint multiply(matA,matB) \n\n# x1 = np.arange(9.0).reshape((3, 3))\n# x2 = np.arange(3.0)\n# print x1\n# print x2\n# print np.multiply(x1, x2)", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
'''
#Conditional statement example
#fdragon50
#2016
'''


# Comments: notes/annotations, or a record of unused (possibly reusable later) code
# Multi-line strings can be written between ''' ''', but using them as comments is not recommended
# Lines starting with # are not string literals; they are simply ignored
# Write comments so that anyone can understand them, and keep them concise
# Even better is code that anyone can understand without comments
# Aim to write readable code

#Conditional statement example
#fdragon50
#2016


input = 11
real_fdragon50 = 11
#real_k8805 = "ab"

if real_fdragon50 == input:
    print("Hello!")
#elif real_k8805 == input:
#    print("Hello!")
else:
    print("Who are you")
normal
{ "blob_id": "2da6debb1f9ae2c966a17fdfb3b668160a3ef8d7", "index": 1384, "step-1": "<mask token>\n", "step-2": "<mask token>\nif real_fdragon50 == input:\n print('Hello!')\nelse:\n print('Who are you')\n", "step-3": "<mask token>\ninput = 11\nreal_fdragon50 = 11\nif real_fdragon50 == input:\n print('Hello!')\nelse:\n print('Who are you')\n", "step-4": "'''\n#조건문 예제\n#fdragon50\n#2016\n'''\n\n\n# 주석 : 도움말/덧글 / 미사용(추후 사용가능한) 코드 기록\n# 여러줄의 문자열 표현은 ''' ''' 사이에 표현 가능하나 사용은 권장않음\n# #으로 시작하는것은 문자열 자체가 아닌.. 무시되는 구간\n# 주석은 누가봐도 이해할수있게 / 간결하게\n# 더 좋은것은 누가봐도 이해할수 있는 코드임\n# 가독성이 좋은 코드를 만들수 있도록..\n\n#조건문 예제\n#fdragon50\n#2016\n\n\ninput = 11\nreal_fdragon50 = 11\n#real_k8805 = \"ab\"\n\nif real_fdragon50 == input:\n print(\"Hello!\")\n#elif real_k8805 == input:\n# print(\"Hello!\")\nelse:\n print(\"Who are you\")\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='ClassLevel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('level', models.CharField(unique=True, max_length=100)), ], ), migrations.CreateModel( name='CourseRecord', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('Course_Title', models.CharField(unique=True, max_length=50)), ('Course_Code', models.CharField(unique=True, max_length=10)), ('Course_Unit', models.PositiveSmallIntegerField()), ('Semester', models.CharField(choices=[('First_Semester', 'First_Semester'), ('Second_Semester', 'Second_Semester')], max_length=20, default='Select_Semester')), ('level', models.ForeignKey(to='Qbank.ClassLevel')), ], ), migrations.CreateModel( name='QuestionBank', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('CourseTitle', models.CharField(max_length=50)), ('CourseCode', models.CharField(max_length=10)), ('CourseUnit', models.IntegerField()), ('Semester', models.CharField(choices=[('First_Semester', 'First_Semester'), ('Second_Semester', 'Second_Semester')], max_length=20, default='Select_Semester')), ('Date', models.DateField()), ('question_papers', models.FileField(upload_to='QuestionPapers')), ('level', models.ForeignKey(to='Qbank.ClassLevel')), ], ), migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('Account_Type', models.CharField(choices=[('L', 'Lecturer'), ('S', 'Student')], max_length=1, default='S')), ('Upload_Picture', models.ImageField(upload_to='profile_images', blank=True)), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)), ], ), ]
normal
{ "blob_id": "ab5400f4b44a53cb5cc2f6394bcdb8f55fd218f0", "index": 1813, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='ClassLevel', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('level', models.CharField(unique=True,\n max_length=100))]), migrations.CreateModel(name='CourseRecord',\n fields=[('id', models.AutoField(verbose_name='ID', serialize=False,\n auto_created=True, primary_key=True)), ('Course_Title', models.\n CharField(unique=True, max_length=50)), ('Course_Code', models.\n CharField(unique=True, max_length=10)), ('Course_Unit', models.\n PositiveSmallIntegerField()), ('Semester', models.CharField(choices\n =[('First_Semester', 'First_Semester'), ('Second_Semester',\n 'Second_Semester')], max_length=20, default='Select_Semester')), (\n 'level', models.ForeignKey(to='Qbank.ClassLevel'))]), migrations.\n CreateModel(name='QuestionBank', fields=[('id', models.AutoField(\n verbose_name='ID', serialize=False, auto_created=True, primary_key=\n True)), ('CourseTitle', models.CharField(max_length=50)), (\n 'CourseCode', models.CharField(max_length=10)), ('CourseUnit',\n models.IntegerField()), ('Semester', models.CharField(choices=[(\n 'First_Semester', 'First_Semester'), ('Second_Semester',\n 'Second_Semester')], max_length=20, default='Select_Semester')), (\n 'Date', models.DateField()), ('question_papers', models.FileField(\n upload_to='QuestionPapers')), ('level', models.ForeignKey(to=\n 'Qbank.ClassLevel'))]), migrations.CreateModel(name='UserProfile',\n fields=[('id', models.AutoField(verbose_name='ID', serialize=False,\n auto_created=True, primary_key=True)), ('Account_Type', models.\n CharField(choices=[('L', 'Lecturer'), ('S', 'Student')], max_length\n =1, default='S')), ('Upload_Picture', models.ImageField(upload_to=\n 'profile_images', blank=True)), ('user', models.OneToOneField(to=\n settings.AUTH_USER_MODEL))])]\n", "step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='ClassLevel', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('level', models.CharField(unique=True,\n max_length=100))]), migrations.CreateModel(name='CourseRecord',\n fields=[('id', models.AutoField(verbose_name='ID', serialize=False,\n auto_created=True, primary_key=True)), ('Course_Title', models.\n CharField(unique=True, max_length=50)), ('Course_Code', models.\n CharField(unique=True, max_length=10)), ('Course_Unit', models.\n PositiveSmallIntegerField()), ('Semester', models.CharField(choices\n =[('First_Semester', 'First_Semester'), ('Second_Semester',\n 'Second_Semester')], max_length=20, default='Select_Semester')), (\n 'level', models.ForeignKey(to='Qbank.ClassLevel'))]), migrations.\n CreateModel(name='QuestionBank', fields=[('id', models.AutoField(\n verbose_name='ID', serialize=False, auto_created=True, primary_key=\n True)), ('CourseTitle', models.CharField(max_length=50)), (\n 'CourseCode', models.CharField(max_length=10)), ('CourseUnit',\n models.IntegerField()), 
('Semester', models.CharField(choices=[(\n 'First_Semester', 'First_Semester'), ('Second_Semester',\n 'Second_Semester')], max_length=20, default='Select_Semester')), (\n 'Date', models.DateField()), ('question_papers', models.FileField(\n upload_to='QuestionPapers')), ('level', models.ForeignKey(to=\n 'Qbank.ClassLevel'))]), migrations.CreateModel(name='UserProfile',\n fields=[('id', models.AutoField(verbose_name='ID', serialize=False,\n auto_created=True, primary_key=True)), ('Account_Type', models.\n CharField(choices=[('L', 'Lecturer'), ('S', 'Student')], max_length\n =1, default='S')), ('Upload_Picture', models.ImageField(upload_to=\n 'profile_images', blank=True)), ('user', models.OneToOneField(to=\n settings.AUTH_USER_MODEL))])]\n", "step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ClassLevel',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('level', models.CharField(unique=True, max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='CourseRecord',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('Course_Title', models.CharField(unique=True, max_length=50)),\n ('Course_Code', models.CharField(unique=True, max_length=10)),\n ('Course_Unit', models.PositiveSmallIntegerField()),\n ('Semester', models.CharField(choices=[('First_Semester', 'First_Semester'), ('Second_Semester', 'Second_Semester')], max_length=20, default='Select_Semester')),\n ('level', models.ForeignKey(to='Qbank.ClassLevel')),\n ],\n ),\n migrations.CreateModel(\n name='QuestionBank',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('CourseTitle', models.CharField(max_length=50)),\n ('CourseCode', models.CharField(max_length=10)),\n ('CourseUnit', models.IntegerField()),\n ('Semester', models.CharField(choices=[('First_Semester', 'First_Semester'), ('Second_Semester', 'Second_Semester')], max_length=20, default='Select_Semester')),\n ('Date', models.DateField()),\n ('question_papers', models.FileField(upload_to='QuestionPapers')),\n ('level', models.ForeignKey(to='Qbank.ClassLevel')),\n ],\n ),\n migrations.CreateModel(\n name='UserProfile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('Account_Type', models.CharField(choices=[('L', 'Lecturer'), ('S', 'Student')], max_length=1, default='S')),\n ('Upload_Picture', models.ImageField(upload_to='profile_images', blank=True)),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import os, subprocess

def greet(name):
    hostname = subprocess.check_output("hostname").decode("utf-8")[:-1]
    return "Hello, {}! I'm {}#{}.".format(name, hostname, os.getppid())
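# Example call (added for illustration; the exact output depends on the host name
# and the parent process id at run time):
if __name__ == "__main__":
    print(greet("world"))  # e.g. Hello, world! I'm myhost#1234.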
normal
{ "blob_id": "9bd55a2f224acfa2cb34d0ca14a25e8864d644b3", "index": 5250, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef greet(name):\n hostname = subprocess.check_output('hostname').decode('utf-8')[:-1]\n return \"Hello, {}! I'm {}#{}.\".format(name, hostname, os.getppid())\n", "step-3": "import os, subprocess\n\n\ndef greet(name):\n hostname = subprocess.check_output('hostname').decode('utf-8')[:-1]\n return \"Hello, {}! I'm {}#{}.\".format(name, hostname, os.getppid())\n", "step-4": "import os, subprocess\n\ndef greet(name):\n hostname = subprocess.check_output(\"hostname\").decode(\"utf-8\")[:-1]\n return \"Hello, {}! I'm {}#{}.\".format(name, hostname, os.getppid())\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
N, M = map(int, input().split())  # N is the number of switches, M is the number of bulbs
lights = [[0] * N for _ in range(M)]
for i in range(M):
    temp = list(map(int, input().split()))  # element 0 is the switch count, the rest are switch indices
    k = temp[0]
    switches = temp[1:]
    for j in range(k):
        lights[i][switches[j]-1] = 1
P = list(map(int, input().split()))  # bulb i lights up when its pressed-switch count mod 2 equals P[i]

answer_count = 0
for i in range(2**N):
    flag = True
    for k in range(M):
        count = 0
        for j in range(N):
            if (i >> j) & 1:
                count += lights[k][j]
        if count % 2 != P[k]:
            flag = False
            break
    if flag:
        answer_count += 1

print(answer_count)
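# Worked example (added): with the input
#   2 2
#   2 1 2
#   1 2
#   0 1
# only pressing both switch 1 and switch 2 gives the required parities
# (counts 2 and 1 -> parities 0 and 1), so the program prints 1.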
normal
{ "blob_id": "c4ac7ff5d45af9d325f65b4d454a48ca0d8f86df", "index": 8808, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(M):\n temp = list(map(int, input().split()))\n k = temp[0]\n switches = temp[1:]\n for j in range(k):\n lights[i][switches[j] - 1] = 1\n<mask token>\nfor i in range(2 ** N):\n flag = True\n for k in range(M):\n count = 0\n for j in range(N):\n if i >> j & 1:\n count += lights[k][j]\n if count % 2 != P[k]:\n flag = False\n break\n if flag:\n answer_count += 1\nprint(answer_count)\n", "step-3": "N, M = map(int, input().split())\nlights = [([0] * N) for _ in range(M)]\nfor i in range(M):\n temp = list(map(int, input().split()))\n k = temp[0]\n switches = temp[1:]\n for j in range(k):\n lights[i][switches[j] - 1] = 1\nP = list(map(int, input().split()))\nanswer_count = 0\nfor i in range(2 ** N):\n flag = True\n for k in range(M):\n count = 0\n for j in range(N):\n if i >> j & 1:\n count += lights[k][j]\n if count % 2 != P[k]:\n flag = False\n break\n if flag:\n answer_count += 1\nprint(answer_count)\n", "step-4": "N, M = map(int, input().split()) # Nはスイッチの数、Mは電球の数\nlights = [[0] * N for _ in range(M)]\nfor i in range(M): \n temp = list(map(int, input().split())) # 0番目はスイッチの個数、1番目以降はスイッチを示す\n k = temp[0]\n switches = temp[1:]\n for j in range(k):\n lights[i][switches[j]-1] = 1\nP = list(map(int, input().split())) # 個数を2で割ったあまりが要素と等しい場合に点灯する\n\nanswer_count = 0\nfor i in range(2**N):\n flag = True\n for k in range(M):\n count = 0\n for j in range(N):\n if (i >> j) & 1:\n count += lights[k][j]\n if count % 2 != P[k]:\n flag = False\n break\n if flag:\n answer_count += 1\n\nprint(answer_count)", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
f = open('poem.txt')
for line in f:
    print(line, end='')
f.close()
normal
{ "blob_id": "76348448a658736627efe8fa6b19c752191966e7", "index": 5409, "step-1": "f=open('poem.txt')\nfor line in f:\n\tprint line,\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
#!/usr/bin/env python ############################################################################### # $Id$ # # Project: GDAL/OGR Test Suite # Purpose: Test support for the various "EMPTY" WKT geometry representations. # Author: Frank Warmerdam <warmerdam@pobox.com> # ############################################################################### # Copyright (c) 2004, Frank Warmerdam <warmerdam@pobox.com> # Copyright (c) 2008, Even Rouault <even dot rouault at mines-paris dot org> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Library General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Library General Public License for more details. # # You should have received a copy of the GNU Library General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., 59 Temple Place - Suite 330, # Boston, MA 02111-1307, USA. ############################################################################### import os import sys import string sys.path.append( '../pymod' ) import gdaltest import ogrtest from osgeo import ogr from osgeo import gdal class TestWktEmpty: def __init__( self, inString, expectedOutString ): self.inString = inString self.expectedOutString = expectedOutString def isEmpty(self, geom): try: ogr.Geometry.IsEmpty except: return 'skip' if (geom.IsEmpty() == False): geom.Destroy() gdaltest.post_reason ("IsEmpty returning false for an empty geometry") return 'fail' return 'success' def CheckIsEmpty(self): geom = ogr.CreateGeometryFromWkt( self.inString ) wkt = geom.ExportToWkt() if self.expectedOutString != 'POINT EMPTY': if ogr.CreateGeometryFromWkb(geom.ExportToWkb()).ExportToWkt() != wkt: return 'fail' if wkt == self.expectedOutString: if self.isEmpty(geom) == 'fail': return 'fail' else: return 'success' else: gdaltest.post_reason( 'WKT is wrong: ' + wkt + '. Expected value is: ' + self.expectedOutString ) return 'fail' empty_wkt_list = [ \ ('GEOMETRYCOLLECTION(EMPTY)', 'GEOMETRYCOLLECTION EMPTY'), ('MULTIPOLYGON( EMPTY )', 'MULTIPOLYGON EMPTY'), ('MULTILINESTRING(EMPTY)', 'MULTILINESTRING EMPTY'), ('MULTIPOINT(EMPTY)', 'MULTIPOINT EMPTY'), ('POINT ( EMPTY )', 'POINT EMPTY'), ('LINESTRING(EMPTY)', 'LINESTRING EMPTY'), ('POLYGON ( EMPTY )', 'POLYGON EMPTY'), ('GEOMETRYCOLLECTION EMPTY', 'GEOMETRYCOLLECTION EMPTY'), ('MULTIPOLYGON EMPTY', 'MULTIPOLYGON EMPTY'), ('MULTILINESTRING EMPTY', 'MULTILINESTRING EMPTY'), ('MULTIPOINT EMPTY', 'MULTIPOINT EMPTY'), ('POINT EMPTY', 'POINT EMPTY'), ('LINESTRING EMPTY', 'LINESTRING EMPTY'), ('POLYGON EMPTY', 'POLYGON EMPTY') ] def ogr_wktempty_test_partial_empty_geoms(): # Multipoint with a valid point and an empty point wkt = 'MULTIPOINT (1 1)' geom = ogr.CreateGeometryFromWkt(wkt) geom.AddGeometry(ogr.Geometry( type = ogr.wkbPoint )) if geom.ExportToWkt() != wkt: gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. 
Expected value is: ' + wkt ) return 'fail' # Multipoint with an empty point and a valid point geom = ogr.CreateGeometryFromWkt('MULTIPOINT EMPTY') geom.AddGeometry(ogr.Geometry( type = ogr.wkbPoint )) geom.AddGeometry(ogr.CreateGeometryFromWkt('POINT (1 1)')) wkt = 'MULTIPOINT (1 1)' if geom.ExportToWkt() != wkt: gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt ) return 'fail' # Multilinestring with a valid string and an empty linestring wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))' geom = ogr.CreateGeometryFromWkt(wkt) geom.AddGeometry(ogr.Geometry( type = ogr.wkbLineString )) if geom.ExportToWkt() != wkt: gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt ) return 'fail' # Multilinestring with an empty linestring and a valid linestring geom = ogr.CreateGeometryFromWkt('MULTILINESTRING EMPTY') geom.AddGeometry(ogr.Geometry( type = ogr.wkbLineString )) geom.AddGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 1,2 3,4 5,0 1)')) wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))' if geom.ExportToWkt() != wkt: gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt ) return 'fail' # Polygon with a valid external ring and an empty internal ring wkt = 'POLYGON ((100 0,100 10,110 10,100 0))' geom = ogr.CreateGeometryFromWkt(wkt) geom.AddGeometry(ogr.Geometry( type = ogr.wkbLinearRing )) if geom.ExportToWkt() != wkt: gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt ) return 'fail' # Polygon with an empty external ring and a valid internal ring wkt = 'POLYGON EMPTY' geom = ogr.CreateGeometryFromWkt(wkt) geom.AddGeometry(ogr.Geometry( type = ogr.wkbLinearRing )) ring = ogr.Geometry( type = ogr.wkbLinearRing ) ring.AddPoint_2D( 0, 0) ring.AddPoint_2D( 10, 0) ring.AddPoint_2D( 10, 10) ring.AddPoint_2D( 0, 10) ring.AddPoint_2D( 0, 0) geom.AddGeometry(ring) if geom.ExportToWkt() != wkt: gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt ) return 'fail' # Multipolygon with a valid polygon and an empty polygon wkt = 'MULTIPOLYGON (((0 0,0 10,10 10,0 0)))' geom = ogr.CreateGeometryFromWkt(wkt) geom.AddGeometry(ogr.Geometry( type = ogr.wkbPolygon )) if geom.ExportToWkt() != wkt: gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt ) return 'fail' # Multipolygon with an empty polygon and a valid polygon geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON EMPTY') geom.AddGeometry(ogr.Geometry( type = ogr.wkbPolygon )) geom.AddGeometry(ogr.CreateGeometryFromWkt('POLYGON ((100 0,100 10,110 10,100 0))')) wkt = 'MULTIPOLYGON (((100 0,100 10,110 10,100 0)))' if geom.ExportToWkt() != wkt: gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt ) return 'fail' return 'success' gdaltest_list = [] for item in empty_wkt_list: ut = TestWktEmpty( item[0], item[1] ) gdaltest_list.append( (ut.CheckIsEmpty, item[0]) ) gdaltest_list.append( ogr_wktempty_test_partial_empty_geoms ) if __name__ == '__main__': gdaltest.setup_run( 'ogr_wktempty' ) gdaltest.run_tests( gdaltest_list ) gdaltest.summarize()
normal
{ "blob_id": "1ef1dcc8fdf4d813dad70c860e33778715d51b0c", "index": 1575, "step-1": "<mask token>\n\n\nclass TestWktEmpty:\n\n def __init__(self, inString, expectedOutString):\n self.inString = inString\n self.expectedOutString = expectedOutString\n\n def isEmpty(self, geom):\n try:\n ogr.Geometry.IsEmpty\n except:\n return 'skip'\n if geom.IsEmpty() == False:\n geom.Destroy()\n gdaltest.post_reason(\n 'IsEmpty returning false for an empty geometry')\n return 'fail'\n return 'success'\n\n def CheckIsEmpty(self):\n geom = ogr.CreateGeometryFromWkt(self.inString)\n wkt = geom.ExportToWkt()\n if self.expectedOutString != 'POINT EMPTY':\n if ogr.CreateGeometryFromWkb(geom.ExportToWkb()).ExportToWkt(\n ) != wkt:\n return 'fail'\n if wkt == self.expectedOutString:\n if self.isEmpty(geom) == 'fail':\n return 'fail'\n else:\n return 'success'\n else:\n gdaltest.post_reason('WKT is wrong: ' + wkt +\n '. Expected value is: ' + self.expectedOutString)\n return 'fail'\n\n\n<mask token>\n\n\ndef ogr_wktempty_test_partial_empty_geoms():\n wkt = 'MULTIPOINT (1 1)'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n geom = ogr.CreateGeometryFromWkt('MULTIPOINT EMPTY')\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))\n geom.AddGeometry(ogr.CreateGeometryFromWkt('POINT (1 1)'))\n wkt = 'MULTIPOINT (1 1)'\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n geom = ogr.CreateGeometryFromWkt('MULTILINESTRING EMPTY')\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))\n geom.AddGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 1,2 3,4 5,0 1)'))\n wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n wkt = 'POLYGON ((100 0,100 10,110 10,100 0))'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n wkt = 'POLYGON EMPTY'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))\n ring = ogr.Geometry(type=ogr.wkbLinearRing)\n ring.AddPoint_2D(0, 0)\n ring.AddPoint_2D(10, 0)\n ring.AddPoint_2D(10, 10)\n ring.AddPoint_2D(0, 10)\n ring.AddPoint_2D(0, 0)\n geom.AddGeometry(ring)\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n wkt = 'MULTIPOLYGON (((0 0,0 10,10 10,0 0)))'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. 
Expected value is: ' + wkt)\n return 'fail'\n geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON EMPTY')\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))\n geom.AddGeometry(ogr.CreateGeometryFromWkt(\n 'POLYGON ((100 0,100 10,110 10,100 0))'))\n wkt = 'MULTIPOLYGON (((100 0,100 10,110 10,100 0)))'\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n return 'success'\n\n\n<mask token>\n", "step-2": "<mask token>\nsys.path.append('../pymod')\n<mask token>\n\n\nclass TestWktEmpty:\n\n def __init__(self, inString, expectedOutString):\n self.inString = inString\n self.expectedOutString = expectedOutString\n\n def isEmpty(self, geom):\n try:\n ogr.Geometry.IsEmpty\n except:\n return 'skip'\n if geom.IsEmpty() == False:\n geom.Destroy()\n gdaltest.post_reason(\n 'IsEmpty returning false for an empty geometry')\n return 'fail'\n return 'success'\n\n def CheckIsEmpty(self):\n geom = ogr.CreateGeometryFromWkt(self.inString)\n wkt = geom.ExportToWkt()\n if self.expectedOutString != 'POINT EMPTY':\n if ogr.CreateGeometryFromWkb(geom.ExportToWkb()).ExportToWkt(\n ) != wkt:\n return 'fail'\n if wkt == self.expectedOutString:\n if self.isEmpty(geom) == 'fail':\n return 'fail'\n else:\n return 'success'\n else:\n gdaltest.post_reason('WKT is wrong: ' + wkt +\n '. Expected value is: ' + self.expectedOutString)\n return 'fail'\n\n\n<mask token>\n\n\ndef ogr_wktempty_test_partial_empty_geoms():\n wkt = 'MULTIPOINT (1 1)'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n geom = ogr.CreateGeometryFromWkt('MULTIPOINT EMPTY')\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))\n geom.AddGeometry(ogr.CreateGeometryFromWkt('POINT (1 1)'))\n wkt = 'MULTIPOINT (1 1)'\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n geom = ogr.CreateGeometryFromWkt('MULTILINESTRING EMPTY')\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))\n geom.AddGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 1,2 3,4 5,0 1)'))\n wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n wkt = 'POLYGON ((100 0,100 10,110 10,100 0))'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n wkt = 'POLYGON EMPTY'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))\n ring = ogr.Geometry(type=ogr.wkbLinearRing)\n ring.AddPoint_2D(0, 0)\n ring.AddPoint_2D(10, 0)\n ring.AddPoint_2D(10, 10)\n ring.AddPoint_2D(0, 10)\n ring.AddPoint_2D(0, 0)\n geom.AddGeometry(ring)\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. 
Expected value is: ' + wkt)\n return 'fail'\n wkt = 'MULTIPOLYGON (((0 0,0 10,10 10,0 0)))'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON EMPTY')\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))\n geom.AddGeometry(ogr.CreateGeometryFromWkt(\n 'POLYGON ((100 0,100 10,110 10,100 0))'))\n wkt = 'MULTIPOLYGON (((100 0,100 10,110 10,100 0)))'\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n return 'success'\n\n\n<mask token>\nfor item in empty_wkt_list:\n ut = TestWktEmpty(item[0], item[1])\n gdaltest_list.append((ut.CheckIsEmpty, item[0]))\ngdaltest_list.append(ogr_wktempty_test_partial_empty_geoms)\nif __name__ == '__main__':\n gdaltest.setup_run('ogr_wktempty')\n gdaltest.run_tests(gdaltest_list)\n gdaltest.summarize()\n", "step-3": "<mask token>\nsys.path.append('../pymod')\n<mask token>\n\n\nclass TestWktEmpty:\n\n def __init__(self, inString, expectedOutString):\n self.inString = inString\n self.expectedOutString = expectedOutString\n\n def isEmpty(self, geom):\n try:\n ogr.Geometry.IsEmpty\n except:\n return 'skip'\n if geom.IsEmpty() == False:\n geom.Destroy()\n gdaltest.post_reason(\n 'IsEmpty returning false for an empty geometry')\n return 'fail'\n return 'success'\n\n def CheckIsEmpty(self):\n geom = ogr.CreateGeometryFromWkt(self.inString)\n wkt = geom.ExportToWkt()\n if self.expectedOutString != 'POINT EMPTY':\n if ogr.CreateGeometryFromWkb(geom.ExportToWkb()).ExportToWkt(\n ) != wkt:\n return 'fail'\n if wkt == self.expectedOutString:\n if self.isEmpty(geom) == 'fail':\n return 'fail'\n else:\n return 'success'\n else:\n gdaltest.post_reason('WKT is wrong: ' + wkt +\n '. Expected value is: ' + self.expectedOutString)\n return 'fail'\n\n\nempty_wkt_list = [('GEOMETRYCOLLECTION(EMPTY)', 'GEOMETRYCOLLECTION EMPTY'),\n ('MULTIPOLYGON( EMPTY )', 'MULTIPOLYGON EMPTY'), (\n 'MULTILINESTRING(EMPTY)', 'MULTILINESTRING EMPTY'), (\n 'MULTIPOINT(EMPTY)', 'MULTIPOINT EMPTY'), ('POINT ( EMPTY )',\n 'POINT EMPTY'), ('LINESTRING(EMPTY)', 'LINESTRING EMPTY'), (\n 'POLYGON ( EMPTY )', 'POLYGON EMPTY'), ('GEOMETRYCOLLECTION EMPTY',\n 'GEOMETRYCOLLECTION EMPTY'), ('MULTIPOLYGON EMPTY',\n 'MULTIPOLYGON EMPTY'), ('MULTILINESTRING EMPTY',\n 'MULTILINESTRING EMPTY'), ('MULTIPOINT EMPTY', 'MULTIPOINT EMPTY'), (\n 'POINT EMPTY', 'POINT EMPTY'), ('LINESTRING EMPTY', 'LINESTRING EMPTY'),\n ('POLYGON EMPTY', 'POLYGON EMPTY')]\n\n\ndef ogr_wktempty_test_partial_empty_geoms():\n wkt = 'MULTIPOINT (1 1)'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n geom = ogr.CreateGeometryFromWkt('MULTIPOINT EMPTY')\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))\n geom.AddGeometry(ogr.CreateGeometryFromWkt('POINT (1 1)'))\n wkt = 'MULTIPOINT (1 1)'\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. 
Expected value is: ' + wkt)\n return 'fail'\n wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n geom = ogr.CreateGeometryFromWkt('MULTILINESTRING EMPTY')\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))\n geom.AddGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 1,2 3,4 5,0 1)'))\n wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n wkt = 'POLYGON ((100 0,100 10,110 10,100 0))'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n wkt = 'POLYGON EMPTY'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))\n ring = ogr.Geometry(type=ogr.wkbLinearRing)\n ring.AddPoint_2D(0, 0)\n ring.AddPoint_2D(10, 0)\n ring.AddPoint_2D(10, 10)\n ring.AddPoint_2D(0, 10)\n ring.AddPoint_2D(0, 0)\n geom.AddGeometry(ring)\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n wkt = 'MULTIPOLYGON (((0 0,0 10,10 10,0 0)))'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON EMPTY')\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))\n geom.AddGeometry(ogr.CreateGeometryFromWkt(\n 'POLYGON ((100 0,100 10,110 10,100 0))'))\n wkt = 'MULTIPOLYGON (((100 0,100 10,110 10,100 0)))'\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n return 'success'\n\n\ngdaltest_list = []\nfor item in empty_wkt_list:\n ut = TestWktEmpty(item[0], item[1])\n gdaltest_list.append((ut.CheckIsEmpty, item[0]))\ngdaltest_list.append(ogr_wktempty_test_partial_empty_geoms)\nif __name__ == '__main__':\n gdaltest.setup_run('ogr_wktempty')\n gdaltest.run_tests(gdaltest_list)\n gdaltest.summarize()\n", "step-4": "import os\nimport sys\nimport string\nsys.path.append('../pymod')\nimport gdaltest\nimport ogrtest\nfrom osgeo import ogr\nfrom osgeo import gdal\n\n\nclass TestWktEmpty:\n\n def __init__(self, inString, expectedOutString):\n self.inString = inString\n self.expectedOutString = expectedOutString\n\n def isEmpty(self, geom):\n try:\n ogr.Geometry.IsEmpty\n except:\n return 'skip'\n if geom.IsEmpty() == False:\n geom.Destroy()\n gdaltest.post_reason(\n 'IsEmpty returning false for an empty geometry')\n return 'fail'\n return 'success'\n\n def CheckIsEmpty(self):\n geom = ogr.CreateGeometryFromWkt(self.inString)\n wkt = geom.ExportToWkt()\n if self.expectedOutString != 'POINT EMPTY':\n if ogr.CreateGeometryFromWkb(geom.ExportToWkb()).ExportToWkt(\n ) != wkt:\n return 'fail'\n if wkt == self.expectedOutString:\n if self.isEmpty(geom) == 'fail':\n return 'fail'\n else:\n return 'success'\n else:\n gdaltest.post_reason('WKT is wrong: ' + wkt +\n '. 
Expected value is: ' + self.expectedOutString)\n return 'fail'\n\n\nempty_wkt_list = [('GEOMETRYCOLLECTION(EMPTY)', 'GEOMETRYCOLLECTION EMPTY'),\n ('MULTIPOLYGON( EMPTY )', 'MULTIPOLYGON EMPTY'), (\n 'MULTILINESTRING(EMPTY)', 'MULTILINESTRING EMPTY'), (\n 'MULTIPOINT(EMPTY)', 'MULTIPOINT EMPTY'), ('POINT ( EMPTY )',\n 'POINT EMPTY'), ('LINESTRING(EMPTY)', 'LINESTRING EMPTY'), (\n 'POLYGON ( EMPTY )', 'POLYGON EMPTY'), ('GEOMETRYCOLLECTION EMPTY',\n 'GEOMETRYCOLLECTION EMPTY'), ('MULTIPOLYGON EMPTY',\n 'MULTIPOLYGON EMPTY'), ('MULTILINESTRING EMPTY',\n 'MULTILINESTRING EMPTY'), ('MULTIPOINT EMPTY', 'MULTIPOINT EMPTY'), (\n 'POINT EMPTY', 'POINT EMPTY'), ('LINESTRING EMPTY', 'LINESTRING EMPTY'),\n ('POLYGON EMPTY', 'POLYGON EMPTY')]\n\n\ndef ogr_wktempty_test_partial_empty_geoms():\n wkt = 'MULTIPOINT (1 1)'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n geom = ogr.CreateGeometryFromWkt('MULTIPOINT EMPTY')\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))\n geom.AddGeometry(ogr.CreateGeometryFromWkt('POINT (1 1)'))\n wkt = 'MULTIPOINT (1 1)'\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n geom = ogr.CreateGeometryFromWkt('MULTILINESTRING EMPTY')\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))\n geom.AddGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 1,2 3,4 5,0 1)'))\n wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n wkt = 'POLYGON ((100 0,100 10,110 10,100 0))'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n wkt = 'POLYGON EMPTY'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))\n ring = ogr.Geometry(type=ogr.wkbLinearRing)\n ring.AddPoint_2D(0, 0)\n ring.AddPoint_2D(10, 0)\n ring.AddPoint_2D(10, 10)\n ring.AddPoint_2D(0, 10)\n ring.AddPoint_2D(0, 0)\n geom.AddGeometry(ring)\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n wkt = 'MULTIPOLYGON (((0 0,0 10,10 10,0 0)))'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. Expected value is: ' + wkt)\n return 'fail'\n geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON EMPTY')\n geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))\n geom.AddGeometry(ogr.CreateGeometryFromWkt(\n 'POLYGON ((100 0,100 10,110 10,100 0))'))\n wkt = 'MULTIPOLYGON (((100 0,100 10,110 10,100 0)))'\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason('WKT is wrong: ' + geom.ExportToWkt() +\n '. 
Expected value is: ' + wkt)\n return 'fail'\n return 'success'\n\n\ngdaltest_list = []\nfor item in empty_wkt_list:\n ut = TestWktEmpty(item[0], item[1])\n gdaltest_list.append((ut.CheckIsEmpty, item[0]))\ngdaltest_list.append(ogr_wktempty_test_partial_empty_geoms)\nif __name__ == '__main__':\n gdaltest.setup_run('ogr_wktempty')\n gdaltest.run_tests(gdaltest_list)\n gdaltest.summarize()\n", "step-5": "#!/usr/bin/env python\n###############################################################################\n# $Id$\n#\n# Project: GDAL/OGR Test Suite\n# Purpose: Test support for the various \"EMPTY\" WKT geometry representations.\n# Author: Frank Warmerdam <warmerdam@pobox.com>\n# \n###############################################################################\n# Copyright (c) 2004, Frank Warmerdam <warmerdam@pobox.com>\n# Copyright (c) 2008, Even Rouault <even dot rouault at mines-paris dot org>\n# \n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Library General Public\n# License as published by the Free Software Foundation; either\n# version 2 of the License, or (at your option) any later version.\n# \n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Library General Public License for more details.\n# \n# You should have received a copy of the GNU Library General Public\n# License along with this library; if not, write to the\n# Free Software Foundation, Inc., 59 Temple Place - Suite 330,\n# Boston, MA 02111-1307, USA.\n###############################################################################\n\nimport os\nimport sys\nimport string\n\nsys.path.append( '../pymod' )\n\nimport gdaltest\nimport ogrtest\nfrom osgeo import ogr\nfrom osgeo import gdal\n\nclass TestWktEmpty:\n def __init__( self, inString, expectedOutString ):\n self.inString = inString\n self.expectedOutString = expectedOutString\n\n def isEmpty(self, geom):\n try:\n ogr.Geometry.IsEmpty\n except:\n return 'skip'\n\n if (geom.IsEmpty() == False):\n geom.Destroy()\n gdaltest.post_reason (\"IsEmpty returning false for an empty geometry\")\n return 'fail'\n\n return 'success'\n\n def CheckIsEmpty(self):\n geom = ogr.CreateGeometryFromWkt( self.inString )\n wkt = geom.ExportToWkt()\n\n if self.expectedOutString != 'POINT EMPTY':\n if ogr.CreateGeometryFromWkb(geom.ExportToWkb()).ExportToWkt() != wkt:\n return 'fail'\n\n if wkt == self.expectedOutString:\n if self.isEmpty(geom) == 'fail':\n return 'fail'\n else:\n return 'success'\n else:\n gdaltest.post_reason( 'WKT is wrong: ' + wkt + '. 
Expected value is: ' + self.expectedOutString )\n return 'fail'\n\nempty_wkt_list = [ \\\n ('GEOMETRYCOLLECTION(EMPTY)', 'GEOMETRYCOLLECTION EMPTY'),\n ('MULTIPOLYGON( EMPTY )', 'MULTIPOLYGON EMPTY'),\n ('MULTILINESTRING(EMPTY)', 'MULTILINESTRING EMPTY'),\n ('MULTIPOINT(EMPTY)', 'MULTIPOINT EMPTY'),\n ('POINT ( EMPTY )', 'POINT EMPTY'),\n ('LINESTRING(EMPTY)', 'LINESTRING EMPTY'),\n ('POLYGON ( EMPTY )', 'POLYGON EMPTY'),\n\n ('GEOMETRYCOLLECTION EMPTY', 'GEOMETRYCOLLECTION EMPTY'),\n ('MULTIPOLYGON EMPTY', 'MULTIPOLYGON EMPTY'),\n ('MULTILINESTRING EMPTY', 'MULTILINESTRING EMPTY'),\n ('MULTIPOINT EMPTY', 'MULTIPOINT EMPTY'),\n ('POINT EMPTY', 'POINT EMPTY'),\n ('LINESTRING EMPTY', 'LINESTRING EMPTY'),\n ('POLYGON EMPTY', 'POLYGON EMPTY')\n ]\n\n\ndef ogr_wktempty_test_partial_empty_geoms():\n\n # Multipoint with a valid point and an empty point\n wkt = 'MULTIPOINT (1 1)'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry( type = ogr.wkbPoint ))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt )\n return 'fail'\n\n # Multipoint with an empty point and a valid point\n geom = ogr.CreateGeometryFromWkt('MULTIPOINT EMPTY')\n geom.AddGeometry(ogr.Geometry( type = ogr.wkbPoint ))\n geom.AddGeometry(ogr.CreateGeometryFromWkt('POINT (1 1)'))\n wkt = 'MULTIPOINT (1 1)'\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt )\n return 'fail'\n\n # Multilinestring with a valid string and an empty linestring\n wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry( type = ogr.wkbLineString ))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt )\n return 'fail'\n\n # Multilinestring with an empty linestring and a valid linestring\n geom = ogr.CreateGeometryFromWkt('MULTILINESTRING EMPTY')\n geom.AddGeometry(ogr.Geometry( type = ogr.wkbLineString ))\n geom.AddGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 1,2 3,4 5,0 1)'))\n wkt = 'MULTILINESTRING ((0 1,2 3,4 5,0 1))'\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt )\n return 'fail'\n\n # Polygon with a valid external ring and an empty internal ring\n wkt = 'POLYGON ((100 0,100 10,110 10,100 0))'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry( type = ogr.wkbLinearRing ))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt )\n return 'fail'\n\n # Polygon with an empty external ring and a valid internal ring\n wkt = 'POLYGON EMPTY'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry( type = ogr.wkbLinearRing ))\n ring = ogr.Geometry( type = ogr.wkbLinearRing )\n ring.AddPoint_2D( 0, 0)\n ring.AddPoint_2D( 10, 0)\n ring.AddPoint_2D( 10, 10)\n ring.AddPoint_2D( 0, 10)\n ring.AddPoint_2D( 0, 0)\n geom.AddGeometry(ring)\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt )\n return 'fail'\n\n # Multipolygon with a valid polygon and an empty polygon\n wkt = 'MULTIPOLYGON (((0 0,0 10,10 10,0 0)))'\n geom = ogr.CreateGeometryFromWkt(wkt)\n geom.AddGeometry(ogr.Geometry( type = ogr.wkbPolygon ))\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. 
Expected value is: ' + wkt )\n return 'fail'\n\n # Multipolygon with an empty polygon and a valid polygon\n geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON EMPTY')\n geom.AddGeometry(ogr.Geometry( type = ogr.wkbPolygon ))\n geom.AddGeometry(ogr.CreateGeometryFromWkt('POLYGON ((100 0,100 10,110 10,100 0))'))\n wkt = 'MULTIPOLYGON (((100 0,100 10,110 10,100 0)))'\n if geom.ExportToWkt() != wkt:\n gdaltest.post_reason( 'WKT is wrong: ' + geom.ExportToWkt() + '. Expected value is: ' + wkt )\n return 'fail'\n\n return 'success'\n\n\ngdaltest_list = []\n\nfor item in empty_wkt_list:\n ut = TestWktEmpty( item[0], item[1] )\n gdaltest_list.append( (ut.CheckIsEmpty, item[0]) )\ngdaltest_list.append( ogr_wktempty_test_partial_empty_geoms )\n\nif __name__ == '__main__':\n\n gdaltest.setup_run( 'ogr_wktempty' )\n\n gdaltest.run_tests( gdaltest_list )\n\n gdaltest.summarize()\n\n", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
from cell import Cell from tkinter import messagebox import time import fileTools class Playground: """ The playground for the program. All cells are stored here. This object also import/export cells to the playground :param screen: The screen object. :param mouse: The mouse object. :param keyboard: The keyboard object. :param root: The root object. Attributes: cells: All the cells that is on the playground. clickSwitch: The size of the one grid box in pixels. """ def __init__(self, root, screen, mouse, keyboard): self.root = root self.screen = screen self.mouse = mouse self.keyboard = keyboard self.cells = [] self.clickSwitch = False self.autoGenerateMode = False self.generation = 0 self.timeToCalcGeneration = 0 self.bindKeyboardKeysToFunctions() def bindKeyboardKeysToFunctions(self): """ Binds diffrent functions to keyboard presses. :return: (nothing) """ self.keyboard.bindFunctionToKey("space", self.nextGeneration) def updatePlayground(self): """ Updates the playground. Checking for user input to interact with the playground. :return: (nothing) """ self.getMouseInput() if(self.autoGenerateMode): self.nextGeneration() def getMouseInput(self): """ This method is getting the mouse and doing diffrent thing with it. For example: spawning a new cell if the user click on an grid-box. :return: (nothing) """ xPos = self.mouse.xGridPos yPos = self.mouse.yGridPos #Changing the hoverblock color depending if the mouse is hovering over an living cell or not. if(self.getCellFromPosition(xPos, yPos)): self.screen.canvas.itemconfig(self.screen.hoverBlock, fill='#ff0000') else: self.screen.canvas.itemconfig(self.screen.hoverBlock, fill='#00ff00') #Placing an cell on the playground if the user is clicking on the playground if(self.mouse.leftButton and self.clickSwitch == False): if(self.keyboard.shiftKey): clickedCell = self.getCellFromPosition(xPos, yPos) if(clickedCell == False): self.createCell(xPos, yPos) else: self.deleteCell(clickedCell) self.clickSwitch = True if (self.mouse.leftButton == False and self.clickSwitch == True): self.clickSwitch = False def deleteCell(self, cell): """ Deleting a cell from the cell-list. :param cell: The cell that is going to be delete. :return: (nothing) """ index = self.cells.index(cell) self.cells[index].delete() self.cells.remove(cell) def createCell(self, xPos, yPos): """ Creates a new cell for a given position. :param xPos: The x-position on the grid. :param yPos: the y-position on the grid :return: (nothing) """ self.cells.append(Cell(self.screen, xPos, yPos)) def getCellFromPosition(self, xPos, yPos): """ Gets a cell from a given position. :param xPos: The x-position on the grid. :param yPos: the y-position on the grid :return: Cell """ for cell in self.cells: if(xPos == cell.x and yPos == cell.y): return cell return False def clearPlayground(self): """ Removes all the cells from the playground :return: (nothing) """ for cell in self.cells: cell.delete() self.cells = [] self.generation = 0 def importPlayground(self, filepath): """ This function is importing a playground. :param filepath: The filepath to import the playground to. :return: (nothing) """ cellOutOfBound = False avgXPos = 0 avgYPos = 0 fileWrite = open(filepath, "r") cellPositions = fileWrite.readlines() self.clearPlayground() for cellPos in cellPositions: #Cleans the string cleanCellPos = fileTools.cleanString(cellPos) if(cleanCellPos == ""): continue #Check the format. 
cleanCellPos = self.checkFileFormat(cleanCellPos) if(cleanCellPos): cellXPos, cellYPos = cleanCellPos else: return #Checks if the coords is outside the world. if(cellXPos > self.screen.worldSize or cellYPos > self.screen.worldSize or cellXPos < 0 or cellYPos < 0): cellOutOfBound = True else: newCell = Cell(self.screen, cellXPos, cellYPos) rectCellPos = self.screen.canvas.coords(newCell.rect) avgXPos += rectCellPos[0]; avgYPos += rectCellPos[1] self.cells.append(newCell) #Print warning that some cells are not renderd. if(cellOutOfBound): messagebox.showwarning("Warning!", "Some cells are placed outside of the playground!") #Moving the user to where the cells are. avgXPos /= len(cellPositions); avgYPos /= len(cellPositions) self.screen.offsetX += avgXPos - self.screen.width/2 self.screen.offsetY += avgYPos - self.screen.height/2 def exportPlayground(self, filepath): """ This function is exporting a playground. :param filepath: The filepath to export the playground to. :return: (nothing) """ cellPositions = "" for cell in self.cells: if(cell.dead == False): cellPositions += str(cell.x) + " " + str(cell.y) + "\n" fileWrite = open(filepath, "w") fileWrite.write(cellPositions) fileWrite.close() def checkFileFormat(self, cellPos): """ Checks if the file has the right format for this program. :param fileContent: The content of the file :return: The positions in a tuple, (x, y), false if there is an error. """ try: cellPosList = cellPos.split() cellXPos = int(cellPosList[0]) cellYPos = int(cellPosList[1]) except ValueError: messagebox.showerror("Error: Wrong format", "The choosen file do not have the correct format. Be so kind to choose an other file.") return False pass return (cellXPos, cellYPos) def removeCells(self, cellArray): """ Deletes all the cells from the array and playground. :param cellArray: The array of cells to delete. :return: (nothing) """ for cell in cellArray: cell.delete() self.cells.remove(cell) def setNeighbors(self): """ Creates dead cells around all living cells and calculating all the neighbors for the dead and the living cells :return: (nothing) """ for cellIndex in range(len(self.cells)): cell = self.cells[cellIndex] #Checks the 8 cells around the living one. for neighborsX in range(cell.x - 1, cell.x + 2): for neighborsY in range(cell.y - 1, cell.y + 2): #If the position is outside the world, loop around. neighborsX = neighborsX % self.screen.worldSize neighborsY = neighborsY % self.screen.worldSize #Skipping itself. Becouse we do not want to calculate itself as a neighbor if(neighborsX == cell.x and neighborsY == cell.y): continue else: #Checks if a cell exist at neighborsX, neighborsY cellToCheck = self.getCellFromPosition(neighborsX, neighborsY) if(cellToCheck != False): #Add one to the neighbor var if there already exist and cell for the given position. cellToCheck.numOfNeighbor += 1 else: #Creates a new cell if it do not exist any. newCell = Cell(self.screen, neighborsX, neighborsY, True) newCell.numOfNeighbor += 1 self.cells.append(newCell) def checkAmountOfNeighbors(self): """ Check the amount of neighbors and kills or creates new cell depending on the amount. :return: (nothing) """ cellsToDelete = [] for cell in self.cells: if(cell.numOfNeighbor > 3 or cell.numOfNeighbor < 2 or (cell.numOfNeighbor == 2 and cell.dead == True)): cellsToDelete.append(cell) elif(cell.numOfNeighbor == 3 and cell.dead == True): cell.makeAlive() cell.numOfNeighbor = 0 self.removeCells(cellsToDelete) def nextGeneration(self): """ This method is updating the cells to the next generation. 
:return: (nothing) Thanks to Martins for the idea to have modolu of the current posotion. """ # Start a timer to calculate the time the render one generation. startTime = int(round(time.time() * 100000)) self.generation += 1 self.setNeighbors() self.checkAmountOfNeighbors() # Ends a timer to calculate the time the render one generation. endTime = int(round(time.time() * 100000)) self.timeToCalcGeneration = (endTime - startTime)
normal
{ "blob_id": "80d5cc9871ec753fb9239df7680ac62809baa496", "index": 8177, "step-1": "<mask token>\n\n\nclass Playground:\n <mask token>\n\n def __init__(self, root, screen, mouse, keyboard):\n self.root = root\n self.screen = screen\n self.mouse = mouse\n self.keyboard = keyboard\n self.cells = []\n self.clickSwitch = False\n self.autoGenerateMode = False\n self.generation = 0\n self.timeToCalcGeneration = 0\n self.bindKeyboardKeysToFunctions()\n\n def bindKeyboardKeysToFunctions(self):\n \"\"\" \n Binds diffrent functions to keyboard presses. \n :return: (nothing)\n \"\"\"\n self.keyboard.bindFunctionToKey('space', self.nextGeneration)\n\n def updatePlayground(self):\n \"\"\" \n Updates the playground. Checking for user input to interact with the playground.\n :return: (nothing)\n \"\"\"\n self.getMouseInput()\n if self.autoGenerateMode:\n self.nextGeneration()\n\n def getMouseInput(self):\n \"\"\"\n This method is getting the mouse and doing diffrent thing with it. For example: spawning a new cell if the user click on an grid-box.\n :return: (nothing)\n \"\"\"\n xPos = self.mouse.xGridPos\n yPos = self.mouse.yGridPos\n if self.getCellFromPosition(xPos, yPos):\n self.screen.canvas.itemconfig(self.screen.hoverBlock, fill=\n '#ff0000')\n else:\n self.screen.canvas.itemconfig(self.screen.hoverBlock, fill=\n '#00ff00')\n if self.mouse.leftButton and self.clickSwitch == False:\n if self.keyboard.shiftKey:\n clickedCell = self.getCellFromPosition(xPos, yPos)\n if clickedCell == False:\n self.createCell(xPos, yPos)\n else:\n self.deleteCell(clickedCell)\n self.clickSwitch = True\n if self.mouse.leftButton == False and self.clickSwitch == True:\n self.clickSwitch = False\n <mask token>\n\n def createCell(self, xPos, yPos):\n \"\"\"\n Creates a new cell for a given position.\n :param xPos: The x-position on the grid.\n :param yPos: the y-position on the grid\n :return: (nothing)\n \"\"\"\n self.cells.append(Cell(self.screen, xPos, yPos))\n\n def getCellFromPosition(self, xPos, yPos):\n \"\"\"\n Gets a cell from a given position.\n :param xPos: The x-position on the grid.\n :param yPos: the y-position on the grid\n :return: Cell\n \"\"\"\n for cell in self.cells:\n if xPos == cell.x and yPos == cell.y:\n return cell\n return False\n\n def clearPlayground(self):\n \"\"\"\n Removes all the cells from the playground\n :return: (nothing)\n \"\"\"\n for cell in self.cells:\n cell.delete()\n self.cells = []\n self.generation = 0\n\n def importPlayground(self, filepath):\n \"\"\"\n This function is importing a playground.\n :param filepath: The filepath to import the playground to. 
\n :return: (nothing)\n \"\"\"\n cellOutOfBound = False\n avgXPos = 0\n avgYPos = 0\n fileWrite = open(filepath, 'r')\n cellPositions = fileWrite.readlines()\n self.clearPlayground()\n for cellPos in cellPositions:\n cleanCellPos = fileTools.cleanString(cellPos)\n if cleanCellPos == '':\n continue\n cleanCellPos = self.checkFileFormat(cleanCellPos)\n if cleanCellPos:\n cellXPos, cellYPos = cleanCellPos\n else:\n return\n if (cellXPos > self.screen.worldSize or cellYPos > self.screen.\n worldSize or cellXPos < 0 or cellYPos < 0):\n cellOutOfBound = True\n else:\n newCell = Cell(self.screen, cellXPos, cellYPos)\n rectCellPos = self.screen.canvas.coords(newCell.rect)\n avgXPos += rectCellPos[0]\n avgYPos += rectCellPos[1]\n self.cells.append(newCell)\n if cellOutOfBound:\n messagebox.showwarning('Warning!',\n 'Some cells are placed outside of the playground!')\n avgXPos /= len(cellPositions)\n avgYPos /= len(cellPositions)\n self.screen.offsetX += avgXPos - self.screen.width / 2\n self.screen.offsetY += avgYPos - self.screen.height / 2\n <mask token>\n <mask token>\n\n def removeCells(self, cellArray):\n \"\"\"\n Deletes all the cells from the array and playground.\n :param cellArray: The array of cells to delete.\n :return: (nothing)\n \"\"\"\n for cell in cellArray:\n cell.delete()\n self.cells.remove(cell)\n <mask token>\n\n def checkAmountOfNeighbors(self):\n \"\"\"\n Check the amount of neighbors and kills or creates new cell depending on the amount.\n :return: (nothing)\n \"\"\"\n cellsToDelete = []\n for cell in self.cells:\n if (cell.numOfNeighbor > 3 or cell.numOfNeighbor < 2 or cell.\n numOfNeighbor == 2 and cell.dead == True):\n cellsToDelete.append(cell)\n elif cell.numOfNeighbor == 3 and cell.dead == True:\n cell.makeAlive()\n cell.numOfNeighbor = 0\n self.removeCells(cellsToDelete)\n\n def nextGeneration(self):\n \"\"\"\n This method is updating the cells to the next generation.\n :return: (nothing)\n\n Thanks to Martins for the idea to have modolu of the current posotion.\n \"\"\"\n startTime = int(round(time.time() * 100000))\n self.generation += 1\n self.setNeighbors()\n self.checkAmountOfNeighbors()\n endTime = int(round(time.time() * 100000))\n self.timeToCalcGeneration = endTime - startTime\n", "step-2": "<mask token>\n\n\nclass Playground:\n <mask token>\n\n def __init__(self, root, screen, mouse, keyboard):\n self.root = root\n self.screen = screen\n self.mouse = mouse\n self.keyboard = keyboard\n self.cells = []\n self.clickSwitch = False\n self.autoGenerateMode = False\n self.generation = 0\n self.timeToCalcGeneration = 0\n self.bindKeyboardKeysToFunctions()\n\n def bindKeyboardKeysToFunctions(self):\n \"\"\" \n Binds diffrent functions to keyboard presses. \n :return: (nothing)\n \"\"\"\n self.keyboard.bindFunctionToKey('space', self.nextGeneration)\n\n def updatePlayground(self):\n \"\"\" \n Updates the playground. Checking for user input to interact with the playground.\n :return: (nothing)\n \"\"\"\n self.getMouseInput()\n if self.autoGenerateMode:\n self.nextGeneration()\n\n def getMouseInput(self):\n \"\"\"\n This method is getting the mouse and doing diffrent thing with it. 
For example: spawning a new cell if the user click on an grid-box.\n :return: (nothing)\n \"\"\"\n xPos = self.mouse.xGridPos\n yPos = self.mouse.yGridPos\n if self.getCellFromPosition(xPos, yPos):\n self.screen.canvas.itemconfig(self.screen.hoverBlock, fill=\n '#ff0000')\n else:\n self.screen.canvas.itemconfig(self.screen.hoverBlock, fill=\n '#00ff00')\n if self.mouse.leftButton and self.clickSwitch == False:\n if self.keyboard.shiftKey:\n clickedCell = self.getCellFromPosition(xPos, yPos)\n if clickedCell == False:\n self.createCell(xPos, yPos)\n else:\n self.deleteCell(clickedCell)\n self.clickSwitch = True\n if self.mouse.leftButton == False and self.clickSwitch == True:\n self.clickSwitch = False\n\n def deleteCell(self, cell):\n \"\"\"\n Deleting a cell from the cell-list.\n :param cell: The cell that is going to be delete.\n :return: (nothing)\n \"\"\"\n index = self.cells.index(cell)\n self.cells[index].delete()\n self.cells.remove(cell)\n\n def createCell(self, xPos, yPos):\n \"\"\"\n Creates a new cell for a given position.\n :param xPos: The x-position on the grid.\n :param yPos: the y-position on the grid\n :return: (nothing)\n \"\"\"\n self.cells.append(Cell(self.screen, xPos, yPos))\n\n def getCellFromPosition(self, xPos, yPos):\n \"\"\"\n Gets a cell from a given position.\n :param xPos: The x-position on the grid.\n :param yPos: the y-position on the grid\n :return: Cell\n \"\"\"\n for cell in self.cells:\n if xPos == cell.x and yPos == cell.y:\n return cell\n return False\n\n def clearPlayground(self):\n \"\"\"\n Removes all the cells from the playground\n :return: (nothing)\n \"\"\"\n for cell in self.cells:\n cell.delete()\n self.cells = []\n self.generation = 0\n\n def importPlayground(self, filepath):\n \"\"\"\n This function is importing a playground.\n :param filepath: The filepath to import the playground to. \n :return: (nothing)\n \"\"\"\n cellOutOfBound = False\n avgXPos = 0\n avgYPos = 0\n fileWrite = open(filepath, 'r')\n cellPositions = fileWrite.readlines()\n self.clearPlayground()\n for cellPos in cellPositions:\n cleanCellPos = fileTools.cleanString(cellPos)\n if cleanCellPos == '':\n continue\n cleanCellPos = self.checkFileFormat(cleanCellPos)\n if cleanCellPos:\n cellXPos, cellYPos = cleanCellPos\n else:\n return\n if (cellXPos > self.screen.worldSize or cellYPos > self.screen.\n worldSize or cellXPos < 0 or cellYPos < 0):\n cellOutOfBound = True\n else:\n newCell = Cell(self.screen, cellXPos, cellYPos)\n rectCellPos = self.screen.canvas.coords(newCell.rect)\n avgXPos += rectCellPos[0]\n avgYPos += rectCellPos[1]\n self.cells.append(newCell)\n if cellOutOfBound:\n messagebox.showwarning('Warning!',\n 'Some cells are placed outside of the playground!')\n avgXPos /= len(cellPositions)\n avgYPos /= len(cellPositions)\n self.screen.offsetX += avgXPos - self.screen.width / 2\n self.screen.offsetY += avgYPos - self.screen.height / 2\n\n def exportPlayground(self, filepath):\n \"\"\"\n This function is exporting a playground.\n :param filepath: The filepath to export the playground to. 
\n :return: (nothing)\n \"\"\"\n cellPositions = ''\n for cell in self.cells:\n if cell.dead == False:\n cellPositions += str(cell.x) + ' ' + str(cell.y) + '\\n'\n fileWrite = open(filepath, 'w')\n fileWrite.write(cellPositions)\n fileWrite.close()\n\n def checkFileFormat(self, cellPos):\n \"\"\"\n Checks if the file has the right format for this program.\n :param fileContent: The content of the file\n :return: The positions in a tuple, (x, y), false if there is an error.\n \"\"\"\n try:\n cellPosList = cellPos.split()\n cellXPos = int(cellPosList[0])\n cellYPos = int(cellPosList[1])\n except ValueError:\n messagebox.showerror('Error: Wrong format',\n 'The choosen file do not have the correct format. Be so kind to choose an other file.'\n )\n return False\n pass\n return cellXPos, cellYPos\n\n def removeCells(self, cellArray):\n \"\"\"\n Deletes all the cells from the array and playground.\n :param cellArray: The array of cells to delete.\n :return: (nothing)\n \"\"\"\n for cell in cellArray:\n cell.delete()\n self.cells.remove(cell)\n\n def setNeighbors(self):\n \"\"\"\n Creates dead cells around all living cells and calculating all the neighbors for the dead and the living cells\n :return: (nothing)\n \"\"\"\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n if neighborsX == cell.x and neighborsY == cell.y:\n continue\n else:\n cellToCheck = self.getCellFromPosition(neighborsX,\n neighborsY)\n if cellToCheck != False:\n cellToCheck.numOfNeighbor += 1\n else:\n newCell = Cell(self.screen, neighborsX,\n neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)\n\n def checkAmountOfNeighbors(self):\n \"\"\"\n Check the amount of neighbors and kills or creates new cell depending on the amount.\n :return: (nothing)\n \"\"\"\n cellsToDelete = []\n for cell in self.cells:\n if (cell.numOfNeighbor > 3 or cell.numOfNeighbor < 2 or cell.\n numOfNeighbor == 2 and cell.dead == True):\n cellsToDelete.append(cell)\n elif cell.numOfNeighbor == 3 and cell.dead == True:\n cell.makeAlive()\n cell.numOfNeighbor = 0\n self.removeCells(cellsToDelete)\n\n def nextGeneration(self):\n \"\"\"\n This method is updating the cells to the next generation.\n :return: (nothing)\n\n Thanks to Martins for the idea to have modolu of the current posotion.\n \"\"\"\n startTime = int(round(time.time() * 100000))\n self.generation += 1\n self.setNeighbors()\n self.checkAmountOfNeighbors()\n endTime = int(round(time.time() * 100000))\n self.timeToCalcGeneration = endTime - startTime\n", "step-3": "<mask token>\n\n\nclass Playground:\n \"\"\"\n The playground for the program. All cells are stored here. This object also import/export cells to the playground\n\n :param screen: The screen object.\n :param mouse: The mouse object.\n :param keyboard: The keyboard object.\n :param root: The root object.\n\n Attributes: \n cells: All the cells that is on the playground. 
\n clickSwitch: The size of the one grid box in pixels.\n \"\"\"\n\n def __init__(self, root, screen, mouse, keyboard):\n self.root = root\n self.screen = screen\n self.mouse = mouse\n self.keyboard = keyboard\n self.cells = []\n self.clickSwitch = False\n self.autoGenerateMode = False\n self.generation = 0\n self.timeToCalcGeneration = 0\n self.bindKeyboardKeysToFunctions()\n\n def bindKeyboardKeysToFunctions(self):\n \"\"\" \n Binds diffrent functions to keyboard presses. \n :return: (nothing)\n \"\"\"\n self.keyboard.bindFunctionToKey('space', self.nextGeneration)\n\n def updatePlayground(self):\n \"\"\" \n Updates the playground. Checking for user input to interact with the playground.\n :return: (nothing)\n \"\"\"\n self.getMouseInput()\n if self.autoGenerateMode:\n self.nextGeneration()\n\n def getMouseInput(self):\n \"\"\"\n This method is getting the mouse and doing diffrent thing with it. For example: spawning a new cell if the user click on an grid-box.\n :return: (nothing)\n \"\"\"\n xPos = self.mouse.xGridPos\n yPos = self.mouse.yGridPos\n if self.getCellFromPosition(xPos, yPos):\n self.screen.canvas.itemconfig(self.screen.hoverBlock, fill=\n '#ff0000')\n else:\n self.screen.canvas.itemconfig(self.screen.hoverBlock, fill=\n '#00ff00')\n if self.mouse.leftButton and self.clickSwitch == False:\n if self.keyboard.shiftKey:\n clickedCell = self.getCellFromPosition(xPos, yPos)\n if clickedCell == False:\n self.createCell(xPos, yPos)\n else:\n self.deleteCell(clickedCell)\n self.clickSwitch = True\n if self.mouse.leftButton == False and self.clickSwitch == True:\n self.clickSwitch = False\n\n def deleteCell(self, cell):\n \"\"\"\n Deleting a cell from the cell-list.\n :param cell: The cell that is going to be delete.\n :return: (nothing)\n \"\"\"\n index = self.cells.index(cell)\n self.cells[index].delete()\n self.cells.remove(cell)\n\n def createCell(self, xPos, yPos):\n \"\"\"\n Creates a new cell for a given position.\n :param xPos: The x-position on the grid.\n :param yPos: the y-position on the grid\n :return: (nothing)\n \"\"\"\n self.cells.append(Cell(self.screen, xPos, yPos))\n\n def getCellFromPosition(self, xPos, yPos):\n \"\"\"\n Gets a cell from a given position.\n :param xPos: The x-position on the grid.\n :param yPos: the y-position on the grid\n :return: Cell\n \"\"\"\n for cell in self.cells:\n if xPos == cell.x and yPos == cell.y:\n return cell\n return False\n\n def clearPlayground(self):\n \"\"\"\n Removes all the cells from the playground\n :return: (nothing)\n \"\"\"\n for cell in self.cells:\n cell.delete()\n self.cells = []\n self.generation = 0\n\n def importPlayground(self, filepath):\n \"\"\"\n This function is importing a playground.\n :param filepath: The filepath to import the playground to. 
\n :return: (nothing)\n \"\"\"\n cellOutOfBound = False\n avgXPos = 0\n avgYPos = 0\n fileWrite = open(filepath, 'r')\n cellPositions = fileWrite.readlines()\n self.clearPlayground()\n for cellPos in cellPositions:\n cleanCellPos = fileTools.cleanString(cellPos)\n if cleanCellPos == '':\n continue\n cleanCellPos = self.checkFileFormat(cleanCellPos)\n if cleanCellPos:\n cellXPos, cellYPos = cleanCellPos\n else:\n return\n if (cellXPos > self.screen.worldSize or cellYPos > self.screen.\n worldSize or cellXPos < 0 or cellYPos < 0):\n cellOutOfBound = True\n else:\n newCell = Cell(self.screen, cellXPos, cellYPos)\n rectCellPos = self.screen.canvas.coords(newCell.rect)\n avgXPos += rectCellPos[0]\n avgYPos += rectCellPos[1]\n self.cells.append(newCell)\n if cellOutOfBound:\n messagebox.showwarning('Warning!',\n 'Some cells are placed outside of the playground!')\n avgXPos /= len(cellPositions)\n avgYPos /= len(cellPositions)\n self.screen.offsetX += avgXPos - self.screen.width / 2\n self.screen.offsetY += avgYPos - self.screen.height / 2\n\n def exportPlayground(self, filepath):\n \"\"\"\n This function is exporting a playground.\n :param filepath: The filepath to export the playground to. \n :return: (nothing)\n \"\"\"\n cellPositions = ''\n for cell in self.cells:\n if cell.dead == False:\n cellPositions += str(cell.x) + ' ' + str(cell.y) + '\\n'\n fileWrite = open(filepath, 'w')\n fileWrite.write(cellPositions)\n fileWrite.close()\n\n def checkFileFormat(self, cellPos):\n \"\"\"\n Checks if the file has the right format for this program.\n :param fileContent: The content of the file\n :return: The positions in a tuple, (x, y), false if there is an error.\n \"\"\"\n try:\n cellPosList = cellPos.split()\n cellXPos = int(cellPosList[0])\n cellYPos = int(cellPosList[1])\n except ValueError:\n messagebox.showerror('Error: Wrong format',\n 'The choosen file do not have the correct format. 
Be so kind to choose an other file.'\n )\n return False\n pass\n return cellXPos, cellYPos\n\n def removeCells(self, cellArray):\n \"\"\"\n Deletes all the cells from the array and playground.\n :param cellArray: The array of cells to delete.\n :return: (nothing)\n \"\"\"\n for cell in cellArray:\n cell.delete()\n self.cells.remove(cell)\n\n def setNeighbors(self):\n \"\"\"\n Creates dead cells around all living cells and calculating all the neighbors for the dead and the living cells\n :return: (nothing)\n \"\"\"\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n if neighborsX == cell.x and neighborsY == cell.y:\n continue\n else:\n cellToCheck = self.getCellFromPosition(neighborsX,\n neighborsY)\n if cellToCheck != False:\n cellToCheck.numOfNeighbor += 1\n else:\n newCell = Cell(self.screen, neighborsX,\n neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)\n\n def checkAmountOfNeighbors(self):\n \"\"\"\n Check the amount of neighbors and kills or creates new cell depending on the amount.\n :return: (nothing)\n \"\"\"\n cellsToDelete = []\n for cell in self.cells:\n if (cell.numOfNeighbor > 3 or cell.numOfNeighbor < 2 or cell.\n numOfNeighbor == 2 and cell.dead == True):\n cellsToDelete.append(cell)\n elif cell.numOfNeighbor == 3 and cell.dead == True:\n cell.makeAlive()\n cell.numOfNeighbor = 0\n self.removeCells(cellsToDelete)\n\n def nextGeneration(self):\n \"\"\"\n This method is updating the cells to the next generation.\n :return: (nothing)\n\n Thanks to Martins for the idea to have modolu of the current posotion.\n \"\"\"\n startTime = int(round(time.time() * 100000))\n self.generation += 1\n self.setNeighbors()\n self.checkAmountOfNeighbors()\n endTime = int(round(time.time() * 100000))\n self.timeToCalcGeneration = endTime - startTime\n", "step-4": "from cell import Cell\nfrom tkinter import messagebox\nimport time\nimport fileTools\n\n\nclass Playground:\n \"\"\"\n The playground for the program. All cells are stored here. This object also import/export cells to the playground\n\n :param screen: The screen object.\n :param mouse: The mouse object.\n :param keyboard: The keyboard object.\n :param root: The root object.\n\n Attributes: \n cells: All the cells that is on the playground. \n clickSwitch: The size of the one grid box in pixels.\n \"\"\"\n\n def __init__(self, root, screen, mouse, keyboard):\n self.root = root\n self.screen = screen\n self.mouse = mouse\n self.keyboard = keyboard\n self.cells = []\n self.clickSwitch = False\n self.autoGenerateMode = False\n self.generation = 0\n self.timeToCalcGeneration = 0\n self.bindKeyboardKeysToFunctions()\n\n def bindKeyboardKeysToFunctions(self):\n \"\"\" \n Binds diffrent functions to keyboard presses. \n :return: (nothing)\n \"\"\"\n self.keyboard.bindFunctionToKey('space', self.nextGeneration)\n\n def updatePlayground(self):\n \"\"\" \n Updates the playground. Checking for user input to interact with the playground.\n :return: (nothing)\n \"\"\"\n self.getMouseInput()\n if self.autoGenerateMode:\n self.nextGeneration()\n\n def getMouseInput(self):\n \"\"\"\n This method is getting the mouse and doing diffrent thing with it. 
For example: spawning a new cell if the user click on an grid-box.\n :return: (nothing)\n \"\"\"\n xPos = self.mouse.xGridPos\n yPos = self.mouse.yGridPos\n if self.getCellFromPosition(xPos, yPos):\n self.screen.canvas.itemconfig(self.screen.hoverBlock, fill=\n '#ff0000')\n else:\n self.screen.canvas.itemconfig(self.screen.hoverBlock, fill=\n '#00ff00')\n if self.mouse.leftButton and self.clickSwitch == False:\n if self.keyboard.shiftKey:\n clickedCell = self.getCellFromPosition(xPos, yPos)\n if clickedCell == False:\n self.createCell(xPos, yPos)\n else:\n self.deleteCell(clickedCell)\n self.clickSwitch = True\n if self.mouse.leftButton == False and self.clickSwitch == True:\n self.clickSwitch = False\n\n def deleteCell(self, cell):\n \"\"\"\n Deleting a cell from the cell-list.\n :param cell: The cell that is going to be delete.\n :return: (nothing)\n \"\"\"\n index = self.cells.index(cell)\n self.cells[index].delete()\n self.cells.remove(cell)\n\n def createCell(self, xPos, yPos):\n \"\"\"\n Creates a new cell for a given position.\n :param xPos: The x-position on the grid.\n :param yPos: the y-position on the grid\n :return: (nothing)\n \"\"\"\n self.cells.append(Cell(self.screen, xPos, yPos))\n\n def getCellFromPosition(self, xPos, yPos):\n \"\"\"\n Gets a cell from a given position.\n :param xPos: The x-position on the grid.\n :param yPos: the y-position on the grid\n :return: Cell\n \"\"\"\n for cell in self.cells:\n if xPos == cell.x and yPos == cell.y:\n return cell\n return False\n\n def clearPlayground(self):\n \"\"\"\n Removes all the cells from the playground\n :return: (nothing)\n \"\"\"\n for cell in self.cells:\n cell.delete()\n self.cells = []\n self.generation = 0\n\n def importPlayground(self, filepath):\n \"\"\"\n This function is importing a playground.\n :param filepath: The filepath to import the playground to. \n :return: (nothing)\n \"\"\"\n cellOutOfBound = False\n avgXPos = 0\n avgYPos = 0\n fileWrite = open(filepath, 'r')\n cellPositions = fileWrite.readlines()\n self.clearPlayground()\n for cellPos in cellPositions:\n cleanCellPos = fileTools.cleanString(cellPos)\n if cleanCellPos == '':\n continue\n cleanCellPos = self.checkFileFormat(cleanCellPos)\n if cleanCellPos:\n cellXPos, cellYPos = cleanCellPos\n else:\n return\n if (cellXPos > self.screen.worldSize or cellYPos > self.screen.\n worldSize or cellXPos < 0 or cellYPos < 0):\n cellOutOfBound = True\n else:\n newCell = Cell(self.screen, cellXPos, cellYPos)\n rectCellPos = self.screen.canvas.coords(newCell.rect)\n avgXPos += rectCellPos[0]\n avgYPos += rectCellPos[1]\n self.cells.append(newCell)\n if cellOutOfBound:\n messagebox.showwarning('Warning!',\n 'Some cells are placed outside of the playground!')\n avgXPos /= len(cellPositions)\n avgYPos /= len(cellPositions)\n self.screen.offsetX += avgXPos - self.screen.width / 2\n self.screen.offsetY += avgYPos - self.screen.height / 2\n\n def exportPlayground(self, filepath):\n \"\"\"\n This function is exporting a playground.\n :param filepath: The filepath to export the playground to. 
\n :return: (nothing)\n \"\"\"\n cellPositions = ''\n for cell in self.cells:\n if cell.dead == False:\n cellPositions += str(cell.x) + ' ' + str(cell.y) + '\\n'\n fileWrite = open(filepath, 'w')\n fileWrite.write(cellPositions)\n fileWrite.close()\n\n def checkFileFormat(self, cellPos):\n \"\"\"\n Checks if the file has the right format for this program.\n :param fileContent: The content of the file\n :return: The positions in a tuple, (x, y), false if there is an error.\n \"\"\"\n try:\n cellPosList = cellPos.split()\n cellXPos = int(cellPosList[0])\n cellYPos = int(cellPosList[1])\n except ValueError:\n messagebox.showerror('Error: Wrong format',\n 'The choosen file do not have the correct format. Be so kind to choose an other file.'\n )\n return False\n pass\n return cellXPos, cellYPos\n\n def removeCells(self, cellArray):\n \"\"\"\n Deletes all the cells from the array and playground.\n :param cellArray: The array of cells to delete.\n :return: (nothing)\n \"\"\"\n for cell in cellArray:\n cell.delete()\n self.cells.remove(cell)\n\n def setNeighbors(self):\n \"\"\"\n Creates dead cells around all living cells and calculating all the neighbors for the dead and the living cells\n :return: (nothing)\n \"\"\"\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n if neighborsX == cell.x and neighborsY == cell.y:\n continue\n else:\n cellToCheck = self.getCellFromPosition(neighborsX,\n neighborsY)\n if cellToCheck != False:\n cellToCheck.numOfNeighbor += 1\n else:\n newCell = Cell(self.screen, neighborsX,\n neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)\n\n def checkAmountOfNeighbors(self):\n \"\"\"\n Check the amount of neighbors and kills or creates new cell depending on the amount.\n :return: (nothing)\n \"\"\"\n cellsToDelete = []\n for cell in self.cells:\n if (cell.numOfNeighbor > 3 or cell.numOfNeighbor < 2 or cell.\n numOfNeighbor == 2 and cell.dead == True):\n cellsToDelete.append(cell)\n elif cell.numOfNeighbor == 3 and cell.dead == True:\n cell.makeAlive()\n cell.numOfNeighbor = 0\n self.removeCells(cellsToDelete)\n\n def nextGeneration(self):\n \"\"\"\n This method is updating the cells to the next generation.\n :return: (nothing)\n\n Thanks to Martins for the idea to have modolu of the current posotion.\n \"\"\"\n startTime = int(round(time.time() * 100000))\n self.generation += 1\n self.setNeighbors()\n self.checkAmountOfNeighbors()\n endTime = int(round(time.time() * 100000))\n self.timeToCalcGeneration = endTime - startTime\n", "step-5": "from cell import Cell\nfrom tkinter import messagebox\nimport time\nimport fileTools\n\nclass Playground:\n\n \"\"\"\n The playground for the program. All cells are stored here. This object also import/export cells to the playground\n\n :param screen: The screen object.\n :param mouse: The mouse object.\n :param keyboard: The keyboard object.\n :param root: The root object.\n\n Attributes: \n cells: All the cells that is on the playground. 
\n clickSwitch: The size of the one grid box in pixels.\n \"\"\"\n\n def __init__(self, root, screen, mouse, keyboard):\n self.root = root\n self.screen = screen\n self.mouse = mouse\n self.keyboard = keyboard\n self.cells = []\n\n self.clickSwitch = False\n self.autoGenerateMode = False\n self.generation = 0\n self.timeToCalcGeneration = 0\n\n self.bindKeyboardKeysToFunctions()\n\n \n def bindKeyboardKeysToFunctions(self):\n \"\"\" \n Binds diffrent functions to keyboard presses. \n :return: (nothing)\n \"\"\"\n self.keyboard.bindFunctionToKey(\"space\", self.nextGeneration)\n\n\n def updatePlayground(self):\n \"\"\" \n Updates the playground. Checking for user input to interact with the playground.\n :return: (nothing)\n \"\"\"\n self.getMouseInput()\n if(self.autoGenerateMode):\n self.nextGeneration()\n\n\n def getMouseInput(self):\n \"\"\"\n This method is getting the mouse and doing diffrent thing with it. For example: spawning a new cell if the user click on an grid-box.\n :return: (nothing)\n \"\"\"\n xPos = self.mouse.xGridPos\n yPos = self.mouse.yGridPos\n\n #Changing the hoverblock color depending if the mouse is hovering over an living cell or not.\n if(self.getCellFromPosition(xPos, yPos)):\n self.screen.canvas.itemconfig(self.screen.hoverBlock, fill='#ff0000')\n else:\n self.screen.canvas.itemconfig(self.screen.hoverBlock, fill='#00ff00')\n\n #Placing an cell on the playground if the user is clicking on the playground\n if(self.mouse.leftButton and self.clickSwitch == False):\n if(self.keyboard.shiftKey):\n clickedCell = self.getCellFromPosition(xPos, yPos)\n if(clickedCell == False):\n self.createCell(xPos, yPos)\n else:\n self.deleteCell(clickedCell)\n self.clickSwitch = True\n\n if (self.mouse.leftButton == False and self.clickSwitch == True):\n self.clickSwitch = False\n \n\n def deleteCell(self, cell):\n \"\"\"\n Deleting a cell from the cell-list.\n :param cell: The cell that is going to be delete.\n :return: (nothing)\n \"\"\"\n index = self.cells.index(cell)\n self.cells[index].delete()\n self.cells.remove(cell)\n\n\n def createCell(self, xPos, yPos):\n \"\"\"\n Creates a new cell for a given position.\n :param xPos: The x-position on the grid.\n :param yPos: the y-position on the grid\n :return: (nothing)\n \"\"\"\n self.cells.append(Cell(self.screen, xPos, yPos))\n\n\n def getCellFromPosition(self, xPos, yPos):\n \"\"\"\n Gets a cell from a given position.\n :param xPos: The x-position on the grid.\n :param yPos: the y-position on the grid\n :return: Cell\n \"\"\"\n for cell in self.cells:\n if(xPos == cell.x and yPos == cell.y):\n return cell\n return False\n\n\n def clearPlayground(self):\n \"\"\"\n Removes all the cells from the playground\n :return: (nothing)\n \"\"\"\n\n for cell in self.cells:\n cell.delete()\n self.cells = []\n self.generation = 0\n\n\n def importPlayground(self, filepath):\n \"\"\"\n This function is importing a playground.\n :param filepath: The filepath to import the playground to. 
\n :return: (nothing)\n \"\"\"\n\n cellOutOfBound = False\n avgXPos = 0\n avgYPos = 0\n fileWrite = open(filepath, \"r\")\n cellPositions = fileWrite.readlines()\n\n self.clearPlayground()\n \n for cellPos in cellPositions:\n\n #Cleans the string\n cleanCellPos = fileTools.cleanString(cellPos)\n if(cleanCellPos == \"\"):\n continue\n\n #Check the format.\n cleanCellPos = self.checkFileFormat(cleanCellPos)\n if(cleanCellPos):\n cellXPos, cellYPos = cleanCellPos\n else:\n return\n\n #Checks if the coords is outside the world.\n if(cellXPos > self.screen.worldSize or cellYPos > self.screen.worldSize or cellXPos < 0 or cellYPos < 0):\n cellOutOfBound = True\n else:\n newCell = Cell(self.screen, cellXPos, cellYPos)\n rectCellPos = self.screen.canvas.coords(newCell.rect)\n avgXPos += rectCellPos[0]; avgYPos += rectCellPos[1]\n\n self.cells.append(newCell)\n\n #Print warning that some cells are not renderd.\n if(cellOutOfBound):\n messagebox.showwarning(\"Warning!\", \"Some cells are placed outside of the playground!\")\n\n #Moving the user to where the cells are.\n avgXPos /= len(cellPositions); avgYPos /= len(cellPositions)\n self.screen.offsetX += avgXPos - self.screen.width/2\n self.screen.offsetY += avgYPos - self.screen.height/2\n\n\n def exportPlayground(self, filepath):\n \"\"\"\n This function is exporting a playground.\n :param filepath: The filepath to export the playground to. \n :return: (nothing)\n \"\"\"\n cellPositions = \"\"\n for cell in self.cells:\n if(cell.dead == False):\n cellPositions += str(cell.x) + \" \" + str(cell.y) + \"\\n\"\n \n fileWrite = open(filepath, \"w\")\n fileWrite.write(cellPositions)\n fileWrite.close()\n \n\n def checkFileFormat(self, cellPos):\n \"\"\"\n Checks if the file has the right format for this program.\n :param fileContent: The content of the file\n :return: The positions in a tuple, (x, y), false if there is an error.\n \"\"\"\n try:\n cellPosList = cellPos.split()\n cellXPos = int(cellPosList[0])\n cellYPos = int(cellPosList[1])\n except ValueError:\n messagebox.showerror(\"Error: Wrong format\", \"The choosen file do not have the correct format. Be so kind to choose an other file.\")\n return False\n pass\n\n return (cellXPos, cellYPos)\n\n\n def removeCells(self, cellArray):\n \"\"\"\n Deletes all the cells from the array and playground.\n :param cellArray: The array of cells to delete.\n :return: (nothing)\n \"\"\"\n for cell in cellArray:\n cell.delete()\n self.cells.remove(cell)\n\n\n def setNeighbors(self):\n \"\"\"\n Creates dead cells around all living cells and calculating all the neighbors for the dead and the living cells\n :return: (nothing)\n \"\"\"\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n\n #Checks the 8 cells around the living one. \n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n\n #If the position is outside the world, loop around.\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n\n #Skipping itself. 
Becouse we do not want to calculate itself as a neighbor\n if(neighborsX == cell.x and neighborsY == cell.y):\n continue\n else:\n #Checks if a cell exist at neighborsX, neighborsY\n cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)\n if(cellToCheck != False):\n #Add one to the neighbor var if there already exist and cell for the given position.\n cellToCheck.numOfNeighbor += 1\n else:\n #Creates a new cell if it do not exist any.\n newCell = Cell(self.screen, neighborsX, neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)\n\n\n def checkAmountOfNeighbors(self):\n \"\"\"\n Check the amount of neighbors and kills or creates new cell depending on the amount.\n :return: (nothing)\n \"\"\"\n cellsToDelete = []\n for cell in self.cells:\n if(cell.numOfNeighbor > 3 or cell.numOfNeighbor < 2 or (cell.numOfNeighbor == 2 and cell.dead == True)):\n cellsToDelete.append(cell)\n elif(cell.numOfNeighbor == 3 and cell.dead == True):\n cell.makeAlive()\n cell.numOfNeighbor = 0\n\n self.removeCells(cellsToDelete)\n\n def nextGeneration(self):\n \"\"\"\n This method is updating the cells to the next generation.\n :return: (nothing)\n\n Thanks to Martins for the idea to have modolu of the current posotion.\n \"\"\"\n\n # Start a timer to calculate the time the render one generation.\n startTime = int(round(time.time() * 100000))\n\n self.generation += 1\n\n self.setNeighbors()\n self.checkAmountOfNeighbors()\n\n # Ends a timer to calculate the time the render one generation.\n endTime = int(round(time.time() * 100000))\n self.timeToCalcGeneration = (endTime - startTime)\n", "step-ids": [ 12, 16, 17, 18, 19 ] }
[ 12, 16, 17, 18, 19 ]
# -*- coding: utf-8 -*-
import math


# Bubble Sort
# Compare adjacent elements; if the first is larger than the second, swap them.
# Do the same for every pair of adjacent elements, from the first pair at the start to the last pair at the end,
# so that the largest value ends up at the end of the list.
# Repeat the steps above for all elements except the last one,
# and keep repeating until the list is sorted.
# Average time complexity of bubble sort: O(n^2)


def bubble_sort(input):
    print("\nBubble Sort")
    input_len = len(input)
    print("length of input: %d" % input_len)
    for i in range(0, input_len):
        for j in range(0, input_len - 1 - i):
            if input[j] > input[j + 1]:
                tmp = input[j + 1]
                input[j + 1] = input[j]
                input[j] = tmp
    return input


test_arr = [3, 4, 1, 6, 30, 5]
test_arr_bubble_sorted = bubble_sort(test_arr)
print(test_arr_bubble_sorted)


# Selection Sort
# Selection sort is a simple, intuitive sorting algorithm. It works by finding the smallest (or largest) element
# in the unsorted part and placing it at the start of the sorted part, then repeatedly finding the smallest
# (or largest) element among the remaining unsorted elements and appending it to the end of the sorted part,
# until all elements are sorted.
# Average time complexity of selection sort: O(n^2)


def select_sort(input):
    print("\nSelect Sort")
    input_len = len(input)
    for i in range(0, input_len):
        min_index = i
        for j in range(i + 1, input_len):
            if input[j] < input[min_index]:
                min_index = j

        tmp = input[i]
        input[i] = input[min_index]
        input[min_index] = tmp
    return input


test_arr = [3, 4, 1, 6, 30, 5]
test_arr_select_sorted = select_sort(test_arr)
print(test_arr_select_sorted)


# Insertion Sort
# Insertion sort is a simple, intuitive sorting algorithm. It builds a sorted sequence and, for each unsorted element,
# scans the sorted part from back to front to find the right position and inserts the element there.

# Merge Sort
# Merge sort uses binary splitting; the underlying idea is divide and conquer. Take a long array and keep splitting it
# into a left half and a right half recursively,
# then merge the halves back together the way two sorted arrays are merged.
# Time complexity of merge sort: O(nlogn)


def merge_sort(input):
    input_len = len(input)
    if input_len <= 1:
        return input
    mid = math.floor(input_len / 2)
    left = merge_sort(input[:mid])
    right = merge_sort(input[mid:])
    return merge(left, right)


def merge(sorted_arr1, sorted_arr2):
    result = []
    i = j = 0
    while i < len(sorted_arr1) and j < len(sorted_arr2):
        if sorted_arr1[i] < sorted_arr2[j]:
            result.append(sorted_arr1[i])
            i = i + 1
        else:
            result.append(sorted_arr2[j])
            j = j + 1

    if i == len(sorted_arr1):
        for item in sorted_arr2[j:]:
            result.append(item)
    else:
        for item in sorted_arr1[i:]:
            result.append(item)
    return result


test_arr = [3, 4, 1, 6, 30, 5]
print("\nMerge Sort")
test_arr_merge_sorted = merge_sort(test_arr)
print(test_arr_merge_sorted)


# Quick Sort
# Quick sort uses divide and conquer to split one list into two sub-lists. The algorithm is described as follows:
#
# Pick an element from the list, called the pivot;
# Reorder the list so that every element smaller than the pivot comes before it and every element larger than the
# pivot comes after it (equal values can go to either side). After this step the pivot sits in its final position;
# this is called the partition operation;
# Recursively sort the sub-list of elements smaller than the pivot and the sub-list of elements larger than the pivot.
# Time complexity of quick sort: O(nlogn)


def quick_sort(li, start, end):
    # Divide and conquer: split the range in two.
    # start == end means there is only one element left to handle.
    # start > end means there is no data on the right side.
    if start >= end:
        return
    # Define two cursors, pointing at the start and the end of the range.
    left = start
    right = end
    # Treat the value at the start position as the pivot.
    mid = li[left]
    while left < right:
        # Move the right cursor to the left to find a value smaller than the pivot, and put it at the left cursor.
        while left < right and li[right] >= mid:
            right -= 1
        li[left] = li[right]
        # Move the left cursor to the right to find a value larger than the pivot, and put it at the right cursor.
        while left < right and li[left] < mid:
            left += 1
        li[right] = li[left]
    # After the while loop left == right; put the pivot at that middle position.
    li[left] = mid
    # Recursively handle the data on the left.
    quick_sort(li, start, left-1)
    # Recursively handle the data on the right.
    quick_sort(li, left+1, end)


test_arr = [3, 4, 1, 6, 30, 5]
print("\nQuick Sort")
quick_sort(test_arr, 0, len(test_arr)-1)
print(test_arr)
normal
{ "blob_id": "c967aa647a97b17c9a7493559b9a1577dd95263a", "index": 7806, "step-1": "<mask token>\n\n\ndef select_sort(input):\n print('\\nSelect Sort')\n input_len = len(input)\n for i in range(0, input_len):\n min_index = i\n for j in range(i + 1, input_len):\n if input[j] < input[min_index]:\n min_index = j\n tmp = input[i]\n input[i] = input[min_index]\n input[min_index] = tmp\n return input\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef bubble_sort(input):\n print('\\nBubble Sort')\n input_len = len(input)\n print('length of input: %d' % input_len)\n for i in range(0, input_len):\n for j in range(0, input_len - 1 - i):\n if input[j] > input[j + 1]:\n tmp = input[j + 1]\n input[j + 1] = input[j]\n input[j] = tmp\n return input\n\n\n<mask token>\nprint(test_arr_bubble_sorted)\n\n\ndef select_sort(input):\n print('\\nSelect Sort')\n input_len = len(input)\n for i in range(0, input_len):\n min_index = i\n for j in range(i + 1, input_len):\n if input[j] < input[min_index]:\n min_index = j\n tmp = input[i]\n input[i] = input[min_index]\n input[min_index] = tmp\n return input\n\n\n<mask token>\nprint(test_arr_select_sorted)\n\n\ndef merge_sort(input):\n input_len = len(input)\n if input_len <= 1:\n return input\n mid = math.floor(input_len / 2)\n left = merge_sort(input[:mid])\n right = merge_sort(input[mid:])\n return merge(left, right)\n\n\ndef merge(sorted_arr1, sorted_arr2):\n result = []\n i = j = 0\n while i < len(sorted_arr1) and j < len(sorted_arr2):\n if sorted_arr1[i] < sorted_arr2[j]:\n result.append(sorted_arr1[i])\n i = i + 1\n else:\n result.append(sorted_arr2[j])\n j = j + 1\n if i == len(sorted_arr1):\n for item in sorted_arr2[j:]:\n result.append(item)\n else:\n for item in sorted_arr1[i:]:\n result.append(item)\n return result\n\n\n<mask token>\nprint('\\nMerge Sort')\n<mask token>\nprint(test_arr_merge_sorted)\n\n\ndef quick_sort(li, start, end):\n if start >= end:\n return\n left = start\n right = end\n mid = li[left]\n while left < right:\n while left < right and li[right] >= mid:\n right -= 1\n li[left] = li[right]\n while left < right and li[left] < mid:\n left += 1\n li[right] = li[left]\n li[left] = mid\n quick_sort(li, start, left - 1)\n quick_sort(li, left + 1, end)\n\n\n<mask token>\nprint('\\nQuick Sort')\nquick_sort(test_arr, 0, len(test_arr) - 1)\nprint(test_arr)\n", "step-3": "<mask token>\n\n\ndef bubble_sort(input):\n print('\\nBubble Sort')\n input_len = len(input)\n print('length of input: %d' % input_len)\n for i in range(0, input_len):\n for j in range(0, input_len - 1 - i):\n if input[j] > input[j + 1]:\n tmp = input[j + 1]\n input[j + 1] = input[j]\n input[j] = tmp\n return input\n\n\ntest_arr = [3, 4, 1, 6, 30, 5]\ntest_arr_bubble_sorted = bubble_sort(test_arr)\nprint(test_arr_bubble_sorted)\n\n\ndef select_sort(input):\n print('\\nSelect Sort')\n input_len = len(input)\n for i in range(0, input_len):\n min_index = i\n for j in range(i + 1, input_len):\n if input[j] < input[min_index]:\n min_index = j\n tmp = input[i]\n input[i] = input[min_index]\n input[min_index] = tmp\n return input\n\n\ntest_arr = [3, 4, 1, 6, 30, 5]\ntest_arr_select_sorted = select_sort(test_arr)\nprint(test_arr_select_sorted)\n\n\ndef merge_sort(input):\n input_len = len(input)\n if input_len <= 1:\n return input\n mid = math.floor(input_len / 2)\n left = merge_sort(input[:mid])\n right = merge_sort(input[mid:])\n return merge(left, right)\n\n\ndef merge(sorted_arr1, sorted_arr2):\n result = []\n i = j = 0\n while i < len(sorted_arr1) and j < len(sorted_arr2):\n if 
sorted_arr1[i] < sorted_arr2[j]:\n result.append(sorted_arr1[i])\n i = i + 1\n else:\n result.append(sorted_arr2[j])\n j = j + 1\n if i == len(sorted_arr1):\n for item in sorted_arr2[j:]:\n result.append(item)\n else:\n for item in sorted_arr1[i:]:\n result.append(item)\n return result\n\n\ntest_arr = [3, 4, 1, 6, 30, 5]\nprint('\\nMerge Sort')\ntest_arr_merge_sorted = merge_sort(test_arr)\nprint(test_arr_merge_sorted)\n\n\ndef quick_sort(li, start, end):\n if start >= end:\n return\n left = start\n right = end\n mid = li[left]\n while left < right:\n while left < right and li[right] >= mid:\n right -= 1\n li[left] = li[right]\n while left < right and li[left] < mid:\n left += 1\n li[right] = li[left]\n li[left] = mid\n quick_sort(li, start, left - 1)\n quick_sort(li, left + 1, end)\n\n\ntest_arr = [3, 4, 1, 6, 30, 5]\nprint('\\nQuick Sort')\nquick_sort(test_arr, 0, len(test_arr) - 1)\nprint(test_arr)\n", "step-4": "import math\n\n\ndef bubble_sort(input):\n print('\\nBubble Sort')\n input_len = len(input)\n print('length of input: %d' % input_len)\n for i in range(0, input_len):\n for j in range(0, input_len - 1 - i):\n if input[j] > input[j + 1]:\n tmp = input[j + 1]\n input[j + 1] = input[j]\n input[j] = tmp\n return input\n\n\ntest_arr = [3, 4, 1, 6, 30, 5]\ntest_arr_bubble_sorted = bubble_sort(test_arr)\nprint(test_arr_bubble_sorted)\n\n\ndef select_sort(input):\n print('\\nSelect Sort')\n input_len = len(input)\n for i in range(0, input_len):\n min_index = i\n for j in range(i + 1, input_len):\n if input[j] < input[min_index]:\n min_index = j\n tmp = input[i]\n input[i] = input[min_index]\n input[min_index] = tmp\n return input\n\n\ntest_arr = [3, 4, 1, 6, 30, 5]\ntest_arr_select_sorted = select_sort(test_arr)\nprint(test_arr_select_sorted)\n\n\ndef merge_sort(input):\n input_len = len(input)\n if input_len <= 1:\n return input\n mid = math.floor(input_len / 2)\n left = merge_sort(input[:mid])\n right = merge_sort(input[mid:])\n return merge(left, right)\n\n\ndef merge(sorted_arr1, sorted_arr2):\n result = []\n i = j = 0\n while i < len(sorted_arr1) and j < len(sorted_arr2):\n if sorted_arr1[i] < sorted_arr2[j]:\n result.append(sorted_arr1[i])\n i = i + 1\n else:\n result.append(sorted_arr2[j])\n j = j + 1\n if i == len(sorted_arr1):\n for item in sorted_arr2[j:]:\n result.append(item)\n else:\n for item in sorted_arr1[i:]:\n result.append(item)\n return result\n\n\ntest_arr = [3, 4, 1, 6, 30, 5]\nprint('\\nMerge Sort')\ntest_arr_merge_sorted = merge_sort(test_arr)\nprint(test_arr_merge_sorted)\n\n\ndef quick_sort(li, start, end):\n if start >= end:\n return\n left = start\n right = end\n mid = li[left]\n while left < right:\n while left < right and li[right] >= mid:\n right -= 1\n li[left] = li[right]\n while left < right and li[left] < mid:\n left += 1\n li[right] = li[left]\n li[left] = mid\n quick_sort(li, start, left - 1)\n quick_sort(li, left + 1, end)\n\n\ntest_arr = [3, 4, 1, 6, 30, 5]\nprint('\\nQuick Sort')\nquick_sort(test_arr, 0, len(test_arr) - 1)\nprint(test_arr)\n", "step-5": "# -*- coding: utf-8 -*-\nimport math\n\n\n# 冒泡排序(Bubble Sort)\n# 比较相邻的元素。如果第一个比第二个大,就交换它们两个;\n# 对每一对相邻元素作同样的工作,从开始第一对到结尾的最后一对,这样在最后的元素应该会是最大的数;\n# 针对所有的元素重复以上的步骤,除了最后一个;\n# 重复步骤1~3,直到排序完成。\n# 冒泡排序总的平均时间复杂度为:O(n^2)\n\n\ndef bubble_sort(input):\n print(\"\\nBubble Sort\")\n input_len = len(input)\n print(\"length of input: %d\" % input_len)\n for i in range(0, input_len):\n for j in range(0, input_len - 1 - i):\n if input[j] > input[j + 1]:\n tmp = input[j + 1]\n input[j + 1] = input[j]\n input[j] 
= tmp\n return input\n\n\ntest_arr = [3, 4, 1, 6, 30, 5]\ntest_arr_bubble_sorted = bubble_sort(test_arr)\nprint(test_arr_bubble_sorted)\n\n\n# 选择排序(Selection-sort)\n# 选择排序(Selection-sort)是一种简单直观的排序算法。它的工作原理:首先在未排序序列中找到最小(大)元素,存放到排序序列的起始位置,\n# 然后,再从剩余未排序元素中继续寻找最小(大)元素,然后放到已排序序列的末尾。以此类推,直到所有元素均排序完毕。\n# 选择排序总的平均时间复杂度为:O(n^2)\n\n\ndef select_sort(input):\n print(\"\\nSelect Sort\")\n input_len = len(input)\n for i in range(0, input_len):\n min_index = i\n for j in range(i + 1, input_len):\n if input[j] < input[min_index]:\n min_index = j\n\n tmp = input[i]\n input[i] = input[min_index]\n input[min_index] = tmp\n return input\n\n\ntest_arr = [3, 4, 1, 6, 30, 5]\ntest_arr_select_sorted = select_sort(test_arr)\nprint(test_arr_select_sorted)\n\n\n# 插入排序(Insertion Sort)\n# 插入排序(Insertion-Sort)的算法描述是一种简单直观的排序算法。它的工作原理是通过构建有序序列,对于未排序数据,\n# 在已排序序列中从后向前扫描,找到相应位置并插入。\n\n# 归并排序(Merge Sort)\n# 首先归并排序使用了二分法,归根到底的思想还是分而治之。拿到一个长数组,将其不停的分为左边和右边两份,然后以此递归分下去。\n# 然后再将她们按照两个有序数组的样子合并起来。\n# 归并排序时间复杂度是o(nlogn)\n\n\ndef merge_sort(input):\n input_len = len(input)\n if input_len <= 1:\n return input\n mid = math.floor(input_len / 2)\n left = merge_sort(input[:mid])\n right = merge_sort(input[mid:])\n return merge(left, right)\n\n\ndef merge(sorted_arr1, sorted_arr2):\n result = []\n i = j = 0\n while i < len(sorted_arr1) and j < len(sorted_arr2):\n if sorted_arr1[i] < sorted_arr2[j]:\n result.append(sorted_arr1[i])\n i = i + 1\n else:\n result.append(sorted_arr2[j])\n j = j + 1\n\n if i == len(sorted_arr1):\n for item in sorted_arr2[j:]:\n result.append(item)\n else:\n for item in sorted_arr1[i:]:\n result.append(item)\n return result\n\n\ntest_arr = [3, 4, 1, 6, 30, 5]\nprint(\"\\nMerge Sort\")\ntest_arr_merge_sorted = merge_sort(test_arr)\nprint(test_arr_merge_sorted)\n\n\n# 快速排序(Quick Sort)\n# 快速排序使用分治法来把一个串(list)分为两个子串(sub-lists)。具体算法描述如下:\n#\n# 从数列中挑出一个元素,称为 “基准”(pivot);\n# 重新排序数列,所有元素比基准值小的摆放在基准前面,所有元素比基准值大的摆在基准的后面(相同的数可以到任一边)。\n# 在这个分区退出之后,该基准就处于数列的中间位置。这个称为分区(partition)操作;\n# 递归地(recursive)把小于基准值元素的子数列和大于基准值元素的子数列排序。\n# 快速排序时间复杂度是o(nlogn)\n\n\ndef quick_sort(li, start, end):\n # 分治 一分为二\n # start=end ,证明要处理的数据只有一个\n # start>end ,证明右边没有数据\n if start >= end:\n return\n # 定义两个游标,分别指向0和末尾位置\n left = start\n right = end\n # 把0位置的数据,认为是中间值\n mid = li[left]\n while left < right:\n # 让右边游标往左移动,目的是找到小于mid的值,放到left游标位置\n while left < right and li[right] >= mid:\n right -= 1\n li[left] = li[right]\n # 让左边游标往右移动,目的是找到大于mid的值,放到right游标位置\n while left < right and li[left] < mid:\n left += 1\n li[right] = li[left]\n # while结束后,把mid放到中间位置,left=right\n li[left] = mid\n # 递归处理左边的数据\n quick_sort(li, start, left-1)\n # 递归处理右边的数据\n quick_sort(li, left+1, end)\n\n\ntest_arr = [3, 4, 1, 6, 30, 5]\nprint(\"\\nQuick Sort\")\nquick_sort(test_arr, 0, len(test_arr)-1)\nprint(test_arr)\n", "step-ids": [ 1, 6, 7, 8, 9 ] }
[ 1, 6, 7, 8, 9 ]
import sys from sklearn.svm import SVC from sklearn.model_selection import KFold,cross_validate,GridSearchCV from data_prepr import data_preprocessing import numpy as np def main(): #if dataset is not provided on call terminate if len(sys.argv)<2: print("usage: python svm_parameter_tuning.py <input_file> ") sys.exit() #pass dataset and get the matrix containing the data vectors and data targets ret_value=data_preprocessing(sys.argv[1]) data_matrix=ret_value[0] category_labels=ret_value[1] #create k_fold iterator to calculate metrics k_fold = KFold(n_splits=10) #perform grid search to determine parameter tuning c_range = [np.power(2.0,i) for i in range(-5, 10)] gamma_range = [np.power(2.0,i) for i in range(-10, -5)] param_grid = [{'kernel': ['rbf'], 'gamma': gamma_range,'C':c_range},{'kernel': ['linear'], 'C': c_range}] clf = GridSearchCV(SVC(),param_grid,cv=k_fold,scoring='accuracy',n_jobs=-1) clf.fit(data_matrix,category_labels) #print chosen hyperparameters print "Best accuracy achieved:"+ str(clf.best_score_) + " with below settings." for key,value in clf.best_params_.iteritems(): print key + ":" + str(value) #save best hyperparameter values on a dictionary in file hyperparameter_values.py output=open('./hyperparameter_values.py','w') output.write('HYPERPARAMETER_VALUES={') first=True for key,value in clf.best_params_.iteritems(): if first==True: output.write("\'"+key+"\':") first=False else: output.write(",\'"+key+"\':") if isinstance(value,str): output.write("\'"+value+"\'") else: output.write(str(value)) output.write('}') if __name__ == '__main__': main()
normal
{ "blob_id": "c5842b17b2587149cd13448593a6ed31b091ba77", "index": 4971, "step-1": "import sys\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import KFold,cross_validate,GridSearchCV\nfrom data_prepr import data_preprocessing\nimport numpy as np\n\n\ndef main():\n\t#if dataset is not provided on call terminate\n\tif len(sys.argv)<2:\n\t\tprint(\"usage: python svm_parameter_tuning.py <input_file> \")\n\t\tsys.exit()\n\n\t#pass dataset and get the matrix containing the data vectors and data targets\n\tret_value=data_preprocessing(sys.argv[1])\n\tdata_matrix=ret_value[0]\n\tcategory_labels=ret_value[1]\n\n\t#create k_fold iterator to calculate metrics\n\tk_fold = KFold(n_splits=10)\n\n\t#perform grid search to determine parameter tuning\n\tc_range = [np.power(2.0,i) for i in range(-5, 10)]\n\tgamma_range = [np.power(2.0,i) for i in range(-10, -5)]\n\tparam_grid = [{'kernel': ['rbf'], 'gamma': gamma_range,'C':c_range},{'kernel': ['linear'], 'C': c_range}]\n\tclf = GridSearchCV(SVC(),param_grid,cv=k_fold,scoring='accuracy',n_jobs=-1)\n\tclf.fit(data_matrix,category_labels)\n\n\t#print chosen hyperparameters\n\tprint \"Best accuracy achieved:\"+ str(clf.best_score_) + \" with below settings.\"\n\tfor key,value in clf.best_params_.iteritems():\n\t\tprint key + \":\" + str(value)\n\t#save best hyperparameter values on a dictionary in file hyperparameter_values.py\n\toutput=open('./hyperparameter_values.py','w')\n\toutput.write('HYPERPARAMETER_VALUES={')\n\tfirst=True\n\tfor key,value in clf.best_params_.iteritems():\n\t\tif first==True:\n\t\t\toutput.write(\"\\'\"+key+\"\\':\")\n\t\t\tfirst=False\n\t\telse:\n\t\t\toutput.write(\",\\'\"+key+\"\\':\")\n\n\t\tif isinstance(value,str):\n\t\t\toutput.write(\"\\'\"+value+\"\\'\")\n\t\telse:\n\t\t\toutput.write(str(value))\n\toutput.write('}')\n\n\n\nif __name__ == '__main__':\n\tmain()", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
#!/usr/bin/python3 from optparse import OptionParser from urllib import request, parse from urllib.error import URLError, HTTPError import ssl import re ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ssl_context.options &= ssl.CERT_NONE class Settings: SINGLETON = None def __init__(self): self.url_pattern = r'href="((http[s]?://|/)(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)"' self.crawl_content_type_whitelist_pattern = r'text/html' self.crawl_url_blacklist_pattern = r'/activity/' self.no_color = False self.verbosity = 2 # Options singleton @classmethod def instance(cls): if Settings.SINGLETON is None: Settings.SINGLETON = Settings() return Settings.SINGLETON class Style: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' @classmethod def colored(cls, string, color): return string if Settings.instance().no_color else ("%s%s%s" % (color, string, Style.ENDC)) def info(string): return Style.colored(string, Style.OK_BLUE) def warn(string): return Style.colored(string, Style.WARNING) def error(string): return Style.colored(string, Style.FAIL) def result(string): return Style.colored(string, Style.OKGREEN) def log(priority, string): if priority <= Settings.instance().verbosity: print(string) class MapItem: def __init__(self): self.code = 200 self.children = [] def main(): settings = Settings.instance() usage = "usage: %prog [options] site_url" parser = OptionParser(usage=usage) parser.add_option("-c", "--content-type-regex", metavar="REGEX", dest="content_type_regex", help="whitelist regex for content-type of crawled nodes", default=settings.crawl_content_type_whitelist_pattern) parser.add_option("-b", "--url-blacklist-regex", metavar="REGEX", dest="url_blacklist_regex", help="blacklist regex for URLs of crawled nodes", default=settings.crawl_url_blacklist_pattern) parser.add_option("-v", action="count", dest="verbosity", default=0, help="output verbosity") parser.add_option("--no-color", action="store_true", dest="no_color") (opts, args) = parser.parse_args() if len(args) != 1: parser.print_help() return site_url = args[0] settings.crawl_content_type_whitelist_pattern = opts.content_type_regex settings.crawl_url_blacklist_pattern = opts.url_blacklist_regex settings.verbosity = opts.verbosity settings.no_color = opts.no_color site_map = map_site(site_url) external_urls = get_external_children(site_url, site_map) for url in sorted(external_urls): try: request.urlopen(url) log(1, Style.result('EXT: %s' % url)) except HTTPError as e: referrers = find_referrers(url, site_map) log(0, Style.error('EXT-ERROR-HTTP: %d %s' % (e.code, url))) log(0, Style.error('\n'.join(' REF: %s' % r for r in referrers))) except URLError as e: referrers = find_referrers(url, site_map) log(0, Style.error('EXT-ERROR: %s %s' % (e.reason, url))) log(0, Style.error('\n'.join(' REF: %s' % r for r in referrers))) def find_referrers(url, site_map): results = [] for referrer, item in site_map.items(): for child in item.children: if child == url: results.append(referrer) break return results def get_external_children(site_url, site_map): site_host = strip_path(site_url) external = set() for item in site_map.values(): for child in item.children: if not child.startswith(site_host): external.add(child) return external def map_site(site_url): site_map = {} settings = Settings.instance() crawl_url_blacklist_regex = re.compile(settings.crawl_url_blacklist_pattern) crawl_content_type_whitelist_regex = 
re.compile(settings.crawl_content_type_whitelist_pattern) url_regex = re.compile(settings.url_pattern) to_crawl = [site_url] while to_crawl: next_crawl = to_crawl.pop(0) new_crawl = crawl(next_crawl, site_url, site_map, crawl_url_blacklist_regex, crawl_content_type_whitelist_regex, url_regex) to_crawl += new_crawl return site_map def crawl(url, site_host, site_map, crawl_url_blacklist_regex, crawl_content_type_whitelist_regex, url_regex): if url in site_map: return [] log(2, 'CRAWL: %s' % url) map_item = MapItem() site_map[url] = map_item if not url.startswith(site_host): log(2, Style.warn('SKIP: external %s' % url)) return [] if crawl_url_blacklist_regex.search(url): log(2, Style.warn('SKIP: blacklist %s' % url)) return [] try: response = request.urlopen(url, context=ssl_context) except URLError as e: log(2, Style.error('ERROR: %d %s' % (e.code, url))) map_item.code = e.code return [] if not crawl_content_type_whitelist_regex.search(response.info().get('content-type')): log(2, Style.warn('SKIP: content-type %s' % url)) return [] content = ( response.read() .decode('utf-8') .replace(r'\n', '\n') .replace(r'\r', '\r')) for match in url_regex.finditer(content): child = match.group(1) if child.startswith('/'): child = parse.urljoin(site_host, child) child = strip_query(child) if child not in map_item.children: map_item.children.append(child) return map_item.children def strip_path(url): parsed = parse.urlparse(url) return parse.urlunparse((parsed.scheme, parsed.netloc, '', '', '', '')) def strip_query(url): parsed = parse.urlparse(url) return parse.urlunparse((parsed.scheme, parsed.netloc, parsed.path, '', '', '')) if __name__ == '__main__': main()
normal
{ "blob_id": "e92a738d3233450b255605619dafadd4d829604b", "index": 9067, "step-1": "<mask token>\n\n\nclass Settings:\n SINGLETON = None\n\n def __init__(self):\n self.url_pattern = (\n 'href=\"((http[s]?://|/)(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)\"'\n )\n self.crawl_content_type_whitelist_pattern = 'text/html'\n self.crawl_url_blacklist_pattern = '/activity/'\n self.no_color = False\n self.verbosity = 2\n\n @classmethod\n def instance(cls):\n if Settings.SINGLETON is None:\n Settings.SINGLETON = Settings()\n return Settings.SINGLETON\n\n\nclass Style:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n\n @classmethod\n def colored(cls, string, color):\n return string if Settings.instance().no_color else '%s%s%s' % (color,\n string, Style.ENDC)\n\n def info(string):\n return Style.colored(string, Style.OK_BLUE)\n\n def warn(string):\n return Style.colored(string, Style.WARNING)\n\n def error(string):\n return Style.colored(string, Style.FAIL)\n\n def result(string):\n return Style.colored(string, Style.OKGREEN)\n\n\n<mask token>\n\n\nclass MapItem:\n\n def __init__(self):\n self.code = 200\n self.children = []\n\n\n<mask token>\n\n\ndef find_referrers(url, site_map):\n results = []\n for referrer, item in site_map.items():\n for child in item.children:\n if child == url:\n results.append(referrer)\n break\n return results\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Settings:\n SINGLETON = None\n\n def __init__(self):\n self.url_pattern = (\n 'href=\"((http[s]?://|/)(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)\"'\n )\n self.crawl_content_type_whitelist_pattern = 'text/html'\n self.crawl_url_blacklist_pattern = '/activity/'\n self.no_color = False\n self.verbosity = 2\n\n @classmethod\n def instance(cls):\n if Settings.SINGLETON is None:\n Settings.SINGLETON = Settings()\n return Settings.SINGLETON\n\n\nclass Style:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n\n @classmethod\n def colored(cls, string, color):\n return string if Settings.instance().no_color else '%s%s%s' % (color,\n string, Style.ENDC)\n\n def info(string):\n return Style.colored(string, Style.OK_BLUE)\n\n def warn(string):\n return Style.colored(string, Style.WARNING)\n\n def error(string):\n return Style.colored(string, Style.FAIL)\n\n def result(string):\n return Style.colored(string, Style.OKGREEN)\n\n\ndef log(priority, string):\n if priority <= Settings.instance().verbosity:\n print(string)\n\n\nclass MapItem:\n\n def __init__(self):\n self.code = 200\n self.children = []\n\n\n<mask token>\n\n\ndef find_referrers(url, site_map):\n results = []\n for referrer, item in site_map.items():\n for child in item.children:\n if child == url:\n results.append(referrer)\n break\n return results\n\n\ndef get_external_children(site_url, site_map):\n site_host = strip_path(site_url)\n external = set()\n for item in site_map.values():\n for child in item.children:\n if not child.startswith(site_host):\n external.add(child)\n return external\n\n\n<mask token>\n\n\ndef crawl(url, site_host, site_map, crawl_url_blacklist_regex,\n crawl_content_type_whitelist_regex, url_regex):\n if url in site_map:\n return []\n log(2, 'CRAWL: %s' % url)\n map_item = MapItem()\n site_map[url] = map_item\n if not url.startswith(site_host):\n log(2, Style.warn('SKIP: external %s' % url))\n return []\n if 
crawl_url_blacklist_regex.search(url):\n log(2, Style.warn('SKIP: blacklist %s' % url))\n return []\n try:\n response = request.urlopen(url, context=ssl_context)\n except URLError as e:\n log(2, Style.error('ERROR: %d %s' % (e.code, url)))\n map_item.code = e.code\n return []\n if not crawl_content_type_whitelist_regex.search(response.info().get(\n 'content-type')):\n log(2, Style.warn('SKIP: content-type %s' % url))\n return []\n content = response.read().decode('utf-8').replace('\\\\n', '\\n').replace(\n '\\\\r', '\\r')\n for match in url_regex.finditer(content):\n child = match.group(1)\n if child.startswith('/'):\n child = parse.urljoin(site_host, child)\n child = strip_query(child)\n if child not in map_item.children:\n map_item.children.append(child)\n return map_item.children\n\n\ndef strip_path(url):\n parsed = parse.urlparse(url)\n return parse.urlunparse((parsed.scheme, parsed.netloc, '', '', '', ''))\n\n\ndef strip_query(url):\n parsed = parse.urlparse(url)\n return parse.urlunparse((parsed.scheme, parsed.netloc, parsed.path, '',\n '', ''))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Settings:\n SINGLETON = None\n\n def __init__(self):\n self.url_pattern = (\n 'href=\"((http[s]?://|/)(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)\"'\n )\n self.crawl_content_type_whitelist_pattern = 'text/html'\n self.crawl_url_blacklist_pattern = '/activity/'\n self.no_color = False\n self.verbosity = 2\n\n @classmethod\n def instance(cls):\n if Settings.SINGLETON is None:\n Settings.SINGLETON = Settings()\n return Settings.SINGLETON\n\n\nclass Style:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n\n @classmethod\n def colored(cls, string, color):\n return string if Settings.instance().no_color else '%s%s%s' % (color,\n string, Style.ENDC)\n\n def info(string):\n return Style.colored(string, Style.OK_BLUE)\n\n def warn(string):\n return Style.colored(string, Style.WARNING)\n\n def error(string):\n return Style.colored(string, Style.FAIL)\n\n def result(string):\n return Style.colored(string, Style.OKGREEN)\n\n\ndef log(priority, string):\n if priority <= Settings.instance().verbosity:\n print(string)\n\n\nclass MapItem:\n\n def __init__(self):\n self.code = 200\n self.children = []\n\n\n<mask token>\n\n\ndef find_referrers(url, site_map):\n results = []\n for referrer, item in site_map.items():\n for child in item.children:\n if child == url:\n results.append(referrer)\n break\n return results\n\n\ndef get_external_children(site_url, site_map):\n site_host = strip_path(site_url)\n external = set()\n for item in site_map.values():\n for child in item.children:\n if not child.startswith(site_host):\n external.add(child)\n return external\n\n\ndef map_site(site_url):\n site_map = {}\n settings = Settings.instance()\n crawl_url_blacklist_regex = re.compile(settings.crawl_url_blacklist_pattern\n )\n crawl_content_type_whitelist_regex = re.compile(settings.\n crawl_content_type_whitelist_pattern)\n url_regex = re.compile(settings.url_pattern)\n to_crawl = [site_url]\n while to_crawl:\n next_crawl = to_crawl.pop(0)\n new_crawl = crawl(next_crawl, site_url, site_map,\n crawl_url_blacklist_regex, crawl_content_type_whitelist_regex,\n url_regex)\n to_crawl += new_crawl\n return site_map\n\n\ndef crawl(url, site_host, site_map, crawl_url_blacklist_regex,\n crawl_content_type_whitelist_regex, url_regex):\n if url in site_map:\n return []\n log(2, 'CRAWL: %s' % url)\n map_item = 
MapItem()\n site_map[url] = map_item\n if not url.startswith(site_host):\n log(2, Style.warn('SKIP: external %s' % url))\n return []\n if crawl_url_blacklist_regex.search(url):\n log(2, Style.warn('SKIP: blacklist %s' % url))\n return []\n try:\n response = request.urlopen(url, context=ssl_context)\n except URLError as e:\n log(2, Style.error('ERROR: %d %s' % (e.code, url)))\n map_item.code = e.code\n return []\n if not crawl_content_type_whitelist_regex.search(response.info().get(\n 'content-type')):\n log(2, Style.warn('SKIP: content-type %s' % url))\n return []\n content = response.read().decode('utf-8').replace('\\\\n', '\\n').replace(\n '\\\\r', '\\r')\n for match in url_regex.finditer(content):\n child = match.group(1)\n if child.startswith('/'):\n child = parse.urljoin(site_host, child)\n child = strip_query(child)\n if child not in map_item.children:\n map_item.children.append(child)\n return map_item.children\n\n\ndef strip_path(url):\n parsed = parse.urlparse(url)\n return parse.urlunparse((parsed.scheme, parsed.netloc, '', '', '', ''))\n\n\ndef strip_query(url):\n parsed = parse.urlparse(url)\n return parse.urlunparse((parsed.scheme, parsed.netloc, parsed.path, '',\n '', ''))\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass Settings:\n SINGLETON = None\n\n def __init__(self):\n self.url_pattern = (\n 'href=\"((http[s]?://|/)(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)\"'\n )\n self.crawl_content_type_whitelist_pattern = 'text/html'\n self.crawl_url_blacklist_pattern = '/activity/'\n self.no_color = False\n self.verbosity = 2\n\n @classmethod\n def instance(cls):\n if Settings.SINGLETON is None:\n Settings.SINGLETON = Settings()\n return Settings.SINGLETON\n\n\nclass Style:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n\n @classmethod\n def colored(cls, string, color):\n return string if Settings.instance().no_color else '%s%s%s' % (color,\n string, Style.ENDC)\n\n def info(string):\n return Style.colored(string, Style.OK_BLUE)\n\n def warn(string):\n return Style.colored(string, Style.WARNING)\n\n def error(string):\n return Style.colored(string, Style.FAIL)\n\n def result(string):\n return Style.colored(string, Style.OKGREEN)\n\n\ndef log(priority, string):\n if priority <= Settings.instance().verbosity:\n print(string)\n\n\nclass MapItem:\n\n def __init__(self):\n self.code = 200\n self.children = []\n\n\ndef main():\n settings = Settings.instance()\n usage = 'usage: %prog [options] site_url'\n parser = OptionParser(usage=usage)\n parser.add_option('-c', '--content-type-regex', metavar='REGEX', dest=\n 'content_type_regex', help=\n 'whitelist regex for content-type of crawled nodes', default=\n settings.crawl_content_type_whitelist_pattern)\n parser.add_option('-b', '--url-blacklist-regex', metavar='REGEX', dest=\n 'url_blacklist_regex', help=\n 'blacklist regex for URLs of crawled nodes', default=settings.\n crawl_url_blacklist_pattern)\n parser.add_option('-v', action='count', dest='verbosity', default=0,\n help='output verbosity')\n parser.add_option('--no-color', action='store_true', dest='no_color')\n opts, args = parser.parse_args()\n if len(args) != 1:\n parser.print_help()\n return\n site_url = args[0]\n settings.crawl_content_type_whitelist_pattern = opts.content_type_regex\n settings.crawl_url_blacklist_pattern = opts.url_blacklist_regex\n settings.verbosity = opts.verbosity\n settings.no_color = opts.no_color\n site_map = 
map_site(site_url)\n external_urls = get_external_children(site_url, site_map)\n for url in sorted(external_urls):\n try:\n request.urlopen(url)\n log(1, Style.result('EXT: %s' % url))\n except HTTPError as e:\n referrers = find_referrers(url, site_map)\n log(0, Style.error('EXT-ERROR-HTTP: %d %s' % (e.code, url)))\n log(0, Style.error('\\n'.join(' REF: %s' % r for r in referrers)))\n except URLError as e:\n referrers = find_referrers(url, site_map)\n log(0, Style.error('EXT-ERROR: %s %s' % (e.reason, url)))\n log(0, Style.error('\\n'.join(' REF: %s' % r for r in referrers)))\n\n\ndef find_referrers(url, site_map):\n results = []\n for referrer, item in site_map.items():\n for child in item.children:\n if child == url:\n results.append(referrer)\n break\n return results\n\n\ndef get_external_children(site_url, site_map):\n site_host = strip_path(site_url)\n external = set()\n for item in site_map.values():\n for child in item.children:\n if not child.startswith(site_host):\n external.add(child)\n return external\n\n\ndef map_site(site_url):\n site_map = {}\n settings = Settings.instance()\n crawl_url_blacklist_regex = re.compile(settings.crawl_url_blacklist_pattern\n )\n crawl_content_type_whitelist_regex = re.compile(settings.\n crawl_content_type_whitelist_pattern)\n url_regex = re.compile(settings.url_pattern)\n to_crawl = [site_url]\n while to_crawl:\n next_crawl = to_crawl.pop(0)\n new_crawl = crawl(next_crawl, site_url, site_map,\n crawl_url_blacklist_regex, crawl_content_type_whitelist_regex,\n url_regex)\n to_crawl += new_crawl\n return site_map\n\n\ndef crawl(url, site_host, site_map, crawl_url_blacklist_regex,\n crawl_content_type_whitelist_regex, url_regex):\n if url in site_map:\n return []\n log(2, 'CRAWL: %s' % url)\n map_item = MapItem()\n site_map[url] = map_item\n if not url.startswith(site_host):\n log(2, Style.warn('SKIP: external %s' % url))\n return []\n if crawl_url_blacklist_regex.search(url):\n log(2, Style.warn('SKIP: blacklist %s' % url))\n return []\n try:\n response = request.urlopen(url, context=ssl_context)\n except URLError as e:\n log(2, Style.error('ERROR: %d %s' % (e.code, url)))\n map_item.code = e.code\n return []\n if not crawl_content_type_whitelist_regex.search(response.info().get(\n 'content-type')):\n log(2, Style.warn('SKIP: content-type %s' % url))\n return []\n content = response.read().decode('utf-8').replace('\\\\n', '\\n').replace(\n '\\\\r', '\\r')\n for match in url_regex.finditer(content):\n child = match.group(1)\n if child.startswith('/'):\n child = parse.urljoin(site_host, child)\n child = strip_query(child)\n if child not in map_item.children:\n map_item.children.append(child)\n return map_item.children\n\n\ndef strip_path(url):\n parsed = parse.urlparse(url)\n return parse.urlunparse((parsed.scheme, parsed.netloc, '', '', '', ''))\n\n\ndef strip_query(url):\n parsed = parse.urlparse(url)\n return parse.urlunparse((parsed.scheme, parsed.netloc, parsed.path, '',\n '', ''))\n\n\n<mask token>\n", "step-5": "#!/usr/bin/python3\n\nfrom optparse import OptionParser\nfrom urllib import request, parse\nfrom urllib.error import URLError, HTTPError\nimport ssl\nimport re\n\n\nssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\nssl_context.options &= ssl.CERT_NONE\n\n\nclass Settings:\n SINGLETON = None\n\n def __init__(self):\n self.url_pattern = r'href=\"((http[s]?://|/)(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)\"'\n self.crawl_content_type_whitelist_pattern = r'text/html'\n self.crawl_url_blacklist_pattern = 
r'/activity/'\n self.no_color = False\n self.verbosity = 2\n\n # Options singleton\n @classmethod\n def instance(cls):\n if Settings.SINGLETON is None:\n Settings.SINGLETON = Settings()\n return Settings.SINGLETON\n\n\nclass Style:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n\n @classmethod\n def colored(cls, string, color):\n return string if Settings.instance().no_color else (\"%s%s%s\" % (color, string, Style.ENDC))\n\n def info(string):\n return Style.colored(string, Style.OK_BLUE)\n\n def warn(string):\n return Style.colored(string, Style.WARNING)\n\n def error(string):\n return Style.colored(string, Style.FAIL)\n\n def result(string):\n return Style.colored(string, Style.OKGREEN)\n\n\ndef log(priority, string):\n if priority <= Settings.instance().verbosity:\n print(string)\n\n\nclass MapItem:\n def __init__(self):\n self.code = 200\n self.children = []\n\n\ndef main():\n settings = Settings.instance()\n usage = \"usage: %prog [options] site_url\"\n parser = OptionParser(usage=usage)\n parser.add_option(\"-c\", \"--content-type-regex\", metavar=\"REGEX\", dest=\"content_type_regex\",\n help=\"whitelist regex for content-type of crawled nodes\",\n default=settings.crawl_content_type_whitelist_pattern)\n parser.add_option(\"-b\", \"--url-blacklist-regex\", metavar=\"REGEX\", dest=\"url_blacklist_regex\",\n help=\"blacklist regex for URLs of crawled nodes\",\n default=settings.crawl_url_blacklist_pattern)\n parser.add_option(\"-v\", action=\"count\", dest=\"verbosity\", default=0,\n help=\"output verbosity\")\n parser.add_option(\"--no-color\", action=\"store_true\", dest=\"no_color\")\n\n (opts, args) = parser.parse_args()\n\n if len(args) != 1:\n parser.print_help()\n return\n\n site_url = args[0]\n settings.crawl_content_type_whitelist_pattern = opts.content_type_regex\n settings.crawl_url_blacklist_pattern = opts.url_blacklist_regex\n settings.verbosity = opts.verbosity\n settings.no_color = opts.no_color\n\n site_map = map_site(site_url)\n\n external_urls = get_external_children(site_url, site_map)\n for url in sorted(external_urls):\n try:\n request.urlopen(url)\n log(1, Style.result('EXT: %s' % url))\n except HTTPError as e:\n referrers = find_referrers(url, site_map)\n log(0, Style.error('EXT-ERROR-HTTP: %d %s' % (e.code, url)))\n log(0, Style.error('\\n'.join(' REF: %s' % r for r in referrers)))\n except URLError as e:\n referrers = find_referrers(url, site_map)\n log(0, Style.error('EXT-ERROR: %s %s' % (e.reason, url)))\n log(0, Style.error('\\n'.join(' REF: %s' % r for r in referrers)))\n\n\ndef find_referrers(url, site_map):\n results = []\n\n for referrer, item in site_map.items():\n for child in item.children:\n if child == url:\n\n results.append(referrer)\n break\n\n return results\n\n\ndef get_external_children(site_url, site_map):\n site_host = strip_path(site_url)\n external = set()\n\n for item in site_map.values():\n for child in item.children:\n if not child.startswith(site_host):\n external.add(child)\n\n return external\n\n\ndef map_site(site_url):\n site_map = {}\n settings = Settings.instance()\n crawl_url_blacklist_regex = re.compile(settings.crawl_url_blacklist_pattern)\n crawl_content_type_whitelist_regex = re.compile(settings.crawl_content_type_whitelist_pattern)\n url_regex = re.compile(settings.url_pattern)\n\n to_crawl = [site_url]\n while to_crawl:\n next_crawl = to_crawl.pop(0)\n new_crawl = crawl(next_crawl, site_url, site_map,\n crawl_url_blacklist_regex,\n 
crawl_content_type_whitelist_regex,\n url_regex)\n to_crawl += new_crawl\n\n return site_map\n\n\ndef crawl(url, site_host, site_map,\n crawl_url_blacklist_regex,\n crawl_content_type_whitelist_regex,\n url_regex):\n if url in site_map:\n return []\n\n log(2, 'CRAWL: %s' % url)\n map_item = MapItem()\n site_map[url] = map_item\n\n if not url.startswith(site_host):\n log(2, Style.warn('SKIP: external %s' % url))\n return []\n\n if crawl_url_blacklist_regex.search(url):\n log(2, Style.warn('SKIP: blacklist %s' % url))\n return []\n\n try:\n response = request.urlopen(url, context=ssl_context)\n except URLError as e:\n log(2, Style.error('ERROR: %d %s' % (e.code, url)))\n map_item.code = e.code\n return []\n\n if not crawl_content_type_whitelist_regex.search(response.info().get('content-type')):\n log(2, Style.warn('SKIP: content-type %s' % url))\n return []\n\n content = (\n response.read()\n .decode('utf-8')\n .replace(r'\\n', '\\n')\n .replace(r'\\r', '\\r'))\n\n for match in url_regex.finditer(content):\n child = match.group(1)\n if child.startswith('/'):\n child = parse.urljoin(site_host, child)\n\n child = strip_query(child)\n\n if child not in map_item.children:\n map_item.children.append(child)\n\n return map_item.children\n\n\ndef strip_path(url):\n parsed = parse.urlparse(url)\n return parse.urlunparse((parsed.scheme, parsed.netloc, '', '', '', ''))\n\n\ndef strip_query(url):\n parsed = parse.urlparse(url)\n return parse.urlunparse((parsed.scheme, parsed.netloc, parsed.path, '', '', ''))\n\n\nif __name__ == '__main__':\n main()\n", "step-ids": [ 14, 19, 20, 21, 25 ] }
[ 14, 19, 20, 21, 25 ]
import os
import cv2
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from numpy import array
import tensorflow as tf

TRAIN_DIR = 'C:/Users/vgg/untitled/MNIST/trainingSet/'
train_folder_list = array(os.listdir(TRAIN_DIR))

train_input = []
train_label = []

label_encoder = LabelEncoder()  # LabelEncoder Class 호출
integer_encoded = label_encoder.fit_transform(train_folder_list)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)

for index in range(len(train_folder_list)):
    path = os.path.join(TRAIN_DIR, train_folder_list[index])
    path = path + '/'
    img_list = os.listdir(path)
    for img in img_list:
        img_path = os.path.join(path, img)
        img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        train_input.append([np.array(img)])
        train_label.append([np.array(index)])

train_input = np.reshape(train_input, (-1, 28, 28))
train_label = np.reshape(train_label, (-1,))
train_input = np.array(train_input).astype(np.float32)
train_label = np.array(train_label).astype(np.float32)
np.save("train_data.npy", train_input)
np.save("train_label.npy", train_label)

TEST_DIR = 'C:/Users/vgg/untitled/MNIST/testSet/'
test_folder_list = array(os.listdir(TEST_DIR))

test_input = []
test_label = []

label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(test_folder_list)

onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)

for index in range(len(test_folder_list)):
    path = os.path.join(TEST_DIR, test_folder_list[index])
    path = path + '/'
    img_list = os.listdir(path)
    for img in img_list:
        img_path = os.path.join(path, img)
        img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        test_input.append([np.array(img)])
        test_label.append([np.array(index)])

test_input = np.reshape(test_input, (-1, 28, 28))
test_label = np.reshape(test_label, (-1,))
test_input = np.array(test_input).astype(np.float32)
test_label = np.array(test_label).astype(np.float32)
np.save("test_input.npy", test_input)
np.save("test_label.npy", test_label)


#-*- coding: utf-8 -*-
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.callbacks import ModelCheckpoint,EarlyStopping
import matplotlib.pyplot as plt


# seed 값 설정
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)

# 데이터 불러오기

# test_input = []
# test_label = []
#
# train_input = []
# train_label = []
X_train = train_input
Y_train = train_label
X_test = test_input
Y_test = test_label

print('X train shape')
print(X_train.shape)
print('Y train shape')
print(Y_train.shape)
print('X test shape')
print(X_test.shape)
print('y test shape')
print(Y_test.shape)

#(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32') / 255
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255
Y_train = np_utils.to_categorical(Y_train)
Y_test = np_utils.to_categorical(Y_test)

# 컨볼루션 신경망의 설정
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# 모델 최적화 설정
MODEL_DIR = './model/'
if not os.path.exists(MODEL_DIR):
    os.mkdir(MODEL_DIR)

modelpath="./model/{epoch:02d}-{val_loss:.4f}.hdf5"
checkpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss', verbose=1, save_best_only=True)
early_stopping_callback = EarlyStopping(monitor='val_loss', patience=10)

# 모델의 실행
history = model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=15, batch_size=100, verbose=0, callbacks=[early_stopping_callback,checkpointer])

# 테스트 정확도 출력
print("\n Test Accuracy: %.4f" % (model.evaluate(X_test, Y_test)[1]))

# 테스트 셋의 오차
y_vloss = history.history['val_loss']

# 학습셋의 오차
y_loss = history.history['loss']

# 그래프로 표현
x_len = np.arange(len(y_loss))
plt.plot(x_len, y_vloss, marker='.', c="red", label='Testset_loss')
plt.plot(x_len, y_loss, marker='.', c="blue", label='Trainset_loss')

# 그래프에 그리드를 주고 레이블을 표시
plt.legend(loc='upper right')
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
normal
{ "blob_id": "01339324ad1a11aff062e8b27efabf27c97157fb", "index": 9908, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor index in range(len(train_folder_list)):\n path = os.path.join(TRAIN_DIR, train_folder_list[index])\n path = path + '/'\n img_list = os.listdir(path)\n for img in img_list:\n img_path = os.path.join(path, img)\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n train_input.append([np.array(img)])\n train_label.append([np.array(index)])\n<mask token>\nnp.save('train_data.npy', train_input)\nnp.save('train_label.npy', train_label)\n<mask token>\nfor index in range(len(test_folder_list)):\n path = os.path.join(TEST_DIR, test_folder_list[index])\n path = path + '/'\n img_list = os.listdir(path)\n for img in img_list:\n img_path = os.path.join(path, img)\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n test_input.append([np.array(img)])\n test_label.append([np.array(index)])\n<mask token>\nnp.save('test_input.npy', test_input)\nnp.save('test_label.npy', test_label)\n<mask token>\nnp.random.seed(seed)\ntf.set_random_seed(seed)\n<mask token>\nprint('X train shape')\nprint(X_train.shape)\nprint('Y train shape')\nprint(Y_train.shape)\nprint('X test shape')\nprint(X_test.shape)\nprint('y test shape')\nprint(Y_test.shape)\n<mask token>\nmodel.add(Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1),\n activation='relu'))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\n<mask token>\nif not os.path.exists(MODEL_DIR):\n os.mkdir(MODEL_DIR)\n<mask token>\nprint(\"\"\"\n Test Accuracy: %.4f\"\"\" % model.evaluate(X_test, Y_test)[1])\n<mask token>\nplt.plot(x_len, y_vloss, marker='.', c='red', label='Testset_loss')\nplt.plot(x_len, y_loss, marker='.', c='blue', label='Trainset_loss')\nplt.legend(loc='upper right')\nplt.grid()\nplt.xlabel('epoch')\nplt.ylabel('loss')\nplt.show()\n", "step-3": "<mask token>\nTRAIN_DIR = 'C:/Users/vgg/untitled/MNIST/trainingSet/'\ntrain_folder_list = array(os.listdir(TRAIN_DIR))\ntrain_input = []\ntrain_label = []\nlabel_encoder = LabelEncoder()\ninteger_encoded = label_encoder.fit_transform(train_folder_list)\nonehot_encoder = OneHotEncoder(sparse=False)\ninteger_encoded = integer_encoded.reshape(len(integer_encoded), 1)\nonehot_encoded = onehot_encoder.fit_transform(integer_encoded)\nfor index in range(len(train_folder_list)):\n path = os.path.join(TRAIN_DIR, train_folder_list[index])\n path = path + '/'\n img_list = os.listdir(path)\n for img in img_list:\n img_path = os.path.join(path, img)\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n train_input.append([np.array(img)])\n train_label.append([np.array(index)])\ntrain_input = np.reshape(train_input, (-1, 28, 28))\ntrain_label = np.reshape(train_label, (-1,))\ntrain_input = np.array(train_input).astype(np.float32)\ntrain_label = np.array(train_label).astype(np.float32)\nnp.save('train_data.npy', train_input)\nnp.save('train_label.npy', train_label)\nTEST_DIR = 'C:/Users/vgg/untitled/MNIST/testSet/'\ntest_folder_list = array(os.listdir(TEST_DIR))\ntest_input = []\ntest_label = []\nlabel_encoder = LabelEncoder()\ninteger_encoded = label_encoder.fit_transform(test_folder_list)\nonehot_encoder = OneHotEncoder(sparse=False)\ninteger_encoded = integer_encoded.reshape(len(integer_encoded), 
1)\nonehot_encoded = onehot_encoder.fit_transform(integer_encoded)\nfor index in range(len(test_folder_list)):\n path = os.path.join(TEST_DIR, test_folder_list[index])\n path = path + '/'\n img_list = os.listdir(path)\n for img in img_list:\n img_path = os.path.join(path, img)\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n test_input.append([np.array(img)])\n test_label.append([np.array(index)])\ntest_input = np.reshape(test_input, (-1, 28, 28))\ntest_label = np.reshape(test_label, (-1,))\ntest_input = np.array(test_input).astype(np.float32)\ntest_label = np.array(test_label).astype(np.float32)\nnp.save('test_input.npy', test_input)\nnp.save('test_label.npy', test_label)\n<mask token>\nseed = 0\nnp.random.seed(seed)\ntf.set_random_seed(seed)\nX_train = train_input\nY_train = train_label\nX_test = test_input\nY_test = test_label\nprint('X train shape')\nprint(X_train.shape)\nprint('Y train shape')\nprint(Y_train.shape)\nprint('X test shape')\nprint(X_test.shape)\nprint('y test shape')\nprint(Y_test.shape)\nX_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32') / 255\nX_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255\nY_train = np_utils.to_categorical(Y_train)\nY_test = np_utils.to_categorical(Y_test)\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1),\n activation='relu'))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\nMODEL_DIR = './model/'\nif not os.path.exists(MODEL_DIR):\n os.mkdir(MODEL_DIR)\nmodelpath = './model/{epoch:02d}-{val_loss:.4f}.hdf5'\ncheckpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss',\n verbose=1, save_best_only=True)\nearly_stopping_callback = EarlyStopping(monitor='val_loss', patience=10)\nhistory = model.fit(X_train, Y_train, validation_data=(X_test, Y_test),\n epochs=15, batch_size=100, verbose=0, callbacks=[\n early_stopping_callback, checkpointer])\nprint(\"\"\"\n Test Accuracy: %.4f\"\"\" % model.evaluate(X_test, Y_test)[1])\ny_vloss = history.history['val_loss']\ny_loss = history.history['loss']\nx_len = np.arange(len(y_loss))\nplt.plot(x_len, y_vloss, marker='.', c='red', label='Testset_loss')\nplt.plot(x_len, y_loss, marker='.', c='blue', label='Trainset_loss')\nplt.legend(loc='upper right')\nplt.grid()\nplt.xlabel('epoch')\nplt.ylabel('loss')\nplt.show()\n", "step-4": "import os\nimport cv2\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nfrom numpy import array\nimport tensorflow as tf\nTRAIN_DIR = 'C:/Users/vgg/untitled/MNIST/trainingSet/'\ntrain_folder_list = array(os.listdir(TRAIN_DIR))\ntrain_input = []\ntrain_label = []\nlabel_encoder = LabelEncoder()\ninteger_encoded = label_encoder.fit_transform(train_folder_list)\nonehot_encoder = OneHotEncoder(sparse=False)\ninteger_encoded = integer_encoded.reshape(len(integer_encoded), 1)\nonehot_encoded = onehot_encoder.fit_transform(integer_encoded)\nfor index in range(len(train_folder_list)):\n path = os.path.join(TRAIN_DIR, train_folder_list[index])\n path = path + '/'\n img_list = os.listdir(path)\n for img in img_list:\n img_path = os.path.join(path, img)\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n train_input.append([np.array(img)])\n 
train_label.append([np.array(index)])\ntrain_input = np.reshape(train_input, (-1, 28, 28))\ntrain_label = np.reshape(train_label, (-1,))\ntrain_input = np.array(train_input).astype(np.float32)\ntrain_label = np.array(train_label).astype(np.float32)\nnp.save('train_data.npy', train_input)\nnp.save('train_label.npy', train_label)\nTEST_DIR = 'C:/Users/vgg/untitled/MNIST/testSet/'\ntest_folder_list = array(os.listdir(TEST_DIR))\ntest_input = []\ntest_label = []\nlabel_encoder = LabelEncoder()\ninteger_encoded = label_encoder.fit_transform(test_folder_list)\nonehot_encoder = OneHotEncoder(sparse=False)\ninteger_encoded = integer_encoded.reshape(len(integer_encoded), 1)\nonehot_encoded = onehot_encoder.fit_transform(integer_encoded)\nfor index in range(len(test_folder_list)):\n path = os.path.join(TEST_DIR, test_folder_list[index])\n path = path + '/'\n img_list = os.listdir(path)\n for img in img_list:\n img_path = os.path.join(path, img)\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n test_input.append([np.array(img)])\n test_label.append([np.array(index)])\ntest_input = np.reshape(test_input, (-1, 28, 28))\ntest_label = np.reshape(test_label, (-1,))\ntest_input = np.array(test_input).astype(np.float32)\ntest_label = np.array(test_label).astype(np.float32)\nnp.save('test_input.npy', test_input)\nnp.save('test_label.npy', test_label)\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nimport matplotlib.pyplot as plt\nseed = 0\nnp.random.seed(seed)\ntf.set_random_seed(seed)\nX_train = train_input\nY_train = train_label\nX_test = test_input\nY_test = test_label\nprint('X train shape')\nprint(X_train.shape)\nprint('Y train shape')\nprint(Y_train.shape)\nprint('X test shape')\nprint(X_test.shape)\nprint('y test shape')\nprint(Y_test.shape)\nX_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32') / 255\nX_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255\nY_train = np_utils.to_categorical(Y_train)\nY_test = np_utils.to_categorical(Y_test)\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1),\n activation='relu'))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\nMODEL_DIR = './model/'\nif not os.path.exists(MODEL_DIR):\n os.mkdir(MODEL_DIR)\nmodelpath = './model/{epoch:02d}-{val_loss:.4f}.hdf5'\ncheckpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss',\n verbose=1, save_best_only=True)\nearly_stopping_callback = EarlyStopping(monitor='val_loss', patience=10)\nhistory = model.fit(X_train, Y_train, validation_data=(X_test, Y_test),\n epochs=15, batch_size=100, verbose=0, callbacks=[\n early_stopping_callback, checkpointer])\nprint(\"\"\"\n Test Accuracy: %.4f\"\"\" % model.evaluate(X_test, Y_test)[1])\ny_vloss = history.history['val_loss']\ny_loss = history.history['loss']\nx_len = np.arange(len(y_loss))\nplt.plot(x_len, y_vloss, marker='.', c='red', label='Testset_loss')\nplt.plot(x_len, y_loss, marker='.', c='blue', label='Trainset_loss')\nplt.legend(loc='upper right')\nplt.grid()\nplt.xlabel('epoch')\nplt.ylabel('loss')\nplt.show()\n", 
"step-5": "import os\r\nimport cv2\r\nimport numpy as np\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nfrom numpy import array\r\nimport tensorflow as tf\r\n\r\nTRAIN_DIR = 'C:/Users/vgg/untitled/MNIST/trainingSet/'\r\ntrain_folder_list = array(os.listdir(TRAIN_DIR))\r\n\r\ntrain_input = []\r\ntrain_label = []\r\n\r\nlabel_encoder = LabelEncoder() # LabelEncoder Class 호출\r\ninteger_encoded = label_encoder.fit_transform(train_folder_list)\r\nonehot_encoder = OneHotEncoder(sparse=False)\r\ninteger_encoded = integer_encoded.reshape(len(integer_encoded), 1)\r\nonehot_encoded = onehot_encoder.fit_transform(integer_encoded)\r\n\r\nfor index in range(len(train_folder_list)):\r\n path = os.path.join(TRAIN_DIR, train_folder_list[index])\r\n path = path + '/'\r\n img_list = os.listdir(path)\r\n for img in img_list:\r\n img_path = os.path.join(path, img)\r\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\r\n train_input.append([np.array(img)])\r\n train_label.append([np.array(index)])\r\n\r\ntrain_input = np.reshape(train_input, (-1, 28, 28))\r\ntrain_label = np.reshape(train_label, (-1,))\r\ntrain_input = np.array(train_input).astype(np.float32)\r\ntrain_label = np.array(train_label).astype(np.float32)\r\nnp.save(\"train_data.npy\", train_input)\r\nnp.save(\"train_label.npy\", train_label)\r\n\r\nTEST_DIR = 'C:/Users/vgg/untitled/MNIST/testSet/'\r\ntest_folder_list = array(os.listdir(TEST_DIR))\r\n\r\ntest_input = []\r\ntest_label = []\r\n\r\nlabel_encoder = LabelEncoder()\r\ninteger_encoded = label_encoder.fit_transform(test_folder_list)\r\n\r\nonehot_encoder = OneHotEncoder(sparse=False)\r\ninteger_encoded = integer_encoded.reshape(len(integer_encoded), 1)\r\nonehot_encoded = onehot_encoder.fit_transform(integer_encoded)\r\n\r\nfor index in range(len(test_folder_list)):\r\n path = os.path.join(TEST_DIR, test_folder_list[index])\r\n path = path + '/'\r\n img_list = os.listdir(path)\r\n for img in img_list:\r\n img_path = os.path.join(path, img)\r\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\r\n test_input.append([np.array(img)])\r\n test_label.append([np.array(index)])\r\n\r\ntest_input = np.reshape(test_input, (-1, 28, 28))\r\ntest_label = np.reshape(test_label, (-1,))\r\ntest_input = np.array(test_input).astype(np.float32)\r\ntest_label = np.array(test_label).astype(np.float32)\r\nnp.save(\"test_input.npy\", test_input)\r\nnp.save(\"test_label.npy\", test_label)\r\n\r\n\r\n#-*- coding: utf-8 -*-\r\nfrom keras.datasets import mnist\r\nfrom keras.utils import np_utils\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\r\nfrom keras.callbacks import ModelCheckpoint,EarlyStopping\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\n\r\n# seed 값 설정\r\nseed = 0\r\nnp.random.seed(seed)\r\ntf.set_random_seed(seed)\r\n\r\n# 데이터 불러오기\r\n\r\n# test_input = []\r\n# test_label = []\r\n#\r\n# train_input = []\r\n# train_label = []\r\nX_train = train_input\r\nY_train = train_label\r\nX_test = test_input\r\nY_test = test_label\r\n\r\nprint('X train shape')\r\nprint(X_train.shape)\r\nprint('Y train shape')\r\nprint(Y_train.shape)\r\nprint('X test shape')\r\nprint(X_test.shape)\r\nprint('y test shape')\r\nprint(Y_test.shape)\r\n\r\n#(X_train, Y_train), (X_test, Y_test) = mnist.load_data()\r\nX_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32') / 255\r\nX_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255\r\nY_train = 
np_utils.to_categorical(Y_train)\r\nY_test = np_utils.to_categorical(Y_test)\r\n\r\n# 컨볼루션 신경망의 설정\r\nmodel = Sequential()\r\nmodel.add(Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1), activation='relu'))\r\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=2))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(128, activation='relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(10, activation='softmax'))\r\n\r\nmodel.compile(loss='categorical_crossentropy',\r\n optimizer='adam',\r\n metrics=['accuracy'])\r\n\r\n# 모델 최적화 설정\r\nMODEL_DIR = './model/'\r\nif not os.path.exists(MODEL_DIR):\r\n os.mkdir(MODEL_DIR)\r\n\r\nmodelpath=\"./model/{epoch:02d}-{val_loss:.4f}.hdf5\"\r\ncheckpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss', verbose=1, save_best_only=True)\r\nearly_stopping_callback = EarlyStopping(monitor='val_loss', patience=10)\r\n\r\n# 모델의 실행\r\nhistory = model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=15, batch_size=100, verbose=0, callbacks=[early_stopping_callback,checkpointer])\r\n\r\n# 테스트 정확도 출력\r\nprint(\"\\n Test Accuracy: %.4f\" % (model.evaluate(X_test, Y_test)[1]))\r\n\r\n# 테스트 셋의 오차\r\ny_vloss = history.history['val_loss']\r\n\r\n# 학습셋의 오차\r\ny_loss = history.history['loss']\r\n\r\n# 그래프로 표현\r\nx_len = np.arange(len(y_loss))\r\nplt.plot(x_len, y_vloss, marker='.', c=\"red\", label='Testset_loss')\r\nplt.plot(x_len, y_loss, marker='.', c=\"blue\", label='Trainset_loss')\r\n\r\n# 그래프에 그리드를 주고 레이블을 표시\r\nplt.legend(loc='upper right')\r\nplt.grid()\r\nplt.xlabel('epoch')\r\nplt.ylabel('loss')\r\nplt.show()\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import streamlit as st
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
username=st.text_input ("username")
upload=st.file_uploader("uploadfile",type=['csv'])
button=st.button("submit")
if button==True:
    df=pd.read_csv(upload)
    st.write(df.head())
    fig = plt.figure()
    my = fig.add_subplot(1,1,1)
    my.scatter(df["sepal.length"],df["petal.length"],)
    my.set_xlabel("sepal.length")
    my.set_ylabel("petal.length")
    st.write(fig)
normal
{ "blob_id": "72f1547ea7de78a5fe4b583523e592fa25c0ee77", "index": 2467, "step-1": "<mask token>\n", "step-2": "<mask token>\nif button == True:\n df = pd.read_csv(upload)\n st.write(df.head())\n fig = plt.figure()\n my = fig.add_subplot(1, 1, 1)\n my.scatter(df['sepal.length'], df['petal.length'])\n my.set_xlabel('sepal.length')\n my.set_ylabel('petal.length')\n st.write(fig)\n", "step-3": "<mask token>\nusername = st.text_input('username')\nupload = st.file_uploader('uploadfile', type=['csv'])\nbutton = st.button('submit')\nif button == True:\n df = pd.read_csv(upload)\n st.write(df.head())\n fig = plt.figure()\n my = fig.add_subplot(1, 1, 1)\n my.scatter(df['sepal.length'], df['petal.length'])\n my.set_xlabel('sepal.length')\n my.set_ylabel('petal.length')\n st.write(fig)\n", "step-4": "import streamlit as st\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nusername = st.text_input('username')\nupload = st.file_uploader('uploadfile', type=['csv'])\nbutton = st.button('submit')\nif button == True:\n df = pd.read_csv(upload)\n st.write(df.head())\n fig = plt.figure()\n my = fig.add_subplot(1, 1, 1)\n my.scatter(df['sepal.length'], df['petal.length'])\n my.set_xlabel('sepal.length')\n my.set_ylabel('petal.length')\n st.write(fig)\n", "step-5": "import streamlit as st\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nusername=st.text_input (\"username\")\r\nupload=st.file_uploader(\"uploadfile\",type=['csv'])\r\nbutton=st.button(\"submit\")\r\nif button==True:\r\n df=pd.read_csv(upload)\r\n st.write(df.head())\r\n fig = plt.figure()\r\n my = fig.add_subplot(1,1,1)\r\n my.scatter(df[\"sepal.length\"],df[\"petal.length\"],)\r\n my.set_xlabel(\"sepal.length\")\r\n my.set_ylabel(\"petal.length\")\r\n st.write(fig)\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from datetime import datetime
import time
from functions import weather_scraper

def getData():
    # # run weather_scraper.py to fetch new weather data
    # weather_scraper.getData()

    ## Read in csv file "weather_data.csv"
    weather_data = pd.read_csv("data/weather_data.csv")

    # Grab the current month & hour
    currentMonth = datetime.now().month
    currentHour = datetime.now().hour

    # Determine which month group the current month is [0,5]
    currentMonthGroup = currentMonth // 2

    hoep_data = []
    temp = weather_data.iloc[:,2]

    # Change hour string to number from 0-23
    for i in range(len(temp)):
        weather_data.iloc[i,1] = (currentHour + i) % 24

    # Convert temperature data to HOEP data
    if (currentMonthGroup == 0) :
        hoep_data = temp.apply(lambda x: (2.02887*x + 39.633)/100)
    elif (currentMonthGroup == 1):
        hoep_data = temp.apply(lambda x: (0.453122*x + 19.8276)/100)
    elif (currentMonthGroup == 2):
        hoep_data = temp.apply(lambda x: (1.13665*x - 11.0085)/100)
    elif (currentMonthGroup == 3):
        hoep_data = temp.apply(lambda x: (1.90245*x - 23.2826)/100)
    elif (currentMonthGroup == 4):
        hoep_data = temp.apply(lambda x: (1.39145*x - 8.97971)/100)
    else:
        hoep_data = temp.apply(lambda x: (1.72767*x + 21.3536)/100)

    # Load in the load_data
    load_data = pd.read_excel('data/load_data.xlsx', index_col=0, engine = 'openpyxl')

    # Create loading schedule based on current time of day and month
    load_sched = np.arange(48)

    for i in range(len(temp)):
        load_sched[i] = load_data.iloc[ weather_data.iloc[i,1] , currentMonthGroup]

    WMST = 0.003499

    ## x[0:48] = PCEA
    ## x[48:96] = ESB

    start_time = time.time()

    # Constraints to ensure that ESB falls within limits
    def constraint1(x):
        for i in range(48):
            if (i == 0):
                x[48] = 0
            else:
                x[48+i] = x[48+i-1] + x[i]
        return x[0:48] + x[48:96]

    def constraint2(x):
        for i in range(48):
            if (i == 0):
                x[48] = 0
            else:
                x[48+i] = x[48+i-1] + x[i]
        return 10000 - (x[0:48]+ x[48:96])


    power = ((-5000, 5000),) * 48
    storage = ((0, 10000),) * 48

    #Objective
    def MEC(x): # ( PDLL + PCEA ) x HOEP
        return sum(sum( (load_sched + np.array([x[0:48]])) * (np.array(hoep_data)+WMST) ))

    x0 = np.array([np.ones(48), np.ones(48)])

    bounds = (power + storage)
    cons1 = {'type': 'ineq', 'fun': constraint1}
    cons2 = {'type': 'ineq', 'fun': constraint2}

    cons = ([cons1, cons2])

    sol = minimize(MEC, x0, method='SLSQP',bounds=bounds,constraints=cons,options= {'maxiter':150,'disp':True})

    input_var = {"EA_w_bill": round(sol.fun,2)}

    return input_var
normal
{ "blob_id": "7a1bd2b4734527a414c6173ea8edb150221f8042", "index": 363, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef getData():\n weather_data = pd.read_csv('data/weather_data.csv')\n currentMonth = datetime.now().month\n currentHour = datetime.now().hour\n currentMonthGroup = currentMonth // 2\n hoep_data = []\n temp = weather_data.iloc[:, 2]\n for i in range(len(temp)):\n weather_data.iloc[i, 1] = (currentHour + i) % 24\n if currentMonthGroup == 0:\n hoep_data = temp.apply(lambda x: (2.02887 * x + 39.633) / 100)\n elif currentMonthGroup == 1:\n hoep_data = temp.apply(lambda x: (0.453122 * x + 19.8276) / 100)\n elif currentMonthGroup == 2:\n hoep_data = temp.apply(lambda x: (1.13665 * x - 11.0085) / 100)\n elif currentMonthGroup == 3:\n hoep_data = temp.apply(lambda x: (1.90245 * x - 23.2826) / 100)\n elif currentMonthGroup == 4:\n hoep_data = temp.apply(lambda x: (1.39145 * x - 8.97971) / 100)\n else:\n hoep_data = temp.apply(lambda x: (1.72767 * x + 21.3536) / 100)\n load_data = pd.read_excel('data/load_data.xlsx', index_col=0, engine=\n 'openpyxl')\n load_sched = np.arange(48)\n for i in range(len(temp)):\n load_sched[i] = load_data.iloc[weather_data.iloc[i, 1],\n currentMonthGroup]\n WMST = 0.003499\n start_time = time.time()\n\n def constraint1(x):\n for i in range(48):\n if i == 0:\n x[48] = 0\n else:\n x[48 + i] = x[48 + i - 1] + x[i]\n return x[0:48] + x[48:96]\n\n def constraint2(x):\n for i in range(48):\n if i == 0:\n x[48] = 0\n else:\n x[48 + i] = x[48 + i - 1] + x[i]\n return 10000 - (x[0:48] + x[48:96])\n power = ((-5000, 5000),) * 48\n storage = ((0, 10000),) * 48\n\n def MEC(x):\n return sum(sum((load_sched + np.array([x[0:48]])) * (np.array(\n hoep_data) + WMST)))\n x0 = np.array([np.ones(48), np.ones(48)])\n bounds = power + storage\n cons1 = {'type': 'ineq', 'fun': constraint1}\n cons2 = {'type': 'ineq', 'fun': constraint2}\n cons = [cons1, cons2]\n sol = minimize(MEC, x0, method='SLSQP', bounds=bounds, constraints=cons,\n options={'maxiter': 150, 'disp': True})\n input_var = {'EA_w_bill': round(sol.fun, 2)}\n return input_var\n", "step-3": "import numpy as np\nimport pandas as pd\nfrom scipy.optimize import minimize\nfrom datetime import datetime\nimport time\nfrom functions import weather_scraper\n\n\ndef getData():\n weather_data = pd.read_csv('data/weather_data.csv')\n currentMonth = datetime.now().month\n currentHour = datetime.now().hour\n currentMonthGroup = currentMonth // 2\n hoep_data = []\n temp = weather_data.iloc[:, 2]\n for i in range(len(temp)):\n weather_data.iloc[i, 1] = (currentHour + i) % 24\n if currentMonthGroup == 0:\n hoep_data = temp.apply(lambda x: (2.02887 * x + 39.633) / 100)\n elif currentMonthGroup == 1:\n hoep_data = temp.apply(lambda x: (0.453122 * x + 19.8276) / 100)\n elif currentMonthGroup == 2:\n hoep_data = temp.apply(lambda x: (1.13665 * x - 11.0085) / 100)\n elif currentMonthGroup == 3:\n hoep_data = temp.apply(lambda x: (1.90245 * x - 23.2826) / 100)\n elif currentMonthGroup == 4:\n hoep_data = temp.apply(lambda x: (1.39145 * x - 8.97971) / 100)\n else:\n hoep_data = temp.apply(lambda x: (1.72767 * x + 21.3536) / 100)\n load_data = pd.read_excel('data/load_data.xlsx', index_col=0, engine=\n 'openpyxl')\n load_sched = np.arange(48)\n for i in range(len(temp)):\n load_sched[i] = load_data.iloc[weather_data.iloc[i, 1],\n currentMonthGroup]\n WMST = 0.003499\n start_time = time.time()\n\n def constraint1(x):\n for i in range(48):\n if i == 0:\n x[48] = 0\n else:\n x[48 + i] = x[48 + i - 1] + x[i]\n return x[0:48] + 
x[48:96]\n\n def constraint2(x):\n for i in range(48):\n if i == 0:\n x[48] = 0\n else:\n x[48 + i] = x[48 + i - 1] + x[i]\n return 10000 - (x[0:48] + x[48:96])\n power = ((-5000, 5000),) * 48\n storage = ((0, 10000),) * 48\n\n def MEC(x):\n return sum(sum((load_sched + np.array([x[0:48]])) * (np.array(\n hoep_data) + WMST)))\n x0 = np.array([np.ones(48), np.ones(48)])\n bounds = power + storage\n cons1 = {'type': 'ineq', 'fun': constraint1}\n cons2 = {'type': 'ineq', 'fun': constraint2}\n cons = [cons1, cons2]\n sol = minimize(MEC, x0, method='SLSQP', bounds=bounds, constraints=cons,\n options={'maxiter': 150, 'disp': True})\n input_var = {'EA_w_bill': round(sol.fun, 2)}\n return input_var\n", "step-4": "import numpy as np\nimport pandas as pd\nfrom scipy.optimize import minimize\nfrom datetime import datetime\nimport time\nfrom functions import weather_scraper\n\ndef getData():\n # # run weather_scraper.py to fetch new weather data\n # weather_scraper.getData()\n\n ## Read in csv file \"weather_data.csv\"\n weather_data = pd.read_csv(\"data/weather_data.csv\")\n\n # Grab the current month & hour\n currentMonth = datetime.now().month\n currentHour = datetime.now().hour\n\n # Determine which month group the current month is [0,5]\n currentMonthGroup = currentMonth // 2\n\n hoep_data = []\n temp = weather_data.iloc[:,2]\n\n # Change hour string to number from 0-23\n for i in range(len(temp)): \n weather_data.iloc[i,1] = (currentHour + i) % 24\n\n # Convert temperature data to HOEP data\n if (currentMonthGroup == 0) :\n hoep_data = temp.apply(lambda x: (2.02887*x + 39.633)/100)\n elif (currentMonthGroup == 1):\n hoep_data = temp.apply(lambda x: (0.453122*x + 19.8276)/100)\n elif (currentMonthGroup == 2):\n hoep_data = temp.apply(lambda x: (1.13665*x - 11.0085)/100)\n elif (currentMonthGroup == 3):\n hoep_data = temp.apply(lambda x: (1.90245*x - 23.2826)/100)\n elif (currentMonthGroup == 4): \n hoep_data = temp.apply(lambda x: (1.39145*x - 8.97971)/100)\n else:\n hoep_data = temp.apply(lambda x: (1.72767*x + 21.3536)/100)\n\n # Load in the load_data\n load_data = pd.read_excel('data/load_data.xlsx', index_col=0, engine = 'openpyxl')\n\n # Create loading schedule based on current time of day and month\n load_sched = np.arange(48)\n\n for i in range(len(temp)):\n load_sched[i] = load_data.iloc[ weather_data.iloc[i,1] , currentMonthGroup]\n\n WMST = 0.003499 \n\n ## x[0:48] = PCEA\n ## x[48:96] = ESB\n\n start_time = time.time()\n\n # Constraints to ensure that ESB falls within limits\n def constraint1(x):\n for i in range(48):\n if (i == 0):\n x[48] = 0\n else:\n x[48+i] = x[48+i-1] + x[i]\n return x[0:48] + x[48:96]\n \n def constraint2(x):\n for i in range(48):\n if (i == 0):\n x[48] = 0\n else:\n x[48+i] = x[48+i-1] + x[i]\n return 10000 - (x[0:48]+ x[48:96])\n \n\n power = ((-5000, 5000),) * 48\n storage = ((0, 10000),) * 48\n\n #Objective\n def MEC(x): # ( PDLL + PCEA ) x HOEP\n return sum(sum( (load_sched + np.array([x[0:48]])) * (np.array(hoep_data)+WMST) ))\n\n x0 = np.array([np.ones(48), np.ones(48)])\n\n bounds = (power + storage)\n cons1 = {'type': 'ineq', 'fun': constraint1}\n cons2 = {'type': 'ineq', 'fun': constraint2}\n\n cons = ([cons1, cons2])\n\n sol = minimize(MEC, x0, method='SLSQP',bounds=bounds,constraints=cons,options= {'maxiter':150,'disp':True})\n\n input_var = {\"EA_w_bill\": round(sol.fun,2)}\n\n return input_var\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
alias_macro = {

    "class": "Application",
    "method": "alias_macro",
    "doc": """
    Returns or modifies the macro of a command alias.
    """,

    "syntax": """
    Rhino.AliasMacro (strAlias [, strMacro])
    """,

    "params": {
        0: {
            "name": "alias",
            "optional": False,
            "type_vb": "string",
            "type_string": "str",
            "doc": """
            The name of an existing command alias.
            """
        },
        1: {
            "name": "macro",
            "optional": True,
            "type_vb": "string",
            "type_string": "str",
            "doc": """
            The new macro to run when the alias is executed.
            """
        },
    },

    "returns": {
        0: {
            "type_vb": "String",
            "doc": "If a new macro is not specified, the existing macro if successful."
        },
        1: {
            "type_vb": "String",
            "doc": "If a new macro is specified, the previous macro if successful."
        },
        2: {
            "type_vb": "Null",
            "doc": "If not successful, or on error."
        },
    }

}
normal
{ "blob_id": "1574f034ff9b6ddb785e4c54758b2057009198ed", "index": 7587, "step-1": "<mask token>\n", "step-2": "alias_macro = {'class': 'Application', 'method': 'alias_macro', 'doc':\n \"\"\"\n Returns or modifies the macro of a command alias.\n \"\"\",\n 'syntax': \"\"\"\n Rhino.AliasMacro (strAlias [, strMacro])\n \"\"\",\n 'params': {(0): {'name': 'alias', 'optional': False, 'type_vb':\n 'string', 'type_string': 'str', 'doc':\n \"\"\"\n The name of an existing command alias.\n \"\"\"}, (1\n ): {'name': 'macro', 'optional': True, 'type_vb': 'string',\n 'type_string': 'str', 'doc':\n \"\"\"\n The new macro to run when the alias is executed.\n \"\"\"\n }}, 'returns': {(0): {'type_vb': 'String', 'doc':\n 'If a new macro is not specified, the existing macro if successful.'},\n (1): {'type_vb': 'String', 'doc':\n 'If a new macro is specified, the previous macro if successful.'}, (2):\n {'type_vb': 'Null', 'doc': 'If not successful, or on error.'}}}\n", "step-3": "alias_macro = {\r\n\r\n \"class\": \"Application\",\r\n \"method\": \"alias_macro\",\r\n \"doc\": \"\"\"\r\n Returns or modifies the macro of a command alias.\r\n \"\"\",\r\n\r\n \"syntax\": \"\"\"\r\n Rhino.AliasMacro (strAlias [, strMacro])\r\n \"\"\",\r\n\r\n \"params\": {\r\n 0: {\r\n \"name\": \"alias\",\r\n \"optional\": False,\r\n \"type_vb\": \"string\",\r\n \"type_string\": \"str\",\r\n \"doc\": \"\"\"\r\n The name of an existing command alias.\r\n \"\"\"\r\n },\r\n 1: {\r\n \"name\": \"macro\",\r\n \"optional\": True,\r\n \"type_vb\": \"string\",\r\n \"type_string\": \"str\",\r\n \"doc\": \"\"\"\r\n The new macro to run when the alias is executed.\r\n \"\"\"\r\n },\r\n },\r\n\r\n \"returns\": {\r\n 0: {\r\n \"type_vb\": \"String\",\r\n \"doc\": \"If a new macro is not specified, the existing macro if successful.\"\r\n },\r\n 1: {\r\n \"type_vb\": \"String\",\r\n \"doc\": \"If a new macro is specified, the previous macro if successful.\"\r\n },\r\n 2: {\r\n \"type_vb\": \"Null\",\r\n \"doc\": \"If not successful, or on error.\"\r\n },\r\n }\r\n\r\n}\r\n\r\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from twython import Twython
import random

tweetStr = "None"

#twitter consumer and access information goes here


api = Twython(apiKey,apiSecret,accessToken,accessTokenSecret)

timeline = api.get_user_timeline()
lastEntry = timeline[0]
sid = str(lastEntry['id'])

def searchTwitter(sString, perP):
	return api.search(q=sString, since_id=sid, rpp=perP)

def oneGif(twitSearch, sString, gifName):
	for tweet in twitSearch['statuses']:
		user = tweet["user"]["screen_name"]
		text = tweet['text']
		id = str(tweet['id'])
		print text.encode('utf-8')
		if sString in text.lower():
			statushead = "@" + user + " "
			if "RT" not in text:
				api.create_favorite(id=id)
			photo = open('/home/pi/gifs/' + gifName, 'rb')
			response = api.upload_media(media=photo, media_type='image/gif')
			api.update_status(status=statushead, media_ids=[response['media_id']], in_reply_to_status_id=id)
			tweetStr = statushead + gifName
			return "Tweeted: " + tweetStr

def threeGif(twitSearch, sString, gifOne, gifTwo, gifThree):
	for tweet in twitSearch['statuses']:
		user = tweet["user"]["screen_name"]
		text = tweet['text']
		id = str(tweet['id'])
		print text.encode('utf-8')
		if sString in text.lower():
			statushead = "@" + user + " "
			if "RT" not in text:
				api.create_favorite(id=id)
			number = random.randrange(1,4)
			if number == 1:
				photo = open('/home/pi/gifs/' + gifOne, 'rb')
				response = api.upload_media(media=photo, media_type='image/gif')
				api.update_status(status=statushead, media_ids=[response['media_id']], in_reply_to_status_id=id)
				tweetStr = statushead + gifOne
			if number == 2:
				photo = open('/home/pi/gifs/' + gifTwo, 'rb')
				response = api.upload_media(media=photo, media_type='image/gif')
				api.update_status(status=statushead, media_ids=[response['media_id']], in_reply_to_status_id=id)
				tweetStr = statushead + gifTwo
			if number == 3:
				photo = open('/home/pi/gifs/' + gifThree, 'rb')
				response = api.upload_media(media=photo, media_type='image/gif')
				api.update_status(status=statushead, media_ids=[response['media_id']], in_reply_to_status_id=id)
				tweetStr = statushead + gifThree
			return "Tweeted: " + tweetStr
normal
{ "blob_id": "88e1eb4cbfe346c663cca23836c23346e18a8488", "index": 7444, "step-1": "\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nfrom twython import Twython\nimport random\n\ntweetStr = \"None\"\n\n#twitter consumer and access information goes here\n\n\napi = Twython(apiKey,apiSecret,accessToken,accessTokenSecret)\n\ntimeline = api.get_user_timeline()\nlastEntry = timeline[0]\nsid = str(lastEntry['id'])\n\ndef searchTwitter(sString, perP):\n\treturn api.search(q=sString, since_id=sid, rpp=perP)\n\ndef oneGif(twitSearch, sString, gifName):\n\tfor tweet in twitSearch['statuses']:\n\t\tuser = tweet[\"user\"][\"screen_name\"]\n\t\ttext = tweet['text']\n\t\tid = str(tweet['id'])\n\t\tprint text.encode('utf-8')\n\t\tif sString in text.lower():\n\t\t\tstatushead = \"@\" + user + \" \"\n\t\t\tif \"RT\" not in text:\n\t\t\t\tapi.create_favorite(id=id)\n\t\t\tphoto = open('/home/pi/gifs/' + gifName, 'rb')\n\t\t\tresponse = api.upload_media(media=photo, media_type='image/gif')\n\t\t\tapi.update_status(status=statushead, media_ids=[response['media_id']], in_reply_to_status_id=id)\n\t\t\ttweetStr = statushead + gifName\n\t\t\treturn \"Tweeted: \" + tweetStr\n\ndef threeGif(twitSearch, sString, gifOne, gifTwo, gifThree):\n\tfor tweet in twitSearch['statuses']:\n\t\tuser = tweet[\"user\"][\"screen_name\"]\n\t\ttext = tweet['text']\n\t\tid = str(tweet['id'])\n\t\tprint text.encode('utf-8')\n\t\tif sString in text.lower():\n\t\t\tstatushead = \"@\" + user + \" \"\n\t\t\tif \"RT\" not in text:\n\t\t\t\tapi.create_favorite(id=id)\n\t\t\tnumber = random.randrange(1,4)\n\t\t\tif number == 1:\n\t\t\t\tphoto = open('/home/pi/gifs/' + gifOne, 'rb')\n\t\t\t\tresponse = api.upload_media(media=photo, media_type='image/gif')\n\t\t\t\tapi.update_status(status=statushead, media_ids=[response['media_id']], in_reply_to_status_id=id)\n\t\t\t\ttweetStr = statushead + gifOne\n\t\t\tif number == 2:\n\t\t\t\tphoto = open('/home/pi/gifs/' + gifTwo, 'rb')\n\t\t\t\tresponse = api.upload_media(media=photo, media_type='image/gif')\n\t\t\t\tapi.update_status(status=statushead, media_ids=[response['media_id']], in_reply_to_status_id=id)\n\t\t\t\ttweetStr = statushead + gifTwo\n\t\t\tif number == 3:\n\t\t\t\tphoto = open('/home/pi/gifs/' + gifThree, 'rb')\n\t\t\t\tresponse = api.upload_media(media=photo, media_type='image/gif')\n\t\t\t\tapi.update_status(status=statushead, media_ids=[response['media_id']], in_reply_to_status_id=id)\n\t\t\t\ttweetStr = statushead + gifThree\n\t\t\treturn \"Tweeted: \" + tweetStr\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import numpy as np

class LinearRegressor():
    def __init__(self, alpha=0.1, epochs=1):
        self.alpha = alpha
        self.epochs = epochs
        self.costs = []
        self.theta = None

    def _cost_function(self, y_pred, y, m):
        """
        Gets the cost for the predicted values when contrasted with the correct ones.
        y_pred: An (1 x m) vector that corresponds to the values predicted by the Linear Regressor
        y: An (1 x m) vector that corresponds to the y (right) values in the dataset
        m: the number of samples (it could be also inferred from the shape of y or y_pred)

        TODO: You must implement the cost function and return an scalar that corresponds to the error produced by the Linear Regressor with its current configuration
        """
        sumatory = 0
        for x in range(m):
            sumatory += (y_pred[0][x] -y[0][x])**2
        cost = 1/(2*m) * sumatory
        return cost

    def _hypothesis(self, X):
        """
        Calculates the hypothesis for the given examples using the current self.theta values.
        X: an m x n array of m samples/examples with n features each.
        Creo que X es en realidad nxm
        transpose de theta es 1xn y * nxm = 1xm

        TODO: you must return a (1 x m) array, which corresponds to the estimated value for each of the m samples
        """
        # * is element wise multiplication
        # numpy.dot(), or @ operator will work
        result = np.transpose(self.theta)@ X
        #emptyResult = np.zeros((1,X.shape[1]))
        return result

    def _cost_function_derivative(self, y_pred, y, X, m):
        """
        Calculates the derivatives (gradient) of the cost function through the obtained/predicted values.
        y_pred: an (1 x m) array with the predicted values for X dataset
        y: an (1 x m) array with the right values for X dataset
        X: the input dataset
        m: the number of samples in the dataset

        TODO: You must implement the calculation of derivatives. An (n x 1) array that corresponds to the gradient of current theta values (the derivative per theta parameter) must be returned.
        """
        derivatives= np.zeros((X.shape[0],1))
        for j in range(X.shape[0]):
            auxsum = 0
            for i in range(m):
                auxsum+=(y_pred[0][i] -y[0][i])*X[j][i]
            derivatives[j][0] = self.theta[j][0] - self.alpha * 1/m * auxsum
        #empty_derivatives = np.zeros((X.shape[0],1))
        return derivatives

    def fit(self, X, y):
        """
        Fits the linear regressor to the values in the dataset
        X: is an (n x m) vector, where n is the number of features and m is the number of samples/examples
        y: is an (1 x m) vector, where m is the number of samples/examples

        TODO: You need to provide an implementation that in each epoch is updating the values for the theta parameters by using the hypothesis and cost function functions
        """
        n, m = X.shape[0], X.shape[1]

        # theta is (nx1) (one theta per dimension)
        self.theta = np.random.uniform(-10, 10, (n, 1))

        for i in range(self.epochs):
            # Get predictions
            y_pred = self.predict(X)
            # calculate cost
            # cost = ...
            cost = self._cost_function(y_pred, y, m)
            # gradient is an (n) x 1 array, it refers to the derivate per theta
            gradient = self._cost_function_derivative(y_pred, y, X, m)
            # delta/update rule
            self.theta = gradient
            self.costs.append(cost)
            pass

        print("Final theta is {} (cost: {})".format(self.theta.T, cost))

    def predict(self, X):
        """
        Predicts the values for the given X samples using the current configuration of the Linear Regressor.
        X: an (n x m') array with m' samples of n dimensions whose value must be predicted.

        TODO: You must return a (1 x m') array that includes the predictions for the given m' samples.
        """
        # ! You could simply call the hypothesis here
        predictions= self._hypothesis(X)
        #empty_predictions = np.zeros((1,X.shape[1]))
        return predictions
normal
{ "blob_id": "d805a1290c107a8d768417a432e338b182b7cd6b", "index": 5524, "step-1": "<mask token>\n\n\nclass LinearRegressor:\n <mask token>\n\n def _cost_function(self, y_pred, y, m):\n \"\"\"\n Gets the cost for the predicted values when contrasted with the correct ones.\n y_pred: An (1 x m) vector that corresponds to the values predicted by the Linear Regressor\n y: An (1 x m) vector that corresponds to the y (right) values in the dataset\n m: the number of samples (it could be also inferred from the shape of y or y_pred)\n\n TODO: You must implement the cost function and return an scalar that corresponds to the error produced by the Linear Regressor with its current configuration\n \"\"\"\n sumatory = 0\n for x in range(m):\n sumatory += (y_pred[0][x] - y[0][x]) ** 2\n cost = 1 / (2 * m) * sumatory\n return cost\n <mask token>\n\n def _cost_function_derivative(self, y_pred, y, X, m):\n \"\"\"\n Calculates the derivatives (gradient) of the cost function through the obtained/predicted values.\n y_pred: an (1 x m) array with the predicted values for X dataset\n y: an (1 x m) array with the right values for X dataset\n X: the input dataset\n m: the number of samples in the dataset\n\n TODO: You must implement the calculation of derivatives. An (n x 1) array that corresponds to the gradient of current theta values (the derivative per theta parameter) must be returned.\n \"\"\"\n derivatives = np.zeros((X.shape[0], 1))\n for j in range(X.shape[0]):\n auxsum = 0\n for i in range(m):\n auxsum += (y_pred[0][i] - y[0][i]) * X[j][i]\n derivatives[j][0] = self.theta[j][0] - self.alpha * 1 / m * auxsum\n return derivatives\n\n def fit(self, X, y):\n \"\"\"\n Fits the linear regressor to the values in the dataset\n X: is an (n x m) vector, where n is the number of features and m is the number of samples/examples\n y: is an (1 x m) vector, where m is the number of samples/examples\n\n TODO: You need to provide an implementation that in each epoch is updating the values for the theta parameters by using the hypothesis and cost function functions\n \"\"\"\n n, m = X.shape[0], X.shape[1]\n self.theta = np.random.uniform(-10, 10, (n, 1))\n for i in range(self.epochs):\n y_pred = self.predict(X)\n cost = self._cost_function(y_pred, y, m)\n gradient = self._cost_function_derivative(y_pred, y, X, m)\n self.theta = gradient\n self.costs.append(cost)\n pass\n print('Final theta is {} (cost: {})'.format(self.theta.T, cost))\n <mask token>\n", "step-2": "<mask token>\n\n\nclass LinearRegressor:\n <mask token>\n\n def _cost_function(self, y_pred, y, m):\n \"\"\"\n Gets the cost for the predicted values when contrasted with the correct ones.\n y_pred: An (1 x m) vector that corresponds to the values predicted by the Linear Regressor\n y: An (1 x m) vector that corresponds to the y (right) values in the dataset\n m: the number of samples (it could be also inferred from the shape of y or y_pred)\n\n TODO: You must implement the cost function and return an scalar that corresponds to the error produced by the Linear Regressor with its current configuration\n \"\"\"\n sumatory = 0\n for x in range(m):\n sumatory += (y_pred[0][x] - y[0][x]) ** 2\n cost = 1 / (2 * m) * sumatory\n return cost\n\n def _hypothesis(self, X):\n \"\"\"\n Calculates the hypothesis for the given examples using the current self.theta values.\n X: an m x n array of m samples/examples with n features each.\n Creo que X es en realidad nxm\n transpose de theta es 1xn y * nxm = 1xm\n\n TODO: you must return a (1 x m) array, which corresponds to the 
estimated value for each of the m samples\n \"\"\"\n result = np.transpose(self.theta) @ X\n return result\n\n def _cost_function_derivative(self, y_pred, y, X, m):\n \"\"\"\n Calculates the derivatives (gradient) of the cost function through the obtained/predicted values.\n y_pred: an (1 x m) array with the predicted values for X dataset\n y: an (1 x m) array with the right values for X dataset\n X: the input dataset\n m: the number of samples in the dataset\n\n TODO: You must implement the calculation of derivatives. An (n x 1) array that corresponds to the gradient of current theta values (the derivative per theta parameter) must be returned.\n \"\"\"\n derivatives = np.zeros((X.shape[0], 1))\n for j in range(X.shape[0]):\n auxsum = 0\n for i in range(m):\n auxsum += (y_pred[0][i] - y[0][i]) * X[j][i]\n derivatives[j][0] = self.theta[j][0] - self.alpha * 1 / m * auxsum\n return derivatives\n\n def fit(self, X, y):\n \"\"\"\n Fits the linear regressor to the values in the dataset\n X: is an (n x m) vector, where n is the number of features and m is the number of samples/examples\n y: is an (1 x m) vector, where m is the number of samples/examples\n\n TODO: You need to provide an implementation that in each epoch is updating the values for the theta parameters by using the hypothesis and cost function functions\n \"\"\"\n n, m = X.shape[0], X.shape[1]\n self.theta = np.random.uniform(-10, 10, (n, 1))\n for i in range(self.epochs):\n y_pred = self.predict(X)\n cost = self._cost_function(y_pred, y, m)\n gradient = self._cost_function_derivative(y_pred, y, X, m)\n self.theta = gradient\n self.costs.append(cost)\n pass\n print('Final theta is {} (cost: {})'.format(self.theta.T, cost))\n <mask token>\n", "step-3": "<mask token>\n\n\nclass LinearRegressor:\n\n def __init__(self, alpha=0.1, epochs=1):\n self.alpha = alpha\n self.epochs = epochs\n self.costs = []\n self.theta = None\n\n def _cost_function(self, y_pred, y, m):\n \"\"\"\n Gets the cost for the predicted values when contrasted with the correct ones.\n y_pred: An (1 x m) vector that corresponds to the values predicted by the Linear Regressor\n y: An (1 x m) vector that corresponds to the y (right) values in the dataset\n m: the number of samples (it could be also inferred from the shape of y or y_pred)\n\n TODO: You must implement the cost function and return an scalar that corresponds to the error produced by the Linear Regressor with its current configuration\n \"\"\"\n sumatory = 0\n for x in range(m):\n sumatory += (y_pred[0][x] - y[0][x]) ** 2\n cost = 1 / (2 * m) * sumatory\n return cost\n\n def _hypothesis(self, X):\n \"\"\"\n Calculates the hypothesis for the given examples using the current self.theta values.\n X: an m x n array of m samples/examples with n features each.\n Creo que X es en realidad nxm\n transpose de theta es 1xn y * nxm = 1xm\n\n TODO: you must return a (1 x m) array, which corresponds to the estimated value for each of the m samples\n \"\"\"\n result = np.transpose(self.theta) @ X\n return result\n\n def _cost_function_derivative(self, y_pred, y, X, m):\n \"\"\"\n Calculates the derivatives (gradient) of the cost function through the obtained/predicted values.\n y_pred: an (1 x m) array with the predicted values for X dataset\n y: an (1 x m) array with the right values for X dataset\n X: the input dataset\n m: the number of samples in the dataset\n\n TODO: You must implement the calculation of derivatives. 
An (n x 1) array that corresponds to the gradient of current theta values (the derivative per theta parameter) must be returned.\n \"\"\"\n derivatives = np.zeros((X.shape[0], 1))\n for j in range(X.shape[0]):\n auxsum = 0\n for i in range(m):\n auxsum += (y_pred[0][i] - y[0][i]) * X[j][i]\n derivatives[j][0] = self.theta[j][0] - self.alpha * 1 / m * auxsum\n return derivatives\n\n def fit(self, X, y):\n \"\"\"\n Fits the linear regressor to the values in the dataset\n X: is an (n x m) vector, where n is the number of features and m is the number of samples/examples\n y: is an (1 x m) vector, where m is the number of samples/examples\n\n TODO: You need to provide an implementation that in each epoch is updating the values for the theta parameters by using the hypothesis and cost function functions\n \"\"\"\n n, m = X.shape[0], X.shape[1]\n self.theta = np.random.uniform(-10, 10, (n, 1))\n for i in range(self.epochs):\n y_pred = self.predict(X)\n cost = self._cost_function(y_pred, y, m)\n gradient = self._cost_function_derivative(y_pred, y, X, m)\n self.theta = gradient\n self.costs.append(cost)\n pass\n print('Final theta is {} (cost: {})'.format(self.theta.T, cost))\n <mask token>\n", "step-4": "import numpy as np\n\n\nclass LinearRegressor:\n\n def __init__(self, alpha=0.1, epochs=1):\n self.alpha = alpha\n self.epochs = epochs\n self.costs = []\n self.theta = None\n\n def _cost_function(self, y_pred, y, m):\n \"\"\"\n Gets the cost for the predicted values when contrasted with the correct ones.\n y_pred: An (1 x m) vector that corresponds to the values predicted by the Linear Regressor\n y: An (1 x m) vector that corresponds to the y (right) values in the dataset\n m: the number of samples (it could be also inferred from the shape of y or y_pred)\n\n TODO: You must implement the cost function and return an scalar that corresponds to the error produced by the Linear Regressor with its current configuration\n \"\"\"\n sumatory = 0\n for x in range(m):\n sumatory += (y_pred[0][x] - y[0][x]) ** 2\n cost = 1 / (2 * m) * sumatory\n return cost\n\n def _hypothesis(self, X):\n \"\"\"\n Calculates the hypothesis for the given examples using the current self.theta values.\n X: an m x n array of m samples/examples with n features each.\n Creo que X es en realidad nxm\n transpose de theta es 1xn y * nxm = 1xm\n\n TODO: you must return a (1 x m) array, which corresponds to the estimated value for each of the m samples\n \"\"\"\n result = np.transpose(self.theta) @ X\n return result\n\n def _cost_function_derivative(self, y_pred, y, X, m):\n \"\"\"\n Calculates the derivatives (gradient) of the cost function through the obtained/predicted values.\n y_pred: an (1 x m) array with the predicted values for X dataset\n y: an (1 x m) array with the right values for X dataset\n X: the input dataset\n m: the number of samples in the dataset\n\n TODO: You must implement the calculation of derivatives. 
An (n x 1) array that corresponds to the gradient of current theta values (the derivative per theta parameter) must be returned.\n \"\"\"\n derivatives = np.zeros((X.shape[0], 1))\n for j in range(X.shape[0]):\n auxsum = 0\n for i in range(m):\n auxsum += (y_pred[0][i] - y[0][i]) * X[j][i]\n derivatives[j][0] = self.theta[j][0] - self.alpha * 1 / m * auxsum\n return derivatives\n\n def fit(self, X, y):\n \"\"\"\n Fits the linear regressor to the values in the dataset\n X: is an (n x m) vector, where n is the number of features and m is the number of samples/examples\n y: is an (1 x m) vector, where m is the number of samples/examples\n\n TODO: You need to provide an implementation that in each epoch is updating the values for the theta parameters by using the hypothesis and cost function functions\n \"\"\"\n n, m = X.shape[0], X.shape[1]\n self.theta = np.random.uniform(-10, 10, (n, 1))\n for i in range(self.epochs):\n y_pred = self.predict(X)\n cost = self._cost_function(y_pred, y, m)\n gradient = self._cost_function_derivative(y_pred, y, X, m)\n self.theta = gradient\n self.costs.append(cost)\n pass\n print('Final theta is {} (cost: {})'.format(self.theta.T, cost))\n\n def predict(self, X):\n \"\"\"\n Predicts the values for the given X samples using the current configuration of the Linear Regressor.\n\n X: an (n x m') array with m' samples of n dimensions whose value must be predicted.\n\n TODO: You must return a (1 x m') array that includes the predictions for the given m' samples.\n \"\"\"\n predictions = self._hypothesis(X)\n return predictions\n", "step-5": "import numpy as np\n\n\nclass LinearRegressor():\n def __init__(self, alpha=0.1, epochs=1):\n self.alpha = alpha\n self.epochs = epochs\n self.costs = []\n self.theta = None\n\n def _cost_function(self, y_pred, y, m):\n \"\"\"\n Gets the cost for the predicted values when contrasted with the correct ones.\n y_pred: An (1 x m) vector that corresponds to the values predicted by the Linear Regressor\n y: An (1 x m) vector that corresponds to the y (right) values in the dataset\n m: the number of samples (it could be also inferred from the shape of y or y_pred)\n\n TODO: You must implement the cost function and return an scalar that corresponds to the error produced by the Linear Regressor with its current configuration\n \"\"\"\n sumatory = 0\n for x in range(m):\n sumatory += (y_pred[0][x] -y[0][x])**2\n\n cost = 1/(2*m) * sumatory\n return cost\n\n\n def _hypothesis(self, X):\n \"\"\"\n Calculates the hypothesis for the given examples using the current self.theta values.\n X: an m x n array of m samples/examples with n features each.\n Creo que X es en realidad nxm\n transpose de theta es 1xn y * nxm = 1xm\n\n TODO: you must return a (1 x m) array, which corresponds to the estimated value for each of the m samples\n \"\"\"\n # * is element wise multiplication\n # numpy.dot(), or @ operator will work\n result = np.transpose(self.theta)@ X \n #emptyResult = np.zeros((1,X.shape[1]))\n return result \n\n def _cost_function_derivative(self, y_pred, y, X, m):\n \"\"\"\n Calculates the derivatives (gradient) of the cost function through the obtained/predicted values.\n y_pred: an (1 x m) array with the predicted values for X dataset\n y: an (1 x m) array with the right values for X dataset\n X: the input dataset\n m: the number of samples in the dataset\n\n TODO: You must implement the calculation of derivatives. 
An (n x 1) array that corresponds to the gradient of current theta values (the derivative per theta parameter) must be returned.\n \"\"\"\n\n derivatives= np.zeros((X.shape[0],1))\n for j in range(X.shape[0]):\n auxsum = 0\n for i in range(m):\n auxsum+=(y_pred[0][i] -y[0][i])*X[j][i]\n derivatives[j][0] = self.theta[j][0] - self.alpha * 1/m * auxsum\n\n #empty_derivatives = np.zeros((X.shape[0],1))\n return derivatives\n\n def fit(self, X, y):\n \"\"\"\n Fits the linear regressor to the values in the dataset\n X: is an (n x m) vector, where n is the number of features and m is the number of samples/examples\n y: is an (1 x m) vector, where m is the number of samples/examples\n\n TODO: You need to provide an implementation that in each epoch is updating the values for the theta parameters by using the hypothesis and cost function functions\n \"\"\"\n\n n, m = X.shape[0], X.shape[1]\n\n # theta is (nx1) (one theta per dimension)\n self.theta = np.random.uniform(-10, 10, (n, 1))\n\n for i in range(self.epochs):\n # Get predictions\n y_pred = self.predict(X)\n\n # calculate cost\n # cost = ...\n cost = self._cost_function(y_pred, y, m)\n \n\n # gradient is an (n) x 1 array, it refers to the derivate per theta\n gradient = self._cost_function_derivative(y_pred, y, X, m)\n\n # delta/update rule\n self.theta = gradient\n\n self.costs.append(cost)\n pass\n\n print(\"Final theta is {} (cost: {})\".format(self.theta.T, cost))\n\n def predict(self, X):\n \"\"\"\n Predicts the values for the given X samples using the current configuration of the Linear Regressor.\n\n X: an (n x m') array with m' samples of n dimensions whose value must be predicted.\n\n TODO: You must return a (1 x m') array that includes the predictions for the given m' samples.\n \"\"\"\n # ! You could simply call the hypothesis here\n predictions= self._hypothesis(X)\n #empty_predictions = np.zeros((1,X.shape[1]))\n return predictions", "step-ids": [ 4, 5, 6, 8, 9 ] }
[ 4, 5, 6, 8, 9 ]
#!/usr/bin/env python3
"""
https://www.hackerrank.com/challenges/triangle-quest-2

INPUT:
    integer N
    where 0 < N < 10

OUTPUT:
    print palindromic triangle of size N

    e.g. for N=5
1
121
12321
1234321
123454321

"""
for i in range(1, int(input()) + 1):
    # print 1..i-1 ascending, then i..1 descending to form the palindromic row
    j = 1
    while j < i:
        print(j, end='')
        j += 1

    while i > 0:
        print(i, end='')
        i -= 1
    print()
normal
{ "blob_id": "94cbd9554e3326897147dc417d9fc8f91974786a", "index": 5098, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(1, int(input()) + 1):\n j = 1\n while j < i:\n print(j, end='')\n j += 1\n while i > 0:\n print(i, end='')\n i -= 1\n print()\n", "step-3": "#!/bin/env python3\n\"\"\"\nhttps://www.hackerrank.com/challenges/triangle-quest-2\n\nINPUT:\n integer N\n where 0 < N < 10\n\nOUTPUT:\n print palindromic triangle of size N\n\n e.g.for N=5\n1\n121\n12321\n1234321\n123454321\n\n\"\"\"\nfor i in range(1, int(input()) + 1):\n j = 1\n while j < i:\n print(j,end='')\n j += 1\n\n while i > 0:\n print(i,end='')\n i -= 1\n print()\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import torch import torch.multiprocessing as mp import random class QManeger(object): def __init__(self, opt, q_trace, q_batch): self.traces_s = [] self.traces_a = [] self.traces_r = [] self.lock = mp.Lock() self.q_trace = q_trace self.q_batch = q_batch self.opt = opt self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def _push_one(self, state, action, reward): self.traces_s.append(state) self.traces_a.append(action) self.traces_r.append(reward) def listening(self): while True: traces = self.q_trace.get(block=True) for s, a, r in zip(traces[0], traces[1], traces[2]): self._push_one(s, a, r) if len(self.traces_s) > self.opt.batch_size: self.produce_batch() def produce_batch(self): batch_size = self.opt.batch_size res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:batch_size], \ self.traces_r[:batch_size] # delete del self.traces_s[:batch_size] del self.traces_a[:batch_size] del self.traces_r[:batch_size] res_s = torch.FloatTensor(res_s).to(self.device) res_a = torch.LongTensor(res_a).to(self.device) res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1) # stack batch and put self.q_batch.put((res_s, res_a, res_r))
normal
{ "blob_id": "b693cc63e2ee4c994ef7b5e44faea99f15a021f6", "index": 68, "step-1": "<mask token>\n\n\nclass QManeger(object):\n <mask token>\n <mask token>\n\n def listening(self):\n while True:\n traces = self.q_trace.get(block=True)\n for s, a, r in zip(traces[0], traces[1], traces[2]):\n self._push_one(s, a, r)\n if len(self.traces_s) > self.opt.batch_size:\n self.produce_batch()\n <mask token>\n", "step-2": "<mask token>\n\n\nclass QManeger(object):\n\n def __init__(self, opt, q_trace, q_batch):\n self.traces_s = []\n self.traces_a = []\n self.traces_r = []\n self.lock = mp.Lock()\n self.q_trace = q_trace\n self.q_batch = q_batch\n self.opt = opt\n self.device = torch.device('cuda' if torch.cuda.is_available() else\n 'cpu')\n <mask token>\n\n def listening(self):\n while True:\n traces = self.q_trace.get(block=True)\n for s, a, r in zip(traces[0], traces[1], traces[2]):\n self._push_one(s, a, r)\n if len(self.traces_s) > self.opt.batch_size:\n self.produce_batch()\n\n def produce_batch(self):\n batch_size = self.opt.batch_size\n res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:\n batch_size], self.traces_r[:batch_size]\n del self.traces_s[:batch_size]\n del self.traces_a[:batch_size]\n del self.traces_r[:batch_size]\n res_s = torch.FloatTensor(res_s).to(self.device)\n res_a = torch.LongTensor(res_a).to(self.device)\n res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)\n self.q_batch.put((res_s, res_a, res_r))\n", "step-3": "<mask token>\n\n\nclass QManeger(object):\n\n def __init__(self, opt, q_trace, q_batch):\n self.traces_s = []\n self.traces_a = []\n self.traces_r = []\n self.lock = mp.Lock()\n self.q_trace = q_trace\n self.q_batch = q_batch\n self.opt = opt\n self.device = torch.device('cuda' if torch.cuda.is_available() else\n 'cpu')\n\n def _push_one(self, state, action, reward):\n self.traces_s.append(state)\n self.traces_a.append(action)\n self.traces_r.append(reward)\n\n def listening(self):\n while True:\n traces = self.q_trace.get(block=True)\n for s, a, r in zip(traces[0], traces[1], traces[2]):\n self._push_one(s, a, r)\n if len(self.traces_s) > self.opt.batch_size:\n self.produce_batch()\n\n def produce_batch(self):\n batch_size = self.opt.batch_size\n res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:\n batch_size], self.traces_r[:batch_size]\n del self.traces_s[:batch_size]\n del self.traces_a[:batch_size]\n del self.traces_r[:batch_size]\n res_s = torch.FloatTensor(res_s).to(self.device)\n res_a = torch.LongTensor(res_a).to(self.device)\n res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)\n self.q_batch.put((res_s, res_a, res_r))\n", "step-4": "import torch\nimport torch.multiprocessing as mp\nimport random\n\n\nclass QManeger(object):\n\n def __init__(self, opt, q_trace, q_batch):\n self.traces_s = []\n self.traces_a = []\n self.traces_r = []\n self.lock = mp.Lock()\n self.q_trace = q_trace\n self.q_batch = q_batch\n self.opt = opt\n self.device = torch.device('cuda' if torch.cuda.is_available() else\n 'cpu')\n\n def _push_one(self, state, action, reward):\n self.traces_s.append(state)\n self.traces_a.append(action)\n self.traces_r.append(reward)\n\n def listening(self):\n while True:\n traces = self.q_trace.get(block=True)\n for s, a, r in zip(traces[0], traces[1], traces[2]):\n self._push_one(s, a, r)\n if len(self.traces_s) > self.opt.batch_size:\n self.produce_batch()\n\n def produce_batch(self):\n batch_size = self.opt.batch_size\n res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:\n batch_size], 
self.traces_r[:batch_size]\n del self.traces_s[:batch_size]\n del self.traces_a[:batch_size]\n del self.traces_r[:batch_size]\n res_s = torch.FloatTensor(res_s).to(self.device)\n res_a = torch.LongTensor(res_a).to(self.device)\n res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)\n self.q_batch.put((res_s, res_a, res_r))\n", "step-5": "import torch\nimport torch.multiprocessing as mp\nimport random\n\nclass QManeger(object):\n\n def __init__(self, opt, q_trace, q_batch):\n self.traces_s = []\n self.traces_a = []\n self.traces_r = []\n self.lock = mp.Lock()\n\n self.q_trace = q_trace\n self.q_batch = q_batch\n self.opt = opt\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n def _push_one(self, state, action, reward):\n self.traces_s.append(state)\n self.traces_a.append(action)\n self.traces_r.append(reward)\n\n def listening(self):\n while True:\n traces = self.q_trace.get(block=True)\n for s, a, r in zip(traces[0], traces[1], traces[2]):\n self._push_one(s, a, r)\n\n if len(self.traces_s) > self.opt.batch_size:\n self.produce_batch()\n\n def produce_batch(self):\n batch_size = self.opt.batch_size\n\n res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:batch_size], \\\n self.traces_r[:batch_size]\n\n # delete\n del self.traces_s[:batch_size]\n del self.traces_a[:batch_size]\n del self.traces_r[:batch_size]\n\n res_s = torch.FloatTensor(res_s).to(self.device)\n res_a = torch.LongTensor(res_a).to(self.device)\n res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)\n\n # stack batch and put\n self.q_batch.put((res_s, res_a, res_r))\n", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
#!/usr/bin/env python # Core Library modules import os # Third party modules import nose # First party modules import lumixmaptool.copy as copy # Tests def get_parser_test(): """Check if the evaluation model returns a parser object.""" copy.get_parser() def parse_mapdata_test(): current_folder = os.path.dirname(os.path.realpath(__file__)) misc_folder = os.path.join(current_folder, "misc") maplistdata_path = os.path.join(misc_folder, "MapList.dat") result = copy.parse_mapdata(maplistdata_path) expected = { "num1": "00010001", "num2": "00010001", "regions": { 1: [ "BACK/B0000035.DFT", "BACK/B0000036.DFT", "BACK/B0000044.DFT", "BACK/B0000045.DFT", "BACK/B0000053.DFT", "BACK/B0000054.DFT", "NAME/N0000035.DFT", "NAME/N0000036.DFT", "NAME/N0000044.DFT", "NAME/N0000045.DFT", "NAME/N0000053.DFT", "NAME/N0000054.DFT", "POI/P0000035.DFT", "POI/P0000036.DFT", "POI/P0000044.DFT", "POI/P0000045.DFT", "POI/P0000053.DFT", "POI/P0000054.DFT", ], 2: [ "BACK/B0000024.DFT", "BACK/B0000025.DFT", "BACK/B0000026.DFT", "BACK/B0000027.DFT", "BACK/B0000033.DFT", "BACK/B0000034.DFT", "BACK/B0000035.DFT", "BACK/B0000036.DFT", "BACK/B0000042.DFT", "BACK/B0000043.DFT", "BACK/B0000044.DFT", "BACK/B0000045.DFT", "NAME/N0000024.DFT", "NAME/N0000025.DFT", "NAME/N0000026.DFT", "NAME/N0000027.DFT", "NAME/N0000033.DFT", "NAME/N0000034.DFT", "NAME/N0000035.DFT", "NAME/N0000036.DFT", "NAME/N0000042.DFT", "NAME/N0000043.DFT", "NAME/N0000044.DFT", "NAME/N0000045.DFT", "POI/P0000024.DFT", "POI/P0000025.DFT", "POI/P0000026.DFT", "POI/P0000027.DFT", "POI/P0000033.DFT", "POI/P0000034.DFT", "POI/P0000035.DFT", "POI/P0000036.DFT", "POI/P0000042.DFT", "POI/P0000043.DFT", "POI/P0000044.DFT", "POI/P0000045.DFT", ], 3: [ "BACK/B0000001.DFT", "BACK/B0000008.DFT", "BACK/B0000009.DFT", "BACK/B0000010.DFT", "BACK/B0000017.DFT", "BACK/B0000018.DFT", "BACK/B0000019.DFT", "BACK/B0000026.DFT", "BACK/B0000027.DFT", "NAME/N0000001.DFT", "NAME/N0000008.DFT", "NAME/N0000009.DFT", "NAME/N0000010.DFT", "NAME/N0000017.DFT", "NAME/N0000018.DFT", "NAME/N0000019.DFT", "NAME/N0000026.DFT", "NAME/N0000027.DFT", "POI/P0000017.DFT", "POI/P0000018.DFT", "POI/P0000019.DFT", "POI/P0000026.DFT", "POI/P0000027.DFT", ], 4: [ "BACK/B0000019.DFT", "BACK/B0000020.DFT", "BACK/B0000021.DFT", "BACK/B0000022.DFT", "BACK/B0000027.DFT", "BACK/B0000028.DFT", "BACK/B0000029.DFT", "BACK/B0000030.DFT", "BACK/B0000031.DFT", "BACK/B0000036.DFT", "BACK/B0000037.DFT", "BACK/B0000038.DFT", "BACK/B0000039.DFT", "BACK/B0000040.DFT", "BACK/B0000045.DFT", "BACK/B0000046.DFT", "BACK/B0000047.DFT", "BACK/B0000048.DFT", "BACK/B0000049.DFT", "BACK/B0000054.DFT", "NAME/N0000019.DFT", "NAME/N0000020.DFT", "NAME/N0000021.DFT", "NAME/N0000022.DFT", "NAME/N0000027.DFT", "NAME/N0000028.DFT", "NAME/N0000029.DFT", "NAME/N0000030.DFT", "NAME/N0000031.DFT", "NAME/N0000036.DFT", "NAME/N0000037.DFT", "NAME/N0000038.DFT", "NAME/N0000039.DFT", "NAME/N0000040.DFT", "NAME/N0000045.DFT", "NAME/N0000046.DFT", "NAME/N0000047.DFT", "NAME/N0000048.DFT", "NAME/N0000049.DFT", "NAME/N0000054.DFT", "POI/P0000019.DFT", "POI/P0000020.DFT", "POI/P0000021.DFT", "POI/P0000022.DFT", "POI/P0000027.DFT", "POI/P0000028.DFT", "POI/P0000029.DFT", "POI/P0000030.DFT", "POI/P0000031.DFT", "POI/P0000036.DFT", "POI/P0000037.DFT", "POI/P0000038.DFT", "POI/P0000039.DFT", "POI/P0000040.DFT", "POI/P0000045.DFT", "POI/P0000046.DFT", "POI/P0000047.DFT", "POI/P0000048.DFT", "POI/P0000049.DFT", "POI/P0000054.DFT", ], 5: [ "BACK/B0000002.DFT", "BACK/B0000003.DFT", "BACK/B0000004.DFT", "BACK/B0000011.DFT", "BACK/B0000012.DFT", 
"BACK/B0000013.DFT", "BACK/B0000020.DFT", "BACK/B0000021.DFT", "BACK/B0000022.DFT", "BACK/B0000029.DFT", "BACK/B0000030.DFT", "BACK/B0000031.DFT", "NAME/N0000002.DFT", "NAME/N0000003.DFT", "NAME/N0000004.DFT", "NAME/N0000011.DFT", "NAME/N0000012.DFT", "NAME/N0000013.DFT", "NAME/N0000020.DFT", "NAME/N0000021.DFT", "NAME/N0000022.DFT", "NAME/N0000029.DFT", "NAME/N0000030.DFT", "NAME/N0000031.DFT", "POI/P0000003.DFT", "POI/P0000011.DFT", "POI/P0000012.DFT", "POI/P0000013.DFT", "POI/P0000020.DFT", "POI/P0000021.DFT", "POI/P0000022.DFT", "POI/P0000029.DFT", "POI/P0000030.DFT", "POI/P0000031.DFT", ], 6: [ "BACK/B0000040.DFT", "BACK/B0000041.DFT", "BACK/B0000042.DFT", "BACK/B0000049.DFT", "BACK/B0000050.DFT", "BACK/B0000051.DFT", "NAME/N0000040.DFT", "NAME/N0000041.DFT", "NAME/N0000042.DFT", "NAME/N0000049.DFT", "NAME/N0000050.DFT", "NAME/N0000051.DFT", "POI/P0000040.DFT", "POI/P0000041.DFT", "POI/P0000042.DFT", "POI/P0000049.DFT", "POI/P0000050.DFT", "POI/P0000051.DFT", ], 7: [ "BACK/B0000032.DFT", "BACK/B0000033.DFT", "BACK/B0000034.DFT", "BACK/B0000041.DFT", "BACK/B0000042.DFT", "BACK/B0000043.DFT", "BACK/B0000050.DFT", "BACK/B0000051.DFT", "BACK/B0000052.DFT", "NAME/N0000032.DFT", "NAME/N0000033.DFT", "NAME/N0000034.DFT", "NAME/N0000041.DFT", "NAME/N0000042.DFT", "NAME/N0000043.DFT", "NAME/N0000050.DFT", "NAME/N0000051.DFT", "NAME/N0000052.DFT", "POI/P0000032.DFT", "POI/P0000033.DFT", "POI/P0000034.DFT", "POI/P0000041.DFT", "POI/P0000042.DFT", "POI/P0000043.DFT", "POI/P0000050.DFT", "POI/P0000051.DFT", "POI/P0000052.DFT", ], 8: [ "BACK/B0000031.DFT", "BACK/B0000032.DFT", "BACK/B0000033.DFT", "BACK/B0000040.DFT", "BACK/B0000041.DFT", "BACK/B0000042.DFT", "BACK/B0000049.DFT", "BACK/B0000050.DFT", "BACK/B0000051.DFT", "NAME/N0000031.DFT", "NAME/N0000032.DFT", "NAME/N0000033.DFT", "NAME/N0000040.DFT", "NAME/N0000041.DFT", "NAME/N0000042.DFT", "NAME/N0000049.DFT", "NAME/N0000050.DFT", "NAME/N0000051.DFT", "POI/P0000031.DFT", "POI/P0000032.DFT", "POI/P0000033.DFT", "POI/P0000040.DFT", "POI/P0000041.DFT", "POI/P0000042.DFT", "POI/P0000049.DFT", "POI/P0000050.DFT", "POI/P0000051.DFT", ], 9: [ "BACK/B0000005.DFT", "BACK/B0000006.DFT", "BACK/B0000007.DFT", "BACK/B0000014.DFT", "BACK/B0000015.DFT", "BACK/B0000016.DFT", "BACK/B0000023.DFT", "BACK/B0000024.DFT", "BACK/B0000025.DFT", "BACK/B0000032.DFT", "BACK/B0000033.DFT", "BACK/B0000034.DFT", "BACK/B0000041.DFT", "BACK/B0000042.DFT", "BACK/B0000043.DFT", "NAME/N0000005.DFT", "NAME/N0000006.DFT", "NAME/N0000007.DFT", "NAME/N0000014.DFT", "NAME/N0000015.DFT", "NAME/N0000016.DFT", "NAME/N0000023.DFT", "NAME/N0000024.DFT", "NAME/N0000025.DFT", "NAME/N0000032.DFT", "NAME/N0000033.DFT", "NAME/N0000034.DFT", "NAME/N0000041.DFT", "NAME/N0000042.DFT", "NAME/N0000043.DFT", "POI/P0000014.DFT", "POI/P0000015.DFT", "POI/P0000023.DFT", "POI/P0000024.DFT", "POI/P0000025.DFT", "POI/P0000032.DFT", "POI/P0000033.DFT", "POI/P0000034.DFT", "POI/P0000041.DFT", "POI/P0000042.DFT", "POI/P0000043.DFT", ], 10: [ "BACK/B0000037.DFT", "BACK/B0000041.DFT", "BACK/B0000042.DFT", "BACK/B0000043.DFT", "BACK/B0000044.DFT", "BACK/B0000045.DFT", "BACK/B0000046.DFT", "BACK/B0000050.DFT", "BACK/B0000051.DFT", "BACK/B0000052.DFT", "BACK/B0000053.DFT", "BACK/B0000054.DFT", "NAME/N0000037.DFT", "NAME/N0000041.DFT", "NAME/N0000042.DFT", "NAME/N0000043.DFT", "NAME/N0000044.DFT", "NAME/N0000045.DFT", "NAME/N0000046.DFT", "NAME/N0000050.DFT", "NAME/N0000051.DFT", "NAME/N0000052.DFT", "NAME/N0000053.DFT", "NAME/N0000054.DFT", "POI/P0000037.DFT", "POI/P0000041.DFT", "POI/P0000042.DFT", 
"POI/P0000043.DFT", "POI/P0000044.DFT", "POI/P0000045.DFT", "POI/P0000046.DFT", "POI/P0000050.DFT", "POI/P0000051.DFT", "POI/P0000052.DFT", "POI/P0000053.DFT", "POI/P0000054.DFT", ], }, } nose.tools.assert_equal(result, expected)
normal
{ "blob_id": "4dfdbc692858a627248cbe47d19b43c2a27ec70e", "index": 7373, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef parse_mapdata_test():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, 'misc')\n maplistdata_path = os.path.join(misc_folder, 'MapList.dat')\n result = copy.parse_mapdata(maplistdata_path)\n expected = {'num1': '00010001', 'num2': '00010001', 'regions': {(1): [\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000044.DFT',\n 'BACK/B0000045.DFT', 'BACK/B0000053.DFT', 'BACK/B0000054.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000044.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000053.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000044.DFT',\n 'POI/P0000045.DFT', 'POI/P0000053.DFT', 'POI/P0000054.DFT'], (2): [\n 'BACK/B0000024.DFT', 'BACK/B0000025.DFT', 'BACK/B0000026.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000043.DFT', 'BACK/B0000044.DFT', 'BACK/B0000045.DFT',\n 'NAME/N0000024.DFT', 'NAME/N0000025.DFT', 'NAME/N0000026.DFT',\n 'NAME/N0000027.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000043.DFT', 'NAME/N0000044.DFT', 'NAME/N0000045.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000026.DFT',\n 'POI/P0000027.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000042.DFT',\n 'POI/P0000043.DFT', 'POI/P0000044.DFT', 'POI/P0000045.DFT'], (3): [\n 'BACK/B0000001.DFT', 'BACK/B0000008.DFT', 'BACK/B0000009.DFT',\n 'BACK/B0000010.DFT', 'BACK/B0000017.DFT', 'BACK/B0000018.DFT',\n 'BACK/B0000019.DFT', 'BACK/B0000026.DFT', 'BACK/B0000027.DFT',\n 'NAME/N0000001.DFT', 'NAME/N0000008.DFT', 'NAME/N0000009.DFT',\n 'NAME/N0000010.DFT', 'NAME/N0000017.DFT', 'NAME/N0000018.DFT',\n 'NAME/N0000019.DFT', 'NAME/N0000026.DFT', 'NAME/N0000027.DFT',\n 'POI/P0000017.DFT', 'POI/P0000018.DFT', 'POI/P0000019.DFT',\n 'POI/P0000026.DFT', 'POI/P0000027.DFT'], (4): ['BACK/B0000019.DFT',\n 'BACK/B0000020.DFT', 'BACK/B0000021.DFT', 'BACK/B0000022.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000028.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'BACK/B0000036.DFT',\n 'BACK/B0000037.DFT', 'BACK/B0000038.DFT', 'BACK/B0000039.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000047.DFT', 'BACK/B0000048.DFT', 'BACK/B0000049.DFT',\n 'BACK/B0000054.DFT', 'NAME/N0000019.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000027.DFT',\n 'NAME/N0000028.DFT', 'NAME/N0000029.DFT', 'NAME/N0000030.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000036.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000038.DFT', 'NAME/N0000039.DFT', 'NAME/N0000040.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000046.DFT', 'NAME/N0000047.DFT',\n 'NAME/N0000048.DFT', 'NAME/N0000049.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000019.DFT', 'POI/P0000020.DFT', 'POI/P0000021.DFT',\n 'POI/P0000022.DFT', 'POI/P0000027.DFT', 'POI/P0000028.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT',\n 'POI/P0000036.DFT', 'POI/P0000037.DFT', 'POI/P0000038.DFT',\n 'POI/P0000039.DFT', 'POI/P0000040.DFT', 'POI/P0000045.DFT',\n 'POI/P0000046.DFT', 'POI/P0000047.DFT', 'POI/P0000048.DFT',\n 'POI/P0000049.DFT', 'POI/P0000054.DFT'], (5): ['BACK/B0000002.DFT',\n 'BACK/B0000003.DFT', 'BACK/B0000004.DFT', 'BACK/B0000011.DFT',\n 'BACK/B0000012.DFT', 'BACK/B0000013.DFT', 
'BACK/B0000020.DFT',\n 'BACK/B0000021.DFT', 'BACK/B0000022.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'NAME/N0000002.DFT',\n 'NAME/N0000003.DFT', 'NAME/N0000004.DFT', 'NAME/N0000011.DFT',\n 'NAME/N0000012.DFT', 'NAME/N0000013.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000029.DFT',\n 'NAME/N0000030.DFT', 'NAME/N0000031.DFT', 'POI/P0000003.DFT',\n 'POI/P0000011.DFT', 'POI/P0000012.DFT', 'POI/P0000013.DFT',\n 'POI/P0000020.DFT', 'POI/P0000021.DFT', 'POI/P0000022.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT'], (6): [\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (7): [\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'POI/P0000032.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT'], (8): [\n 'BACK/B0000031.DFT', 'BACK/B0000032.DFT', 'BACK/B0000033.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000032.DFT', 'NAME/N0000033.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000031.DFT', 'POI/P0000032.DFT', 'POI/P0000033.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (9): [\n 'BACK/B0000005.DFT', 'BACK/B0000006.DFT', 'BACK/B0000007.DFT',\n 'BACK/B0000014.DFT', 'BACK/B0000015.DFT', 'BACK/B0000016.DFT',\n 'BACK/B0000023.DFT', 'BACK/B0000024.DFT', 'BACK/B0000025.DFT',\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'NAME/N0000005.DFT', 'NAME/N0000006.DFT', 'NAME/N0000007.DFT',\n 'NAME/N0000014.DFT', 'NAME/N0000015.DFT', 'NAME/N0000016.DFT',\n 'NAME/N0000023.DFT', 'NAME/N0000024.DFT', 'NAME/N0000025.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'POI/P0000014.DFT', 'POI/P0000015.DFT', 'POI/P0000023.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000032.DFT',\n 'POI/P0000033.DFT', 'POI/P0000034.DFT', 'POI/P0000041.DFT',\n 'POI/P0000042.DFT', 'POI/P0000043.DFT'], (10): ['BACK/B0000037.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000044.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'BACK/B0000053.DFT', 'BACK/B0000054.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000044.DFT', 'NAME/N0000045.DFT', 'NAME/N0000046.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'NAME/N0000053.DFT', 
'NAME/N0000054.DFT', 'POI/P0000037.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000044.DFT', 'POI/P0000045.DFT', 'POI/P0000046.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT',\n 'POI/P0000053.DFT', 'POI/P0000054.DFT']}}\n nose.tools.assert_equal(result, expected)\n", "step-3": "<mask token>\n\n\ndef get_parser_test():\n \"\"\"Check if the evaluation model returns a parser object.\"\"\"\n copy.get_parser()\n\n\ndef parse_mapdata_test():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, 'misc')\n maplistdata_path = os.path.join(misc_folder, 'MapList.dat')\n result = copy.parse_mapdata(maplistdata_path)\n expected = {'num1': '00010001', 'num2': '00010001', 'regions': {(1): [\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000044.DFT',\n 'BACK/B0000045.DFT', 'BACK/B0000053.DFT', 'BACK/B0000054.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000044.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000053.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000044.DFT',\n 'POI/P0000045.DFT', 'POI/P0000053.DFT', 'POI/P0000054.DFT'], (2): [\n 'BACK/B0000024.DFT', 'BACK/B0000025.DFT', 'BACK/B0000026.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000043.DFT', 'BACK/B0000044.DFT', 'BACK/B0000045.DFT',\n 'NAME/N0000024.DFT', 'NAME/N0000025.DFT', 'NAME/N0000026.DFT',\n 'NAME/N0000027.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000043.DFT', 'NAME/N0000044.DFT', 'NAME/N0000045.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000026.DFT',\n 'POI/P0000027.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000042.DFT',\n 'POI/P0000043.DFT', 'POI/P0000044.DFT', 'POI/P0000045.DFT'], (3): [\n 'BACK/B0000001.DFT', 'BACK/B0000008.DFT', 'BACK/B0000009.DFT',\n 'BACK/B0000010.DFT', 'BACK/B0000017.DFT', 'BACK/B0000018.DFT',\n 'BACK/B0000019.DFT', 'BACK/B0000026.DFT', 'BACK/B0000027.DFT',\n 'NAME/N0000001.DFT', 'NAME/N0000008.DFT', 'NAME/N0000009.DFT',\n 'NAME/N0000010.DFT', 'NAME/N0000017.DFT', 'NAME/N0000018.DFT',\n 'NAME/N0000019.DFT', 'NAME/N0000026.DFT', 'NAME/N0000027.DFT',\n 'POI/P0000017.DFT', 'POI/P0000018.DFT', 'POI/P0000019.DFT',\n 'POI/P0000026.DFT', 'POI/P0000027.DFT'], (4): ['BACK/B0000019.DFT',\n 'BACK/B0000020.DFT', 'BACK/B0000021.DFT', 'BACK/B0000022.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000028.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'BACK/B0000036.DFT',\n 'BACK/B0000037.DFT', 'BACK/B0000038.DFT', 'BACK/B0000039.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000047.DFT', 'BACK/B0000048.DFT', 'BACK/B0000049.DFT',\n 'BACK/B0000054.DFT', 'NAME/N0000019.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000027.DFT',\n 'NAME/N0000028.DFT', 'NAME/N0000029.DFT', 'NAME/N0000030.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000036.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000038.DFT', 'NAME/N0000039.DFT', 'NAME/N0000040.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000046.DFT', 'NAME/N0000047.DFT',\n 'NAME/N0000048.DFT', 'NAME/N0000049.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000019.DFT', 'POI/P0000020.DFT', 'POI/P0000021.DFT',\n 'POI/P0000022.DFT', 'POI/P0000027.DFT', 'POI/P0000028.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT',\n 'POI/P0000036.DFT', 'POI/P0000037.DFT', 
'POI/P0000038.DFT',\n 'POI/P0000039.DFT', 'POI/P0000040.DFT', 'POI/P0000045.DFT',\n 'POI/P0000046.DFT', 'POI/P0000047.DFT', 'POI/P0000048.DFT',\n 'POI/P0000049.DFT', 'POI/P0000054.DFT'], (5): ['BACK/B0000002.DFT',\n 'BACK/B0000003.DFT', 'BACK/B0000004.DFT', 'BACK/B0000011.DFT',\n 'BACK/B0000012.DFT', 'BACK/B0000013.DFT', 'BACK/B0000020.DFT',\n 'BACK/B0000021.DFT', 'BACK/B0000022.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'NAME/N0000002.DFT',\n 'NAME/N0000003.DFT', 'NAME/N0000004.DFT', 'NAME/N0000011.DFT',\n 'NAME/N0000012.DFT', 'NAME/N0000013.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000029.DFT',\n 'NAME/N0000030.DFT', 'NAME/N0000031.DFT', 'POI/P0000003.DFT',\n 'POI/P0000011.DFT', 'POI/P0000012.DFT', 'POI/P0000013.DFT',\n 'POI/P0000020.DFT', 'POI/P0000021.DFT', 'POI/P0000022.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT'], (6): [\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (7): [\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'POI/P0000032.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT'], (8): [\n 'BACK/B0000031.DFT', 'BACK/B0000032.DFT', 'BACK/B0000033.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000032.DFT', 'NAME/N0000033.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000031.DFT', 'POI/P0000032.DFT', 'POI/P0000033.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (9): [\n 'BACK/B0000005.DFT', 'BACK/B0000006.DFT', 'BACK/B0000007.DFT',\n 'BACK/B0000014.DFT', 'BACK/B0000015.DFT', 'BACK/B0000016.DFT',\n 'BACK/B0000023.DFT', 'BACK/B0000024.DFT', 'BACK/B0000025.DFT',\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'NAME/N0000005.DFT', 'NAME/N0000006.DFT', 'NAME/N0000007.DFT',\n 'NAME/N0000014.DFT', 'NAME/N0000015.DFT', 'NAME/N0000016.DFT',\n 'NAME/N0000023.DFT', 'NAME/N0000024.DFT', 'NAME/N0000025.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'POI/P0000014.DFT', 'POI/P0000015.DFT', 'POI/P0000023.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000032.DFT',\n 'POI/P0000033.DFT', 'POI/P0000034.DFT', 'POI/P0000041.DFT',\n 'POI/P0000042.DFT', 'POI/P0000043.DFT'], (10): ['BACK/B0000037.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000044.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000050.DFT', 
'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'BACK/B0000053.DFT', 'BACK/B0000054.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000044.DFT', 'NAME/N0000045.DFT', 'NAME/N0000046.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'NAME/N0000053.DFT', 'NAME/N0000054.DFT', 'POI/P0000037.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000044.DFT', 'POI/P0000045.DFT', 'POI/P0000046.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT',\n 'POI/P0000053.DFT', 'POI/P0000054.DFT']}}\n nose.tools.assert_equal(result, expected)\n", "step-4": "import os\nimport nose\nimport lumixmaptool.copy as copy\n\n\ndef get_parser_test():\n \"\"\"Check if the evaluation model returns a parser object.\"\"\"\n copy.get_parser()\n\n\ndef parse_mapdata_test():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, 'misc')\n maplistdata_path = os.path.join(misc_folder, 'MapList.dat')\n result = copy.parse_mapdata(maplistdata_path)\n expected = {'num1': '00010001', 'num2': '00010001', 'regions': {(1): [\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000044.DFT',\n 'BACK/B0000045.DFT', 'BACK/B0000053.DFT', 'BACK/B0000054.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000044.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000053.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000044.DFT',\n 'POI/P0000045.DFT', 'POI/P0000053.DFT', 'POI/P0000054.DFT'], (2): [\n 'BACK/B0000024.DFT', 'BACK/B0000025.DFT', 'BACK/B0000026.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000043.DFT', 'BACK/B0000044.DFT', 'BACK/B0000045.DFT',\n 'NAME/N0000024.DFT', 'NAME/N0000025.DFT', 'NAME/N0000026.DFT',\n 'NAME/N0000027.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000043.DFT', 'NAME/N0000044.DFT', 'NAME/N0000045.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000026.DFT',\n 'POI/P0000027.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000042.DFT',\n 'POI/P0000043.DFT', 'POI/P0000044.DFT', 'POI/P0000045.DFT'], (3): [\n 'BACK/B0000001.DFT', 'BACK/B0000008.DFT', 'BACK/B0000009.DFT',\n 'BACK/B0000010.DFT', 'BACK/B0000017.DFT', 'BACK/B0000018.DFT',\n 'BACK/B0000019.DFT', 'BACK/B0000026.DFT', 'BACK/B0000027.DFT',\n 'NAME/N0000001.DFT', 'NAME/N0000008.DFT', 'NAME/N0000009.DFT',\n 'NAME/N0000010.DFT', 'NAME/N0000017.DFT', 'NAME/N0000018.DFT',\n 'NAME/N0000019.DFT', 'NAME/N0000026.DFT', 'NAME/N0000027.DFT',\n 'POI/P0000017.DFT', 'POI/P0000018.DFT', 'POI/P0000019.DFT',\n 'POI/P0000026.DFT', 'POI/P0000027.DFT'], (4): ['BACK/B0000019.DFT',\n 'BACK/B0000020.DFT', 'BACK/B0000021.DFT', 'BACK/B0000022.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000028.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'BACK/B0000036.DFT',\n 'BACK/B0000037.DFT', 'BACK/B0000038.DFT', 'BACK/B0000039.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000047.DFT', 'BACK/B0000048.DFT', 'BACK/B0000049.DFT',\n 'BACK/B0000054.DFT', 'NAME/N0000019.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000027.DFT',\n 'NAME/N0000028.DFT', 'NAME/N0000029.DFT', 'NAME/N0000030.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000036.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000038.DFT', 'NAME/N0000039.DFT', 
'NAME/N0000040.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000046.DFT', 'NAME/N0000047.DFT',\n 'NAME/N0000048.DFT', 'NAME/N0000049.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000019.DFT', 'POI/P0000020.DFT', 'POI/P0000021.DFT',\n 'POI/P0000022.DFT', 'POI/P0000027.DFT', 'POI/P0000028.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT',\n 'POI/P0000036.DFT', 'POI/P0000037.DFT', 'POI/P0000038.DFT',\n 'POI/P0000039.DFT', 'POI/P0000040.DFT', 'POI/P0000045.DFT',\n 'POI/P0000046.DFT', 'POI/P0000047.DFT', 'POI/P0000048.DFT',\n 'POI/P0000049.DFT', 'POI/P0000054.DFT'], (5): ['BACK/B0000002.DFT',\n 'BACK/B0000003.DFT', 'BACK/B0000004.DFT', 'BACK/B0000011.DFT',\n 'BACK/B0000012.DFT', 'BACK/B0000013.DFT', 'BACK/B0000020.DFT',\n 'BACK/B0000021.DFT', 'BACK/B0000022.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'NAME/N0000002.DFT',\n 'NAME/N0000003.DFT', 'NAME/N0000004.DFT', 'NAME/N0000011.DFT',\n 'NAME/N0000012.DFT', 'NAME/N0000013.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000029.DFT',\n 'NAME/N0000030.DFT', 'NAME/N0000031.DFT', 'POI/P0000003.DFT',\n 'POI/P0000011.DFT', 'POI/P0000012.DFT', 'POI/P0000013.DFT',\n 'POI/P0000020.DFT', 'POI/P0000021.DFT', 'POI/P0000022.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT'], (6): [\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (7): [\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'POI/P0000032.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT'], (8): [\n 'BACK/B0000031.DFT', 'BACK/B0000032.DFT', 'BACK/B0000033.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000032.DFT', 'NAME/N0000033.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000031.DFT', 'POI/P0000032.DFT', 'POI/P0000033.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (9): [\n 'BACK/B0000005.DFT', 'BACK/B0000006.DFT', 'BACK/B0000007.DFT',\n 'BACK/B0000014.DFT', 'BACK/B0000015.DFT', 'BACK/B0000016.DFT',\n 'BACK/B0000023.DFT', 'BACK/B0000024.DFT', 'BACK/B0000025.DFT',\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'NAME/N0000005.DFT', 'NAME/N0000006.DFT', 'NAME/N0000007.DFT',\n 'NAME/N0000014.DFT', 'NAME/N0000015.DFT', 'NAME/N0000016.DFT',\n 'NAME/N0000023.DFT', 'NAME/N0000024.DFT', 'NAME/N0000025.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'POI/P0000014.DFT', 
'POI/P0000015.DFT', 'POI/P0000023.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000032.DFT',\n 'POI/P0000033.DFT', 'POI/P0000034.DFT', 'POI/P0000041.DFT',\n 'POI/P0000042.DFT', 'POI/P0000043.DFT'], (10): ['BACK/B0000037.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000044.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'BACK/B0000053.DFT', 'BACK/B0000054.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000044.DFT', 'NAME/N0000045.DFT', 'NAME/N0000046.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'NAME/N0000053.DFT', 'NAME/N0000054.DFT', 'POI/P0000037.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000044.DFT', 'POI/P0000045.DFT', 'POI/P0000046.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT',\n 'POI/P0000053.DFT', 'POI/P0000054.DFT']}}\n nose.tools.assert_equal(result, expected)\n", "step-5": "#!/usr/bin/env python\n\n# Core Library modules\nimport os\n\n# Third party modules\nimport nose\n\n# First party modules\nimport lumixmaptool.copy as copy\n\n\n# Tests\ndef get_parser_test():\n \"\"\"Check if the evaluation model returns a parser object.\"\"\"\n copy.get_parser()\n\n\ndef parse_mapdata_test():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, \"misc\")\n maplistdata_path = os.path.join(misc_folder, \"MapList.dat\")\n result = copy.parse_mapdata(maplistdata_path)\n expected = {\n \"num1\": \"00010001\",\n \"num2\": \"00010001\",\n \"regions\": {\n 1: [\n \"BACK/B0000035.DFT\",\n \"BACK/B0000036.DFT\",\n \"BACK/B0000044.DFT\",\n \"BACK/B0000045.DFT\",\n \"BACK/B0000053.DFT\",\n \"BACK/B0000054.DFT\",\n \"NAME/N0000035.DFT\",\n \"NAME/N0000036.DFT\",\n \"NAME/N0000044.DFT\",\n \"NAME/N0000045.DFT\",\n \"NAME/N0000053.DFT\",\n \"NAME/N0000054.DFT\",\n \"POI/P0000035.DFT\",\n \"POI/P0000036.DFT\",\n \"POI/P0000044.DFT\",\n \"POI/P0000045.DFT\",\n \"POI/P0000053.DFT\",\n \"POI/P0000054.DFT\",\n ],\n 2: [\n \"BACK/B0000024.DFT\",\n \"BACK/B0000025.DFT\",\n \"BACK/B0000026.DFT\",\n \"BACK/B0000027.DFT\",\n \"BACK/B0000033.DFT\",\n \"BACK/B0000034.DFT\",\n \"BACK/B0000035.DFT\",\n \"BACK/B0000036.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000043.DFT\",\n \"BACK/B0000044.DFT\",\n \"BACK/B0000045.DFT\",\n \"NAME/N0000024.DFT\",\n \"NAME/N0000025.DFT\",\n \"NAME/N0000026.DFT\",\n \"NAME/N0000027.DFT\",\n \"NAME/N0000033.DFT\",\n \"NAME/N0000034.DFT\",\n \"NAME/N0000035.DFT\",\n \"NAME/N0000036.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000043.DFT\",\n \"NAME/N0000044.DFT\",\n \"NAME/N0000045.DFT\",\n \"POI/P0000024.DFT\",\n \"POI/P0000025.DFT\",\n \"POI/P0000026.DFT\",\n \"POI/P0000027.DFT\",\n \"POI/P0000033.DFT\",\n \"POI/P0000034.DFT\",\n \"POI/P0000035.DFT\",\n \"POI/P0000036.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000043.DFT\",\n \"POI/P0000044.DFT\",\n \"POI/P0000045.DFT\",\n ],\n 3: [\n \"BACK/B0000001.DFT\",\n \"BACK/B0000008.DFT\",\n \"BACK/B0000009.DFT\",\n \"BACK/B0000010.DFT\",\n \"BACK/B0000017.DFT\",\n \"BACK/B0000018.DFT\",\n \"BACK/B0000019.DFT\",\n \"BACK/B0000026.DFT\",\n \"BACK/B0000027.DFT\",\n \"NAME/N0000001.DFT\",\n \"NAME/N0000008.DFT\",\n \"NAME/N0000009.DFT\",\n \"NAME/N0000010.DFT\",\n \"NAME/N0000017.DFT\",\n \"NAME/N0000018.DFT\",\n \"NAME/N0000019.DFT\",\n \"NAME/N0000026.DFT\",\n \"NAME/N0000027.DFT\",\n \"POI/P0000017.DFT\",\n \"POI/P0000018.DFT\",\n 
\"POI/P0000019.DFT\",\n \"POI/P0000026.DFT\",\n \"POI/P0000027.DFT\",\n ],\n 4: [\n \"BACK/B0000019.DFT\",\n \"BACK/B0000020.DFT\",\n \"BACK/B0000021.DFT\",\n \"BACK/B0000022.DFT\",\n \"BACK/B0000027.DFT\",\n \"BACK/B0000028.DFT\",\n \"BACK/B0000029.DFT\",\n \"BACK/B0000030.DFT\",\n \"BACK/B0000031.DFT\",\n \"BACK/B0000036.DFT\",\n \"BACK/B0000037.DFT\",\n \"BACK/B0000038.DFT\",\n \"BACK/B0000039.DFT\",\n \"BACK/B0000040.DFT\",\n \"BACK/B0000045.DFT\",\n \"BACK/B0000046.DFT\",\n \"BACK/B0000047.DFT\",\n \"BACK/B0000048.DFT\",\n \"BACK/B0000049.DFT\",\n \"BACK/B0000054.DFT\",\n \"NAME/N0000019.DFT\",\n \"NAME/N0000020.DFT\",\n \"NAME/N0000021.DFT\",\n \"NAME/N0000022.DFT\",\n \"NAME/N0000027.DFT\",\n \"NAME/N0000028.DFT\",\n \"NAME/N0000029.DFT\",\n \"NAME/N0000030.DFT\",\n \"NAME/N0000031.DFT\",\n \"NAME/N0000036.DFT\",\n \"NAME/N0000037.DFT\",\n \"NAME/N0000038.DFT\",\n \"NAME/N0000039.DFT\",\n \"NAME/N0000040.DFT\",\n \"NAME/N0000045.DFT\",\n \"NAME/N0000046.DFT\",\n \"NAME/N0000047.DFT\",\n \"NAME/N0000048.DFT\",\n \"NAME/N0000049.DFT\",\n \"NAME/N0000054.DFT\",\n \"POI/P0000019.DFT\",\n \"POI/P0000020.DFT\",\n \"POI/P0000021.DFT\",\n \"POI/P0000022.DFT\",\n \"POI/P0000027.DFT\",\n \"POI/P0000028.DFT\",\n \"POI/P0000029.DFT\",\n \"POI/P0000030.DFT\",\n \"POI/P0000031.DFT\",\n \"POI/P0000036.DFT\",\n \"POI/P0000037.DFT\",\n \"POI/P0000038.DFT\",\n \"POI/P0000039.DFT\",\n \"POI/P0000040.DFT\",\n \"POI/P0000045.DFT\",\n \"POI/P0000046.DFT\",\n \"POI/P0000047.DFT\",\n \"POI/P0000048.DFT\",\n \"POI/P0000049.DFT\",\n \"POI/P0000054.DFT\",\n ],\n 5: [\n \"BACK/B0000002.DFT\",\n \"BACK/B0000003.DFT\",\n \"BACK/B0000004.DFT\",\n \"BACK/B0000011.DFT\",\n \"BACK/B0000012.DFT\",\n \"BACK/B0000013.DFT\",\n \"BACK/B0000020.DFT\",\n \"BACK/B0000021.DFT\",\n \"BACK/B0000022.DFT\",\n \"BACK/B0000029.DFT\",\n \"BACK/B0000030.DFT\",\n \"BACK/B0000031.DFT\",\n \"NAME/N0000002.DFT\",\n \"NAME/N0000003.DFT\",\n \"NAME/N0000004.DFT\",\n \"NAME/N0000011.DFT\",\n \"NAME/N0000012.DFT\",\n \"NAME/N0000013.DFT\",\n \"NAME/N0000020.DFT\",\n \"NAME/N0000021.DFT\",\n \"NAME/N0000022.DFT\",\n \"NAME/N0000029.DFT\",\n \"NAME/N0000030.DFT\",\n \"NAME/N0000031.DFT\",\n \"POI/P0000003.DFT\",\n \"POI/P0000011.DFT\",\n \"POI/P0000012.DFT\",\n \"POI/P0000013.DFT\",\n \"POI/P0000020.DFT\",\n \"POI/P0000021.DFT\",\n \"POI/P0000022.DFT\",\n \"POI/P0000029.DFT\",\n \"POI/P0000030.DFT\",\n \"POI/P0000031.DFT\",\n ],\n 6: [\n \"BACK/B0000040.DFT\",\n \"BACK/B0000041.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000049.DFT\",\n \"BACK/B0000050.DFT\",\n \"BACK/B0000051.DFT\",\n \"NAME/N0000040.DFT\",\n \"NAME/N0000041.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000049.DFT\",\n \"NAME/N0000050.DFT\",\n \"NAME/N0000051.DFT\",\n \"POI/P0000040.DFT\",\n \"POI/P0000041.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000049.DFT\",\n \"POI/P0000050.DFT\",\n \"POI/P0000051.DFT\",\n ],\n 7: [\n \"BACK/B0000032.DFT\",\n \"BACK/B0000033.DFT\",\n \"BACK/B0000034.DFT\",\n \"BACK/B0000041.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000043.DFT\",\n \"BACK/B0000050.DFT\",\n \"BACK/B0000051.DFT\",\n \"BACK/B0000052.DFT\",\n \"NAME/N0000032.DFT\",\n \"NAME/N0000033.DFT\",\n \"NAME/N0000034.DFT\",\n \"NAME/N0000041.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000043.DFT\",\n \"NAME/N0000050.DFT\",\n \"NAME/N0000051.DFT\",\n \"NAME/N0000052.DFT\",\n \"POI/P0000032.DFT\",\n \"POI/P0000033.DFT\",\n \"POI/P0000034.DFT\",\n \"POI/P0000041.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000043.DFT\",\n \"POI/P0000050.DFT\",\n \"POI/P0000051.DFT\",\n \"POI/P0000052.DFT\",\n ],\n 
8: [\n \"BACK/B0000031.DFT\",\n \"BACK/B0000032.DFT\",\n \"BACK/B0000033.DFT\",\n \"BACK/B0000040.DFT\",\n \"BACK/B0000041.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000049.DFT\",\n \"BACK/B0000050.DFT\",\n \"BACK/B0000051.DFT\",\n \"NAME/N0000031.DFT\",\n \"NAME/N0000032.DFT\",\n \"NAME/N0000033.DFT\",\n \"NAME/N0000040.DFT\",\n \"NAME/N0000041.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000049.DFT\",\n \"NAME/N0000050.DFT\",\n \"NAME/N0000051.DFT\",\n \"POI/P0000031.DFT\",\n \"POI/P0000032.DFT\",\n \"POI/P0000033.DFT\",\n \"POI/P0000040.DFT\",\n \"POI/P0000041.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000049.DFT\",\n \"POI/P0000050.DFT\",\n \"POI/P0000051.DFT\",\n ],\n 9: [\n \"BACK/B0000005.DFT\",\n \"BACK/B0000006.DFT\",\n \"BACK/B0000007.DFT\",\n \"BACK/B0000014.DFT\",\n \"BACK/B0000015.DFT\",\n \"BACK/B0000016.DFT\",\n \"BACK/B0000023.DFT\",\n \"BACK/B0000024.DFT\",\n \"BACK/B0000025.DFT\",\n \"BACK/B0000032.DFT\",\n \"BACK/B0000033.DFT\",\n \"BACK/B0000034.DFT\",\n \"BACK/B0000041.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000043.DFT\",\n \"NAME/N0000005.DFT\",\n \"NAME/N0000006.DFT\",\n \"NAME/N0000007.DFT\",\n \"NAME/N0000014.DFT\",\n \"NAME/N0000015.DFT\",\n \"NAME/N0000016.DFT\",\n \"NAME/N0000023.DFT\",\n \"NAME/N0000024.DFT\",\n \"NAME/N0000025.DFT\",\n \"NAME/N0000032.DFT\",\n \"NAME/N0000033.DFT\",\n \"NAME/N0000034.DFT\",\n \"NAME/N0000041.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000043.DFT\",\n \"POI/P0000014.DFT\",\n \"POI/P0000015.DFT\",\n \"POI/P0000023.DFT\",\n \"POI/P0000024.DFT\",\n \"POI/P0000025.DFT\",\n \"POI/P0000032.DFT\",\n \"POI/P0000033.DFT\",\n \"POI/P0000034.DFT\",\n \"POI/P0000041.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000043.DFT\",\n ],\n 10: [\n \"BACK/B0000037.DFT\",\n \"BACK/B0000041.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000043.DFT\",\n \"BACK/B0000044.DFT\",\n \"BACK/B0000045.DFT\",\n \"BACK/B0000046.DFT\",\n \"BACK/B0000050.DFT\",\n \"BACK/B0000051.DFT\",\n \"BACK/B0000052.DFT\",\n \"BACK/B0000053.DFT\",\n \"BACK/B0000054.DFT\",\n \"NAME/N0000037.DFT\",\n \"NAME/N0000041.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000043.DFT\",\n \"NAME/N0000044.DFT\",\n \"NAME/N0000045.DFT\",\n \"NAME/N0000046.DFT\",\n \"NAME/N0000050.DFT\",\n \"NAME/N0000051.DFT\",\n \"NAME/N0000052.DFT\",\n \"NAME/N0000053.DFT\",\n \"NAME/N0000054.DFT\",\n \"POI/P0000037.DFT\",\n \"POI/P0000041.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000043.DFT\",\n \"POI/P0000044.DFT\",\n \"POI/P0000045.DFT\",\n \"POI/P0000046.DFT\",\n \"POI/P0000050.DFT\",\n \"POI/P0000051.DFT\",\n \"POI/P0000052.DFT\",\n \"POI/P0000053.DFT\",\n \"POI/P0000054.DFT\",\n ],\n },\n }\n nose.tools.assert_equal(result, expected)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/env python # -*- coding: utf-8 -*- import unittest import xmlrunner import os import sys import glob import yaml ASSETS_DIR = "" class GenerateMachineConfig(unittest.TestCase): def setUp(self): self.machine_configs = [] for machine_config_path in glob.glob( f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml' ): with open(machine_config_path) as f: self.machine_configs.append(yaml.load(f, Loader=yaml.FullLoader)) def test_kernel_args(self): """Assert there are machine configs configuring the kernel args for masters and workers""" for machine_config in self.machine_configs: kernel_args = machine_config["spec"]["kernelArguments"] self.assertIn("ip=dhcp,dhcp6", kernel_args) if __name__ == '__main__': ASSETS_DIR = sys.argv.pop() with open(os.environ.get('JUNIT_FILE', '/dev/null'), 'wb') as output: unittest.main(testRunner=xmlrunner.XMLTestRunner(output=output), failfast=False, buffer=False, catchbreak=False, verbosity=2)
normal
{ "blob_id": "f0c082968e26d414b0dbb679d4e5077056e99979", "index": 8653, "step-1": "<mask token>\n\n\nclass GenerateMachineConfig(unittest.TestCase):\n\n def setUp(self):\n self.machine_configs = []\n for machine_config_path in glob.glob(\n f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'\n ):\n with open(machine_config_path) as f:\n self.machine_configs.append(yaml.load(f, Loader=yaml.\n FullLoader))\n\n def test_kernel_args(self):\n \"\"\"Assert there are machine configs configuring the kernel args for masters and workers\"\"\"\n for machine_config in self.machine_configs:\n kernel_args = machine_config['spec']['kernelArguments']\n self.assertIn('ip=dhcp,dhcp6', kernel_args)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass GenerateMachineConfig(unittest.TestCase):\n\n def setUp(self):\n self.machine_configs = []\n for machine_config_path in glob.glob(\n f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'\n ):\n with open(machine_config_path) as f:\n self.machine_configs.append(yaml.load(f, Loader=yaml.\n FullLoader))\n\n def test_kernel_args(self):\n \"\"\"Assert there are machine configs configuring the kernel args for masters and workers\"\"\"\n for machine_config in self.machine_configs:\n kernel_args = machine_config['spec']['kernelArguments']\n self.assertIn('ip=dhcp,dhcp6', kernel_args)\n\n\nif __name__ == '__main__':\n ASSETS_DIR = sys.argv.pop()\n with open(os.environ.get('JUNIT_FILE', '/dev/null'), 'wb') as output:\n unittest.main(testRunner=xmlrunner.XMLTestRunner(output=output),\n failfast=False, buffer=False, catchbreak=False, verbosity=2)\n", "step-3": "<mask token>\nASSETS_DIR = ''\n\n\nclass GenerateMachineConfig(unittest.TestCase):\n\n def setUp(self):\n self.machine_configs = []\n for machine_config_path in glob.glob(\n f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'\n ):\n with open(machine_config_path) as f:\n self.machine_configs.append(yaml.load(f, Loader=yaml.\n FullLoader))\n\n def test_kernel_args(self):\n \"\"\"Assert there are machine configs configuring the kernel args for masters and workers\"\"\"\n for machine_config in self.machine_configs:\n kernel_args = machine_config['spec']['kernelArguments']\n self.assertIn('ip=dhcp,dhcp6', kernel_args)\n\n\nif __name__ == '__main__':\n ASSETS_DIR = sys.argv.pop()\n with open(os.environ.get('JUNIT_FILE', '/dev/null'), 'wb') as output:\n unittest.main(testRunner=xmlrunner.XMLTestRunner(output=output),\n failfast=False, buffer=False, catchbreak=False, verbosity=2)\n", "step-4": "import unittest\nimport xmlrunner\nimport os\nimport sys\nimport glob\nimport yaml\nASSETS_DIR = ''\n\n\nclass GenerateMachineConfig(unittest.TestCase):\n\n def setUp(self):\n self.machine_configs = []\n for machine_config_path in glob.glob(\n f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'\n ):\n with open(machine_config_path) as f:\n self.machine_configs.append(yaml.load(f, Loader=yaml.\n FullLoader))\n\n def test_kernel_args(self):\n \"\"\"Assert there are machine configs configuring the kernel args for masters and workers\"\"\"\n for machine_config in self.machine_configs:\n kernel_args = machine_config['spec']['kernelArguments']\n self.assertIn('ip=dhcp,dhcp6', kernel_args)\n\n\nif __name__ == '__main__':\n ASSETS_DIR = sys.argv.pop()\n with open(os.environ.get('JUNIT_FILE', '/dev/null'), 'wb') as output:\n unittest.main(testRunner=xmlrunner.XMLTestRunner(output=output),\n failfast=False, buffer=False, catchbreak=False, verbosity=2)\n", 
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport unittest\nimport xmlrunner\n\nimport os\nimport sys\nimport glob\nimport yaml\n\nASSETS_DIR = \"\"\n\nclass GenerateMachineConfig(unittest.TestCase):\n def setUp(self):\n self.machine_configs = []\n for machine_config_path in glob.glob(\n f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'\n ):\n with open(machine_config_path) as f:\n self.machine_configs.append(yaml.load(f, Loader=yaml.FullLoader))\n\n def test_kernel_args(self):\n \"\"\"Assert there are machine configs configuring the kernel args for masters and workers\"\"\"\n for machine_config in self.machine_configs:\n kernel_args = machine_config[\"spec\"][\"kernelArguments\"]\n self.assertIn(\"ip=dhcp,dhcp6\", kernel_args)\n\n\nif __name__ == '__main__':\n ASSETS_DIR = sys.argv.pop()\n with open(os.environ.get('JUNIT_FILE', '/dev/null'), 'wb') as output:\n unittest.main(testRunner=xmlrunner.XMLTestRunner(output=output), failfast=False, buffer=False, catchbreak=False, verbosity=2)\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
from functools import wraps from time import sleep def retry(retry_count = 2, delay = 5, action_description = 'not specified', allowed_exceptions=()): def decorator(func): @wraps(func) # to preserve metadata of the function to be decorated def wrapper(*args, **kwargs): for _ in range(retry_count): try: return func(*args, **kwargs) except allowed_exceptions as e: print('Error executing {}: {}'.format(func.__name__, e)) print('Waiting for {} sec before executing {} again'.format(delay, func.__name__)) sleep(delay) print('Retrying to execute ' + func.__name__ + ' (action: ' + action_description + ')') return wrapper return decorator
normal
{ "blob_id": "79e4592d5ea84cc7c97d68a9390eb5d387045cf0", "index": 4344, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef retry(retry_count=2, delay=5, action_description='not specified',\n allowed_exceptions=()):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n for _ in range(retry_count):\n try:\n return func(*args, **kwargs)\n except allowed_exceptions as e:\n print('Error executing {}: {}'.format(func.__name__, e))\n print('Waiting for {} sec before executing {} again'.\n format(delay, func.__name__))\n sleep(delay)\n print('Retrying to execute ' + func.__name__ +\n ' (action: ' + action_description + ')')\n return wrapper\n return decorator\n", "step-3": "from functools import wraps\nfrom time import sleep\n\n\ndef retry(retry_count=2, delay=5, action_description='not specified',\n allowed_exceptions=()):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n for _ in range(retry_count):\n try:\n return func(*args, **kwargs)\n except allowed_exceptions as e:\n print('Error executing {}: {}'.format(func.__name__, e))\n print('Waiting for {} sec before executing {} again'.\n format(delay, func.__name__))\n sleep(delay)\n print('Retrying to execute ' + func.__name__ +\n ' (action: ' + action_description + ')')\n return wrapper\n return decorator\n", "step-4": "from functools import wraps\nfrom time import sleep\n\ndef retry(retry_count = 2, delay = 5, action_description = 'not specified', allowed_exceptions=()):\n def decorator(func):\n @wraps(func) # to preserve metadata of the function to be decorated\n def wrapper(*args, **kwargs):\n for _ in range(retry_count): \n try:\n return func(*args, **kwargs)\n except allowed_exceptions as e:\n print('Error executing {}: {}'.format(func.__name__, e))\n print('Waiting for {} sec before executing {} again'.format(delay, func.__name__))\n sleep(delay)\n print('Retrying to execute ' + func.__name__ + ' (action: ' + action_description + ')')\n return wrapper\n return decorator", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
""" This module provides an optimizer class that is based on an evolution strategy algorithm. """ import copy, random, math from time import time from xml.dom import minidom from extra.schedule import Schedule from extra.printer import pprint, BLUE class Optimizer(object): """ This class is the implementation of the evolution strategy to optimize and evaluate schedules. """ def __init__(self, plant, orderList, simulator, evaluator): """ plant - the plant to run the simulation and evaluation on orderList - the list of orders in the given schedule simulator - Simulator instance to run a schedule evaluator - Evaluator instance to evaluate a schedule """ assert plant != None assert orderList != None self.plant = plant self.orderList = orderList self.simulator = simulator self.evaluator = evaluator # used for benchmarking self.simulatorTime = 0 # enable/disable console output self.printing = True # parameters for the evolution strategy algorithm self.populationSize = 0 self.indivMutationRate = 0 self.selectionRate = 0 self.mutationRange = 0 self.iterations = 0 @staticmethod def fromXml(xmlDoc, plant, orderList, simulator, evaluator): """ Loads the optimizer configuration and parameters from an XML tree. """ optimizer = Optimizer(plant, orderList, simulator, evaluator) element = xmlDoc.getElementsByTagName("optimizer") # there should only be 1 optimizer node in the XML tree! assert len(element) == 1 element = element[0] # load the different attributes optimizer.populationSize = \ int(element.getAttribute("populationSize")) optimizer.mutationRange = \ int(element.getAttribute("mutationRange")) optimizer.iterations = \ int(element.getAttribute("iterations")) optimizer.indivMutationRate = \ float(element.getAttribute("indivMutationRate")) optimizer.selectionRate = \ float(element.getAttribute("selectionRate")) return optimizer @staticmethod def fromXmlFile(filename, plant, orderList, simulator, evaluator): """ Loads the optimizer configuration and parameters from an XML tree. """ file = open(filename, "r") doc = minidom.parse(file) optimizer = Optimizer.fromXml(doc, plant, orderList, simulator, evaluator) file.close() return optimizer def run(self, initialPopulation = None): """ Entry point of the evolution strategy algorithm. """ pprint("OPT calculating initial population...", BLUE, self.printing) if initialPopulation == None: # if we don't get an initial set of schedules as the initial population, # then we need to generate one. population = self.initialPopulation() else: # if we do get an initial population as input, then we just need to # calculate the fitnesses of the schedules in it. for p in initialPopulation: self.calcIndividualFitness(p) # if the population is too small or too large (less than or larger than # self.populationSize) then this will fix that for us. population = self.mutatePopulation(initialPopulation) # go through the needed number of iterations and mutate the population # everytime, this will keep the best individuals and will return the # best population achieved at the end. for i in range(self.iterations): pprint("OPT iteration number %s" % (i + 1), BLUE, self.printing) population = self.mutatePopulation(population) return population def calcIndividualFitness(self, indiv): """ Calculates fitness of a schedule. 
""" t = time() self.simulator.simulate(indiv) self.evaluator.evaluate(indiv) t = time() - t self.simulatorTime += t def sortPopulation(self, population): """ Sorts the population based on fitness, to have the better individuals at the beginning of the population list. """ population.sort(lambda a, b: cmp(b.fitness, a.fitness)) def mutatePopulation(self, population): """ Mutates a population. Selects the best n individuals (based on the selectionRate) to mutate (maybe they'll give us even better individuals!). After mutating an individual, it checks if we have an individual that is similar to the mutated one, if so, then try to mutate again, otherwise, we simply calculate its fitness and append it to the list. We then sort the population based on fitness and return the best PopulationSize items. """ for i in range(int(math.ceil(self.selectionRate * len(population)))): mutatedIndiv = self.mutateIndividual(population[i]) while self.isIndividualInPopulation(mutatedIndiv, population) == True: mutatedIndiv = self.mutateIndividual(population[i]) self.calcIndividualFitness(mutatedIndiv) population.append(mutatedIndiv) self.sortPopulation(population) return population[:self.populationSize] def isIndividualInPopulation(self, individual, population): """ Checks if an individual is in a population. """ for i in population: if i == individual: return True return False def initialPopulation(self): """ Generates an initial population. """ population = [] # generate an initial individual, calculate its fitness and add it to our # new population initIndiv = self.initialIndividual() self.calcIndividualFitness(initIndiv) population.append(initIndiv) # until we have filled the population for i in range(self.populationSize): # keep mutating the initial individual to get new ones mutatedIndiv = self.mutateIndividual(initIndiv) # if that new individual is in the population, don't add it, try # getting a new one while self.isIndividualInPopulation(mutatedIndiv, population) == True: mutatedIndiv = self.mutateIndividual(initIndiv) self.calcIndividualFitness(mutatedIndiv) population.append(mutatedIndiv) self.sortPopulation(population) return population def mutateIndividual(self, originalIndiv): """ Gets an individual and returns a mutation of it. """ # we need to deepcopy the schedule object newIndiv = copy.deepcopy(originalIndiv) # emtpy its schedule (we don't need it since it will be generated from the # new start times using the simulator newIndiv.schedule = [] # same for the finish times newIndiv.finishTimes = [] indivLen = len(newIndiv.startTimes) # the plant-entrance times in the schedule should be equal to the number # of orders! otherwise, something is wrong! assert indivLen == len(self.orderList.orders) indexes = range(indivLen) # for n times (based on the individual mutation rate), mutate a random # order plant-entrance time that we didn't mutate before. for i in range(int(self.indivMutationRate * indivLen)): index = int(random.uniform(0, len(indexes))) newIndiv.startTimes[indexes[index]][2] = \ self.mutateGene(newIndiv.startTimes[indexes[index]][2]) del indexes[index] return newIndiv def mutateGene(self, value): """ Gets a value and returns a mutation of it based on the mutation range. """ addent = int(random.uniform(0, self.mutationRange)) if (random.uniform(0, 1) < 0.5): addent = -addent return max(0, value + addent) def initialIndividual(self): """ Generates an initial individual based on order deadlines - minimum processing time. Account whether an order has a current machine and current overtime. 
""" indiv = Schedule() for o in self.orderList.orders: if o.currentMachine == "": minProcTime = o.recipe.calcMinProcTime(self.plant) machineName = o.recipe.recipe[0][0] else: machineName = o.currentMachine minProcTime = o.recipe.calcMinProcTime(self.plant, o.currentMachine) indiv.startTimes.append( [o, str(machineName), max(0, o.deadline - minProcTime)]) return indiv
normal
{ "blob_id": "8ce2e9cd9ceed6c79a85682b8bc03a3ffb5131c4", "index": 3817, "step-1": "<mask token>\n\n\nclass Optimizer(object):\n <mask token>\n <mask token>\n\n @staticmethod\n def fromXml(xmlDoc, plant, orderList, simulator, evaluator):\n \"\"\"\n\t\tLoads the optimizer configuration and parameters from an XML tree.\n\t\t\"\"\"\n optimizer = Optimizer(plant, orderList, simulator, evaluator)\n element = xmlDoc.getElementsByTagName('optimizer')\n assert len(element) == 1\n element = element[0]\n optimizer.populationSize = int(element.getAttribute('populationSize'))\n optimizer.mutationRange = int(element.getAttribute('mutationRange'))\n optimizer.iterations = int(element.getAttribute('iterations'))\n optimizer.indivMutationRate = float(element.getAttribute(\n 'indivMutationRate'))\n optimizer.selectionRate = float(element.getAttribute('selectionRate'))\n return optimizer\n\n @staticmethod\n def fromXmlFile(filename, plant, orderList, simulator, evaluator):\n \"\"\"\n\t\tLoads the optimizer configuration and parameters from an XML tree.\n\t\t\"\"\"\n file = open(filename, 'r')\n doc = minidom.parse(file)\n optimizer = Optimizer.fromXml(doc, plant, orderList, simulator,\n evaluator)\n file.close()\n return optimizer\n\n def run(self, initialPopulation=None):\n \"\"\"\n\t\tEntry point of the evolution strategy algorithm.\n\t\t\"\"\"\n pprint('OPT calculating initial population...', BLUE, self.printing)\n if initialPopulation == None:\n population = self.initialPopulation()\n else:\n for p in initialPopulation:\n self.calcIndividualFitness(p)\n population = self.mutatePopulation(initialPopulation)\n for i in range(self.iterations):\n pprint('OPT iteration number %s' % (i + 1), BLUE, self.printing)\n population = self.mutatePopulation(population)\n return population\n\n def calcIndividualFitness(self, indiv):\n \"\"\"\n\t\tCalculates fitness of a schedule.\n\t\t\"\"\"\n t = time()\n self.simulator.simulate(indiv)\n self.evaluator.evaluate(indiv)\n t = time() - t\n self.simulatorTime += t\n\n def sortPopulation(self, population):\n \"\"\"\n\t\tSorts the population based on fitness, to have the better individuals\n\t\tat the beginning of the population list.\n\t\t\"\"\"\n population.sort(lambda a, b: cmp(b.fitness, a.fitness))\n\n def mutatePopulation(self, population):\n \"\"\"\n\t\tMutates a population. Selects the best n individuals (based on the \n\t\tselectionRate) to mutate (maybe they'll give us even better individuals!).\n\t\tAfter mutating an individual, it checks if we have an individual that is \n\t\tsimilar to the mutated one, if so, then try to mutate again, otherwise,\n\t\twe simply calculate its fitness and append it to the list. 
We then sort\n\t\tthe population based on fitness and return the best PopulationSize items.\n\t\t\"\"\"\n for i in range(int(math.ceil(self.selectionRate * len(population)))):\n mutatedIndiv = self.mutateIndividual(population[i])\n while self.isIndividualInPopulation(mutatedIndiv, population\n ) == True:\n mutatedIndiv = self.mutateIndividual(population[i])\n self.calcIndividualFitness(mutatedIndiv)\n population.append(mutatedIndiv)\n self.sortPopulation(population)\n return population[:self.populationSize]\n\n def isIndividualInPopulation(self, individual, population):\n \"\"\"\n\t\tChecks if an individual is in a population.\n\t\t\"\"\"\n for i in population:\n if i == individual:\n return True\n return False\n <mask token>\n\n def mutateIndividual(self, originalIndiv):\n \"\"\"\n\t\tGets an individual and returns a mutation of it.\n\t\t\"\"\"\n newIndiv = copy.deepcopy(originalIndiv)\n newIndiv.schedule = []\n newIndiv.finishTimes = []\n indivLen = len(newIndiv.startTimes)\n assert indivLen == len(self.orderList.orders)\n indexes = range(indivLen)\n for i in range(int(self.indivMutationRate * indivLen)):\n index = int(random.uniform(0, len(indexes)))\n newIndiv.startTimes[indexes[index]][2] = self.mutateGene(newIndiv\n .startTimes[indexes[index]][2])\n del indexes[index]\n return newIndiv\n\n def mutateGene(self, value):\n \"\"\"\n\t\tGets a value and returns a mutation of it based on the mutation range.\n\t\t\"\"\"\n addent = int(random.uniform(0, self.mutationRange))\n if random.uniform(0, 1) < 0.5:\n addent = -addent\n return max(0, value + addent)\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Optimizer(object):\n <mask token>\n <mask token>\n\n @staticmethod\n def fromXml(xmlDoc, plant, orderList, simulator, evaluator):\n \"\"\"\n\t\tLoads the optimizer configuration and parameters from an XML tree.\n\t\t\"\"\"\n optimizer = Optimizer(plant, orderList, simulator, evaluator)\n element = xmlDoc.getElementsByTagName('optimizer')\n assert len(element) == 1\n element = element[0]\n optimizer.populationSize = int(element.getAttribute('populationSize'))\n optimizer.mutationRange = int(element.getAttribute('mutationRange'))\n optimizer.iterations = int(element.getAttribute('iterations'))\n optimizer.indivMutationRate = float(element.getAttribute(\n 'indivMutationRate'))\n optimizer.selectionRate = float(element.getAttribute('selectionRate'))\n return optimizer\n\n @staticmethod\n def fromXmlFile(filename, plant, orderList, simulator, evaluator):\n \"\"\"\n\t\tLoads the optimizer configuration and parameters from an XML tree.\n\t\t\"\"\"\n file = open(filename, 'r')\n doc = minidom.parse(file)\n optimizer = Optimizer.fromXml(doc, plant, orderList, simulator,\n evaluator)\n file.close()\n return optimizer\n\n def run(self, initialPopulation=None):\n \"\"\"\n\t\tEntry point of the evolution strategy algorithm.\n\t\t\"\"\"\n pprint('OPT calculating initial population...', BLUE, self.printing)\n if initialPopulation == None:\n population = self.initialPopulation()\n else:\n for p in initialPopulation:\n self.calcIndividualFitness(p)\n population = self.mutatePopulation(initialPopulation)\n for i in range(self.iterations):\n pprint('OPT iteration number %s' % (i + 1), BLUE, self.printing)\n population = self.mutatePopulation(population)\n return population\n\n def calcIndividualFitness(self, indiv):\n \"\"\"\n\t\tCalculates fitness of a schedule.\n\t\t\"\"\"\n t = time()\n self.simulator.simulate(indiv)\n self.evaluator.evaluate(indiv)\n t = time() - t\n self.simulatorTime += t\n\n def 
sortPopulation(self, population):\n \"\"\"\n\t\tSorts the population based on fitness, to have the better individuals\n\t\tat the beginning of the population list.\n\t\t\"\"\"\n population.sort(lambda a, b: cmp(b.fitness, a.fitness))\n\n def mutatePopulation(self, population):\n \"\"\"\n\t\tMutates a population. Selects the best n individuals (based on the \n\t\tselectionRate) to mutate (maybe they'll give us even better individuals!).\n\t\tAfter mutating an individual, it checks if we have an individual that is \n\t\tsimilar to the mutated one, if so, then try to mutate again, otherwise,\n\t\twe simply calculate its fitness and append it to the list. We then sort\n\t\tthe population based on fitness and return the best PopulationSize items.\n\t\t\"\"\"\n for i in range(int(math.ceil(self.selectionRate * len(population)))):\n mutatedIndiv = self.mutateIndividual(population[i])\n while self.isIndividualInPopulation(mutatedIndiv, population\n ) == True:\n mutatedIndiv = self.mutateIndividual(population[i])\n self.calcIndividualFitness(mutatedIndiv)\n population.append(mutatedIndiv)\n self.sortPopulation(population)\n return population[:self.populationSize]\n\n def isIndividualInPopulation(self, individual, population):\n \"\"\"\n\t\tChecks if an individual is in a population.\n\t\t\"\"\"\n for i in population:\n if i == individual:\n return True\n return False\n\n def initialPopulation(self):\n \"\"\"\n\t\tGenerates an initial population.\n\t\t\"\"\"\n population = []\n initIndiv = self.initialIndividual()\n self.calcIndividualFitness(initIndiv)\n population.append(initIndiv)\n for i in range(self.populationSize):\n mutatedIndiv = self.mutateIndividual(initIndiv)\n while self.isIndividualInPopulation(mutatedIndiv, population\n ) == True:\n mutatedIndiv = self.mutateIndividual(initIndiv)\n self.calcIndividualFitness(mutatedIndiv)\n population.append(mutatedIndiv)\n self.sortPopulation(population)\n return population\n\n def mutateIndividual(self, originalIndiv):\n \"\"\"\n\t\tGets an individual and returns a mutation of it.\n\t\t\"\"\"\n newIndiv = copy.deepcopy(originalIndiv)\n newIndiv.schedule = []\n newIndiv.finishTimes = []\n indivLen = len(newIndiv.startTimes)\n assert indivLen == len(self.orderList.orders)\n indexes = range(indivLen)\n for i in range(int(self.indivMutationRate * indivLen)):\n index = int(random.uniform(0, len(indexes)))\n newIndiv.startTimes[indexes[index]][2] = self.mutateGene(newIndiv\n .startTimes[indexes[index]][2])\n del indexes[index]\n return newIndiv\n\n def mutateGene(self, value):\n \"\"\"\n\t\tGets a value and returns a mutation of it based on the mutation range.\n\t\t\"\"\"\n addent = int(random.uniform(0, self.mutationRange))\n if random.uniform(0, 1) < 0.5:\n addent = -addent\n return max(0, value + addent)\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Optimizer(object):\n <mask token>\n\n def __init__(self, plant, orderList, simulator, evaluator):\n \"\"\"\n\t\tplant - the plant to run the simulation and evaluation on\n\t\torderList - the list of orders in the given schedule\n\t\tsimulator - Simulator instance to run a schedule\n\t\tevaluator - Evaluator instance to evaluate a schedule\n\t\t\"\"\"\n assert plant != None\n assert orderList != None\n self.plant = plant\n self.orderList = orderList\n self.simulator = simulator\n self.evaluator = evaluator\n self.simulatorTime = 0\n self.printing = True\n self.populationSize = 0\n self.indivMutationRate = 0\n self.selectionRate = 0\n self.mutationRange = 0\n self.iterations = 0\n\n @staticmethod\n 
def fromXml(xmlDoc, plant, orderList, simulator, evaluator):\n \"\"\"\n\t\tLoads the optimizer configuration and parameters from an XML tree.\n\t\t\"\"\"\n optimizer = Optimizer(plant, orderList, simulator, evaluator)\n element = xmlDoc.getElementsByTagName('optimizer')\n assert len(element) == 1\n element = element[0]\n optimizer.populationSize = int(element.getAttribute('populationSize'))\n optimizer.mutationRange = int(element.getAttribute('mutationRange'))\n optimizer.iterations = int(element.getAttribute('iterations'))\n optimizer.indivMutationRate = float(element.getAttribute(\n 'indivMutationRate'))\n optimizer.selectionRate = float(element.getAttribute('selectionRate'))\n return optimizer\n\n @staticmethod\n def fromXmlFile(filename, plant, orderList, simulator, evaluator):\n \"\"\"\n\t\tLoads the optimizer configuration and parameters from an XML tree.\n\t\t\"\"\"\n file = open(filename, 'r')\n doc = minidom.parse(file)\n optimizer = Optimizer.fromXml(doc, plant, orderList, simulator,\n evaluator)\n file.close()\n return optimizer\n\n def run(self, initialPopulation=None):\n \"\"\"\n\t\tEntry point of the evolution strategy algorithm.\n\t\t\"\"\"\n pprint('OPT calculating initial population...', BLUE, self.printing)\n if initialPopulation == None:\n population = self.initialPopulation()\n else:\n for p in initialPopulation:\n self.calcIndividualFitness(p)\n population = self.mutatePopulation(initialPopulation)\n for i in range(self.iterations):\n pprint('OPT iteration number %s' % (i + 1), BLUE, self.printing)\n population = self.mutatePopulation(population)\n return population\n\n def calcIndividualFitness(self, indiv):\n \"\"\"\n\t\tCalculates fitness of a schedule.\n\t\t\"\"\"\n t = time()\n self.simulator.simulate(indiv)\n self.evaluator.evaluate(indiv)\n t = time() - t\n self.simulatorTime += t\n\n def sortPopulation(self, population):\n \"\"\"\n\t\tSorts the population based on fitness, to have the better individuals\n\t\tat the beginning of the population list.\n\t\t\"\"\"\n population.sort(lambda a, b: cmp(b.fitness, a.fitness))\n\n def mutatePopulation(self, population):\n \"\"\"\n\t\tMutates a population. Selects the best n individuals (based on the \n\t\tselectionRate) to mutate (maybe they'll give us even better individuals!).\n\t\tAfter mutating an individual, it checks if we have an individual that is \n\t\tsimilar to the mutated one, if so, then try to mutate again, otherwise,\n\t\twe simply calculate its fitness and append it to the list. 
We then sort\n\t\tthe population based on fitness and return the best PopulationSize items.\n\t\t\"\"\"\n for i in range(int(math.ceil(self.selectionRate * len(population)))):\n mutatedIndiv = self.mutateIndividual(population[i])\n while self.isIndividualInPopulation(mutatedIndiv, population\n ) == True:\n mutatedIndiv = self.mutateIndividual(population[i])\n self.calcIndividualFitness(mutatedIndiv)\n population.append(mutatedIndiv)\n self.sortPopulation(population)\n return population[:self.populationSize]\n\n def isIndividualInPopulation(self, individual, population):\n \"\"\"\n\t\tChecks if an individual is in a population.\n\t\t\"\"\"\n for i in population:\n if i == individual:\n return True\n return False\n\n def initialPopulation(self):\n \"\"\"\n\t\tGenerates an initial population.\n\t\t\"\"\"\n population = []\n initIndiv = self.initialIndividual()\n self.calcIndividualFitness(initIndiv)\n population.append(initIndiv)\n for i in range(self.populationSize):\n mutatedIndiv = self.mutateIndividual(initIndiv)\n while self.isIndividualInPopulation(mutatedIndiv, population\n ) == True:\n mutatedIndiv = self.mutateIndividual(initIndiv)\n self.calcIndividualFitness(mutatedIndiv)\n population.append(mutatedIndiv)\n self.sortPopulation(population)\n return population\n\n def mutateIndividual(self, originalIndiv):\n \"\"\"\n\t\tGets an individual and returns a mutation of it.\n\t\t\"\"\"\n newIndiv = copy.deepcopy(originalIndiv)\n newIndiv.schedule = []\n newIndiv.finishTimes = []\n indivLen = len(newIndiv.startTimes)\n assert indivLen == len(self.orderList.orders)\n indexes = range(indivLen)\n for i in range(int(self.indivMutationRate * indivLen)):\n index = int(random.uniform(0, len(indexes)))\n newIndiv.startTimes[indexes[index]][2] = self.mutateGene(newIndiv\n .startTimes[indexes[index]][2])\n del indexes[index]\n return newIndiv\n\n def mutateGene(self, value):\n \"\"\"\n\t\tGets a value and returns a mutation of it based on the mutation range.\n\t\t\"\"\"\n addent = int(random.uniform(0, self.mutationRange))\n if random.uniform(0, 1) < 0.5:\n addent = -addent\n return max(0, value + addent)\n <mask token>\n", "step-4": "<mask token>\n\n\nclass Optimizer(object):\n <mask token>\n\n def __init__(self, plant, orderList, simulator, evaluator):\n \"\"\"\n\t\tplant - the plant to run the simulation and evaluation on\n\t\torderList - the list of orders in the given schedule\n\t\tsimulator - Simulator instance to run a schedule\n\t\tevaluator - Evaluator instance to evaluate a schedule\n\t\t\"\"\"\n assert plant != None\n assert orderList != None\n self.plant = plant\n self.orderList = orderList\n self.simulator = simulator\n self.evaluator = evaluator\n self.simulatorTime = 0\n self.printing = True\n self.populationSize = 0\n self.indivMutationRate = 0\n self.selectionRate = 0\n self.mutationRange = 0\n self.iterations = 0\n\n @staticmethod\n def fromXml(xmlDoc, plant, orderList, simulator, evaluator):\n \"\"\"\n\t\tLoads the optimizer configuration and parameters from an XML tree.\n\t\t\"\"\"\n optimizer = Optimizer(plant, orderList, simulator, evaluator)\n element = xmlDoc.getElementsByTagName('optimizer')\n assert len(element) == 1\n element = element[0]\n optimizer.populationSize = int(element.getAttribute('populationSize'))\n optimizer.mutationRange = int(element.getAttribute('mutationRange'))\n optimizer.iterations = int(element.getAttribute('iterations'))\n optimizer.indivMutationRate = float(element.getAttribute(\n 'indivMutationRate'))\n optimizer.selectionRate = 
float(element.getAttribute('selectionRate'))\n return optimizer\n\n @staticmethod\n def fromXmlFile(filename, plant, orderList, simulator, evaluator):\n \"\"\"\n\t\tLoads the optimizer configuration and parameters from an XML tree.\n\t\t\"\"\"\n file = open(filename, 'r')\n doc = minidom.parse(file)\n optimizer = Optimizer.fromXml(doc, plant, orderList, simulator,\n evaluator)\n file.close()\n return optimizer\n\n def run(self, initialPopulation=None):\n \"\"\"\n\t\tEntry point of the evolution strategy algorithm.\n\t\t\"\"\"\n pprint('OPT calculating initial population...', BLUE, self.printing)\n if initialPopulation == None:\n population = self.initialPopulation()\n else:\n for p in initialPopulation:\n self.calcIndividualFitness(p)\n population = self.mutatePopulation(initialPopulation)\n for i in range(self.iterations):\n pprint('OPT iteration number %s' % (i + 1), BLUE, self.printing)\n population = self.mutatePopulation(population)\n return population\n\n def calcIndividualFitness(self, indiv):\n \"\"\"\n\t\tCalculates fitness of a schedule.\n\t\t\"\"\"\n t = time()\n self.simulator.simulate(indiv)\n self.evaluator.evaluate(indiv)\n t = time() - t\n self.simulatorTime += t\n\n def sortPopulation(self, population):\n \"\"\"\n\t\tSorts the population based on fitness, to have the better individuals\n\t\tat the beginning of the population list.\n\t\t\"\"\"\n population.sort(lambda a, b: cmp(b.fitness, a.fitness))\n\n def mutatePopulation(self, population):\n \"\"\"\n\t\tMutates a population. Selects the best n individuals (based on the \n\t\tselectionRate) to mutate (maybe they'll give us even better individuals!).\n\t\tAfter mutating an individual, it checks if we have an individual that is \n\t\tsimilar to the mutated one, if so, then try to mutate again, otherwise,\n\t\twe simply calculate its fitness and append it to the list. 
We then sort\n\t\tthe population based on fitness and return the best PopulationSize items.\n\t\t\"\"\"\n for i in range(int(math.ceil(self.selectionRate * len(population)))):\n mutatedIndiv = self.mutateIndividual(population[i])\n while self.isIndividualInPopulation(mutatedIndiv, population\n ) == True:\n mutatedIndiv = self.mutateIndividual(population[i])\n self.calcIndividualFitness(mutatedIndiv)\n population.append(mutatedIndiv)\n self.sortPopulation(population)\n return population[:self.populationSize]\n\n def isIndividualInPopulation(self, individual, population):\n \"\"\"\n\t\tChecks if an individual is in a population.\n\t\t\"\"\"\n for i in population:\n if i == individual:\n return True\n return False\n\n def initialPopulation(self):\n \"\"\"\n\t\tGenerates an initial population.\n\t\t\"\"\"\n population = []\n initIndiv = self.initialIndividual()\n self.calcIndividualFitness(initIndiv)\n population.append(initIndiv)\n for i in range(self.populationSize):\n mutatedIndiv = self.mutateIndividual(initIndiv)\n while self.isIndividualInPopulation(mutatedIndiv, population\n ) == True:\n mutatedIndiv = self.mutateIndividual(initIndiv)\n self.calcIndividualFitness(mutatedIndiv)\n population.append(mutatedIndiv)\n self.sortPopulation(population)\n return population\n\n def mutateIndividual(self, originalIndiv):\n \"\"\"\n\t\tGets an individual and returns a mutation of it.\n\t\t\"\"\"\n newIndiv = copy.deepcopy(originalIndiv)\n newIndiv.schedule = []\n newIndiv.finishTimes = []\n indivLen = len(newIndiv.startTimes)\n assert indivLen == len(self.orderList.orders)\n indexes = range(indivLen)\n for i in range(int(self.indivMutationRate * indivLen)):\n index = int(random.uniform(0, len(indexes)))\n newIndiv.startTimes[indexes[index]][2] = self.mutateGene(newIndiv\n .startTimes[indexes[index]][2])\n del indexes[index]\n return newIndiv\n\n def mutateGene(self, value):\n \"\"\"\n\t\tGets a value and returns a mutation of it based on the mutation range.\n\t\t\"\"\"\n addent = int(random.uniform(0, self.mutationRange))\n if random.uniform(0, 1) < 0.5:\n addent = -addent\n return max(0, value + addent)\n\n def initialIndividual(self):\n \"\"\"\n\t\tGenerates an initial individual based on order deadlines - minimum\n\t\tprocessing time. 
Account whether an order has a current machine and\n\t\tcurrent overtime.\n\t\t\"\"\"\n indiv = Schedule()\n for o in self.orderList.orders:\n if o.currentMachine == '':\n minProcTime = o.recipe.calcMinProcTime(self.plant)\n machineName = o.recipe.recipe[0][0]\n else:\n machineName = o.currentMachine\n minProcTime = o.recipe.calcMinProcTime(self.plant, o.\n currentMachine)\n indiv.startTimes.append([o, str(machineName), max(0, o.deadline -\n minProcTime)])\n return indiv\n", "step-5": "\"\"\"\nThis module provides an optimizer class that is based on an evolution\nstrategy algorithm.\n\"\"\"\nimport copy, random, math\nfrom time import time\nfrom xml.dom import minidom\nfrom extra.schedule import Schedule\nfrom extra.printer import pprint, BLUE\n\nclass Optimizer(object):\n\t\"\"\"\n\tThis class is the implementation of the evolution strategy to optimize\n\tand evaluate schedules.\n\t\"\"\"\n\tdef __init__(self, plant, orderList, simulator, evaluator):\n\t\t\"\"\"\n\t\tplant - the plant to run the simulation and evaluation on\n\t\torderList - the list of orders in the given schedule\n\t\tsimulator - Simulator instance to run a schedule\n\t\tevaluator - Evaluator instance to evaluate a schedule\n\t\t\"\"\"\n\t\tassert plant != None\n\t\tassert orderList != None\n\t\t\n\t\tself.plant = plant\n\t\tself.orderList = orderList\n\t\tself.simulator = simulator\n\t\tself.evaluator = evaluator\n\t\t\n\t\t# used for benchmarking\n\t\tself.simulatorTime = 0\n\t\t\n\t\t# enable/disable console output\n\t\tself.printing = True\n\t\t\n\t\t# parameters for the evolution strategy algorithm\n\t\tself.populationSize = 0\n\t\tself.indivMutationRate = 0\n\t\tself.selectionRate = 0\n\t\tself.mutationRange = 0\n\t\tself.iterations = 0\n\t\n\t@staticmethod\n\tdef fromXml(xmlDoc, plant, orderList, simulator, evaluator):\n\t\t\"\"\"\n\t\tLoads the optimizer configuration and parameters from an XML tree.\n\t\t\"\"\"\n\t\toptimizer = Optimizer(plant, orderList, simulator, evaluator)\n\t\telement = xmlDoc.getElementsByTagName(\"optimizer\")\n\t\t\n\t\t# there should only be 1 optimizer node in the XML tree!\n\t\tassert len(element) == 1\n\t\telement = element[0]\n\t\t\n\t\t# load the different attributes\n\t\toptimizer.populationSize = \\\n\t\t\tint(element.getAttribute(\"populationSize\"))\n\t\toptimizer.mutationRange = \\\n\t\t\tint(element.getAttribute(\"mutationRange\"))\n\t\toptimizer.iterations = \\\n\t\t\tint(element.getAttribute(\"iterations\"))\n\t\toptimizer.indivMutationRate = \\\n\t\t\tfloat(element.getAttribute(\"indivMutationRate\"))\n\t\toptimizer.selectionRate = \\\n\t\t\tfloat(element.getAttribute(\"selectionRate\"))\n\t\t\n\t\treturn optimizer\n\n\t@staticmethod\n\tdef fromXmlFile(filename, plant, orderList, simulator, evaluator):\n\t\t\"\"\"\n\t\tLoads the optimizer configuration and parameters from an XML tree.\n\t\t\"\"\"\n\t\tfile = open(filename, \"r\")\n\t\tdoc = minidom.parse(file)\n\t\toptimizer = Optimizer.fromXml(doc, plant, orderList, simulator, evaluator)\n\t\tfile.close()\n\t\treturn optimizer\n\t\t\n\tdef run(self, initialPopulation = None):\n\t\t\"\"\"\n\t\tEntry point of the evolution strategy algorithm.\n\t\t\"\"\"\n\t\tpprint(\"OPT calculating initial population...\", BLUE, self.printing)\n\t\t\n\t\tif initialPopulation == None:\n\t\t\t# if we don't get an initial set of schedules as the initial population,\n\t\t\t# then we need to generate one.\n\t\t\tpopulation = self.initialPopulation()\n\t\telse:\n\t\t\t# if we do get an initial population as input, then we just need to \n\t\t\t# 
calculate the fitnesses of the schedules in it.\n\t\t\tfor p in initialPopulation:\n\t\t\t\tself.calcIndividualFitness(p)\n\t\t\t# if the population is too small or too large (less than or larger than\n\t\t\t# self.populationSize) then this will fix that for us.\n\t\t\tpopulation = self.mutatePopulation(initialPopulation)\n\t\t\n\t\t# go through the needed number of iterations and mutate the population\n\t\t# everytime, this will keep the best individuals and will return the \n\t\t# best population achieved at the end.\n\t\tfor i in range(self.iterations):\n\t\t\tpprint(\"OPT iteration number %s\" % (i + 1), BLUE, self.printing)\n\t\t\tpopulation = self.mutatePopulation(population)\n\t\treturn population\n\t\t\t\n\tdef calcIndividualFitness(self, indiv):\n\t\t\"\"\"\n\t\tCalculates fitness of a schedule.\n\t\t\"\"\"\n\t\tt = time()\n\t\tself.simulator.simulate(indiv)\n\t\tself.evaluator.evaluate(indiv)\n\t\tt = time() - t\n\t\tself.simulatorTime += t\n\t\n\tdef sortPopulation(self, population):\n\t\t\"\"\"\n\t\tSorts the population based on fitness, to have the better individuals\n\t\tat the beginning of the population list.\n\t\t\"\"\"\n\t\tpopulation.sort(lambda a, b: cmp(b.fitness, a.fitness))\n\t\n\tdef mutatePopulation(self, population):\n\t\t\"\"\"\n\t\tMutates a population. Selects the best n individuals (based on the \n\t\tselectionRate) to mutate (maybe they'll give us even better individuals!).\n\t\tAfter mutating an individual, it checks if we have an individual that is \n\t\tsimilar to the mutated one, if so, then try to mutate again, otherwise,\n\t\twe simply calculate its fitness and append it to the list. We then sort\n\t\tthe population based on fitness and return the best PopulationSize items.\n\t\t\"\"\"\n\t\tfor i in range(int(math.ceil(self.selectionRate * len(population)))):\n\t\t\tmutatedIndiv = self.mutateIndividual(population[i])\n\t\t\twhile self.isIndividualInPopulation(mutatedIndiv, population) == True:\n\t\t\t\tmutatedIndiv = self.mutateIndividual(population[i])\n\t\t\tself.calcIndividualFitness(mutatedIndiv)\n\t\t\tpopulation.append(mutatedIndiv)\n\t\tself.sortPopulation(population)\n\t\treturn population[:self.populationSize]\n\t\n\tdef isIndividualInPopulation(self, individual, population):\n\t\t\"\"\"\n\t\tChecks if an individual is in a population.\n\t\t\"\"\"\n\t\tfor i in population:\n\t\t\tif i == individual:\n\t\t\t\treturn True\n\t\treturn False\n\t\n\tdef initialPopulation(self):\n\t\t\"\"\"\n\t\tGenerates an initial population.\n\t\t\"\"\"\n\t\tpopulation = []\n\t\t# generate an initial individual, calculate its fitness and add it to our\n\t\t# new population\n\t\tinitIndiv = self.initialIndividual()\n\t\tself.calcIndividualFitness(initIndiv)\n\t\tpopulation.append(initIndiv)\n\t\t\n\t\t# until we have filled the population\n\t\tfor i in range(self.populationSize):\n\t\t\t# keep mutating the initial individual to get new ones\n\t\t\tmutatedIndiv = self.mutateIndividual(initIndiv)\n\t\t\t# if that new individual is in the population, don't add it, try\n\t\t\t# getting a new one\n\t\t\twhile self.isIndividualInPopulation(mutatedIndiv, population) == True:\n\t\t\t\tmutatedIndiv = self.mutateIndividual(initIndiv)\n\t\t\tself.calcIndividualFitness(mutatedIndiv)\n\t\t\tpopulation.append(mutatedIndiv)\n\t\tself.sortPopulation(population)\n\t\treturn population\n\t\t\n\tdef mutateIndividual(self, originalIndiv):\n\t\t\"\"\"\n\t\tGets an individual and returns a mutation of it.\n\t\t\"\"\"\n\t\t# we need to deepcopy the schedule object\n\t\tnewIndiv = 
copy.deepcopy(originalIndiv)\n\t\t# emtpy its schedule (we don't need it since it will be generated from the \n\t\t# new start times using the simulator\n\t\tnewIndiv.schedule = []\n\t\t# same for the finish times\n\t\tnewIndiv.finishTimes = []\n\t\tindivLen = len(newIndiv.startTimes)\n\t\t\n\t\t# the plant-entrance times in the schedule should be equal to the number\n\t\t# of orders! otherwise, something is wrong!\n\t\tassert indivLen == len(self.orderList.orders)\n\t\t\n\t\tindexes = range(indivLen)\n\t\t# for n times (based on the individual mutation rate), mutate a random\n\t\t# order plant-entrance time that we didn't mutate before.\n\t\tfor i in range(int(self.indivMutationRate * indivLen)):\n\t\t\tindex = int(random.uniform(0, len(indexes)))\n\t\t\tnewIndiv.startTimes[indexes[index]][2] = \\\n\t\t\t\tself.mutateGene(newIndiv.startTimes[indexes[index]][2])\n\t\t\tdel indexes[index]\n\t\treturn newIndiv\n\t\t\n\tdef mutateGene(self, value):\n\t\t\"\"\"\n\t\tGets a value and returns a mutation of it based on the mutation range.\n\t\t\"\"\"\n\t\taddent = int(random.uniform(0, self.mutationRange))\n\t\tif (random.uniform(0, 1) < 0.5):\n\t\t\taddent = -addent\n\t\treturn max(0, value + addent)\n\t\n\tdef initialIndividual(self):\n\t\t\"\"\"\n\t\tGenerates an initial individual based on order deadlines - minimum\n\t\tprocessing time. Account whether an order has a current machine and\n\t\tcurrent overtime.\n\t\t\"\"\"\n\t\tindiv = Schedule()\n\t\t\n\t\tfor o in self.orderList.orders:\n\t\t\tif o.currentMachine == \"\":\n\t\t\t\tminProcTime = o.recipe.calcMinProcTime(self.plant)\n\t\t\t\tmachineName = o.recipe.recipe[0][0]\n\t\t\telse:\n\t\t\t\tmachineName = o.currentMachine\n\t\t\t\tminProcTime = o.recipe.calcMinProcTime(self.plant, o.currentMachine)\n\t\t\tindiv.startTimes.append(\n\t\t\t\t[o, str(machineName), max(0, o.deadline - minProcTime)])\n\t\treturn indiv\n\t", "step-ids": [ 10, 11, 12, 13, 16 ] }
[ 10, 11, 12, 13, 16 ]
import math class Solution: # @param {integer} n # @param {integer} k # @return {string} def getPermutation(self, n, k): res = '' k -= 1 nums = [str(i) for i in range(1, n+1)] while n > 0: tmp = math.factorial(n-1) res += nums[k/tmp] del nums[k/tmp] k %= tmp n -= 1 return res # class Solution: # def f(self,n,k): # if n==1 : # return [0] # else: # count=1 # for i in range(1,n): # count*=i # begin=(k-1)/count # plus=k%count # return [begin]+self.f(n-1,plus) # # # @return a string # def getPermutation(self, n, k): # res=self.f(n,k) # print res # lists=range(1,n+1) # strs='' # for i in range(n): # strs+=str(lists[res[i]]) # lists.pop(res[i]) # return strs if __name__=="__main__": a=Solution() print a.getPermutation(3, 1),"123" print a.getPermutation(2,2) print a.getPermutation(3,2) #https://leetcode.com/discuss/16064/an-iterative-solution-for-reference #TLE # class Solution: # def f(self,lists): # if lists==None: # return None # tmpres=[] # # for idx,item in enumerate(lists): # tmp=[i for i in lists] # tmp.pop(idx) # res=self.f(tmp) # if len(res)>0: # for i in res: # tmpres.append(str(item)+i) # else: # tmpres.append(str(item)) # return tmpres # # # @return a string # def getPermutation(self, n, k): # if n==1: # return '1' # count=1 # begin=0 # plus=0 # for i in range(1,n): # count*=i # begin+=k/count # plus=k%count # # tmp=[i for i in range(1,n+1)] # if begin>0: # tmp.pop(begin-1) # # tmp=self.f(tmp) # if begin>0: # return str(begin)+tmp[plus-1] # else: # return tmp[plus-1] # TLE # # class Solution: # # def f(self,lists): # # if lists==None: # # return None # # tmpres=[] # # # # for idx,item in enumerate(lists): # # tmp=[i for i in lists] # # tmp.pop(idx) # # res=self.f(tmp) # # if len(res)>0: # # for i in res: # # tmpres.append(str(item)+i) # # else: # # tmpres.append(str(item)) # # return tmpres # # # # # @return a string # # def getPermutation(self, n, k): # # tmp=self.f(range(1,n+1)) # # return tmp[k-1] # #
normal
{ "blob_id": "d267bf82aee2eca29628fcd1d874a337adc1ae09", "index": 8859, "step-1": "import math\n\nclass Solution:\n # @param {integer} n\n # @param {integer} k\n # @return {string}\n def getPermutation(self, n, k):\n res = ''\n k -= 1\n nums = [str(i) for i in range(1, n+1)]\n while n > 0:\n tmp = math.factorial(n-1)\n res += nums[k/tmp]\n del nums[k/tmp]\n k %= tmp\n n -= 1\n return res\n\n\n\n# class Solution:\n# def f(self,n,k):\n# if n==1 :\n# return [0]\n# else:\n# count=1\n# for i in range(1,n):\n# count*=i\n# begin=(k-1)/count\n# plus=k%count\n# return [begin]+self.f(n-1,plus)\n#\n# # @return a string\n# def getPermutation(self, n, k):\n# res=self.f(n,k)\n# print res\n# lists=range(1,n+1)\n# strs=''\n# for i in range(n):\n# strs+=str(lists[res[i]])\n# lists.pop(res[i])\n# return strs\n\nif __name__==\"__main__\":\n a=Solution()\n print a.getPermutation(3, 1),\"123\"\n print a.getPermutation(2,2)\n print a.getPermutation(3,2)\n#https://leetcode.com/discuss/16064/an-iterative-solution-for-reference\n\n#TLE\n# class Solution:\n# def f(self,lists):\n# if lists==None:\n# return None\n# tmpres=[]\n# \n# for idx,item in enumerate(lists):\n# tmp=[i for i in lists]\n# tmp.pop(idx)\n# res=self.f(tmp)\n# if len(res)>0:\n# for i in res:\n# tmpres.append(str(item)+i)\n# else:\n# tmpres.append(str(item))\n# return tmpres\n# \n# # @return a string\n# def getPermutation(self, n, k):\n# if n==1:\n# return '1'\n# count=1\n# begin=0\n# plus=0\n# for i in range(1,n):\n# count*=i\n# begin+=k/count\n# plus=k%count\n# \n# tmp=[i for i in range(1,n+1)]\n# if begin>0:\n# tmp.pop(begin-1)\n# \n# tmp=self.f(tmp)\n# if begin>0:\n# return str(begin)+tmp[plus-1]\n# else:\n# return tmp[plus-1]\n# TLE\n# # class Solution:\n# # def f(self,lists):\n# # if lists==None:\n# # return None\n# # tmpres=[]\n# # \n# # for idx,item in enumerate(lists):\n# # tmp=[i for i in lists]\n# # tmp.pop(idx)\n# # res=self.f(tmp)\n# # if len(res)>0:\n# # for i in res:\n# # tmpres.append(str(item)+i)\n# # else:\n# # tmpres.append(str(item))\n# # return tmpres\n# # \n# # # @return a string\n# # def getPermutation(self, n, k):\n# # tmp=self.f(range(1,n+1))\n# # return tmp[k-1]\n# # \n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
#!/usr/bin/env python #============================================================================================= # MODULE DOCSTRING #============================================================================================= """ evaluate-gbvi.py Evaluate the GBVI model on hydration free energies of small molecules for multiple iterations of the Markov chain. """ #============================================================================================= # GLOBAL IMPORTS #============================================================================================= import sys,string from openeye.oechem import * from optparse import OptionParser # For parsing of command line arguments import os import math import numpy import simtk.openmm as openmm import simtk.unit as units import openeye.oechem import openeye.oequacpac import openeye.oeiupac from openeye.oechem import * from openeye.oequacpac import * from openeye.oeszybki import * from openeye.oeiupac import * import time import pymc #============================================================================================= # Load OpenMM plugins. #============================================================================================= print "Loading OpenMM plugins..." openmm.Platform.loadPluginsFromDirectory(os.path.join(os.environ['OPENMM_INSTALL_DIR'], 'lib')) openmm.Platform.loadPluginsFromDirectory(os.path.join(os.environ['OPENMM_INSTALL_DIR'], 'lib', 'plugins')) #============================================================================================= # Atom Typer #============================================================================================= class AtomTyper(object): """ Atom typer Based on 'Patty', by Pat Walters. """ class TypingException(Exception): """ Atom typing exception. """ def __init__(self, molecule, atom): self.molecule = molecule self.atom = atom def __str__(self): return "Atom not assigned: %6d %8s" % (self.atom.GetIdx(), OEGetAtomicSymbol(self.atom.GetAtomicNum())) def __init__(self, infileName, tagname): self.pattyTag = OEGetTag(tagname) self.smartsList = [] ifs = open(infileName) lines = ifs.readlines() for line in lines: # Strip trailing comments index = line.find('%') if index != -1: line = line[0:index] # Split into tokens. toks = string.split(line) if len(toks) == 2: smarts,type = toks pat = OESubSearch() pat.Init(smarts) pat.SetMaxMatches(0) self.smartsList.append([pat,type,smarts]) def dump(self): for pat,type,smarts in self.smartsList: print pat,type,smarts def assignTypes(self,mol): # Assign null types. for atom in mol.GetAtoms(): atom.SetStringData(self.pattyTag, "") # Assign atom types using rules. OEAssignAromaticFlags(mol) for pat,type,smarts in self.smartsList: for matchbase in pat.Match(mol): for matchpair in matchbase.GetAtoms(): matchpair.target.SetStringData(self.pattyTag,type) # Check if any atoms remain unassigned. 
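        # (Any atom still carrying the empty tag assigned above matched none of
        # the SMARTS rules; the exception below reports its index and element
        # so the missing rule can be added to the types file.)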
for atom in mol.GetAtoms(): if atom.GetStringData(self.pattyTag)=="": raise AtomTyper.TypingException(mol, atom) def debugTypes(self,mol): for atom in mol.GetAtoms(): print "%6d %8s %8s" % (atom.GetIdx(),OEGetAtomicSymbol(atom.GetAtomicNum()),atom.GetStringData(self.pattyTag)) def getTypeList(self,mol): typeList = [] for atom in mol.GetAtoms(): typeList.append(atom.GetStringData(self.pattyTag)) return typeList #============================================================================================= # Utility routines #============================================================================================= def read_gbvi_parameters(filename): """ Read a GBVI parameter set from a file. ARGUMENTS filename (string) - the filename to read parameters from RETURNS parameters (dict) - parameters[(atomtype,parameter_name)] contains the dimensionless parameter """ parameters = dict() infile = open(filename, 'r') for line in infile: # Strip trailing comments index = line.find('%') if index != -1: line = line[0:index] # Parse parameters elements = line.split() if len(elements) == 3: [atomtype, radius, gamma] = elements parameters['%s_%s' % (atomtype,'radius')] = float(radius) parameters['%s_%s' % (atomtype,'gamma')] = float(gamma) return parameters #============================================================================================= # Computation of hydration free energies #============================================================================================= def function(x): (molecule, parameters) = x return compute_hydration_energy(molecule, parameters) def compute_hydration_energies_parallel(molecules, parameters): import multiprocessing # Create processor pool. nprocs = 8 pool = multiprocessing.Pool(processes=nprocs) x = list() for molecule in molecules: x.append( (molecule, parameters) ) # Distribute calculation. results = pool.map(function, x) return results def compute_hydration_energies(molecules, parameters): """ Compute solvation energies of all specified molecules using given parameter set. ARGUMENTS molecules (list of OEMol) - molecules with atom types parameters (dict) - parameters for atom types RETURNS energies (dict) - energies[molecule] is the computed solvation energy of given molecule """ energies = dict() # energies[index] is the computed solvation energy of molecules[index] platform = openmm.Platform.getPlatformByName("Reference") for molecule in molecules: # Create OpenMM System. system = openmm.System() for atom in molecule.GetAtoms(): mass = OEGetDefaultMass(atom.GetAtomicNum()) system.addParticle(mass * units.amu) # Add nonbonded term. # nonbonded_force = openmm.NonbondedSoftcoreForce() # nonbonded_force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff) # for atom in molecule.GetAtoms(): # charge = 0.0 * units.elementary_charge # sigma = 1.0 * units.angstrom # epsilon = 0.0 * units.kilocalories_per_mole # nonbonded_force.addParticle(charge, sigma, epsilon) # system.addForce(nonbonded_force) # Add GBVI term # gbvi_force = openmm.GBVISoftcoreForce() gbvi_force = openmm.GBVIForce() gbvi_force.setNonbondedMethod(openmm.GBVIForce.NoCutoff) # set no cutoff gbvi_force.setSoluteDielectric(1) gbvi_force.setSolventDielectric(78) # Use scaling method. # gbvi_force.setBornRadiusScalingMethod(openmm.GBVISoftcoreForce.QuinticSpline) # gbvi_force.setQuinticLowerLimitFactor(0.75) # gbvi_force.setQuinticUpperBornRadiusLimit(50.0*units.nanometers) # Build indexable list of atoms. atoms = [atom for atom in molecule.GetAtoms()] # Assign GB/VI parameters. 
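        # Each atom contributes three quantities to the GB/VI force: the
        # partial charge already stored on the molecule (the example usage in
        # main() points at an AM1-BCC parameter file), plus a radius (in
        # angstroms) and a gamma (in kcal/mol) looked up from the fitted
        # parameter dictionary via the atom's "gbvi_type" tag set by AtomTyper.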
for atom in molecule.GetAtoms(): atomtype = atom.GetStringData("gbvi_type") # GBVI atomtype charge = atom.GetPartialCharge() * units.elementary_charge radius = parameters['%s_%s' % (atomtype, 'radius')] * units.angstroms gamma = parameters['%s_%s' % (atomtype, 'gamma')] * units.kilocalories_per_mole # gamma *= -1.0 # DEBUG lambda_ = 1.0 # fully interacting # gbvi_force.addParticle(charge, radius, gamma, lambda_) # for GBVISoftcoreForce gbvi_force.addParticle(charge, radius, gamma) # for GBVIForce # Add bonds. for bond in molecule.GetBonds(): # Get atom indices. iatom = bond.GetBgnIdx() jatom = bond.GetEndIdx() # Get bond length. (xi, yi, zi) = molecule.GetCoords(atoms[iatom]) (xj, yj, zj) = molecule.GetCoords(atoms[jatom]) distance = math.sqrt((xi-xj)**2 + (yi-yj)**2 + (zi-zj)**2) * units.angstroms # Identify bonded atoms to GBVI. gbvi_force.addBond(iatom, jatom, distance) # Add the force to the system. system.addForce(gbvi_force) # Build coordinate array. natoms = len(atoms) coordinates = units.Quantity(numpy.zeros([natoms, 3]), units.angstroms) for (index,atom) in enumerate(atoms): (x,y,z) = molecule.GetCoords(atom) coordinates[index,:] = units.Quantity(numpy.array([x,y,z]),units.angstroms) # Create OpenMM Context. timestep = 1.0 * units.femtosecond # arbitrary integrator = openmm.VerletIntegrator(timestep) context = openmm.Context(system, integrator, platform) # Set the coordinates. context.setPositions(coordinates) # Get the energy state = context.getState(getEnergy=True) energies[molecule] = state.getPotentialEnergy() return energies def compute_hydration_energy(molecule, parameters, platform_name="Reference"): """ Compute hydration energy of a specified molecule given the specified GBVI parameter set. ARGUMENTS molecule (OEMol) - molecule with GBVI atom types parameters (dict) - parameters for GBVI atom types RETURNS energy (float) - hydration energy in kcal/mol """ platform = openmm.Platform.getPlatformByName(platform_name) # Create OpenMM System. system = openmm.System() for atom in molecule.GetAtoms(): mass = OEGetDefaultMass(atom.GetAtomicNum()) system.addParticle(mass * units.amu) # Add GBVI term # gbvi_force = openmm.GBVISoftcoreForce() gbvi_force = openmm.GBVIForce() gbvi_force.setNonbondedMethod(openmm.GBVIForce.NoCutoff) # set no cutoff gbvi_force.setSoluteDielectric(1) gbvi_force.setSolventDielectric(78) # Use scaling method. # gbvi_force.setBornRadiusScalingMethod(openmm.GBVISoftcoreForce.QuinticSpline) # gbvi_force.setQuinticLowerLimitFactor(0.75) # gbvi_force.setQuinticUpperBornRadiusLimit(50.0*units.nanometers) # Build indexable list of atoms. atoms = [atom for atom in molecule.GetAtoms()] # Assign GB/VI parameters. for atom in molecule.GetAtoms(): atomtype = atom.GetStringData("gbvi_type") # GBVI atomtype charge = atom.GetPartialCharge() * units.elementary_charge try: radius = parameters['%s_%s' % (atomtype, 'radius')] * units.angstroms gamma = parameters['%s_%s' % (atomtype, 'gamma')] * units.kilocalories_per_mole except Exception, exception: print "Cannot find parameters for atomtype '%s' in molecule '%s'" % (atomtype, molecule.GetTitle()) print parameters.keys() raise exception # gamma *= -1.0 # DEBUG lambda_ = 1.0 # fully interacting # gbvi_force.addParticle(charge, radius, gamma, lambda_) # for GBVISoftcoreForce gbvi_force.addParticle(charge, radius, gamma) # for GBVIForce # Add bonds. for bond in molecule.GetBonds(): # Get atom indices. iatom = bond.GetBgnIdx() jatom = bond.GetEndIdx() # Get bond length. 
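        # The bond length is measured directly from the conformer coordinates
        # supplied with the molecule (not an idealized equilibrium value), so
        # the distances handed to gbvi_force.addBond match the geometry used
        # for the single-point energy evaluation below.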
(xi, yi, zi) = molecule.GetCoords(atoms[iatom]) (xj, yj, zj) = molecule.GetCoords(atoms[jatom]) distance = math.sqrt((xi-xj)**2 + (yi-yj)**2 + (zi-zj)**2) * units.angstroms # Identify bonded atoms to GBVI. gbvi_force.addBond(iatom, jatom, distance) # Add the force to the system. system.addForce(gbvi_force) # Build coordinate array. natoms = len(atoms) coordinates = units.Quantity(numpy.zeros([natoms, 3]), units.angstroms) for (index,atom) in enumerate(atoms): (x,y,z) = molecule.GetCoords(atom) coordinates[index,:] = units.Quantity(numpy.array([x,y,z]),units.angstroms) # Create OpenMM Context. timestep = 1.0 * units.femtosecond # arbitrary integrator = openmm.VerletIntegrator(timestep) context = openmm.Context(system, integrator, platform) # Set the coordinates. context.setPositions(coordinates) # Get the energy state = context.getState(getEnergy=True) energy = state.getPotentialEnergy() / units.kilocalories_per_mole if numpy.isnan(energy): energy = +1e6; return energy def hydration_energy_factory(molecule): def hydration_energy(**parameters): return compute_hydration_energy(molecule, parameters, platform_name="Reference") return hydration_energy #============================================================================================= # PyMC model #============================================================================================= def testfun(molecule_index, *x): print molecule_index return molecule_index def create_model(molecules, initial_parameters): # Define priors for parameters. model = dict() parameters = dict() # just the parameters for (key, value) in initial_parameters.iteritems(): (atomtype, parameter_name) = key.split('_') if parameter_name == 'gamma': stochastic = pymc.Uniform(key, value=value, lower=-10.0, upper=+10.0) elif parameter_name == 'radius': stochastic = pymc.Uniform(key, value=value, lower=1.0, upper=3.0) else: raise Exception("Unrecognized parameter name: %s" % parameter_name) model[key] = stochastic parameters[key] = stochastic # Define deterministic functions for hydration free energies. for (molecule_index, molecule) in enumerate(molecules): molecule_name = molecule.GetTitle() variable_name = "dg_gbvi_%08d" % molecule_index # Determine which parameters are involved in this molecule to limit number of parents for caching. parents = dict() for atom in molecule.GetAtoms(): atomtype = atom.GetStringData("gbvi_type") # GBVI atomtype for parameter_name in ['gamma', 'radius']: stochastic_name = '%s_%s' % (atomtype,parameter_name) parents[stochastic_name] = parameters[stochastic_name] print "%s : " % molecule_name, print parents.keys() # Create deterministic variable for computed hydration free energy. 
function = hydration_energy_factory(molecule) model[variable_name] = pymc.Deterministic(eval=function, name=variable_name, parents=parents, doc=molecule_name, trace=True, verbose=1, dtype=float, plot=False, cache_depth=2) # Define error model log_sigma_min = math.log(0.01) # kcal/mol log_sigma_max = math.log(10.0) # kcal/mol log_sigma_guess = math.log(0.2) # kcal/mol model['log_sigma'] = pymc.Uniform('log_sigma', lower=log_sigma_min, upper=log_sigma_max, value=log_sigma_guess) model['sigma'] = pymc.Lambda('sigma', lambda log_sigma=model['log_sigma'] : math.exp(log_sigma) ) model['tau'] = pymc.Lambda('tau', lambda sigma=model['sigma'] : sigma**(-2) ) for (molecule_index, molecule) in enumerate(molecules): molecule_name = molecule.GetTitle() variable_name = "dg_exp_%08d" % molecule_index dg_exp = float(OEGetSDData(molecule, 'dG(exp)')) # observed hydration free energy in kcal/mol model[variable_name] = pymc.Normal(mu=model['dg_gbvi_%08d' % molecule_index], tau=model['tau'], value=dg_exp, observed=True) return model #============================================================================================= # MAIN #============================================================================================= if __name__=="__main__": # Create command-line argument options. usage_string = """\ usage: %prog --types typefile --parameters paramfile --molecules molfile example: %prog --types parameters/gbvi.types --parameters parameters/gbvi-am1bcc.parameters --molecules datasets/solvation.sdf --mcmcDb MCMC_db_name """ version_string = "%prog %__version__" parser = OptionParser(usage=usage_string, version=version_string) parser.add_option("-t", "--types", metavar='TYPES', action="store", type="string", dest='atomtypes_filename', default='', help="Filename defining atomtypes as SMARTS atom matches.") parser.add_option("-p", "--parameters", metavar='PARAMETERS', action="store", type="string", dest='parameters_filename', default='', help="File containing initial parameter set.") parser.add_option("-m", "--molecules", metavar='MOLECULES', action="store", type="string", dest='molecules_filename', default='', help="Small molecule set (in any OpenEye compatible file format) containing 'dG(exp)' fields with experimental hydration free energies.") parser.add_option("-d", "--mcmcDb", metavar='MCMC_Db', action="store", type="string", dest='mcmcDb', default='', help="MCMC db name.") # Parse command-line arguments. (options,args) = parser.parse_args() # Ensure all required options have been specified. if options.atomtypes_filename=='' or options.parameters_filename=='' or options.molecules_filename=='' or options.mcmcDb == '': parser.print_help() parser.error("All input files must be specified.") # Read GBVI parameters. parameters = read_gbvi_parameters(options.parameters_filename) mcmcDbName = options.mcmcDb printString = "Starting " + sys.argv[0] + "\n" printString += ' atom types=<' + options.atomtypes_filename + ">\n" printString += ' parameters=<' + options.parameters_filename + ">\n" printString += ' molecule=<' + options.molecules_filename + ">\n" printString += ' mcmcDB=<' + options.mcmcDb + ">\n" sys.stderr.write( printString ) sys.stdout.write( printString ) # Construct atom typer. atom_typer = AtomTyper(options.atomtypes_filename, "gbvi_type") # Load and type all molecules in the specified dataset. print "Loading and typing all molecules in dataset..." 
start_time = time.time() molecules = list() input_molstream = oemolistream(options.molecules_filename) molecule = OECreateOEGraphMol() while OEReadMolecule(input_molstream, molecule): # Get molecule name. name = OEGetSDData(molecule, 'name').strip() molecule.SetTitle(name) # Append to list. molecule_copy = OEMol(molecule) molecules.append(molecule_copy) input_molstream.close() print "%d molecules read" % len(molecules) end_time = time.time() elapsed_time = end_time - start_time print "%.3f s elapsed" % elapsed_time # Add explicit hydrogens. for molecule in molecules: openeye.oechem.OEAddExplicitHydrogens(molecule) # Build a conformation for all molecules with Omega. print "Building conformations for all molecules..." import openeye.oeomega omega = openeye.oeomega.OEOmega() omega.SetMaxConfs(1) omega.SetFromCT(True) for molecule in molecules: #omega.SetFixMol(molecule) omega(molecule) end_time = time.time() elapsed_time = end_time - start_time print "%.3f s elapsed" % elapsed_time # Regularize all molecules through writing as mol2. print "Regularizing all molecules..." ligand_mol2_dirname = os.path.dirname(mcmcDbName) + '/mol2' if( not os.path.exists( ligand_mol2_dirname ) ): os.makedirs(ligand_mol2_dirname) ligand_mol2_filename = ligand_mol2_dirname + '/temp' + os.path.basename(mcmcDbName) + '.mol2' start_time = time.time() omolstream = openeye.oechem.oemolostream(ligand_mol2_filename) for molecule in molecules: # Write molecule as mol2, changing molecule through normalization. openeye.oechem.OEWriteMolecule(omolstream, molecule) omolstream.close() end_time = time.time() elapsed_time = end_time - start_time print "%.3f s elapsed" % elapsed_time # Assign AM1-BCC charges. print "Assigning AM1-BCC charges..." start_time = time.time() for molecule in molecules: # Assign AM1-BCC charges. if molecule.NumAtoms() == 1: # Use formal charges for ions. OEFormalPartialCharges(molecule) else: # Assign AM1-BCC charges for multiatom molecules. OEAssignPartialCharges(molecule, OECharges_AM1BCC, False) # use explicit hydrogens # Check to make sure we ended up with partial charges. if OEHasPartialCharges(molecule) == False: print "No charges on molecule: '%s'" % molecule.GetTitle() print "IUPAC name: %s" % OECreateIUPACName(molecule) # TODO: Write molecule out # Delete themolecule. molecules.remove(molecule) end_time = time.time() elapsed_time = end_time - start_time print "%.3f s elapsed" % elapsed_time print "%d molecules remaining" % len(molecules) # Type all molecules with GAFF parameters. start_time = time.time() typed_molecules = list() untyped_molecules = list() for molecule in molecules: # Assign GBVI types according to SMARTS rules. try: atom_typer.assignTypes(molecule) typed_molecules.append(OEGraphMol(molecule)) #atom_typer.debugTypes(molecule) except AtomTyper.TypingException as exception: print name print exception untyped_molecules.append(OEGraphMol(molecule)) end_time = time.time() elapsed_time = end_time - start_time print "%d molecules correctly typed" % (len(typed_molecules)) print "%d molecules missing some types" % (len(untyped_molecules)) print "%.3f s elapsed" % elapsed_time # Load updated parameter sets. parameter_sets = list() for key in parameters.keys(): # Read parameters. filename = mcmcDbName + '.txt/Chain_0/%s.txt' % key print "Parameter %s from file %s" %( key, filename ) infile = open(filename, 'r') lines = infile.readlines() infile.close() # Discard header lines = lines[3:] # Insert parameter. 
for (index, line) in enumerate(lines): elements = line.split() parameter = float(elements[0]) try: parameter_sets[index][key] = parameter except Exception: parameter_sets.append( dict() ) parameter_sets[index][key] = parameter for (index, parameter_set) in enumerate([parameters] + parameter_sets): # skip some #for (index, parameter_set) in enumerate([parameters] + parameter_sets[::10]): # skip some # Compute energies with all molecules. print "Computing all energies..." start_time = time.time() energies = compute_hydration_energies(typed_molecules, parameter_set) #energies = compute_hydration_energies_parallel(typed_molecules, parameter_set) end_time = time.time() elapsed_time = end_time - start_time print "%.3f s elapsed" % elapsed_time # # Print comparison. # for molecule in typed_molecules: # # Get metadata. # name = OEGetSDData(molecule, 'name').strip() # dg_exp = float(OEGetSDData(molecule, 'dG(exp)')) * units.kilocalories_per_mole # # Form output. # outstring = "%48s %8.3f %8.3f" % (name, dg_exp / units.kilocalories_per_mole, energies[molecule] / units.kilocalories_per_mole) # print outstring # Print summary statistics. signed_errors = numpy.zeros([len(typed_molecules)], numpy.float64) for (i, molecule) in enumerate(typed_molecules): # Get metadata. name = OEGetSDData(molecule, 'name').strip() energy = energies[molecule] / units.kilocalories_per_mole if( math.isnan(energy) ): print "%5d dG: nan %8.3f %s" % (i, dg_exp / units.kilocalories_per_mole, name) else: try: dg_exp = float(OEGetSDData(molecule, 'dG(exp)')) * units.kilocalories_per_mole signed_errors[i] = energies[molecule] / units.kilocalories_per_mole - dg_exp / units.kilocalories_per_mole except: print "Problem getting dG(exp) for molecule %d %s" % (i, name) print "iteration %8d : RMS error %8.3f kcal/mol" % (index, signed_errors.std())
normal
{ "blob_id": "0ac9e757fa827b311487169d0dc822951ce8c4bb", "index": 7167, "step-1": "#!/usr/bin/env python\n\n#=============================================================================================\n# MODULE DOCSTRING\n#=============================================================================================\n\n\"\"\"\nevaluate-gbvi.py\n\nEvaluate the GBVI model on hydration free energies of small molecules for multiple iterations of the Markov chain.\n\n\"\"\"\n#=============================================================================================\n# GLOBAL IMPORTS\n#=============================================================================================\n\nimport sys,string\nfrom openeye.oechem import *\nfrom optparse import OptionParser # For parsing of command line arguments\n\nimport os\nimport math\nimport numpy\nimport simtk.openmm as openmm\nimport simtk.unit as units\n\nimport openeye.oechem\nimport openeye.oequacpac\nimport openeye.oeiupac\n\nfrom openeye.oechem import *\nfrom openeye.oequacpac import *\nfrom openeye.oeszybki import *\nfrom openeye.oeiupac import *\n\nimport time\nimport pymc \n\n#=============================================================================================\n# Load OpenMM plugins.\n#=============================================================================================\n\nprint \"Loading OpenMM plugins...\"\n\nopenmm.Platform.loadPluginsFromDirectory(os.path.join(os.environ['OPENMM_INSTALL_DIR'], 'lib'))\nopenmm.Platform.loadPluginsFromDirectory(os.path.join(os.environ['OPENMM_INSTALL_DIR'], 'lib', 'plugins'))\n\n#=============================================================================================\n# Atom Typer\n#=============================================================================================\n\nclass AtomTyper(object):\n \"\"\"\n Atom typer\n\n Based on 'Patty', by Pat Walters.\n\n \"\"\"\n \n class TypingException(Exception):\n \"\"\"\n Atom typing exception.\n\n \"\"\"\n def __init__(self, molecule, atom):\n self.molecule = molecule\n self.atom = atom\n\n def __str__(self):\n return \"Atom not assigned: %6d %8s\" % (self.atom.GetIdx(), OEGetAtomicSymbol(self.atom.GetAtomicNum()))\n\n def __init__(self, infileName, tagname):\n self.pattyTag = OEGetTag(tagname) \n self.smartsList = []\n ifs = open(infileName)\n lines = ifs.readlines()\n for line in lines:\n # Strip trailing comments\n index = line.find('%')\n if index != -1:\n line = line[0:index]\n # Split into tokens.\n toks = string.split(line)\n if len(toks) == 2:\n smarts,type = toks\n pat = OESubSearch()\n pat.Init(smarts)\n pat.SetMaxMatches(0)\n self.smartsList.append([pat,type,smarts])\n\n def dump(self):\n for pat,type,smarts in self.smartsList:\n print pat,type,smarts\n\n def assignTypes(self,mol):\n # Assign null types.\n for atom in mol.GetAtoms():\n atom.SetStringData(self.pattyTag, \"\") \n\n # Assign atom types using rules.\n OEAssignAromaticFlags(mol)\n for pat,type,smarts in self.smartsList:\n for matchbase in pat.Match(mol):\n for matchpair in matchbase.GetAtoms():\n matchpair.target.SetStringData(self.pattyTag,type)\n\n # Check if any atoms remain unassigned.\n for atom in mol.GetAtoms():\n if atom.GetStringData(self.pattyTag)==\"\":\n raise AtomTyper.TypingException(mol, atom)\n\n def debugTypes(self,mol):\n for atom in mol.GetAtoms():\n print \"%6d %8s %8s\" % (atom.GetIdx(),OEGetAtomicSymbol(atom.GetAtomicNum()),atom.GetStringData(self.pattyTag))\n\n def getTypeList(self,mol):\n typeList = []\n for atom in mol.GetAtoms():\n 
typeList.append(atom.GetStringData(self.pattyTag))\n return typeList\n\n#=============================================================================================\n# Utility routines\n#=============================================================================================\n\ndef read_gbvi_parameters(filename):\n \"\"\"\n Read a GBVI parameter set from a file.\n\n ARGUMENTS\n\n filename (string) - the filename to read parameters from\n\n RETURNS\n\n parameters (dict) - parameters[(atomtype,parameter_name)] contains the dimensionless parameter \n \n \"\"\"\n\n parameters = dict()\n \n infile = open(filename, 'r')\n for line in infile:\n # Strip trailing comments\n index = line.find('%')\n if index != -1:\n line = line[0:index] \n\n # Parse parameters\n elements = line.split()\n if len(elements) == 3:\n [atomtype, radius, gamma] = elements\n parameters['%s_%s' % (atomtype,'radius')] = float(radius)\n parameters['%s_%s' % (atomtype,'gamma')] = float(gamma)\n\n return parameters \n\n#=============================================================================================\n# Computation of hydration free energies\n#=============================================================================================\n\ndef function(x):\n (molecule, parameters) = x\n return compute_hydration_energy(molecule, parameters) \n\ndef compute_hydration_energies_parallel(molecules, parameters):\n import multiprocessing\n\n # Create processor pool.\n nprocs = 8\n pool = multiprocessing.Pool(processes=nprocs)\n\n x = list()\n for molecule in molecules:\n x.append( (molecule, parameters) )\n\n # Distribute calculation.\n results = pool.map(function, x)\n\n return results\n\ndef compute_hydration_energies(molecules, parameters):\n \"\"\"\n Compute solvation energies of all specified molecules using given parameter set.\n\n ARGUMENTS\n\n molecules (list of OEMol) - molecules with atom types\n parameters (dict) - parameters for atom types\n\n RETURNS\n\n energies (dict) - energies[molecule] is the computed solvation energy of given molecule\n\n \"\"\"\n\n energies = dict() # energies[index] is the computed solvation energy of molecules[index]\n\n platform = openmm.Platform.getPlatformByName(\"Reference\")\n\n for molecule in molecules:\n # Create OpenMM System.\n system = openmm.System()\n for atom in molecule.GetAtoms():\n mass = OEGetDefaultMass(atom.GetAtomicNum())\n system.addParticle(mass * units.amu)\n\n # Add nonbonded term.\n # nonbonded_force = openmm.NonbondedSoftcoreForce()\n # nonbonded_force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)\n # for atom in molecule.GetAtoms():\n # charge = 0.0 * units.elementary_charge\n # sigma = 1.0 * units.angstrom\n # epsilon = 0.0 * units.kilocalories_per_mole\n # nonbonded_force.addParticle(charge, sigma, epsilon)\n # system.addForce(nonbonded_force)\n\n # Add GBVI term\n # gbvi_force = openmm.GBVISoftcoreForce()\n gbvi_force = openmm.GBVIForce() \n gbvi_force.setNonbondedMethod(openmm.GBVIForce.NoCutoff) # set no cutoff\n gbvi_force.setSoluteDielectric(1)\n gbvi_force.setSolventDielectric(78)\n\n # Use scaling method.\n # gbvi_force.setBornRadiusScalingMethod(openmm.GBVISoftcoreForce.QuinticSpline)\n # gbvi_force.setQuinticLowerLimitFactor(0.75)\n # gbvi_force.setQuinticUpperBornRadiusLimit(50.0*units.nanometers)\n\n # Build indexable list of atoms.\n atoms = [atom for atom in molecule.GetAtoms()] \n \n # Assign GB/VI parameters.\n for atom in molecule.GetAtoms(): \n atomtype = atom.GetStringData(\"gbvi_type\") # GBVI atomtype\n charge = 
atom.GetPartialCharge() * units.elementary_charge\n radius = parameters['%s_%s' % (atomtype, 'radius')] * units.angstroms\n gamma = parameters['%s_%s' % (atomtype, 'gamma')] * units.kilocalories_per_mole \n # gamma *= -1.0 # DEBUG\n lambda_ = 1.0 # fully interacting\n # gbvi_force.addParticle(charge, radius, gamma, lambda_) # for GBVISoftcoreForce\n gbvi_force.addParticle(charge, radius, gamma) # for GBVIForce\n\n # Add bonds.\n for bond in molecule.GetBonds():\n # Get atom indices.\n iatom = bond.GetBgnIdx()\n jatom = bond.GetEndIdx()\n # Get bond length.\n (xi, yi, zi) = molecule.GetCoords(atoms[iatom])\n (xj, yj, zj) = molecule.GetCoords(atoms[jatom])\n distance = math.sqrt((xi-xj)**2 + (yi-yj)**2 + (zi-zj)**2) * units.angstroms\n # Identify bonded atoms to GBVI.\n gbvi_force.addBond(iatom, jatom, distance)\n\n # Add the force to the system.\n system.addForce(gbvi_force)\n \n # Build coordinate array.\n natoms = len(atoms)\n coordinates = units.Quantity(numpy.zeros([natoms, 3]), units.angstroms)\n for (index,atom) in enumerate(atoms):\n (x,y,z) = molecule.GetCoords(atom)\n coordinates[index,:] = units.Quantity(numpy.array([x,y,z]),units.angstroms) \n \n # Create OpenMM Context.\n timestep = 1.0 * units.femtosecond # arbitrary\n integrator = openmm.VerletIntegrator(timestep)\n context = openmm.Context(system, integrator, platform)\n\n # Set the coordinates.\n context.setPositions(coordinates)\n \n # Get the energy\n state = context.getState(getEnergy=True)\n energies[molecule] = state.getPotentialEnergy()\n\n return energies\n\ndef compute_hydration_energy(molecule, parameters, platform_name=\"Reference\"):\n \"\"\"\n Compute hydration energy of a specified molecule given the specified GBVI parameter set.\n\n ARGUMENTS\n\n molecule (OEMol) - molecule with GBVI atom types\n parameters (dict) - parameters for GBVI atom types\n\n RETURNS\n\n energy (float) - hydration energy in kcal/mol\n\n \"\"\"\n\n platform = openmm.Platform.getPlatformByName(platform_name)\n\n # Create OpenMM System.\n system = openmm.System()\n for atom in molecule.GetAtoms():\n mass = OEGetDefaultMass(atom.GetAtomicNum())\n system.addParticle(mass * units.amu)\n\n # Add GBVI term\n # gbvi_force = openmm.GBVISoftcoreForce()\n gbvi_force = openmm.GBVIForce() \n gbvi_force.setNonbondedMethod(openmm.GBVIForce.NoCutoff) # set no cutoff\n gbvi_force.setSoluteDielectric(1)\n gbvi_force.setSolventDielectric(78)\n \n # Use scaling method.\n # gbvi_force.setBornRadiusScalingMethod(openmm.GBVISoftcoreForce.QuinticSpline)\n # gbvi_force.setQuinticLowerLimitFactor(0.75)\n # gbvi_force.setQuinticUpperBornRadiusLimit(50.0*units.nanometers)\n \n # Build indexable list of atoms.\n atoms = [atom for atom in molecule.GetAtoms()] \n \n # Assign GB/VI parameters.\n for atom in molecule.GetAtoms(): \n atomtype = atom.GetStringData(\"gbvi_type\") # GBVI atomtype\n charge = atom.GetPartialCharge() * units.elementary_charge\n try:\n radius = parameters['%s_%s' % (atomtype, 'radius')] * units.angstroms\n gamma = parameters['%s_%s' % (atomtype, 'gamma')] * units.kilocalories_per_mole\n except Exception, exception:\n print \"Cannot find parameters for atomtype '%s' in molecule '%s'\" % (atomtype, molecule.GetTitle())\n print parameters.keys()\n raise exception\n \n # gamma *= -1.0 # DEBUG\n lambda_ = 1.0 # fully interacting\n # gbvi_force.addParticle(charge, radius, gamma, lambda_) # for GBVISoftcoreForce\n gbvi_force.addParticle(charge, radius, gamma) # for GBVIForce\n \n # Add bonds.\n for bond in molecule.GetBonds():\n # Get atom indices.\n 
iatom = bond.GetBgnIdx()\n jatom = bond.GetEndIdx()\n # Get bond length.\n (xi, yi, zi) = molecule.GetCoords(atoms[iatom])\n (xj, yj, zj) = molecule.GetCoords(atoms[jatom])\n distance = math.sqrt((xi-xj)**2 + (yi-yj)**2 + (zi-zj)**2) * units.angstroms\n # Identify bonded atoms to GBVI.\n gbvi_force.addBond(iatom, jatom, distance)\n\n # Add the force to the system.\n system.addForce(gbvi_force)\n \n # Build coordinate array.\n natoms = len(atoms)\n coordinates = units.Quantity(numpy.zeros([natoms, 3]), units.angstroms)\n for (index,atom) in enumerate(atoms):\n (x,y,z) = molecule.GetCoords(atom)\n coordinates[index,:] = units.Quantity(numpy.array([x,y,z]),units.angstroms) \n \n # Create OpenMM Context.\n timestep = 1.0 * units.femtosecond # arbitrary\n integrator = openmm.VerletIntegrator(timestep)\n context = openmm.Context(system, integrator, platform)\n\n # Set the coordinates.\n context.setPositions(coordinates)\n \n # Get the energy\n state = context.getState(getEnergy=True)\n energy = state.getPotentialEnergy() / units.kilocalories_per_mole\n if numpy.isnan(energy):\n energy = +1e6;\n\n return energy\n\ndef hydration_energy_factory(molecule):\n def hydration_energy(**parameters):\n return compute_hydration_energy(molecule, parameters, platform_name=\"Reference\")\n return hydration_energy\n\n#=============================================================================================\n# PyMC model\n#=============================================================================================\n\ndef testfun(molecule_index, *x):\n print molecule_index\n return molecule_index\n\ndef create_model(molecules, initial_parameters):\n\n # Define priors for parameters.\n model = dict()\n parameters = dict() # just the parameters\n for (key, value) in initial_parameters.iteritems():\n (atomtype, parameter_name) = key.split('_')\n if parameter_name == 'gamma':\n stochastic = pymc.Uniform(key, value=value, lower=-10.0, upper=+10.0)\n elif parameter_name == 'radius':\n stochastic = pymc.Uniform(key, value=value, lower=1.0, upper=3.0)\n else:\n raise Exception(\"Unrecognized parameter name: %s\" % parameter_name)\n model[key] = stochastic\n parameters[key] = stochastic\n\n # Define deterministic functions for hydration free energies.\n for (molecule_index, molecule) in enumerate(molecules):\n molecule_name = molecule.GetTitle()\n variable_name = \"dg_gbvi_%08d\" % molecule_index\n # Determine which parameters are involved in this molecule to limit number of parents for caching.\n parents = dict()\n for atom in molecule.GetAtoms():\n atomtype = atom.GetStringData(\"gbvi_type\") # GBVI atomtype\n for parameter_name in ['gamma', 'radius']:\n stochastic_name = '%s_%s' % (atomtype,parameter_name)\n parents[stochastic_name] = parameters[stochastic_name]\n print \"%s : \" % molecule_name,\n print parents.keys()\n # Create deterministic variable for computed hydration free energy.\n function = hydration_energy_factory(molecule)\n model[variable_name] = pymc.Deterministic(eval=function,\n name=variable_name,\n parents=parents,\n doc=molecule_name,\n trace=True,\n verbose=1,\n dtype=float,\n plot=False,\n cache_depth=2)\n\n # Define error model\n log_sigma_min = math.log(0.01) # kcal/mol\n log_sigma_max = math.log(10.0) # kcal/mol\n log_sigma_guess = math.log(0.2) # kcal/mol\n model['log_sigma'] = pymc.Uniform('log_sigma', lower=log_sigma_min, upper=log_sigma_max, value=log_sigma_guess)\n model['sigma'] = pymc.Lambda('sigma', lambda log_sigma=model['log_sigma'] : math.exp(log_sigma) ) \n model['tau'] = 
pymc.Lambda('tau', lambda sigma=model['sigma'] : sigma**(-2) )\n for (molecule_index, molecule) in enumerate(molecules):\n molecule_name = molecule.GetTitle()\n variable_name = \"dg_exp_%08d\" % molecule_index\n dg_exp = float(OEGetSDData(molecule, 'dG(exp)')) # observed hydration free energy in kcal/mol\n model[variable_name] = pymc.Normal(mu=model['dg_gbvi_%08d' % molecule_index], tau=model['tau'], value=dg_exp, observed=True) \n\n return model\n\n#=============================================================================================\n# MAIN\n#=============================================================================================\n\nif __name__==\"__main__\":\n\n # Create command-line argument options.\n usage_string = \"\"\"\\\n usage: %prog --types typefile --parameters paramfile --molecules molfile\n \n example: %prog --types parameters/gbvi.types --parameters parameters/gbvi-am1bcc.parameters --molecules datasets/solvation.sdf --mcmcDb MCMC_db_name\n \n \"\"\"\n version_string = \"%prog %__version__\"\n parser = OptionParser(usage=usage_string, version=version_string)\n\n parser.add_option(\"-t\", \"--types\", metavar='TYPES',\n action=\"store\", type=\"string\", dest='atomtypes_filename', default='',\n help=\"Filename defining atomtypes as SMARTS atom matches.\")\n parser.add_option(\"-p\", \"--parameters\", metavar='PARAMETERS',\n action=\"store\", type=\"string\", dest='parameters_filename', default='',\n help=\"File containing initial parameter set.\")\n parser.add_option(\"-m\", \"--molecules\", metavar='MOLECULES',\n action=\"store\", type=\"string\", dest='molecules_filename', default='',\n help=\"Small molecule set (in any OpenEye compatible file format) containing 'dG(exp)' fields with experimental hydration free energies.\")\n\n parser.add_option(\"-d\", \"--mcmcDb\", metavar='MCMC_Db',\n action=\"store\", type=\"string\", dest='mcmcDb', default='',\n help=\"MCMC db name.\")\n\n \n # Parse command-line arguments.\n (options,args) = parser.parse_args()\n \n # Ensure all required options have been specified.\n if options.atomtypes_filename=='' or options.parameters_filename=='' or options.molecules_filename=='' or options.mcmcDb == '':\n parser.print_help()\n parser.error(\"All input files must be specified.\")\n\n # Read GBVI parameters.\n parameters = read_gbvi_parameters(options.parameters_filename)\n\n mcmcDbName = options.mcmcDb\n printString = \"Starting \" + sys.argv[0] + \"\\n\"\n printString += ' atom types=<' + options.atomtypes_filename + \">\\n\"\n printString += ' parameters=<' + options.parameters_filename + \">\\n\"\n printString += ' molecule=<' + options.molecules_filename + \">\\n\"\n printString += ' mcmcDB=<' + options.mcmcDb + \">\\n\"\n sys.stderr.write( printString )\n sys.stdout.write( printString )\n\n \n # Construct atom typer.\n atom_typer = AtomTyper(options.atomtypes_filename, \"gbvi_type\")\n \n # Load and type all molecules in the specified dataset.\n print \"Loading and typing all molecules in dataset...\"\n start_time = time.time()\n molecules = list()\n input_molstream = oemolistream(options.molecules_filename)\n molecule = OECreateOEGraphMol()\n while OEReadMolecule(input_molstream, molecule):\n # Get molecule name.\n name = OEGetSDData(molecule, 'name').strip()\n molecule.SetTitle(name)\n # Append to list.\n molecule_copy = OEMol(molecule)\n molecules.append(molecule_copy)\n input_molstream.close()\n print \"%d molecules read\" % len(molecules)\n end_time = time.time()\n elapsed_time = end_time - start_time\n print \"%.3f s 
elapsed\" % elapsed_time\n\n # Add explicit hydrogens.\n for molecule in molecules:\n openeye.oechem.OEAddExplicitHydrogens(molecule) \n\n # Build a conformation for all molecules with Omega.\n print \"Building conformations for all molecules...\" \n import openeye.oeomega\n omega = openeye.oeomega.OEOmega()\n omega.SetMaxConfs(1)\n omega.SetFromCT(True)\n for molecule in molecules:\n #omega.SetFixMol(molecule)\n omega(molecule)\n end_time = time.time()\n elapsed_time = end_time - start_time\n print \"%.3f s elapsed\" % elapsed_time\n\n # Regularize all molecules through writing as mol2.\n print \"Regularizing all molecules...\"\n ligand_mol2_dirname = os.path.dirname(mcmcDbName) + '/mol2'\n if( not os.path.exists( ligand_mol2_dirname ) ):\n os.makedirs(ligand_mol2_dirname)\n ligand_mol2_filename = ligand_mol2_dirname + '/temp' + os.path.basename(mcmcDbName) + '.mol2' \n\n start_time = time.time() \n omolstream = openeye.oechem.oemolostream(ligand_mol2_filename) \n for molecule in molecules:\n # Write molecule as mol2, changing molecule through normalization. \n openeye.oechem.OEWriteMolecule(omolstream, molecule)\n omolstream.close()\n end_time = time.time()\n elapsed_time = end_time - start_time\n print \"%.3f s elapsed\" % elapsed_time\n \n # Assign AM1-BCC charges.\n print \"Assigning AM1-BCC charges...\"\n start_time = time.time()\n for molecule in molecules:\n # Assign AM1-BCC charges.\n if molecule.NumAtoms() == 1:\n # Use formal charges for ions.\n OEFormalPartialCharges(molecule) \n else:\n # Assign AM1-BCC charges for multiatom molecules.\n OEAssignPartialCharges(molecule, OECharges_AM1BCC, False) # use explicit hydrogens\n # Check to make sure we ended up with partial charges.\n if OEHasPartialCharges(molecule) == False:\n print \"No charges on molecule: '%s'\" % molecule.GetTitle()\n print \"IUPAC name: %s\" % OECreateIUPACName(molecule)\n # TODO: Write molecule out\n # Delete themolecule.\n molecules.remove(molecule)\n \n end_time = time.time()\n elapsed_time = end_time - start_time\n print \"%.3f s elapsed\" % elapsed_time\n print \"%d molecules remaining\" % len(molecules)\n \n # Type all molecules with GAFF parameters.\n start_time = time.time()\n typed_molecules = list()\n untyped_molecules = list()\n for molecule in molecules:\n # Assign GBVI types according to SMARTS rules.\n try:\n atom_typer.assignTypes(molecule)\n typed_molecules.append(OEGraphMol(molecule))\n #atom_typer.debugTypes(molecule)\n except AtomTyper.TypingException as exception:\n print name \n print exception\n untyped_molecules.append(OEGraphMol(molecule)) \n end_time = time.time()\n elapsed_time = end_time - start_time\n print \"%d molecules correctly typed\" % (len(typed_molecules))\n print \"%d molecules missing some types\" % (len(untyped_molecules))\n print \"%.3f s elapsed\" % elapsed_time\n\n # Load updated parameter sets.\n parameter_sets = list()\n for key in parameters.keys():\n # Read parameters.\n filename = mcmcDbName + '.txt/Chain_0/%s.txt' % key\n print \"Parameter %s from file %s\" %( key, filename ) \n infile = open(filename, 'r')\n lines = infile.readlines()\n infile.close()\n # Discard header\n lines = lines[3:]\n # Insert parameter.\n for (index, line) in enumerate(lines):\n elements = line.split()\n parameter = float(elements[0])\n try:\n parameter_sets[index][key] = parameter\n except Exception:\n parameter_sets.append( dict() )\n parameter_sets[index][key] = parameter\n\n for (index, parameter_set) in enumerate([parameters] + parameter_sets): # skip some\n #for (index, 
parameter_set) in enumerate([parameters] + parameter_sets[::10]): # skip some\n \n # Compute energies with all molecules.\n print \"Computing all energies...\"\n start_time = time.time()\n energies = compute_hydration_energies(typed_molecules, parameter_set)\n #energies = compute_hydration_energies_parallel(typed_molecules, parameter_set)\n end_time = time.time()\n elapsed_time = end_time - start_time\n print \"%.3f s elapsed\" % elapsed_time\n\n# # Print comparison.\n# for molecule in typed_molecules:\n# # Get metadata.\n# name = OEGetSDData(molecule, 'name').strip()\n# dg_exp = float(OEGetSDData(molecule, 'dG(exp)')) * units.kilocalories_per_mole \n# # Form output.\n# outstring = \"%48s %8.3f %8.3f\" % (name, dg_exp / units.kilocalories_per_mole, energies[molecule] / units.kilocalories_per_mole) \n# print outstring\n\n # Print summary statistics.\n signed_errors = numpy.zeros([len(typed_molecules)], numpy.float64)\n for (i, molecule) in enumerate(typed_molecules):\n # Get metadata.\n name = OEGetSDData(molecule, 'name').strip()\n energy = energies[molecule] / units.kilocalories_per_mole\n if( math.isnan(energy) ):\n print \"%5d dG: nan %8.3f %s\" % (i, dg_exp / units.kilocalories_per_mole, name)\n else:\n try:\n dg_exp = float(OEGetSDData(molecule, 'dG(exp)')) * units.kilocalories_per_mole\n signed_errors[i] = energies[molecule] / units.kilocalories_per_mole - dg_exp / units.kilocalories_per_mole\n except:\n print \"Problem getting dG(exp) for molecule %d %s\" % (i, name)\n\n print \"iteration %8d : RMS error %8.3f kcal/mol\" % (index, signed_errors.std())\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
# When you want to repeat 3 times

# 10 stars in a single line
for x in range(0, 10, 3):  # the third number sets the step (increment) amount
    # print(x)
    print("★", end=" ")
print()
print("------------------------")
# Nested for loop
for y in range(0, 10):
    for x in range(0, 10):
        # print(x)
        print("★", end=" ")
    print()
normal
{ "blob_id": "b360ba7412bd10e2818511cee81302d407f88fd1", "index": 1895, "step-1": "<mask token>\n", "step-2": "for x in range(0, 10, 3):\n print('★', end=' ')\nprint()\nprint('------------------------')\nfor y in range(0, 10):\n for x in range(0, 10):\n print('★', end=' ')\n print()\n", "step-3": "# 3번 반복하고 싶은 경우\r\n\r\n# 별 10개를 한줄로\r\nfor x in range(0, 10, 3): # 3번째 숫자는 증감할 양을 정해줌.\r\n # print(x)\r\n print(\"★\", end=\" \")\r\nprint()\r\nprint(\"------------------------\")\r\n#이중 for문\r\nfor y in range(0, 10):\r\n for x in range(0, 10):\r\n # print(x)\r\n print(\"★\", end=\" \")\r\n print()", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from django import forms from .models import Recipe, Ingredient, Category, Tag from blog.widgets import CustomClearableFileInput class NewCategoriesForm(forms.ModelForm): friendly_name = forms.CharField(label='... or add your own category', required=False) class Meta(): model = Category fields = ('friendly_name',) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) placeholders = { 'friendly_name': 'One single word only' } for field in self.fields: placeholder = placeholders[field] self.fields[field].widget.attrs['placeholder'] = placeholder class NewTagsForm(forms.ModelForm): tagname = forms.CharField(label='... or add your own tag', required=False) class Meta(): model = Tag fields = '__all__' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) placeholders = { 'tagname': 'One single word only' } for field in self.fields: placeholder = placeholders[field] self.fields[field].widget.attrs['placeholder'] = placeholder class IngredientForm(forms.ModelForm): class Meta: model = Ingredient exclude = ('recipe', ) labels = { 'quantity': 'Qty', } def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) placeholders = { 'quantity': 'eg: 0.1', 'unit': 'eg: ml', 'preparation': 'eg: chopped', 'name': 'eg: tomatoes' } for field in self.fields: placeholder = placeholders[field] self.fields[field].widget.attrs['placeholder'] = placeholder self.fields['quantity'].widget.attrs['min'] = 0.01 IngredientFormSet = forms.inlineformset_factory(Recipe, Ingredient, form=IngredientForm, extra=25, min_num=1, validate_min=True) class RecipeForm(forms.ModelForm): # Replace image field image = forms.ImageField(label='Image', required=False, widget=CustomClearableFileInput) # Change rendering of form to user-friendly checkboxes # Credit: # https://medium.com/swlh/django-forms-for-many-to-many-fields-d977dec4b024 category = forms.ModelMultipleChoiceField( queryset=Category.objects.all(), label='Choose some categories from the list', required=False, widget=forms.CheckboxSelectMultiple ) # Change rendering of form to user-friendly checkboxes # Credit: # https://medium.com/swlh/django-forms-for-many-to-many-fields-d977dec4b024 tag = forms.ModelMultipleChoiceField( queryset=Tag.objects.all(), label='Choose some tags from the list', required=False, widget=forms.CheckboxSelectMultiple ) class Meta: model = Recipe exclude = ('author', 'date', 'date_posted', 'date_edited', 'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code',) labels = { 'intro': 'Brief Description', } def clean_servings(self): value = self.cleaned_data.get('servings') if value < 1: raise forms.ValidationError('The number of servings must be \ greater than zero') return value def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) categories = Category.objects.all().order_by('friendly_name') friendly_name = [(c.id, c.get_friendly_name()) for c in categories] placeholders = { 'title': 'eg: Carrot Cake', 'intro': 'eg: A deliciously sweet dessert', 'prep_time': 'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time': 'eg: 1hr 20mins', 'directions': 'Describe the steps to make this recipe', 'image': '', 'image_credit': 'Who took the photo?', 'servings': 'No. of servings', 'tag': '', 'category': '', } for field in self.fields: placeholder = placeholders[field] self.fields[field].widget.attrs['placeholder'] = placeholder self.fields['category'].choices = friendly_name self.fields['title'].widget.attrs['autofocus'] = True self.fields['directions'].required = True
normal
{ "blob_id": "7484bd9012bc9952b679073ae036de4554d362be", "index": 5175, "step-1": "<mask token>\n\n\nclass IngredientForm(forms.ModelForm):\n\n\n class Meta:\n model = Ingredient\n exclude = 'recipe',\n labels = {'quantity': 'Qty'}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',\n 'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['quantity'].widget.attrs['min'] = 0.01\n\n\n<mask token>\n\n\nclass RecipeForm(forms.ModelForm):\n image = forms.ImageField(label='Image', required=False, widget=\n CustomClearableFileInput)\n category = forms.ModelMultipleChoiceField(queryset=Category.objects.all\n (), label='Choose some categories from the list', required=False,\n widget=forms.CheckboxSelectMultiple)\n tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=\n 'Choose some tags from the list', required=False, widget=forms.\n CheckboxSelectMultiple)\n\n\n class Meta:\n model = Recipe\n exclude = ('author', 'date', 'date_posted', 'date_edited',\n 'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')\n labels = {'intro': 'Brief Description'}\n\n def clean_servings(self):\n value = self.cleaned_data.get('servings')\n if value < 1:\n raise forms.ValidationError(\n 'The number of servings must be greater than zero'\n )\n return value\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all().order_by('friendly_name')\n friendly_name = [(c.id, c.get_friendly_name()) for c in categories]\n placeholders = {'title': 'eg: Carrot Cake', 'intro':\n 'eg: A deliciously sweet dessert', 'prep_time':\n 'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':\n 'eg: 1hr 20mins', 'directions':\n 'Describe the steps to make this recipe', 'image': '',\n 'image_credit': 'Who took the photo?', 'servings':\n 'No. 
of servings', 'tag': '', 'category': ''}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['category'].choices = friendly_name\n self.fields['title'].widget.attrs['autofocus'] = True\n self.fields['directions'].required = True\n", "step-2": "<mask token>\n\n\nclass NewTagsForm(forms.ModelForm):\n <mask token>\n\n\n class Meta:\n model = Tag\n fields = '__all__'\n <mask token>\n\n\nclass IngredientForm(forms.ModelForm):\n\n\n class Meta:\n model = Ingredient\n exclude = 'recipe',\n labels = {'quantity': 'Qty'}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',\n 'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['quantity'].widget.attrs['min'] = 0.01\n\n\n<mask token>\n\n\nclass RecipeForm(forms.ModelForm):\n image = forms.ImageField(label='Image', required=False, widget=\n CustomClearableFileInput)\n category = forms.ModelMultipleChoiceField(queryset=Category.objects.all\n (), label='Choose some categories from the list', required=False,\n widget=forms.CheckboxSelectMultiple)\n tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=\n 'Choose some tags from the list', required=False, widget=forms.\n CheckboxSelectMultiple)\n\n\n class Meta:\n model = Recipe\n exclude = ('author', 'date', 'date_posted', 'date_edited',\n 'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')\n labels = {'intro': 'Brief Description'}\n\n def clean_servings(self):\n value = self.cleaned_data.get('servings')\n if value < 1:\n raise forms.ValidationError(\n 'The number of servings must be greater than zero'\n )\n return value\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all().order_by('friendly_name')\n friendly_name = [(c.id, c.get_friendly_name()) for c in categories]\n placeholders = {'title': 'eg: Carrot Cake', 'intro':\n 'eg: A deliciously sweet dessert', 'prep_time':\n 'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':\n 'eg: 1hr 20mins', 'directions':\n 'Describe the steps to make this recipe', 'image': '',\n 'image_credit': 'Who took the photo?', 'servings':\n 'No. of servings', 'tag': '', 'category': ''}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['category'].choices = friendly_name\n self.fields['title'].widget.attrs['autofocus'] = True\n self.fields['directions'].required = True\n", "step-3": "<mask token>\n\n\nclass NewTagsForm(forms.ModelForm):\n tagname = forms.CharField(label='... 
or add your own tag', required=False)\n\n\n class Meta:\n model = Tag\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'tagname': 'One single word only'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n\nclass IngredientForm(forms.ModelForm):\n\n\n class Meta:\n model = Ingredient\n exclude = 'recipe',\n labels = {'quantity': 'Qty'}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',\n 'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['quantity'].widget.attrs['min'] = 0.01\n\n\n<mask token>\n\n\nclass RecipeForm(forms.ModelForm):\n image = forms.ImageField(label='Image', required=False, widget=\n CustomClearableFileInput)\n category = forms.ModelMultipleChoiceField(queryset=Category.objects.all\n (), label='Choose some categories from the list', required=False,\n widget=forms.CheckboxSelectMultiple)\n tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=\n 'Choose some tags from the list', required=False, widget=forms.\n CheckboxSelectMultiple)\n\n\n class Meta:\n model = Recipe\n exclude = ('author', 'date', 'date_posted', 'date_edited',\n 'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')\n labels = {'intro': 'Brief Description'}\n\n def clean_servings(self):\n value = self.cleaned_data.get('servings')\n if value < 1:\n raise forms.ValidationError(\n 'The number of servings must be greater than zero'\n )\n return value\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all().order_by('friendly_name')\n friendly_name = [(c.id, c.get_friendly_name()) for c in categories]\n placeholders = {'title': 'eg: Carrot Cake', 'intro':\n 'eg: A deliciously sweet dessert', 'prep_time':\n 'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':\n 'eg: 1hr 20mins', 'directions':\n 'Describe the steps to make this recipe', 'image': '',\n 'image_credit': 'Who took the photo?', 'servings':\n 'No. of servings', 'tag': '', 'category': ''}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['category'].choices = friendly_name\n self.fields['title'].widget.attrs['autofocus'] = True\n self.fields['directions'].required = True\n", "step-4": "<mask token>\n\n\nclass NewCategoriesForm(forms.ModelForm):\n friendly_name = forms.CharField(label='... or add your own category',\n required=False)\n\n\n class Meta:\n model = Category\n fields = 'friendly_name',\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'friendly_name': 'One single word only'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n\nclass NewTagsForm(forms.ModelForm):\n tagname = forms.CharField(label='... 
or add your own tag', required=False)\n\n\n class Meta:\n model = Tag\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'tagname': 'One single word only'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n\nclass IngredientForm(forms.ModelForm):\n\n\n class Meta:\n model = Ingredient\n exclude = 'recipe',\n labels = {'quantity': 'Qty'}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',\n 'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['quantity'].widget.attrs['min'] = 0.01\n\n\n<mask token>\n\n\nclass RecipeForm(forms.ModelForm):\n image = forms.ImageField(label='Image', required=False, widget=\n CustomClearableFileInput)\n category = forms.ModelMultipleChoiceField(queryset=Category.objects.all\n (), label='Choose some categories from the list', required=False,\n widget=forms.CheckboxSelectMultiple)\n tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=\n 'Choose some tags from the list', required=False, widget=forms.\n CheckboxSelectMultiple)\n\n\n class Meta:\n model = Recipe\n exclude = ('author', 'date', 'date_posted', 'date_edited',\n 'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')\n labels = {'intro': 'Brief Description'}\n\n def clean_servings(self):\n value = self.cleaned_data.get('servings')\n if value < 1:\n raise forms.ValidationError(\n 'The number of servings must be greater than zero'\n )\n return value\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all().order_by('friendly_name')\n friendly_name = [(c.id, c.get_friendly_name()) for c in categories]\n placeholders = {'title': 'eg: Carrot Cake', 'intro':\n 'eg: A deliciously sweet dessert', 'prep_time':\n 'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':\n 'eg: 1hr 20mins', 'directions':\n 'Describe the steps to make this recipe', 'image': '',\n 'image_credit': 'Who took the photo?', 'servings':\n 'No. of servings', 'tag': '', 'category': ''}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['category'].choices = friendly_name\n self.fields['title'].widget.attrs['autofocus'] = True\n self.fields['directions'].required = True\n", "step-5": "from django import forms\nfrom .models import Recipe, Ingredient, Category, Tag\nfrom blog.widgets import CustomClearableFileInput\n\n\nclass NewCategoriesForm(forms.ModelForm):\n\n friendly_name = forms.CharField(label='... or add your own category',\n required=False)\n\n class Meta():\n model = Category\n fields = ('friendly_name',)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n placeholders = {\n 'friendly_name': 'One single word only'\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n\nclass NewTagsForm(forms.ModelForm):\n\n tagname = forms.CharField(label='... 
or add your own tag', required=False)\n\n class Meta():\n model = Tag\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n placeholders = {\n 'tagname': 'One single word only'\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n\nclass IngredientForm(forms.ModelForm):\n class Meta:\n model = Ingredient\n exclude = ('recipe', )\n\n labels = {\n 'quantity': 'Qty',\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n placeholders = {\n 'quantity': 'eg: 0.1',\n 'unit': 'eg: ml',\n 'preparation': 'eg: chopped',\n 'name': 'eg: tomatoes'\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n self.fields['quantity'].widget.attrs['min'] = 0.01\n\n\nIngredientFormSet = forms.inlineformset_factory(Recipe, Ingredient,\n form=IngredientForm,\n extra=25,\n min_num=1,\n validate_min=True)\n\n\nclass RecipeForm(forms.ModelForm):\n\n # Replace image field\n image = forms.ImageField(label='Image',\n required=False,\n widget=CustomClearableFileInput)\n\n # Change rendering of form to user-friendly checkboxes\n # Credit:\n # https://medium.com/swlh/django-forms-for-many-to-many-fields-d977dec4b024\n category = forms.ModelMultipleChoiceField(\n queryset=Category.objects.all(),\n label='Choose some categories from the list',\n required=False,\n widget=forms.CheckboxSelectMultiple\n )\n\n # Change rendering of form to user-friendly checkboxes\n # Credit:\n # https://medium.com/swlh/django-forms-for-many-to-many-fields-d977dec4b024\n tag = forms.ModelMultipleChoiceField(\n queryset=Tag.objects.all(),\n label='Choose some tags from the list',\n required=False,\n widget=forms.CheckboxSelectMultiple\n )\n\n class Meta:\n model = Recipe\n exclude = ('author', 'date',\n 'date_posted', 'date_edited',\n 'vote_count', 'votes', 'recipe_box',\n 'mail_sent', 'discount_code',)\n\n labels = {\n 'intro': 'Brief Description',\n }\n\n def clean_servings(self):\n value = self.cleaned_data.get('servings')\n if value < 1:\n raise forms.ValidationError('The number of servings must be \\\n greater than zero')\n return value\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all().order_by('friendly_name')\n friendly_name = [(c.id, c.get_friendly_name()) for c in categories]\n\n placeholders = {\n 'title': 'eg: Carrot Cake',\n 'intro': 'eg: A deliciously sweet dessert',\n 'prep_time': 'eg: 1hr 20mins',\n 'cook_time': 'eg: 1hr 20mins',\n 'total_time': 'eg: 1hr 20mins',\n 'directions': 'Describe the steps to make this recipe',\n 'image': '',\n 'image_credit': 'Who took the photo?',\n 'servings': 'No. of servings',\n 'tag': '',\n 'category': '',\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['category'].choices = friendly_name\n self.fields['title'].widget.attrs['autofocus'] = True\n self.fields['directions'].required = True\n", "step-ids": [ 6, 7, 9, 12, 15 ] }
[ 6, 7, 9, 12, 15 ]
from abc import ABC, abstractmethod


class DatasetFileManager(ABC):
    """Abstract base class defining the interface for dataset file readers."""

    @abstractmethod
    def read_dataset(self):
        """Read and return the dataset; concrete subclasses must implement this."""
        pass
normal
{ "blob_id": "5ef65ace397be17be62625ed27b5753d15565d61", "index": 555, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass DatasetFileManager(ABC):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass DatasetFileManager(ABC):\n\n @abstractmethod\n def read_dataset(self):\n pass\n", "step-4": "from abc import ABC, abstractmethod\n\n\nclass DatasetFileManager(ABC):\n\n @abstractmethod\n def read_dataset(self):\n pass\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# coding: utf-8 import logging import uuid import json import xmltodict import bottle from bottle import HTTPError from bottle.ext import sqlalchemy from database import Base, engine from database import JdWaybillSendResp, JdWaybillApplyResp jd = bottle.Bottle(catchall=False) plugin = sqlalchemy.Plugin( engine, # SQLAlchemy engine created with create_engine function. Base.metadata, # SQLAlchemy metadata, required only if create=True. keyword='db', # Keyword used to inject session database in a route (default 'db'). create=True, # If it is true, execute `metadata.create_all(engine)` when plugin is applied (default False). commit=True, # If it is true, plugin commit changes after route is executed (default True). use_kwargs=False # If it is true and keyword is not defined, # plugin uses **kwargs argument to inject session database (default False). ) jd.install(plugin) @jd.get('/routerjson') def apply_jd_waybill(db): query = bottle.request.query if query['method'] == 'jingdong.etms.waybillcode.get': jd_code, resp = jd_get_response_normal() logging.debug('JD response: {} {}'.format(jd_code, resp)) db.add(JdWaybillApplyResp(jd_code, resp)) else: # '''jingdong.etms.waybillcode.send''' jd_param = json.loads(query['360buy_param_json']) delivery_id = jd_param['deliveryId'] order_id = jd_param['orderId'] resp = jd_send_response_normal(delivery_id, order_id) db.add(JdWaybillSendResp(delivery_id, order_id, resp)) logging.debug('JD response: {}'.format(resp)) return resp @jd.get('/jd_waybill') def jd_waybill(db): query = bottle.request.query jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get('wms_order_code')).first() if jd_rsp: # return entities return jd_rsp.body return HTTPError(404, None) def jd_get_response_normal(): code = str(uuid.uuid4()).split('-')[-1] return code, json.dumps({ 'jingdong_etms_waybillcode_get_responce': {'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [code] }, 'code': u'0' } }) def jd_send_response_normal(deliver_id, order_id): return json.dumps({ "jingdong_etms_waybill_send_responce": { "resultInfo": { "message": u"成功", "deliveryId": deliver_id, "code": 100, "orderId": order_id } } })
normal
{ "blob_id": "a93884757069393b4d96de5ec9c7d815d58a2ea5", "index": 935, "step-1": "<mask token>\n\n\n@jd.get('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\n@jd.get('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n", "step-2": "<mask token>\njd.install(plugin)\n\n\n@jd.get('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\n@jd.get('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n", "step-3": "<mask token>\njd = bottle.Bottle(catchall=False)\nplugin = sqlalchemy.Plugin(engine, Base.metadata, keyword='db', create=True,\n commit=True, use_kwargs=False)\njd.install(plugin)\n\n\n@jd.get('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return 
resp\n\n\n@jd.get('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n", "step-4": "import logging\nimport uuid\nimport json\nimport xmltodict\nimport bottle\nfrom bottle import HTTPError\nfrom bottle.ext import sqlalchemy\nfrom database import Base, engine\nfrom database import JdWaybillSendResp, JdWaybillApplyResp\njd = bottle.Bottle(catchall=False)\nplugin = sqlalchemy.Plugin(engine, Base.metadata, keyword='db', create=True,\n commit=True, use_kwargs=False)\njd.install(plugin)\n\n\n@jd.get('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\n@jd.get('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n", "step-5": "# coding: utf-8\nimport logging\nimport uuid\nimport json\nimport xmltodict\nimport bottle\nfrom bottle import HTTPError\nfrom bottle.ext import sqlalchemy\nfrom database import Base, engine\nfrom database import JdWaybillSendResp, JdWaybillApplyResp\n\njd = bottle.Bottle(catchall=False)\n\nplugin = sqlalchemy.Plugin(\n engine, # SQLAlchemy engine created with create_engine function.\n Base.metadata, # SQLAlchemy metadata, required only if create=True.\n keyword='db', # Keyword used to inject session database in a route (default 'db').\n create=True, # If it is true, execute `metadata.create_all(engine)` when plugin is applied (default False).\n commit=True, # If it is true, plugin commit changes after route is executed (default True).\n use_kwargs=False\n # If it is true and keyword is not defined,\n # plugin uses **kwargs argument to inject session database (default False).\n)\n\njd.install(plugin)\n\n\n@jd.get('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n 
logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else: # '''jingdong.etms.waybillcode.send'''\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n\n return resp\n\n\n@jd.get('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get('wms_order_code')).first()\n if jd_rsp:\n # return entities\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({\n 'jingdong_etms_waybillcode_get_responce':\n {'resultInfo':\n {'message': u'成功',\n 'code': 100,\n 'deliveryIdList': [code]\n },\n 'code': u'0'\n }\n })\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({\n \"jingdong_etms_waybill_send_responce\": {\n \"resultInfo\": {\n \"message\": u\"成功\",\n \"deliveryId\": deliver_id,\n \"code\": 100,\n \"orderId\": order_id\n }\n }\n })\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
list = input().split()
n = int(list[0])
k = int(list[1])
list.clear()
for i in range(0, n):
    list.append("")
    tmp = input().split()
    list[i] = tmp[0] + list[int(tmp[1])-1]
for i in range(0, k):
    start = input()
    print(len([word for word in list if word.startswith(start)]))
normal
{ "blob_id": "1808be09c2730af5829bb0c7c0c7cfe9f80fe84c", "index": 7546, "step-1": "<mask token>\n", "step-2": "<mask token>\nlist.clear()\nfor i in range(0, n):\n list.append('')\n tmp = input().split()\n list[i] = tmp[0] + list[int(tmp[1]) - 1]\nfor i in range(0, k):\n start = input()\n print(len([word for word in list if word.startswith(start)]))\n", "step-3": "list = input().split()\nn = int(list[0])\nk = int(list[1])\nlist.clear()\nfor i in range(0, n):\n list.append('')\n tmp = input().split()\n list[i] = tmp[0] + list[int(tmp[1]) - 1]\nfor i in range(0, k):\n start = input()\n print(len([word for word in list if word.startswith(start)]))\n", "step-4": "list = input().split()\nn = int(list[0])\nk = int(list[1])\nlist.clear()\nfor i in range(0, n):\n list.append(\"\")\n tmp = input().split()\n list[i] = tmp[0] + list[int(tmp[1])-1]\nfor i in range(0, k):\n start = input()\n print(len([word for word in list if word.startswith(start)]))", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# -*- encoding:utf-8 -*-
from setuptools import setup, find_packages

setup(
    name='pass-manager',
    version='1.2.0',
    author='petitviolet',
    author_email='violethero0820@gmail.com',
    packages=find_packages(),
    description = 'Simple CLI Password Manager',
    long_description = 'Please show help (pass-manager -h)',
    url = 'https://github.com/petitviolet/pass-manager',
    license = 'MIT',
    # scripts = ['src/pass_manager.py'],
    platforms = ['Mac OS X'],
    # platforms = ['POSIX', 'Windows', 'Mac OS X'],
    entry_points={
        'console_scripts': 'pass-manager = src.pass_manager:main'
    },
    zip_safe=False,
    install_requires = ['crypto'],
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Utilities'
    ]
)
normal
{ "blob_id": "31664f1cc808ccc0dad230e2b955692c7ae12db1", "index": 1792, "step-1": "<mask token>\n", "step-2": "<mask token>\nsetup(name='pass-manager', version='1.2.0', author='petitviolet',\n author_email='violethero0820@gmail.com', packages=find_packages(),\n description='Simple CLI Password Manager', long_description=\n 'Please show help (pass-manager -h)', url=\n 'https://github.com/petitviolet/pass-manager', license='MIT', platforms\n =['Mac OS X'], entry_points={'console_scripts':\n 'pass-manager = src.pass_manager:main'}, zip_safe=False,\n install_requires=['crypto'], classifiers=['Environment :: Console',\n 'Intended Audience :: Developers', 'Operating System :: OS Independent',\n 'Programming Language :: Python', 'Topic :: Utilities'])\n", "step-3": "from setuptools import setup, find_packages\nsetup(name='pass-manager', version='1.2.0', author='petitviolet',\n author_email='violethero0820@gmail.com', packages=find_packages(),\n description='Simple CLI Password Manager', long_description=\n 'Please show help (pass-manager -h)', url=\n 'https://github.com/petitviolet/pass-manager', license='MIT', platforms\n =['Mac OS X'], entry_points={'console_scripts':\n 'pass-manager = src.pass_manager:main'}, zip_safe=False,\n install_requires=['crypto'], classifiers=['Environment :: Console',\n 'Intended Audience :: Developers', 'Operating System :: OS Independent',\n 'Programming Language :: Python', 'Topic :: Utilities'])\n", "step-4": "# -*- encoding:utf-8 -*-\nfrom setuptools import setup, find_packages\n\nsetup(\n name='pass-manager',\n version='1.2.0',\n author='petitviolet',\n author_email='violethero0820@gmail.com',\n packages=find_packages(),\n description = 'Simple CLI Password Manager',\n long_description = 'Please show help (pass-manager -h)',\n url = 'https://github.com/petitviolet/pass-manager',\n license = 'MIT',\n # scripts = ['src/pass_manager.py'],\n platforms = ['Mac OS X'],\n # platforms = ['POSIX', 'Windows', 'Mac OS X'],\n entry_points={\n 'console_scripts': 'pass-manager = src.pass_manager:main'\n },\n zip_safe=False,\n install_requires = ['crypto'],\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Utilities'\n ]\n)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from .exceptions import InvalidUsage


class HTTPMethodView:
    """ Simple class based implementation of view for the sanic.
    You should implement methods (get, post, put, patch, delete) for the class
    to every HTTP method you want to support.

    For example:
        class DummyView(HTTPMethodView):

            def get(self, request, *args, **kwargs):
                return text('I am get method')

            def put(self, request, *args, **kwargs):
                return text('I am put method')
    etc.

    If someone tries to use a non-implemented method, there will be a
    405 response.

    If you need any url params just mention them in method definition:
        class DummyView(HTTPMethodView):

            def get(self, request, my_param_here, *args, **kwargs):
                return text('I am get method with %s' % my_param_here)

    To add the view into the routing you could use
        1) app.add_route(DummyView.as_view(), '/')
        2) app.route('/')(DummyView.as_view())

    To add any decorator you could set it into decorators variable
    """
    decorators = []

    def dispatch_request(self, request, *args, **kwargs):
        handler = getattr(self, request.method.lower(), None)
        if handler:
            return handler(request, *args, **kwargs)
        raise InvalidUsage('Method {} not allowed for URL {}'.format(
            request.method, request.url), status_code=405)

    @classmethod
    def as_view(cls, *class_args, **class_kwargs):
        """ Converts the class into an actual view function that can be used
        with the routing system.

        """
        def view(*args, **kwargs):
            self = view.view_class(*class_args, **class_kwargs)
            return self.dispatch_request(*args, **kwargs)

        if cls.decorators:
            view.__module__ = cls.__module__
            for decorator in cls.decorators:
                view = decorator(view)

        view.view_class = cls
        view.__doc__ = cls.__doc__
        view.__module__ = cls.__module__
        return view
normal
{ "blob_id": "4948fd2062bdbd32bfa32d2b0e24587f0872132d", "index": 4686, "step-1": "<mask token>\n\n\nclass HTTPMethodView:\n <mask token>\n <mask token>\n\n def dispatch_request(self, request, *args, **kwargs):\n handler = getattr(self, request.method.lower(), None)\n if handler:\n return handler(request, *args, **kwargs)\n raise InvalidUsage('Method {} not allowed for URL {}'.format(\n request.method, request.url), status_code=405)\n <mask token>\n", "step-2": "<mask token>\n\n\nclass HTTPMethodView:\n <mask token>\n decorators = []\n\n def dispatch_request(self, request, *args, **kwargs):\n handler = getattr(self, request.method.lower(), None)\n if handler:\n return handler(request, *args, **kwargs)\n raise InvalidUsage('Method {} not allowed for URL {}'.format(\n request.method, request.url), status_code=405)\n\n @classmethod\n def as_view(cls, *class_args, **class_kwargs):\n \"\"\" Converts the class into an actual view function that can be used\n with the routing system.\n\n \"\"\"\n\n def view(*args, **kwargs):\n self = view.view_class(*class_args, **class_kwargs)\n return self.dispatch_request(*args, **kwargs)\n if cls.decorators:\n view.__module__ = cls.__module__\n for decorator in cls.decorators:\n view = decorator(view)\n view.view_class = cls\n view.__doc__ = cls.__doc__\n view.__module__ = cls.__module__\n return view\n", "step-3": "<mask token>\n\n\nclass HTTPMethodView:\n \"\"\" Simple class based implementation of view for the sanic.\n You should implement methods (get, post, put, patch, delete) for the class\n to every HTTP method you want to support.\n\n For example:\n class DummyView(HTTPMethodView):\n\n def get(self, request, *args, **kwargs):\n return text('I am get method')\n\n def put(self, request, *args, **kwargs):\n return text('I am put method')\n etc.\n\n If someone tries to use a non-implemented method, there will be a\n 405 response.\n\n If you need any url params just mention them in method definition:\n class DummyView(HTTPMethodView):\n\n def get(self, request, my_param_here, *args, **kwargs):\n return text('I am get method with %s' % my_param_here)\n\n To add the view into the routing you could use\n 1) app.add_route(DummyView.as_view(), '/')\n 2) app.route('/')(DummyView.as_view())\n\n To add any decorator you could set it into decorators variable\n \"\"\"\n decorators = []\n\n def dispatch_request(self, request, *args, **kwargs):\n handler = getattr(self, request.method.lower(), None)\n if handler:\n return handler(request, *args, **kwargs)\n raise InvalidUsage('Method {} not allowed for URL {}'.format(\n request.method, request.url), status_code=405)\n\n @classmethod\n def as_view(cls, *class_args, **class_kwargs):\n \"\"\" Converts the class into an actual view function that can be used\n with the routing system.\n\n \"\"\"\n\n def view(*args, **kwargs):\n self = view.view_class(*class_args, **class_kwargs)\n return self.dispatch_request(*args, **kwargs)\n if cls.decorators:\n view.__module__ = cls.__module__\n for decorator in cls.decorators:\n view = decorator(view)\n view.view_class = cls\n view.__doc__ = cls.__doc__\n view.__module__ = cls.__module__\n return view\n", "step-4": "from .exceptions import InvalidUsage\n\n\nclass HTTPMethodView:\n \"\"\" Simple class based implementation of view for the sanic.\n You should implement methods (get, post, put, patch, delete) for the class\n to every HTTP method you want to support.\n\n For example:\n class DummyView(HTTPMethodView):\n\n def get(self, request, *args, **kwargs):\n return text('I am get 
method')\n\n def put(self, request, *args, **kwargs):\n return text('I am put method')\n etc.\n\n If someone tries to use a non-implemented method, there will be a\n 405 response.\n\n If you need any url params just mention them in method definition:\n class DummyView(HTTPMethodView):\n\n def get(self, request, my_param_here, *args, **kwargs):\n return text('I am get method with %s' % my_param_here)\n\n To add the view into the routing you could use\n 1) app.add_route(DummyView.as_view(), '/')\n 2) app.route('/')(DummyView.as_view())\n\n To add any decorator you could set it into decorators variable\n \"\"\"\n decorators = []\n\n def dispatch_request(self, request, *args, **kwargs):\n handler = getattr(self, request.method.lower(), None)\n if handler:\n return handler(request, *args, **kwargs)\n raise InvalidUsage('Method {} not allowed for URL {}'.format(\n request.method, request.url), status_code=405)\n\n @classmethod\n def as_view(cls, *class_args, **class_kwargs):\n \"\"\" Converts the class into an actual view function that can be used\n with the routing system.\n\n \"\"\"\n\n def view(*args, **kwargs):\n self = view.view_class(*class_args, **class_kwargs)\n return self.dispatch_request(*args, **kwargs)\n if cls.decorators:\n view.__module__ = cls.__module__\n for decorator in cls.decorators:\n view = decorator(view)\n view.view_class = cls\n view.__doc__ = cls.__doc__\n view.__module__ = cls.__module__\n return view\n", "step-5": null, "step-ids": [ 2, 4, 5, 6 ] }
[ 2, 4, 5, 6 ]
import requests


def get(url):
    return requests.get(url).text
normal
{ "blob_id": "671ecf23df1da659d186014afa738d0608ad404d", "index": 9251, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef get(url):\n return requests.get(url).text\n", "step-3": "import requests\n\n\ndef get(url):\n return requests.get(url).text\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import ctypes
import time
from order_queue.order import Order

class stock(ctypes.Structure):
    _fields_ = [('stock_id', ctypes.c_int), ('order_type',ctypes.c_int),('Time',ctypes.c_char * 40),('user_id',ctypes.c_int),('volume',ctypes.c_int),
                ('price',ctypes.c_double)
                ]
class exchange(ctypes.Structure):
    _fields_ = [
        ('stock_id',ctypes.c_int),
        ('buy_id',ctypes.c_int),
        ('sell_id',ctypes.c_int),
        ('Time',ctypes.c_char * 40),
        ('volume',ctypes.c_int),
        ('price',ctypes.c_double)
    ]
class TestSturcture(ctypes.Structure):
    _fields_ = [
        ('a',ctypes.c_int),
        ('n',ctypes.c_int)
    ]
def time_conversion(input):
    get = time.strftime("%H:%M:%S", input).encode('utf-8')
    return get

def order_conversion(order):
    get_time = time_conversion(order.time)
    get = stock(int(order.get_stock_id()),int(order.get_direction()),get_time,int(order.get_user_id()[1:]),int(order.get_volume()),float(order.get_price()))
    return get

def regenerate_order(result,long_order,short_order):
    deal_volume = result.volume
    if int(long_order.get_volume()) != result.volume:
        left_volume = int(long_order.get_volume()) - result.volume
        left_order = long_order
    elif int(short_order.get_volume()) != result.volume:
        left_volume = int(long_order.get_volume()) - result.volume
        left_order = short_order
    else:
        return None

    order = Order( left_order.get_stock_id(),left_order.get_user_id(),left_order.get_price(),left_volume,left_order.get_direction())
    return order

if __name__ == '__main__':
    print(time_conversion(time.localtime(time.time())))
normal
{ "blob_id": "7491a17256b9bc7af0953202e45f0fd9d5c34c40", "index": 8376, "step-1": "<mask token>\n\n\nclass exchange(ctypes.Structure):\n <mask token>\n\n\nclass TestSturcture(ctypes.Structure):\n _fields_ = [('a', ctypes.c_int), ('n', ctypes.c_int)]\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass stock(ctypes.Structure):\n <mask token>\n\n\nclass exchange(ctypes.Structure):\n _fields_ = [('stock_id', ctypes.c_int), ('buy_id', ctypes.c_int), (\n 'sell_id', ctypes.c_int), ('Time', ctypes.c_char * 40), ('volume',\n ctypes.c_int), ('price', ctypes.c_double)]\n\n\nclass TestSturcture(ctypes.Structure):\n _fields_ = [('a', ctypes.c_int), ('n', ctypes.c_int)]\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass stock(ctypes.Structure):\n _fields_ = [('stock_id', ctypes.c_int), ('order_type', ctypes.c_int), (\n 'Time', ctypes.c_char * 40), ('user_id', ctypes.c_int), ('volume',\n ctypes.c_int), ('price', ctypes.c_double)]\n\n\nclass exchange(ctypes.Structure):\n _fields_ = [('stock_id', ctypes.c_int), ('buy_id', ctypes.c_int), (\n 'sell_id', ctypes.c_int), ('Time', ctypes.c_char * 40), ('volume',\n ctypes.c_int), ('price', ctypes.c_double)]\n\n\nclass TestSturcture(ctypes.Structure):\n _fields_ = [('a', ctypes.c_int), ('n', ctypes.c_int)]\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass stock(ctypes.Structure):\n _fields_ = [('stock_id', ctypes.c_int), ('order_type', ctypes.c_int), (\n 'Time', ctypes.c_char * 40), ('user_id', ctypes.c_int), ('volume',\n ctypes.c_int), ('price', ctypes.c_double)]\n\n\nclass exchange(ctypes.Structure):\n _fields_ = [('stock_id', ctypes.c_int), ('buy_id', ctypes.c_int), (\n 'sell_id', ctypes.c_int), ('Time', ctypes.c_char * 40), ('volume',\n ctypes.c_int), ('price', ctypes.c_double)]\n\n\nclass TestSturcture(ctypes.Structure):\n _fields_ = [('a', ctypes.c_int), ('n', ctypes.c_int)]\n\n\ndef time_conversion(input):\n get = time.strftime('%H:%M:%S', input).encode('utf-8')\n return get\n\n\n<mask token>\n\n\ndef regenerate_order(result, long_order, short_order):\n deal_volume = result.volume\n if int(long_order.get_volume()) != result.volume:\n left_volume = int(long_order.get_volume()) - result.volume\n left_order = long_order\n elif int(short_order.get_volume()) != result.volume:\n left_volume = int(long_order.get_volume()) - result.volume\n left_order = short_order\n else:\n return None\n order = Order(left_order.get_stock_id(), left_order.get_user_id(),\n left_order.get_price(), left_volume, left_order.get_direction())\n return order\n\n\n<mask token>\n", "step-5": "import ctypes\nimport time\nfrom order_queue.order import Order\n\nclass stock(ctypes.Structure):\n _fields_ = [('stock_id', ctypes.c_int), ('order_type',ctypes.c_int),('Time',ctypes.c_char * 40),('user_id',ctypes.c_int),('volume',ctypes.c_int),\n ('price',ctypes.c_double)\n ]\nclass exchange(ctypes.Structure):\n _fields_ = [\n ('stock_id',ctypes.c_int),\n ('buy_id',ctypes.c_int),\n ('sell_id',ctypes.c_int),\n ('Time',ctypes.c_char * 40),\n ('volume',ctypes.c_int),\n ('price',ctypes.c_double)\n ]\nclass TestSturcture(ctypes.Structure):\n _fields_ = [\n ('a',ctypes.c_int),\n ('n',ctypes.c_int)\n ]\ndef time_conversion(input):\n get = time.strftime(\"%H:%M:%S\", input).encode('utf-8')\n return get\n\ndef order_conversion(order):\n get_time = time_conversion(order.time)\n get = stock(int(order.get_stock_id()),int(order.get_direction()),get_time,int(order.get_user_id()[1:]),int(order.get_volume()),float(order.get_price()))\n return get\n\ndef 
regenerate_order(result,long_order,short_order):\n deal_volume = result.volume\n if int(long_order.get_volume()) != result.volume:\n left_volume = int(long_order.get_volume()) - result.volume\n left_order = long_order\n elif int(short_order.get_volume()) != result.volume:\n left_volume = int(long_order.get_volume()) - result.volume\n left_order = short_order\n else:\n return None\n\n order = Order( left_order.get_stock_id(),left_order.get_user_id(),left_order.get_price(),left_volume,left_order.get_direction())\n return order\n\nif __name__ == '__main__':\n print(time_conversion(time.localtime(time.time())))", "step-ids": [ 3, 5, 6, 8, 12 ] }
[ 3, 5, 6, 8, 12 ]
# coding: utf-8

BOT_NAME = ['lg']

SPIDER_MODULES = ['lg.spiders']
NEWSPIDER_MODULE = 'lg.spiders'

DOWNLOAD_DELAY = 0.1  # 间隔时间
LOG_LEVEL = 'WARNING'
normal
{ "blob_id": "bed3d83f682404719a95be360cdd74be9dc87991", "index": 3718, "step-1": "<mask token>\n", "step-2": "BOT_NAME = ['lg']\nSPIDER_MODULES = ['lg.spiders']\nNEWSPIDER_MODULE = 'lg.spiders'\nDOWNLOAD_DELAY = 0.1\nLOG_LEVEL = 'WARNING'\n", "step-3": "# coding: utf-8\n\nBOT_NAME = ['lg']\n\nSPIDER_MODULES = ['lg.spiders']\nNEWSPIDER_MODULE = 'lg.spiders'\n\nDOWNLOAD_DELAY = 0.1 # 间隔时间\nLOG_LEVEL = 'WARNING'\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
#/usr/bin/env python3

def nth_prime(n):
    ans = 2
    known = []
    for _ in range(n):
        while not all(ans%x != 0 for x in known):
            ans += 1
        known.append(ans)
    return ans

if __name__ == "__main__":
    n = int(input("Which one? "))
    print(nth_prime(n))
normal
{ "blob_id": "21fb9622add4d19b2914118e3afd3867b2368a50", "index": 4913, "step-1": "<mask token>\n", "step-2": "def nth_prime(n):\n ans = 2\n known = []\n for _ in range(n):\n while not all(ans % x != 0 for x in known):\n ans += 1\n known.append(ans)\n return ans\n\n\n<mask token>\n", "step-3": "def nth_prime(n):\n ans = 2\n known = []\n for _ in range(n):\n while not all(ans % x != 0 for x in known):\n ans += 1\n known.append(ans)\n return ans\n\n\nif __name__ == '__main__':\n n = int(input('Which one? '))\n print(nth_prime(n))\n", "step-4": "#/usr/bin/env python3\n\ndef nth_prime(n):\n ans = 2\n known = []\n for _ in range(n):\n while not all(ans%x != 0 for x in known):\n ans += 1\n known.append(ans)\n return ans\n\nif __name__ == \"__main__\":\n n = int(input(\"Which one? \"))\n print(nth_prime(n))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
class Solution:
    def divide(self, dividend, divisor):
        """
        :type dividend: int
        :type divisor: int
        :rtype: int
        """
        negative = (dividend < 0) ^ (divisor < 0)
        dividend, divisor = abs(dividend), abs(divisor)

        result = 0
        while dividend >= divisor:
            shift_time = 1
            while dividend >= divisor << shift_time:
                shift_time += 1
            dividend -= divisor << (shift_time - 1)
            result += 1 << (shift_time - 1)

        if negative:
            result = -result

        if (-1 << 31) <= result <= (1 << 31) - 1:
            return result
        return (1 << 31) - 1


if __name__ == '__main__':
    print(Solution().divide(-2147483648, -1))
normal
{ "blob_id": "4a0213351f8e9dcb2c6e71317a5ff1064974652e", "index": 3418, "step-1": "<mask token>\n", "step-2": "class Solution:\n <mask token>\n\n\n<mask token>\n", "step-3": "class Solution:\n\n def divide(self, dividend, divisor):\n \"\"\"\n :type dividend: int\n :type divisor: int\n :rtype: int\n \"\"\"\n negative = (dividend < 0) ^ (divisor < 0)\n dividend, divisor = abs(dividend), abs(divisor)\n result = 0\n while dividend >= divisor:\n shift_time = 1\n while dividend >= divisor << shift_time:\n shift_time += 1\n dividend -= divisor << shift_time - 1\n result += 1 << shift_time - 1\n if negative:\n result = -result\n if -1 << 31 <= result <= (1 << 31) - 1:\n return result\n return (1 << 31) - 1\n\n\n<mask token>\n", "step-4": "class Solution:\n\n def divide(self, dividend, divisor):\n \"\"\"\n :type dividend: int\n :type divisor: int\n :rtype: int\n \"\"\"\n negative = (dividend < 0) ^ (divisor < 0)\n dividend, divisor = abs(dividend), abs(divisor)\n result = 0\n while dividend >= divisor:\n shift_time = 1\n while dividend >= divisor << shift_time:\n shift_time += 1\n dividend -= divisor << shift_time - 1\n result += 1 << shift_time - 1\n if negative:\n result = -result\n if -1 << 31 <= result <= (1 << 31) - 1:\n return result\n return (1 << 31) - 1\n\n\nif __name__ == '__main__':\n print(Solution().divide(-2147483648, -1))\n", "step-5": "class Solution:\n def divide(self, dividend, divisor):\n \"\"\"\n :type dividend: int\n :type divisor: int\n :rtype: int\n \"\"\"\n negative = (dividend < 0) ^ (divisor < 0)\n dividend, divisor = abs(dividend), abs(divisor)\n\n result = 0\n while dividend >= divisor:\n shift_time = 1\n while dividend >= divisor << shift_time:\n shift_time += 1\n dividend -= divisor << (shift_time - 1)\n result += 1 << (shift_time - 1)\n\n if negative:\n result = -result\n\n if (-1 << 31) <= result <= (1 << 31) - 1:\n return result\n return (1 << 31) - 1\n\n\nif __name__ == '__main__':\n print(Solution().divide(-2147483648, -1))\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#! /usr/bin/env python

from taskHandler import Location, Task, TaskFactory
import roslib; roslib.load_manifest('smart_stool')
import rospy
from geometry_msgs.msg import PoseStamped, Twist, Vector3
from nav_msgs.msg import Odometry
from kobuki_msgs.msg import BumperEvent
from move_base_msgs.msg import MoveBaseActionResult
from tf.transformations import quaternion_about_axis, euler_from_quaternion
z_axis = (0,0,1)
from math import pi

class SmartStool:
    def __init__(self):
        # state of the smart stool
        self.odomPose = Location(0,0,0)
        self.bumperTriggered = False
        self.atTaskLocation = False

        # defining the tasks
        stool = Task('stool', 1, Location(0,0,0), 'sit')
        getMail = Task('get_mail', 2, Location(4,-3,0), 'bump')
        chasePets = Task('chase_pets', 3, Location(0,0,0), 'wiggle')
        charge = Task('charge_battery', 4, Location(1,0,0), 'sit')
        charge.activate() # charging should always be an active task

        # populate the task list and set up the task factory
        taskList = [stool, getMail, chasePets, charge]
        self.factory = TaskFactory(taskList)

        # set up the current task
        self.task = self.factory.getNextTask()

        # set up the subscribers
        self.odom_sub = rospy.Subscriber('/odom', Odometry, self.readOdometry, queue_size=1)
        self.bumper_sub = rospy.Subscriber('/mobile_base/events/bumper', BumperEvent, self.readBumper, queue_size=1)
        self.goalReached_sub = rospy.Subscriber('/move_base/result', MoveBaseActionResult, self.goalReached, queue_size=1)

        # set up the publishers
        self.moveBase_pub = rospy.Publisher('/move_base_simple/goal', PoseStamped)
        self.action_pub = rospy.Publisher('/cmd_vel_mux/input/teleop', Twist)

    def goToTask(self):
        # send the smart stool to the location of its current task
        current_task_location = self.task.location.copy()
        goal = PoseStamped()
        goal.header.frame_id = 'map'
        goal.header.seq = 1
        now = rospy.Time.now()
        goal.header.stamp.secs = now.secs
        goal.header.stamp.nsecs = now.nsecs
        goal.pose.position.x = current_task_location.x
        goal.pose.position.y = current_task_location.y
        goal.pose.position.z = 0
        quat = quaternion_about_axis(current_task_location.theta,z_axis)
        goal.pose.orientation.w = quat[0]
        goal.pose.orientation.x = quat[1]
        goal.pose.orientation.y = quat[2]
        goal.pose.orientation.z = quat[3]
        self.moveBase_pub.publish(goal)

    def publishTwist(self, cmd_linvel, cmd_angvel):
        # publishes a Twist message to /cmd_vel_mux/input/teleop to perform custom motion actions
        self.action_pub.publish(Twist(Vector3(cmd_linvel,0,0),Vector3(0,0,cmd_angvel)))

    def actionHandler(self,actionName):
        ####
        #### TODO: a change of task priority doesn't necessarily mean that the task was deactivated. Need to check
        #### if original task is still in list of active tasks. if it is, do not deactivate it. if it's not, deactivate it.
        #### Also need to check for other general silly mistakes
        ####
        current_task = self.task.copy()
        startLocation = self.odomPose.copy()
        driveSpeed = 0.1
        spinSpeed = 0.5
        close_enough = 0.1
        wiggle_rotate = pi/2
        timeout = 10
        startTime = rospy.get_time()

        # execute the sit action
        print actionName
        if actionName == 'sit':
            while (not rospy.is_shutdown()) and (self.task == current_task):
                self.publishTwist(0,0)
                rate.sleep()
                self.task = self.factory.getNextTask()
                ##### TEMP #####
                self.factory.activateTask('get_mail')

        # execute the bump action
        elif actionName == 'bump':
            self.bumperTriggered = False
            while not rospy.is_shutdown() and not self.bumperTriggered:
                self.publishTwist(driveSpeed,0)
                rate.sleep()
            startTime = rospy.get_time()
            while not rospy.is_shutdown() and (rospy.get_time() - startTime < 1):
                self.publishTwist(-driveSpeed,0)
                rate.sleep()
            self.factory.deactivateTask(current_task.name)

        # execute the wiggle action
        elif actionName == 'wiggle':
            while self.task == current_task or (rospy.get_time() - startTime > timeout):
                while not rospy.is_shutdown() and not self.odomPose.compareAngle(startLocation,-wiggle_rotate):
                    self.publishTwist(0,-spinSpeed)
                    rate.sleep()
                while not rospy.is_shutdown() and not self.odomPose.compareAngle(startLocation,wiggle_rotate):
                    self.publishTwist(0,spinSpeed)
                    rate.sleep()
                self.task = self.factory.getNextTask()
            self.factory.deactivateTask(current_task.name)

        # warn that the specified action is not implemented
        else:
            print 'Action not implemented!'
            print actionName

        # stop the robot:
        self.publishTwist(0,0)

    def execute(self):
        if self.task is None: break
        current_task = self.task.copy()
        self.goToTask()
        # wait for the robot to be at its goal position
        print 'going to task:' + current_task.name
        while not self.atTaskLocation:
            rate.sleep()
            self.task = self.factory.getNextTask()
            # if that task has changed, exit this function
            if not(current_task == self.task):
                return
        # reset for the next task
        self.atTaskLocation = False
        print 'doing action'
        self.actionHandler(self.task.getAction())

    def readOdometry(self,msg):
        # callback function to read the robot's current odometry position
        odom_position = msg.pose.pose.position
        odom_rotation = msg.pose.pose.orientation
        self.odomPose = Location(odom_position.x,odom_position.y,euler_from_quaternion((odom_rotation.w, odom_rotation.x, odom_rotation.y, odom_rotation.z))[2])

    def readBumper(self,msg):
        # callback function to set the bumperTriggered flag if the bumper was hit
        self.bumperTriggered = True

    def goalReached(self,msg):
        # callback function to determine if the current task location was reached
        if msg.status.status == 3:
            self.atTaskLocation = True

if __name__ == '__main__':
    # initialize the node:
    rospy.init_node('smart_stool')
    freq = 30 # hz
    rate = rospy.Rate(freq)
    # set up the smart stool object
    mySmartStool = SmartStool()

    # wait for one second
    for i in range(freq):
        rate.sleep()

    while not rospy.is_shutdown():
        mySmartStool.execute()
        rate.sleep()


#top = factory.getNextTask()
#all = factory.getAllTasks()
normal
{ "blob_id": "234112ec16af39b79849dd08769597771fa2c38f", "index": 3425, "step-1": "#! /usr/bin/env python\n\nfrom taskHandler import Location, Task, TaskFactory\nimport roslib; roslib.load_manifest('smart_stool')\nimport rospy\nfrom geometry_msgs.msg import PoseStamped, Twist, Vector3\nfrom nav_msgs.msg import Odometry\nfrom kobuki_msgs.msg import BumperEvent\nfrom move_base_msgs.msg import MoveBaseActionResult\nfrom tf.transformations import quaternion_about_axis, euler_from_quaternion\nz_axis = (0,0,1)\nfrom math import pi\n\nclass SmartStool:\n def __init__(self):\n # state of the smart stool\n self.odomPose = Location(0,0,0)\n self.bumperTriggered = False\n self.atTaskLocation = False\n\n # defining the tasks\n stool = Task('stool', 1, Location(0,0,0), 'sit')\n getMail = Task('get_mail', 2, Location(4,-3,0), 'bump')\n chasePets = Task('chase_pets', 3, Location(0,0,0), 'wiggle')\n charge = Task('charge_battery', 4, Location(1,0,0), 'sit')\n charge.activate() # charging should always be an active task\n\n # populate the task list and set up the task factory\n taskList = [stool, getMail, chasePets, charge]\n self.factory = TaskFactory(taskList)\n\n # set up the current task\n self.task = self.factory.getNextTask()\n\n # set up the subscribers\n self.odom_sub = rospy.Subscriber('/odom', Odometry, self.readOdometry, queue_size=1)\n self.bumper_sub = rospy.Subscriber('/mobile_base/events/bumper', BumperEvent, self.readBumper, queue_size=1)\n self.goalReached_sub = rospy.Subscriber('/move_base/result', MoveBaseActionResult, self.goalReached, queue_size=1)\n\n # set up the publishers\n self.moveBase_pub = rospy.Publisher('/move_base_simple/goal', PoseStamped)\n self.action_pub = rospy.Publisher('/cmd_vel_mux/input/teleop', Twist)\n\n def goToTask(self):\n # send the smart stool to the location of its current task\n current_task_location = self.task.location.copy()\n goal = PoseStamped()\n goal.header.frame_id = 'map'\n goal.header.seq = 1\n now = rospy.Time.now()\n goal.header.stamp.secs = now.secs\n goal.header.stamp.nsecs = now.nsecs\n goal.pose.position.x = current_task_location.x\n goal.pose.position.y = current_task_location.y\n goal.pose.position.z = 0\n quat = quaternion_about_axis(current_task_location.theta,z_axis)\n goal.pose.orientation.w = quat[0]\n goal.pose.orientation.x = quat[1]\n goal.pose.orientation.y = quat[2]\n goal.pose.orientation.z = quat[3]\n self.moveBase_pub.publish(goal)\n\n def publishTwist(self, cmd_linvel, cmd_angvel):\n # publishes a Twist message to /cmd_vel_mux/input/teleop to perform custom motion actions\n self.action_pub.publish(Twist(Vector3(cmd_linvel,0,0),Vector3(0,0,cmd_angvel)))\n\n def actionHandler(self,actionName):\n ####\n #### TODO: a change of task priority doesn't necessarily mean that the task was deactivated. Need to check\n #### if original task is still in list of active tasks. if it is, do not deactivate it. 
if it's not, deactivate it.\n #### Also need to check for other general silly mistakes\n ####\n current_task = self.task.copy()\n startLocation = self.odomPose.copy()\n driveSpeed = 0.1\n spinSpeed = 0.5\n close_enough = 0.1\n wiggle_rotate = pi/2\n timeout = 10\n startTime = rospy.get_time()\n\n # execute the sit action\n print actionName\n if actionName == 'sit':\n while (not rospy.is_shutdown()) and (self.task == current_task):\n self.publishTwist(0,0)\n rate.sleep()\n self.task = self.factory.getNextTask()\n ##### TEMP #####\n self.factory.activateTask('get_mail')\n\n # execute the bump action\n elif actionName == 'bump':\n self.bumperTriggered = False\n while not rospy.is_shutdown() and not self.bumperTriggered:\n self.publishTwist(driveSpeed,0)\n rate.sleep()\n startTime = rospy.get_time()\n while not rospy.is_shutdown() and (rospy.get_time() - startTime < 1):\n self.publishTwist(-driveSpeed,0)\n rate.sleep()\n self.factory.deactivateTask(current_task.name)\n\n # execute the wiggle action\n elif actionName == 'wiggle':\n while self.task == current_task or (rospy.get_time() - startTime > timeout):\n while not rospy.is_shutdown() and not self.odomPose.compareAngle(startLocation,-wiggle_rotate):\n self.publishTwist(0,-spinSpeed)\n rate.sleep()\n while not rospy.is_shutdown() and not self.odomPose.compareAngle(startLocation,wiggle_rotate):\n self.publishTwist(0,spinSpeed)\n rate.sleep()\n self.task = self.factory.getNextTask()\n self.factory.deactivateTask(current_task.name)\n\n # warn that the specified action is not implemented\n else:\n print 'Action not implemented!'\n print actionName\n\n # stop the robot:\n self.publishTwist(0,0)\n\n def execute(self):\n if self.task is None: break\n current_task = self.task.copy()\n self.goToTask()\n # wait for the robot to be at its goal position\n print 'going to task:' + current_task.name\n while not self.atTaskLocation:\n rate.sleep()\n self.task = self.factory.getNextTask()\n # if that task has changed, exit this function\n if not(current_task == self.task):\n return\n # reset for the next task\n self.atTaskLocation = False\n print 'doing action'\n self.actionHandler(self.task.getAction())\n\n def readOdometry(self,msg):\n # callback function to read the robot's current odometry position\n odom_position = msg.pose.pose.position\n odom_rotation = msg.pose.pose.orientation\n self.odomPose = Location(odom_position.x,odom_position.y,euler_from_quaternion((odom_rotation.w, odom_rotation.x, odom_rotation.y, odom_rotation.z))[2])\n\n def readBumper(self,msg):\n # callback function to set the bumperTriggered flag if the bumper was hit\n self.bumperTriggered = True\n\n def goalReached(self,msg):\n # callback function to determine if the current task location was reached\n if msg.status.status == 3:\n self.atTaskLocation = True\n\nif __name__ == '__main__':\n # initialize the node:\n rospy.init_node('smart_stool')\n freq = 30 # hz\n rate = rospy.Rate(freq)\n # set up the smart stool object\n mySmartStool = SmartStool()\n\n # wait for one second\n for i in range(freq):\n rate.sleep()\n\n while not rospy.is_shutdown():\n mySmartStool.execute()\n rate.sleep()\n\n\n\n#top = factory.getNextTask()\n#all = factory.getAllTasks()", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
###############################################################################
# Programming Essentials B8IT102 Assessment                                   #
# Student: Barry Sheppard ID: 10387786                                        #
# Problem 1                                                                   #
###############################################################################


###############################################################################
# Functions                                                                   #
###############################################################################

def LimitedInput(message, limit, isNumber=False):
    """ Prompt user for input and continue to do so until input is valid.

    This function takes two required inputs, the message to display, and the
    limit of characters required. If the user enters something too long, they
    are prompted again until the input is correct.
    If the optional isNumber parameter is True, then it will also continue to
    prompt the user until a valid number is input.

    """
    keepAsking = True
    while keepAsking:
        answer = input(message)
        if len(answer) > limit:
            print("The input must be", limit, "characters or less.")
        else:
            keepAsking = False
            if isNumber is True and CheckNumber(answer) is False:
                print("The input must be a number.")
                keepAsking = True
    return answer


def CheckNumber(userInput):
    """ This function returns True if userInput can be converted to a number and
    returns False if it cannot. """
    try:
        float(userInput)
        return True
    except(ValueError):
        return False


def DateInput(message):
    """ This function prompts the user for a date using the message variable.
    User will continue to be prompted until the format is correct.

    The date format is very specific in the format DD/MM/YYYYY
    This function will confirm there are the right number of characters,
    the / are in the right place, the input are numbers, the days are between
    1 and 31, the months are between 1 and 12, and the year is between 2000
    and 3000 (roll on year 3k bug!)
    """
    askAgainMessage = "The date must be in the format DD/MM/YYYY"
    keepAsking = True
    while keepAsking:
        answer = input(message)
        # First we check if there are two / by splitting using / and looking
        # for 3 items in the returned list.
        dateCheck = answer.split(sep="/")
        if len(dateCheck) is not 3:
            print(askAgainMessage)
        else:
            # If all is order, we can assign the 3 items to day, month, year
            day = dateCheck[0]
            month = dateCheck[1]
            year = dateCheck[2]
            # Next we check each item has the right amount of characters
            # and they can all be converted into numbers.
            if (len(day) == 2 and len(month) == 2 and len(year) == 4 and
                    CheckNumber(day) and CheckNumber(month) and
                    CheckNumber(year)):
                day = int(day)
                month = int(month)
                year = int(year)
                if (day > 0 and day < 32 and month > 0 and month < 13 and
                        year > 2000 and year < 3000):
                    keepAsking = False
                else:
                    print(askAgainMessage)
            else:
                print(askAgainMessage)
    return answer


###############################################################################
# Prompt the user for the required input                                      #
###############################################################################

# Ask the user to input the required details
employeeName = LimitedInput("Employee Name: ", 20)  # Example Mark Bate
employeeNumber = LimitedInput("Employee Number: ", 10)  # Example 123456789A
weekEnding = DateInput("Week ending: ")  # Example 26/01/2018
hoursWorked = LimitedInput("Number of hours worked: ", 6, True)  # Example 42.5

# As there are only 168 hours in the week this is a check to prevent errors
# This could be modified to a lower number based on legal limit
while float(hoursWorked) > 168:
    print("The number of hours worked is too large.")
    hoursWorked = LimitedInput("Number of hours worked: ", 6, True)

standardRate = LimitedInput("Hourly Rate: ", 6, True)  # Example 10.50
overtimeMultiplier = LimitedInput("Overtime Rate: ", 3, True)  # Example 1.5
standardTaxRate = LimitedInput("Standard Tax Rate: ", 2, True)  # Example 20
overtimeTaxRate = LimitedInput("Overtime Tax Rate: ", 2, True)  # Example 50

# Cnvert input to numbers, during the input we validated these as numerals
hoursWorked = float(hoursWorked)
standardRate = float(standardRate)
overtimeMultiplier = float(overtimeMultiplier)
standardTaxRate = float(standardTaxRate)
overtimeTaxRate = float(overtimeTaxRate)


###############################################################################
# Calculate required details for ouput                                        #
###############################################################################

# Check if more than standard hours have been worked
if hoursWorked > 37.50:
    standardHours = 37.50
    overtimeHours = hoursWorked - 37.50
else:
    standardHours = hoursWorked
    overtimeHours = 0
# Complete additional calculations for pay and deductions
standardPayTotal = standardHours * standardRate
overtimeRate = overtimeMultiplier * standardRate  # As overtime is multiplier
overtimePayTotal = overtimeHours * overtimeRate
standardTaxTotal = (standardPayTotal * standardTaxRate)/100
overtimeTaxTotal = (overtimePayTotal * overtimeTaxRate)/100
payTotal = standardPayTotal + overtimePayTotal
totalDeductions = standardTaxTotal + overtimeTaxTotal
netPay = payTotal - totalDeductions


###############################################################################
# Printing out the Payslip                                                    #
###############################################################################

# Output is one big chunk of text with the variables inserted using the format
# function, this lets us define the float variables as two digit decimals.

print("""
                             P A Y S L I P
WEEK ENDING {:}
Employee: {:}
Employee Number: {:}
                     Earnings                          Deductions
                 Hours    Rate     Total
Hours (normal)   {:6.2f}  {:6.2f}  {:6.2f}    Tax @ {:02.0f}%  {:6.2f}
Hours (overtime) {:6.2f}  {:6.2f}  {:6.2f}    Tax @ {:02.0f}%  {:6.2f}

                 Total pay:         {:7.2f}
                 Total deductions:  {:7.2f}
                 Net pay:           {:7.2f}
""".format(weekEnding, employeeName, employeeNumber, standardHours,
           standardRate, standardPayTotal, standardTaxRate, standardTaxTotal,
           overtimeHours, overtimeRate, overtimePayTotal, overtimeTaxRate,
           overtimeTaxTotal, payTotal, totalDeductions, netPay))
normal
{ "blob_id": "77e985d94d3b47539f046a3a46cb1a197cef86f4", "index": 3409, "step-1": "<mask token>\n\n\ndef CheckNumber(userInput):\n \"\"\" This function returns True if userInput can be converted to a number and\n returns False if it cannot. \"\"\"\n try:\n float(userInput)\n return True\n except ValueError:\n return False\n\n\ndef DateInput(message):\n \"\"\" This function prompts the user for a date using the message variable.\n User will continue to be prompted until the format is correct.\n\n The date format is very specific in the format DD/MM/YYYYY\n This function will confirm there are the right number of characters,\n the / are in the right place, the input are numbers, the days are between\n 1 and 31, the months are between 1 and 12, and the year is between 2000\n and 3000 (roll on year 3k bug!)\n \"\"\"\n askAgainMessage = 'The date must be in the format DD/MM/YYYY'\n keepAsking = True\n while keepAsking:\n answer = input(message)\n dateCheck = answer.split(sep='/')\n if len(dateCheck) is not 3:\n print(askAgainMessage)\n else:\n day = dateCheck[0]\n month = dateCheck[1]\n year = dateCheck[2]\n if len(day) == 2 and len(month) == 2 and len(year\n ) == 4 and CheckNumber(day) and CheckNumber(month\n ) and CheckNumber(year):\n day = int(day)\n month = int(month)\n year = int(year)\n if (day > 0 and day < 32 and month > 0 and month < 13 and \n year > 2000 and year < 3000):\n keepAsking = False\n else:\n print(askAgainMessage)\n else:\n print(askAgainMessage)\n return answer\n\n\n<mask token>\n", "step-2": "def LimitedInput(message, limit, isNumber=False):\n \"\"\" Prompt user for input and continue to do so until input is valid.\n\n This function takes two required inputs, the message to display, and the\n limit of characters required. If the user enters something too long, they\n are prompted again until the input is correct.\n If the optional isNumber parameter is True, then it will also continue to\n prompt the user until a valid number is input.\n\n \"\"\"\n keepAsking = True\n while keepAsking:\n answer = input(message)\n if len(answer) > limit:\n print('The input must be', limit, 'characters or less.')\n else:\n keepAsking = False\n if isNumber is True and CheckNumber(answer) is False:\n print('The input must be a number.')\n keepAsking = True\n return answer\n\n\ndef CheckNumber(userInput):\n \"\"\" This function returns True if userInput can be converted to a number and\n returns False if it cannot. 
\"\"\"\n try:\n float(userInput)\n return True\n except ValueError:\n return False\n\n\ndef DateInput(message):\n \"\"\" This function prompts the user for a date using the message variable.\n User will continue to be prompted until the format is correct.\n\n The date format is very specific in the format DD/MM/YYYYY\n This function will confirm there are the right number of characters,\n the / are in the right place, the input are numbers, the days are between\n 1 and 31, the months are between 1 and 12, and the year is between 2000\n and 3000 (roll on year 3k bug!)\n \"\"\"\n askAgainMessage = 'The date must be in the format DD/MM/YYYY'\n keepAsking = True\n while keepAsking:\n answer = input(message)\n dateCheck = answer.split(sep='/')\n if len(dateCheck) is not 3:\n print(askAgainMessage)\n else:\n day = dateCheck[0]\n month = dateCheck[1]\n year = dateCheck[2]\n if len(day) == 2 and len(month) == 2 and len(year\n ) == 4 and CheckNumber(day) and CheckNumber(month\n ) and CheckNumber(year):\n day = int(day)\n month = int(month)\n year = int(year)\n if (day > 0 and day < 32 and month > 0 and month < 13 and \n year > 2000 and year < 3000):\n keepAsking = False\n else:\n print(askAgainMessage)\n else:\n print(askAgainMessage)\n return answer\n\n\n<mask token>\n", "step-3": "def LimitedInput(message, limit, isNumber=False):\n \"\"\" Prompt user for input and continue to do so until input is valid.\n\n This function takes two required inputs, the message to display, and the\n limit of characters required. If the user enters something too long, they\n are prompted again until the input is correct.\n If the optional isNumber parameter is True, then it will also continue to\n prompt the user until a valid number is input.\n\n \"\"\"\n keepAsking = True\n while keepAsking:\n answer = input(message)\n if len(answer) > limit:\n print('The input must be', limit, 'characters or less.')\n else:\n keepAsking = False\n if isNumber is True and CheckNumber(answer) is False:\n print('The input must be a number.')\n keepAsking = True\n return answer\n\n\ndef CheckNumber(userInput):\n \"\"\" This function returns True if userInput can be converted to a number and\n returns False if it cannot. 
\"\"\"\n try:\n float(userInput)\n return True\n except ValueError:\n return False\n\n\ndef DateInput(message):\n \"\"\" This function prompts the user for a date using the message variable.\n User will continue to be prompted until the format is correct.\n\n The date format is very specific in the format DD/MM/YYYYY\n This function will confirm there are the right number of characters,\n the / are in the right place, the input are numbers, the days are between\n 1 and 31, the months are between 1 and 12, and the year is between 2000\n and 3000 (roll on year 3k bug!)\n \"\"\"\n askAgainMessage = 'The date must be in the format DD/MM/YYYY'\n keepAsking = True\n while keepAsking:\n answer = input(message)\n dateCheck = answer.split(sep='/')\n if len(dateCheck) is not 3:\n print(askAgainMessage)\n else:\n day = dateCheck[0]\n month = dateCheck[1]\n year = dateCheck[2]\n if len(day) == 2 and len(month) == 2 and len(year\n ) == 4 and CheckNumber(day) and CheckNumber(month\n ) and CheckNumber(year):\n day = int(day)\n month = int(month)\n year = int(year)\n if (day > 0 and day < 32 and month > 0 and month < 13 and \n year > 2000 and year < 3000):\n keepAsking = False\n else:\n print(askAgainMessage)\n else:\n print(askAgainMessage)\n return answer\n\n\n<mask token>\nwhile float(hoursWorked) > 168:\n print('The number of hours worked is too large.')\n hoursWorked = LimitedInput('Number of hours worked: ', 6, True)\n<mask token>\nif hoursWorked > 37.5:\n standardHours = 37.5\n overtimeHours = hoursWorked - 37.5\nelse:\n standardHours = hoursWorked\n overtimeHours = 0\n<mask token>\nprint(\n \"\"\"\n P A Y S L I P\nWEEK ENDING {:}\nEmployee: {:}\nEmployee Number: {:}\n Earnings Deductions\n Hours Rate Total\nHours (normal) {:6.2f} {:6.2f} {:6.2f} Tax @ {:02.0f}% {:6.2f}\nHours (overtime) {:6.2f} {:6.2f} {:6.2f} Tax @ {:02.0f}% {:6.2f}\n\n Total pay: {:7.2f}\n Total deductions: {:7.2f}\n Net pay: {:7.2f}\n\"\"\"\n .format(weekEnding, employeeName, employeeNumber, standardHours,\n standardRate, standardPayTotal, standardTaxRate, standardTaxTotal,\n overtimeHours, overtimeRate, overtimePayTotal, overtimeTaxRate,\n overtimeTaxTotal, payTotal, totalDeductions, netPay))\n", "step-4": "def LimitedInput(message, limit, isNumber=False):\n \"\"\" Prompt user for input and continue to do so until input is valid.\n\n This function takes two required inputs, the message to display, and the\n limit of characters required. If the user enters something too long, they\n are prompted again until the input is correct.\n If the optional isNumber parameter is True, then it will also continue to\n prompt the user until a valid number is input.\n\n \"\"\"\n keepAsking = True\n while keepAsking:\n answer = input(message)\n if len(answer) > limit:\n print('The input must be', limit, 'characters or less.')\n else:\n keepAsking = False\n if isNumber is True and CheckNumber(answer) is False:\n print('The input must be a number.')\n keepAsking = True\n return answer\n\n\ndef CheckNumber(userInput):\n \"\"\" This function returns True if userInput can be converted to a number and\n returns False if it cannot. 
\"\"\"\n try:\n float(userInput)\n return True\n except ValueError:\n return False\n\n\ndef DateInput(message):\n \"\"\" This function prompts the user for a date using the message variable.\n User will continue to be prompted until the format is correct.\n\n The date format is very specific in the format DD/MM/YYYYY\n This function will confirm there are the right number of characters,\n the / are in the right place, the input are numbers, the days are between\n 1 and 31, the months are between 1 and 12, and the year is between 2000\n and 3000 (roll on year 3k bug!)\n \"\"\"\n askAgainMessage = 'The date must be in the format DD/MM/YYYY'\n keepAsking = True\n while keepAsking:\n answer = input(message)\n dateCheck = answer.split(sep='/')\n if len(dateCheck) is not 3:\n print(askAgainMessage)\n else:\n day = dateCheck[0]\n month = dateCheck[1]\n year = dateCheck[2]\n if len(day) == 2 and len(month) == 2 and len(year\n ) == 4 and CheckNumber(day) and CheckNumber(month\n ) and CheckNumber(year):\n day = int(day)\n month = int(month)\n year = int(year)\n if (day > 0 and day < 32 and month > 0 and month < 13 and \n year > 2000 and year < 3000):\n keepAsking = False\n else:\n print(askAgainMessage)\n else:\n print(askAgainMessage)\n return answer\n\n\nemployeeName = LimitedInput('Employee Name: ', 20)\nemployeeNumber = LimitedInput('Employee Number: ', 10)\nweekEnding = DateInput('Week ending: ')\nhoursWorked = LimitedInput('Number of hours worked: ', 6, True)\nwhile float(hoursWorked) > 168:\n print('The number of hours worked is too large.')\n hoursWorked = LimitedInput('Number of hours worked: ', 6, True)\nstandardRate = LimitedInput('Hourly Rate: ', 6, True)\novertimeMultiplier = LimitedInput('Overtime Rate: ', 3, True)\nstandardTaxRate = LimitedInput('Standard Tax Rate: ', 2, True)\novertimeTaxRate = LimitedInput('Overtime Tax Rate: ', 2, True)\nhoursWorked = float(hoursWorked)\nstandardRate = float(standardRate)\novertimeMultiplier = float(overtimeMultiplier)\nstandardTaxRate = float(standardTaxRate)\novertimeTaxRate = float(overtimeTaxRate)\nif hoursWorked > 37.5:\n standardHours = 37.5\n overtimeHours = hoursWorked - 37.5\nelse:\n standardHours = hoursWorked\n overtimeHours = 0\nstandardPayTotal = standardHours * standardRate\novertimeRate = overtimeMultiplier * standardRate\novertimePayTotal = overtimeHours * overtimeRate\nstandardTaxTotal = standardPayTotal * standardTaxRate / 100\novertimeTaxTotal = overtimePayTotal * overtimeTaxRate / 100\npayTotal = standardPayTotal + overtimePayTotal\ntotalDeductions = standardTaxTotal + overtimeTaxTotal\nnetPay = payTotal - totalDeductions\nprint(\n \"\"\"\n P A Y S L I P\nWEEK ENDING {:}\nEmployee: {:}\nEmployee Number: {:}\n Earnings Deductions\n Hours Rate Total\nHours (normal) {:6.2f} {:6.2f} {:6.2f} Tax @ {:02.0f}% {:6.2f}\nHours (overtime) {:6.2f} {:6.2f} {:6.2f} Tax @ {:02.0f}% {:6.2f}\n\n Total pay: {:7.2f}\n Total deductions: {:7.2f}\n Net pay: {:7.2f}\n\"\"\"\n .format(weekEnding, employeeName, employeeNumber, standardHours,\n standardRate, standardPayTotal, standardTaxRate, standardTaxTotal,\n overtimeHours, overtimeRate, overtimePayTotal, overtimeTaxRate,\n overtimeTaxTotal, payTotal, totalDeductions, netPay))\n", "step-5": "###############################################################################\n# Programming Essentials B8IT102 Assessment #\n# Student: Barry Sheppard ID: 10387786 #\n# Problem 1 
#\n###############################################################################\n\n\n###############################################################################\n# Functions #\n###############################################################################\n\ndef LimitedInput(message, limit, isNumber=False):\n \"\"\" Prompt user for input and continue to do so until input is valid.\n\n This function takes two required inputs, the message to display, and the\n limit of characters required. If the user enters something too long, they\n are prompted again until the input is correct.\n If the optional isNumber parameter is True, then it will also continue to\n prompt the user until a valid number is input.\n\n \"\"\"\n keepAsking = True\n while keepAsking:\n answer = input(message)\n if len(answer) > limit:\n print(\"The input must be\", limit, \"characters or less.\")\n else:\n keepAsking = False\n if isNumber is True and CheckNumber(answer) is False:\n print(\"The input must be a number.\")\n keepAsking = True\n return answer\n\n\ndef CheckNumber(userInput):\n \"\"\" This function returns True if userInput can be converted to a number and\n returns False if it cannot. \"\"\"\n try:\n float(userInput)\n return True\n except(ValueError):\n return False\n\n\ndef DateInput(message):\n \"\"\" This function prompts the user for a date using the message variable.\n User will continue to be prompted until the format is correct.\n\n The date format is very specific in the format DD/MM/YYYYY\n This function will confirm there are the right number of characters,\n the / are in the right place, the input are numbers, the days are between\n 1 and 31, the months are between 1 and 12, and the year is between 2000\n and 3000 (roll on year 3k bug!)\n \"\"\"\n askAgainMessage = \"The date must be in the format DD/MM/YYYY\"\n keepAsking = True\n while keepAsking:\n answer = input(message)\n # First we check if there are two / by splitting using / and looking\n # for 3 items in the returned list.\n dateCheck = answer.split(sep=\"/\")\n if len(dateCheck) is not 3:\n print(askAgainMessage)\n else:\n # If all is order, we can assign the 3 items to day, month, year\n day = dateCheck[0]\n month = dateCheck[1]\n year = dateCheck[2]\n # Next we check each item has the right amount of characters\n # and they can all be converted into numbers.\n if (len(day) == 2 and len(month) == 2 and len(year) == 4 and\n CheckNumber(day) and CheckNumber(month) and\n CheckNumber(year)):\n day = int(day)\n month = int(month)\n year = int(year)\n if (day > 0 and day < 32 and month > 0 and month < 13 and\n year > 2000 and year < 3000):\n keepAsking = False\n else:\n print(askAgainMessage)\n else:\n print(askAgainMessage)\n return answer\n\n\n###############################################################################\n# Prompt the user for the required input #\n###############################################################################\n\n# Ask the user to input the required details\nemployeeName = LimitedInput(\"Employee Name: \", 20) # Example Mark Bate\nemployeeNumber = LimitedInput(\"Employee Number: \", 10) # Example 123456789A\nweekEnding = DateInput(\"Week ending: \") # Example 26/01/2018\nhoursWorked = LimitedInput(\"Number of hours worked: \", 6, True) # Example 42.5\n\n# As there are only 168 hours in the week this is a check to prevent errors\n# This could be modified to a lower number based on legal limit\nwhile float(hoursWorked) > 168:\n print(\"The number of hours worked is too large.\")\n hoursWorked = 
LimitedInput(\"Number of hours worked: \", 6, True)\n\nstandardRate = LimitedInput(\"Hourly Rate: \", 6, True) # Example 10.50\novertimeMultiplier = LimitedInput(\"Overtime Rate: \", 3, True) # Example 1.5\nstandardTaxRate = LimitedInput(\"Standard Tax Rate: \", 2, True) # Example 20\novertimeTaxRate = LimitedInput(\"Overtime Tax Rate: \", 2, True) # Example 50\n\n# Cnvert input to numbers, during the input we validated these as numerals\nhoursWorked = float(hoursWorked)\nstandardRate = float(standardRate)\novertimeMultiplier = float(overtimeMultiplier)\nstandardTaxRate = float(standardTaxRate)\novertimeTaxRate = float(overtimeTaxRate)\n\n\n###############################################################################\n# Calculate required details for ouput #\n###############################################################################\n\n# Check if more than standard hours have been worked\nif hoursWorked > 37.50:\n standardHours = 37.50\n overtimeHours = hoursWorked - 37.50\nelse:\n standardHours = hoursWorked\n overtimeHours = 0\n# Complete additional calculations for pay and deductions\nstandardPayTotal = standardHours * standardRate\novertimeRate = overtimeMultiplier * standardRate # As overtime is multiplier\novertimePayTotal = overtimeHours * overtimeRate\nstandardTaxTotal = (standardPayTotal * standardTaxRate)/100\novertimeTaxTotal = (overtimePayTotal * overtimeTaxRate)/100\npayTotal = standardPayTotal + overtimePayTotal\ntotalDeductions = standardTaxTotal + overtimeTaxTotal\nnetPay = payTotal - totalDeductions\n\n\n###############################################################################\n# Printing out the Payslip #\n###############################################################################\n\n# Output is one big chunk of text with the variables inserted using the format\n# function, this lets us define the float variables as two digit decimals.\n\nprint(\"\"\"\n P A Y S L I P\nWEEK ENDING {:}\nEmployee: {:}\nEmployee Number: {:}\n Earnings Deductions\n Hours Rate Total\nHours (normal) {:6.2f} {:6.2f} {:6.2f} Tax @ {:02.0f}% {:6.2f}\nHours (overtime) {:6.2f} {:6.2f} {:6.2f} Tax @ {:02.0f}% {:6.2f}\n\n Total pay: {:7.2f}\n Total deductions: {:7.2f}\n Net pay: {:7.2f}\n\"\"\".format(weekEnding, employeeName, employeeNumber, standardHours,\n standardRate, standardPayTotal, standardTaxRate, standardTaxTotal,\n overtimeHours, overtimeRate, overtimePayTotal, overtimeTaxRate,\n overtimeTaxTotal, payTotal, totalDeductions, netPay))\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
import cv2
import numpy as np
import random


def main():
    # Load the test image as greyscale and prepare a blank colour canvas of the same size.
    img = cv2.imread('test_image.png', 0)
    res = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
    # Binarise the image, then extract contours (OpenCV 3.x findContours returns three values).
    thresh = cv2.threshold(img, 50, 255, 0)[1]
    _, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Fill each contour with a random colour (thickness -1 fills the contour).
    for cnt in contours:
        cv2.drawContours(res, [cnt], 0, (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)), -1)
    cv2.imshow('res', res)
    cv2.waitKey(0)


if __name__ == "__main__":
    main()
normal
{ "blob_id": "1babf9f27e6792d2a1c2545a1e3bcd08fefa0975", "index": 5639, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n img = cv2.imread('test_image.png', 0)\n res = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n thresh = cv2.threshold(img, 50, 255, 0)[1]\n _, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.\n CHAIN_APPROX_SIMPLE)\n for cnt in contours:\n cv2.drawContours(res, [cnt], 0, (random.randint(0, 255), random.\n randint(0, 255), random.randint(0, 255)), -1)\n cv2.imshow('res', res)\n cv2.waitKey(0)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef main():\n img = cv2.imread('test_image.png', 0)\n res = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n thresh = cv2.threshold(img, 50, 255, 0)[1]\n _, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.\n CHAIN_APPROX_SIMPLE)\n for cnt in contours:\n cv2.drawContours(res, [cnt], 0, (random.randint(0, 255), random.\n randint(0, 255), random.randint(0, 255)), -1)\n cv2.imshow('res', res)\n cv2.waitKey(0)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import cv2\nimport numpy as np\nimport random\n\n\ndef main():\n img = cv2.imread('test_image.png', 0)\n res = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n thresh = cv2.threshold(img, 50, 255, 0)[1]\n _, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.\n CHAIN_APPROX_SIMPLE)\n for cnt in contours:\n cv2.drawContours(res, [cnt], 0, (random.randint(0, 255), random.\n randint(0, 255), random.randint(0, 255)), -1)\n cv2.imshow('res', res)\n cv2.waitKey(0)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "import cv2\nimport numpy as np \nimport random\n\n\ndef main():\n img = cv2.imread('test_image.png',0)\n res = np.zeros((img.shape[0],img.shape[1],3),np.uint8)\n thresh = cv2.threshold(img, 50, 255, 0)[1]\n _, contours,_ = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n for cnt in contours:\n cv2.drawContours(res, [cnt],0,(random.randint(0,255),random.randint(0,255) ,random.randint(0,255)),-1)\n cv2.imshow('res',res)\n cv2.waitKey(0)\n \nif __name__ == \"__main__\":\n main()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Random number guessing game.
# 10 July 20
# CTI-110 P5HW1 - Random Number
# Thelma Majette

import random

randomNumber = random.randint(1, 100)


# main function
def main():
    # Create a variable to control the loop.
    keep_going = 'y'
    while keep_going == 'y':
        # Ask the user for a number between 1 and 100.
        guess = int(input('\nGuess a number between 1 and 100: '))

        # Compare the guess against the random number.
        if guess > randomNumber:
            print('\nToo high, try again.')
        elif guess < randomNumber:
            print('\nToo low, try again')
        else:
            print('\nCongratulations, you guessed the correct number!')
            keep_going = 'n'


main()
normal
{ "blob_id": "c09c02a36a64e9522cfc8c0951bd6c98f404f09c", "index": 367, "step-1": "<mask token>\n\n\ndef main():\n keep_going = 'y'\n while keep_going == 'y':\n guess = int(input('\\nGuess a number between 1 and 100: '))\n if guess > randomNumber:\n print('\\nToo high, try again.')\n elif guess < randomNumber:\n print('\\nToo low, try again')\n else:\n print('\\nCongratulations, you guessed the correct number!')\n keep_going = 'n'\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n keep_going = 'y'\n while keep_going == 'y':\n guess = int(input('\\nGuess a number between 1 and 100: '))\n if guess > randomNumber:\n print('\\nToo high, try again.')\n elif guess < randomNumber:\n print('\\nToo low, try again')\n else:\n print('\\nCongratulations, you guessed the correct number!')\n keep_going = 'n'\n\n\nmain()\n", "step-3": "<mask token>\nrandomNumber = random.randint(1, 100)\n\n\ndef main():\n keep_going = 'y'\n while keep_going == 'y':\n guess = int(input('\\nGuess a number between 1 and 100: '))\n if guess > randomNumber:\n print('\\nToo high, try again.')\n elif guess < randomNumber:\n print('\\nToo low, try again')\n else:\n print('\\nCongratulations, you guessed the correct number!')\n keep_going = 'n'\n\n\nmain()\n", "step-4": "import random\nrandomNumber = random.randint(1, 100)\n\n\ndef main():\n keep_going = 'y'\n while keep_going == 'y':\n guess = int(input('\\nGuess a number between 1 and 100: '))\n if guess > randomNumber:\n print('\\nToo high, try again.')\n elif guess < randomNumber:\n print('\\nToo low, try again')\n else:\n print('\\nCongratulations, you guessed the correct number!')\n keep_going = 'n'\n\n\nmain()\n", "step-5": "# Random number guessing game.\r\n# 10 July 20\r\n# CTI-110 P5HW1 - Random Number\r\n# Thelma Majette\r\n\r\nimport random\r\n\r\nrandomNumber = random.randint (1,100)\r\n\r\n# main function\r\ndef main():\r\n\r\n # Create a variable to control the loop.\r\n keep_going = 'y'\r\n while keep_going == 'y':\r\n\r\n # Ask user for a number ()\r\n guess = int(input('\\nGuess a number between 1 and 100: '))\r\n\r\n # Perform the selected action.\r\n if guess > randomNumber:\r\n print ('\\nToo high, try again.' )\r\n elif guess < randomNumber:\r\n print ('\\nToo low, try again' )\r\n else:\r\n print ('\\nCongratulations, you guessed the correct number!')\r\n keep_going ='n'\r\n \r\n \r\n \r\nmain () \r\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
class default_locations:
    # Default nanoTuple locations on the HEPHY /afs filesystem.
    mc_2016_data_directory = "/afs/hephy.at/data/cms06/nanoTuples/"
    mc_2016_postProcessing_directory = "stops_2016_nano_v0p23/dilep/"
    data_2016_data_directory = "/afs/hephy.at/data/cms07/nanoTuples/"
    data_2016_postProcessing_directory = "stops_2016_nano_v0p19/dilep/"

    mc_2017_data_directory = "/afs/hephy.at/data/cms06/nanoTuples/"
    mc_2017_postProcessing_directory = "stops_2017_nano_v0p23/dilep/"
    data_2017_data_directory = "/afs/hephy.at/data/cms07/nanoTuples/"
    data_2017_postProcessing_directory = "stops_2017_nano_v0p19/dilep/"

    mc_2018_data_directory = "/afs/hephy.at/data/cms06/nanoTuples/"
    mc_2018_postProcessing_directory = "stops_2018_nano_v0p23/dilep/"
    data_2018_data_directory = "/afs/hephy.at/data/cms07/nanoTuples/"
    data_2018_postProcessing_directory = "stops_2018_nano_v0p19/dilep/"

import os

# When running on the CLIP cluster, switch all data directories to /mnt/hephy.
if os.environ['HOSTNAME'].startswith('clip'):
    default_locations.mc_2016_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
    default_locations.data_2016_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
    default_locations.mc_2017_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
    default_locations.data_2017_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
    default_locations.mc_2018_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
    default_locations.data_2018_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
normal
{ "blob_id": "b6df9414f99294c7986d3eb5332d40288f059cd1", "index": 1245, "step-1": "class default_locations:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "class default_locations:\n mc_2016_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2016_postProcessing_directory = 'stops_2016_nano_v0p23/dilep/'\n data_2016_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2016_postProcessing_directory = 'stops_2016_nano_v0p19/dilep/'\n mc_2017_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2017_postProcessing_directory = 'stops_2017_nano_v0p23/dilep/'\n data_2017_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2017_postProcessing_directory = 'stops_2017_nano_v0p19/dilep/'\n mc_2018_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2018_postProcessing_directory = 'stops_2018_nano_v0p23/dilep/'\n data_2018_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2018_postProcessing_directory = 'stops_2018_nano_v0p19/dilep/'\n\n\n<mask token>\n", "step-3": "class default_locations:\n mc_2016_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2016_postProcessing_directory = 'stops_2016_nano_v0p23/dilep/'\n data_2016_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2016_postProcessing_directory = 'stops_2016_nano_v0p19/dilep/'\n mc_2017_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2017_postProcessing_directory = 'stops_2017_nano_v0p23/dilep/'\n data_2017_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2017_postProcessing_directory = 'stops_2017_nano_v0p19/dilep/'\n mc_2018_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2018_postProcessing_directory = 'stops_2018_nano_v0p23/dilep/'\n data_2018_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2018_postProcessing_directory = 'stops_2018_nano_v0p19/dilep/'\n\n\n<mask token>\nif os.environ['HOSTNAME'].startswith('clip'):\n default_locations.mc_2016_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.data_2016_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.mc_2017_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.data_2017_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.mc_2018_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.data_2018_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n", "step-4": "class default_locations:\n mc_2016_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2016_postProcessing_directory = 'stops_2016_nano_v0p23/dilep/'\n data_2016_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2016_postProcessing_directory = 'stops_2016_nano_v0p19/dilep/'\n mc_2017_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2017_postProcessing_directory = 'stops_2017_nano_v0p23/dilep/'\n data_2017_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2017_postProcessing_directory = 'stops_2017_nano_v0p19/dilep/'\n mc_2018_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2018_postProcessing_directory = 'stops_2018_nano_v0p23/dilep/'\n data_2018_data_directory 
= '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2018_postProcessing_directory = 'stops_2018_nano_v0p19/dilep/'\n\n\nimport os\nif os.environ['HOSTNAME'].startswith('clip'):\n default_locations.mc_2016_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.data_2016_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.mc_2017_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.data_2017_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.mc_2018_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.data_2018_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n", "step-5": "class default_locations:\n mc_2016_data_directory = \"/afs/hephy.at/data/cms06/nanoTuples/\" \n mc_2016_postProcessing_directory = \"stops_2016_nano_v0p23/dilep/\" \n data_2016_data_directory = \"/afs/hephy.at/data/cms07/nanoTuples/\" \n data_2016_postProcessing_directory = \"stops_2016_nano_v0p19/dilep/\" \n \n mc_2017_data_directory = \"/afs/hephy.at/data/cms06/nanoTuples/\" \n mc_2017_postProcessing_directory = \"stops_2017_nano_v0p23/dilep/\" \n data_2017_data_directory = \"/afs/hephy.at/data/cms07/nanoTuples/\" \n data_2017_postProcessing_directory = \"stops_2017_nano_v0p19/dilep/\" \n \n mc_2018_data_directory = \"/afs/hephy.at/data/cms06/nanoTuples/\" \n mc_2018_postProcessing_directory = \"stops_2018_nano_v0p23/dilep/\" \n data_2018_data_directory = \"/afs/hephy.at/data/cms07/nanoTuples/\" \n data_2018_postProcessing_directory = \"stops_2018_nano_v0p19/dilep/\"\n\nimport os\nif os.environ['HOSTNAME'].startswith('clip'):\n default_locations.mc_2016_data_directory = \"/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/\"\n default_locations.data_2016_data_directory = \"/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/\"\n default_locations.mc_2017_data_directory = \"/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/\"\n default_locations.data_2017_data_directory = \"/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/\"\n default_locations.mc_2018_data_directory = \"/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/\"\n default_locations.data_2018_data_directory = \"/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/\"\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]